From b3f7ec099596f2979e1e14ffa73d406eafb07908 Mon Sep 17 00:00:00 2001
From: Jonathan Hsu
Date: Wed, 15 Dec 2021 13:46:22 +0800
Subject: [PATCH] [ALPS06428859] scsi: ufs: Add HPB & TW support

Add the HPB (Host Performance Booster) and TW (Turbo Write) features,
together with the Kconfig options that control them.

MTK-Commit-Id: 2d5afd5fdf170e4314e14c6702451c7b10eebb3d

Signed-off-by: Jonathan Hsu
Signed-off-by: Qilin Tan
CR-Id: ALPS06428859
Feature: UFS (Universal Flash Storage)
Change-Id: I01de24bbeb62fd036ce24522b700aec008906043
---
 drivers/scsi/ufs/Kconfig            |   24 +
 drivers/scsi/ufs/Makefile           |    4 +
 drivers/scsi/ufs/ufs-mediatek-dbg.c |    5 +-
 drivers/scsi/ufs/ufs-sysfs.c        |    2 +-
 drivers/scsi/ufs/ufs.h              |   78 +-
 drivers/scsi/ufs/ufsfeature.c       |  700 +++++
 drivers/scsi/ufs/ufsfeature.h       |  180 ++
 drivers/scsi/ufs/ufshcd.c           |  180 +-
 drivers/scsi/ufs/ufshcd.h           |   41 +
 drivers/scsi/ufs/ufshpb.c           | 3762 +++++++++++++++++++++++++++
 drivers/scsi/ufs/ufshpb.h           |  292 +++
 drivers/scsi/ufs/ufshpb_skh.c       | 3266 +++++++++++++++++++++++
 drivers/scsi/ufs/ufshpb_skh.h       |  482 ++++
 drivers/scsi/ufs/ufstw.c            | 1714 ++++++++++++
 drivers/scsi/ufs/ufstw.h            |  147 ++
 include/linux/blkdev.h              |    3 +
 include/scsi/ufs/ufs-mtk-ioctl.h    |    2 +-
 17 files changed, 10864 insertions(+), 18 deletions(-)
 create mode 100644 drivers/scsi/ufs/ufsfeature.c
 create mode 100644 drivers/scsi/ufs/ufsfeature.h
 create mode 100644 drivers/scsi/ufs/ufshpb.c
 create mode 100644 drivers/scsi/ufs/ufshpb.h
 create mode 100644 drivers/scsi/ufs/ufshpb_skh.c
 create mode 100644 drivers/scsi/ufs/ufshpb_skh.h
 create mode 100644 drivers/scsi/ufs/ufstw.c
 create mode 100644 drivers/scsi/ufs/ufstw.h

diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 21420a00833e..98d8e61c8c6b 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -145,6 +145,30 @@ config SCSI_UFS_BSG
	  Select this if you need a bsg device node for your UFS controller.
	  If unsure, say N.

+config SCSI_UFS_FEATURE
+	bool "UFS feature support"
+	depends on SCSI_UFSHCD
+	---help---
+	  Enable UFS features such as HPB (Host Performance Booster) and
+	  TW (Turbo Write).
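+
+# Illustrative configuration sketch only (not part of any shipped
+# defconfig): enabling HPB and TW requires SCSI_UFS_FEATURE, while
+# SCSI_SKHPB below is an independent option for the SK hynix HPB driver.
+#
+#   CONFIG_SCSI_UFS_FEATURE=y
+#   CONFIG_SCSI_UFS_HPB=y
+#   CONFIG_SCSI_UFS_TW=y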
+
+config SCSI_UFS_HPB
+	bool "UFSHPB"
+	depends on SCSI_UFSHCD && SCSI_UFS_FEATURE
+	---help---
+	  Enable the UFS HPB (Host Performance Booster) feature.
+
+config SCSI_UFS_TW
+	bool "UFSTW"
+	depends on SCSI_UFSHCD && SCSI_UFS_FEATURE
+	---help---
+	  Enable the UFS TW (Turbo Write) feature.
+
+config SCSI_SKHPB
+	bool "Activate HPB Host-aware Performance Booster"
+	depends on SCSI_UFSHCD
+	---help---
+	  Activate or deactivate the SK hynix HPB (SKHPB) driver.
+
 config SCSI_UFS_CRYPTO
	bool "UFS Crypto Engine Support"
	depends on SCSI_UFSHCD && BLK_INLINE_ENCRYPTION
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index eaa9e642d697..12964dc2586f 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -29,6 +29,10 @@ obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
 obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
 ufshcd-core-objs := ufshcd.o ufs-sysfs.o
 ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
+obj-$(CONFIG_SCSI_UFS_FEATURE) += ufsfeature.o
+obj-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o
+obj-$(CONFIG_SCSI_UFS_TW) += ufstw.o
+obj-$(CONFIG_SCSI_SKHPB) += ufshpb_skh.o
 obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
 obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
 obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
diff --git a/drivers/scsi/ufs/ufs-mediatek-dbg.c b/drivers/scsi/ufs/ufs-mediatek-dbg.c
index db4dafc36c50..d463dd3395e2 100644
--- a/drivers/scsi/ufs/ufs-mediatek-dbg.c
+++ b/drivers/scsi/ufs/ufs-mediatek-dbg.c
@@ -104,9 +104,10 @@ void ufsdbg_print_info(char **buff, unsigned long *size, struct seq_file *m)

	/* Device info */
	SPREAD_PRINTF(buff, size, m,
-		      "Device vendor=0x%X, model=%s\n",
+		      "Device vendor=0x%X, model=%s, ufs version=0x%X\n",
		      hba->dev_info.wmanufacturerid,
-		      hba->dev_info.model);
+		      hba->dev_info.model,
+		      hba->dev_info.wspecversion);

	/* Error history */
	ufshcd_print_all_evt_hist(hba, m, buff, size);
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index dbdf8b01abed..1760460e8f6c 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -713,7 +713,7 @@ static ssize_t _pname##_show(struct device *dev,			\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	struct ufs_hba *hba = shost_priv(sdev->host);			\
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);			\
-	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))		\
+	if (!ufs_is_valid_unit_desc_lun(lun))				\
		return -EINVAL;						\
	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
		lun, _duname##_DESC_PARAM##_puname, buf, _size);	\
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index e69dc90b172d..f2c5d25edb2f 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -40,6 +40,7 @@
 #include
 #include
+#define MAX_CDB_SIZE 16
 #define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
 #define QUERY_DESC_MAX_SIZE 255
 #define QUERY_DESC_MIN_SIZE 2
@@ -63,6 +64,7 @@
 #define UFS_UPIU_MAX_UNIT_NUM_ID	0x7F
 #define UFS_MAX_LUNS		(SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
 #define UFS_UPIU_WLUN_ID	(1 << 7)
+#define UFS_UPIU_MAX_GENERAL_LUN	8

 /* Well known logical unit id in LUN field of UPIU */
 enum {
@@ -140,6 +142,14 @@ enum flag_idn {
	QUERY_FLAG_IDN_BUSY_RTC				= 0x09,
	QUERY_FLAG_IDN_RESERVED3			= 0x0A,
	QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE	= 0x0B,
+#if defined(CONFIG_SCSI_UFS_TW)
+	QUERY_FLAG_IDN_TW_EN				= 0x0E,
+	QUERY_FLAG_IDN_TW_BUF_FLUSH_EN			= 0x0F,
+	QUERY_FLAG_IDN_TW_FLUSH_DURING_HIBERN		= 0x10,
+#endif
+#if defined(CONFIG_SCSI_SKHPB)
+	QUERY_FLAG_IDN_HPB_RESET			= 0x11,	/* JEDEC version */
+#endif
 };

 /* Attribute idn for Query requests */
@@ -168,6 +178,15 @@ enum attr_idn {
	QUERY_ATTR_IDN_PSA_STATE		= 0x15,
QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16, QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17, +#if defined(CONFIG_SCSI_UFS_TW) + QUERY_ATTR_IDN_TW_FLUSH_STATUS = 0x1C, + QUERY_ATTR_IDN_TW_BUF_SIZE = 0x1D, + QUERY_ATTR_IDN_TW_BUF_LIFETIME_EST = 0x1E, + QUERY_ATTR_CUR_TW_BUF_SIZE = 0x1F, +#endif +#if defined(CONFIG_SCSI_UFS_FEATURE) + QUERY_ATTR_IDN_SUP_VENDOR_OPTIONS = 0xFF, +#endif }; /* Descriptor idn for Query requests */ @@ -219,6 +238,14 @@ enum unit_desc_param { UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18, UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20, UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22, +#if defined(CONFIG_SCSI_UFS_HPB) || defined(CONFIG_SCSI_SKHPB) + UNIT_DESC_HPB_LU_MAX_ACTIVE_REGIONS = 0x23, + UNIT_DESC_HPB_LU_PIN_REGION_START_OFFSET = 0x25, + UNIT_DESC_HPB_LU_NUM_PIN_REGIONS = 0x27, +#endif +#if defined(CONFIG_SCSI_UFS_TW) + UNIT_DESC_TW_LU_MAX_BUF_SIZE = 0x29, +#endif }; /* Device descriptor parameters offsets in bytes*/ @@ -258,6 +285,18 @@ enum device_desc_param { DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25, DEVICE_DESC_PARAM_PSA_TMT = 0x29, DEVICE_DESC_PARAM_PRDCT_REV = 0x2A, +#if defined(CONFIG_SCSI_UFS_HPB) || defined(CONFIG_SCSI_SKHPB) + DEVICE_DESC_PARAM_HPB_VER = 0x40, + DEVICE_DESC_PARAM_HPB_CONTROL = 0x42, /* JEDEC version */ +#endif +#if defined(CONFIG_SCSI_UFS_FEATURE) + DEVICE_DESC_PARAM_EX_FEAT_SUP = 0x4F, +#endif +#if defined(CONFIG_SCSI_UFS_TW) + DEVICE_DESC_PARAM_TW_RETURN_TO_USER = 0x53, + DEVICE_DESC_PARAM_TW_BUF_TYPE = 0x54, + DEVICE_DESC_PARAM_NUM_SHARED_WB_BUF_AU = 0x55, /* JEDEC version */ +#endif }; /* Interconnect descriptor parameters offsets in bytes*/ @@ -302,6 +341,20 @@ enum geometry_desc_param { GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E, GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42, GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44, +#if defined(CONFIG_SCSI_UFS_HPB) || defined(CONFIG_SCSI_SKHPB) + GEOMETRY_DESC_HPB_REGION_SIZE = 0x48, + GEOMETRY_DESC_HPB_NUMBER_LU = 0x49, + GEOMETRY_DESC_HPB_SUBREGION_SIZE = 0x4A, + GEOMETRY_DESC_HPB_DEVICE_MAX_ACTIVE_REGIONS = 0x4B, +#endif +#if defined(CONFIG_SCSI_UFS_TW) + GEOMETRY_DESC_TW_MAX_SIZE = 0x4F, + GEOMETRY_DESC_TW_NUMBER_LU = 0x53, + GEOMETRY_DESC_TW_CAP_ADJ_FAC = 0x54, + GEOMETRY_DESC_TW_SUPPORT_USER_REDUCTION_TYPES = 0x55, + GEOMETRY_DESC_TW_SUPPORT_BUF_TYPE = 0x56, + GEOMETRY_DESC_TW_GROUP_NUM_CAP = 0x57, +#endif }; /* Health descriptor parameters offsets in bytes*/ @@ -354,8 +407,20 @@ enum power_desc_param_offset { enum { MASK_EE_STATUS = 0xFFFF, MASK_EE_URGENT_BKOPS = (1 << 2), +#if defined(CONFIG_SCSI_UFS_TW) + MASK_EE_TW = (1 << 5), +#endif }; +#if defined(CONFIG_SCSI_UFS_TW) +/* TW buffer type */ +enum { + WB_LU_DEDICATED_BUFFER_TYPE = 0x0, + WB_SINGLE_SHARE_BUFFER_TYPE = 0x1 +}; +#endif + + /* Background operation status */ enum bkops_status { BKOPS_STATUS_NO_OP = 0x0, @@ -429,6 +494,9 @@ enum { MASK_RSP_EXCEPTION_EVENT = 0x10000, MASK_TM_SERVICE_RESP = 0xFF, MASK_TM_FUNC = 0xFF, +#if defined(CONFIG_SCSI_UFS_HPB) || defined(CONFIG_SCSI_SKHPB) + MASK_RSP_UPIU_HPB_UPDATE_ALERT = 0x20000, /* JEDEC version */ +#endif }; /* Task management service response */ @@ -575,15 +643,9 @@ struct ufs_dev_info { * @lun: LU number to check * @return: true if the lun has a matching unit descriptor, false otherwise */ -static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, - u8 lun) +static inline bool ufs_is_valid_unit_desc_lun(u8 lun) { - if (!dev_info || !dev_info->max_lu_supported) { - pr_err("Max General LU supported by UFS isn't initialized\n"); - return false; - } - - return lun == UFS_UPIU_RPMB_WLUN 
|| (lun < dev_info->max_lu_supported); + return (lun == UFS_UPIU_RPMB_WLUN || (lun < UFS_UPIU_MAX_GENERAL_LUN)); } #endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufsfeature.c b/drivers/scsi/ufs/ufsfeature.c new file mode 100644 index 000000000000..a01bebb385db --- /dev/null +++ b/drivers/scsi/ufs/ufsfeature.c @@ -0,0 +1,700 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2017-2018 Samsung Electronics Co., Ltd. + */ + + +#include "ufsfeature.h" +#include "ufshcd.h" +#include "ufs_quirks.h" + +#if defined(CONFIG_SCSI_UFS_HPB) +#include "ufshpb.h" +#endif + +#define QUERY_REQ_TIMEOUT 1500 /* msec */ + +static inline void ufsf_init_query(struct ufs_hba *hba, + struct ufs_query_req **request, + struct ufs_query_res **response, + enum query_opcode opcode, u8 idn, + u8 index, u8 selector) +{ + *request = &hba->dev_cmd.query.request; + *response = &hba->dev_cmd.query.response; + memset(*request, 0, sizeof(struct ufs_query_req)); + memset(*response, 0, sizeof(struct ufs_query_res)); + (*request)->upiu_req.opcode = opcode; + (*request)->upiu_req.idn = idn; + (*request)->upiu_req.index = index; + (*request)->upiu_req.selector = selector; +} + +/* + * ufs feature common functions. + */ +int ufsf_query_flag(struct ufs_hba *hba, enum query_opcode opcode, + enum flag_idn idn, u8 index, bool *flag_res) +{ + struct ufs_query_req *request = NULL; + struct ufs_query_res *response = NULL; + u8 selector; + int err; + + BUG_ON(!hba); + + ufshcd_hold(hba, false); + mutex_lock(&hba->dev_cmd.lock); + + if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SAMSUNG || + hba->dev_info.wmanufacturerid == UFS_VENDOR_MICRON) + selector = UFSFEATURE_SELECTOR; + else + selector = 0; + + /* + * Init the query response and request parameters + */ + ufsf_init_query(hba, &request, &response, opcode, idn, index, + selector); + + switch (opcode) { + case UPIU_QUERY_OPCODE_SET_FLAG: + case UPIU_QUERY_OPCODE_CLEAR_FLAG: + case UPIU_QUERY_OPCODE_TOGGLE_FLAG: + request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; + break; + case UPIU_QUERY_OPCODE_READ_FLAG: + request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; + if (!flag_res) { + /* No dummy reads */ + dev_err(hba->dev, "%s: Invalid argument for read request\n", + __func__); + err = -EINVAL; + goto out_unlock; + } + break; + default: + dev_err(hba->dev, + "%s: Expected query flag opcode but got = %d\n", + __func__, opcode); + err = -EINVAL; + goto out_unlock; + } + + /* Send query request */ + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); + if (err) { + dev_err(hba->dev, + "%s: Sending flag query for idn %d failed, err = %d\n", + __func__, idn, err); + goto out_unlock; + } + + if (flag_res) + *flag_res = (be32_to_cpu(response->upiu_res.value) & + MASK_QUERY_UPIU_FLAG_LOC) & 0x1; + +out_unlock: + mutex_unlock(&hba->dev_cmd.lock); + ufshcd_release(hba); + return err; +} + +int ufsf_query_flag_retry(struct ufs_hba *hba, enum query_opcode opcode, + enum flag_idn idn, u8 idx, bool *flag_res) +{ + int ret; + int retries; + + for (retries = 0; retries < UFSF_QUERY_REQ_RETRIES; retries++) { + ret = ufsf_query_flag(hba, opcode, idn, idx, flag_res); + if (ret) + dev_dbg(hba->dev, + "%s: failed with error %d, retries %d\n", + __func__, ret, retries); + else + break; + } + if (ret) + dev_err(hba->dev, + "%s: query flag, opcode %d, idn %d, failed with error %d after %d retires\n", + __func__, opcode, idn, ret, retries); + return ret; +} + +int ufsf_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode, + enum attr_idn idn, 
u8 idx, u32 *attr_val) +{ + int ret; + int retries; + u8 selector; + + if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SAMSUNG || + hba->dev_info.wmanufacturerid == UFS_VENDOR_MICRON) + selector = UFSFEATURE_SELECTOR; + else + selector = 0; + + for (retries = 0; retries < UFSF_QUERY_REQ_RETRIES; retries++) { + ret = ufshcd_query_attr(hba, opcode, idn, idx, + selector, attr_val); + if (ret) + dev_dbg(hba->dev, + "%s: failed with error %d, retries %d\n", + __func__, ret, retries); + else + break; + } + if (ret) + dev_err(hba->dev, + "%s: query attr, opcode %d, idn %d, failed with error %d after %d retires\n", + __func__, opcode, idn, ret, retries); + return ret; +} + +static int ufsf_read_desc(struct ufs_hba *hba, u8 desc_id, u8 desc_index, + u8 selector, u8 *desc_buf, u32 size) +{ + int err = 0; + + pm_runtime_get_sync(hba->dev); + + err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, + desc_id, desc_index, + selector, + desc_buf, &size); + if (err) + ERR_MSG("reading Device Desc failed. err = %d", err); + + pm_runtime_put_sync(hba->dev); + + return err; +} + +static int ufsf_read_dev_desc(struct ufsf_feature *ufsf, u8 selector) +{ + u8 desc_buf[UFSF_QUERY_DESC_DEVICE_MAX_SIZE] = {0}; + int ret; + + ret = ufsf_read_desc(ufsf->hba, QUERY_DESC_IDN_DEVICE, 0, selector, + desc_buf, UFSF_QUERY_DESC_DEVICE_MAX_SIZE); + if (ret) + return ret; + + ufsf->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU]; + INIT_INFO("device lu count %d", ufsf->num_lu); + + INIT_INFO("sel=%u length=%u(0x%x) bSupport=0x%.2x, extend=0x%.2x_%.2x", + selector, desc_buf[DEVICE_DESC_PARAM_LEN], + desc_buf[DEVICE_DESC_PARAM_LEN], + desc_buf[DEVICE_DESC_PARAM_UFS_FEAT], + desc_buf[DEVICE_DESC_PARAM_EX_FEAT_SUP+2], + desc_buf[DEVICE_DESC_PARAM_EX_FEAT_SUP+3]); + +#if defined(CONFIG_SCSI_UFS_HPB) + ufshpb_get_dev_info(&ufsf->hpb_dev_info, desc_buf); +#endif + +#if defined(CONFIG_SCSI_UFS_TW) + ufstw_get_dev_info(&ufsf->tw_dev_info, desc_buf); +#endif + return 0; +} + +static int ufsf_read_geo_desc(struct ufsf_feature *ufsf, u8 selector) +{ + u8 geo_buf[UFSF_QUERY_DESC_GEOMETRY_MAX_SIZE]; + int ret; + + ret = ufsf_read_desc(ufsf->hba, QUERY_DESC_IDN_GEOMETRY, 0, selector, + geo_buf, UFSF_QUERY_DESC_GEOMETRY_MAX_SIZE); + if (ret) + return ret; + +#if defined(CONFIG_SCSI_UFS_HPB) + if (ufsf->hpb_dev_info.hpb_device) + ufshpb_get_geo_info(&ufsf->hpb_dev_info, geo_buf); +#endif + +#if defined(CONFIG_SCSI_UFS_TW) + if (ufsf->tw_dev_info.tw_device) + ufstw_get_geo_info(&ufsf->tw_dev_info, geo_buf); +#endif + return 0; +} + +static int ufsf_read_unit_desc(struct ufsf_feature *ufsf, + unsigned int lun, u8 selector) +{ + u8 unit_buf[UFSF_QUERY_DESC_UNIT_MAX_SIZE]; + int lu_enable, ret = 0; + + ret = ufsf_read_desc(ufsf->hba, QUERY_DESC_IDN_UNIT, lun, selector, + unit_buf, UFSF_QUERY_DESC_UNIT_MAX_SIZE); + if (ret) { + ERR_MSG("read unit desc failed. 
ret %d", ret); + goto out; + } + + lu_enable = unit_buf[UNIT_DESC_PARAM_LU_ENABLE]; + if (!lu_enable) + return 0; + +#if defined(CONFIG_SCSI_UFS_HPB) + if (ufsf->hpb_dev_info.hpb_device) { + ret = ufshpb_get_lu_info(ufsf, lun, unit_buf); + if (ret == -ENOMEM) + goto out; + } +#endif + +#if defined(CONFIG_SCSI_UFS_TW) + if (ufsf->tw_dev_info.tw_device) { + ret = ufstw_get_lu_info(ufsf, lun, unit_buf); + if (ret == -ENOMEM) + goto out; + } +#endif +out: + return ret; +} + +void ufsf_device_check(struct ufs_hba *hba) +{ + struct ufsf_feature *ufsf = &hba->ufsf; + int ret; + unsigned int lun; + u8 selector = 0; + + ufsf->slave_conf_cnt = 0; + + ufsf->hba = hba; + + if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SAMSUNG || + hba->dev_info.wmanufacturerid == UFS_VENDOR_MICRON) + selector = UFSFEATURE_SELECTOR; + + ret = ufsf_read_dev_desc(ufsf, selector); + if (ret) + return; + + ret = ufsf_read_geo_desc(ufsf, selector); + if (ret) + return; + + seq_scan_lu(lun) { + ret = ufsf_read_unit_desc(ufsf, lun, selector); + if (ret == -ENOMEM) + goto out_free_mem; + } + + return; +out_free_mem: +#if defined(CONFIG_SCSI_UFS_HPB) + seq_scan_lu(lun) + kfree(ufsf->ufshpb_lup[lun]); + + /* don't call init handler */ + ufsf->ufshpb_state = HPB_NOT_SUPPORTED; +#endif +#if defined(CONFIG_SCSI_UFS_TW) + seq_scan_lu(lun) + kfree(ufsf->tw_lup[lun]); + + ufsf->tw_dev_info.tw_device = false; + atomic_set(&ufsf->tw_state, TW_NOT_SUPPORTED); +#endif + return; +} + +static void ufsf_print_query_buf(unsigned char *field, int size) +{ + unsigned char buf[255]; + unsigned int count = 0; + int i; + + count += snprintf(buf, 8, "(0x00):"); + + for (i = 0; i < size; i++) { + count += snprintf(buf + count, 4, " %.2X", field[i]); + + if ((i + 1) % 16 == 0) { + buf[count] = '\n'; + buf[count + 1] = '\0'; + printk(buf); + count = 0; + count += snprintf(buf, 8, "(0x%.2X):", i + 1); + } else if ((i + 1) % 4 == 0) + count += snprintf(buf + count, 3, " :"); + } + buf[count] = '\n'; + buf[count + 1] = '\0'; + printk(buf); +} + +inline int ufsf_check_query(__u32 opcode) +{ + return (opcode & 0xffff0000) >> 16 == UFSFEATURE_QUERY_OPCODE; +} + +int ufsf_query_ioctl(struct ufsf_feature *ufsf, unsigned int lun, + void __user *buffer, + struct ufs_ioctl_query_data_hpb *ioctl_data, u8 selector) +{ + unsigned char *kernel_buf; + int opcode; + int err = 0; + int index = 0; + int length = 0; + int buf_len = 0; + + opcode = ioctl_data->opcode & 0xffff; + + INFO_MSG("op %u idn %u sel %u size %u(0x%X)", opcode, ioctl_data->idn, + selector, ioctl_data->buf_size, ioctl_data->buf_size); + + buf_len = (ioctl_data->idn == QUERY_DESC_IDN_STRING) ? 
+ IOCTL_DEV_CTX_MAX_SIZE : QUERY_DESC_MAX_SIZE; + if (ioctl_data->buf_size > buf_len) { + err = -EINVAL; + goto out; + } + + kernel_buf = kzalloc(buf_len, GFP_KERNEL); + if (!kernel_buf) { + err = -ENOMEM; + goto out; + } + + switch (opcode) { + case UPIU_QUERY_OPCODE_WRITE_DESC: + err = copy_from_user(kernel_buf, buffer + + sizeof(struct ufs_ioctl_query_data_hpb), + ioctl_data->buf_size); + INFO_MSG("buf size %d", ioctl_data->buf_size); + ufsf_print_query_buf(kernel_buf, ioctl_data->buf_size); + if (err) + goto out_release_mem; + break; + + case UPIU_QUERY_OPCODE_READ_DESC: + switch (ioctl_data->idn) { + case QUERY_DESC_IDN_UNIT: + if (!ufs_is_valid_unit_desc_lun(lun)) { + ERR_MSG("No unit descriptor for lun 0x%x", lun); + err = -EINVAL; + goto out_release_mem; + } + index = lun; + INFO_MSG("read lu desc lun: %d", index); + break; + + case QUERY_DESC_IDN_STRING: +#if defined(CONFIG_SCSI_UFS_HPB) + if (!ufs_is_valid_unit_desc_lun(lun)) { + ERR_MSG("No unit descriptor for lun 0x%x", lun); + err = -EINVAL; + goto out_release_mem; + } + err = ufshpb_issue_req_dev_ctx(ufsf->ufshpb_lup[lun], + kernel_buf, + ioctl_data->buf_size); + if (err < 0) + goto out_release_mem; + + goto copy_buffer; +#endif + case QUERY_DESC_IDN_DEVICE: + case QUERY_DESC_IDN_GEOMETRY: + case QUERY_DESC_IDN_CONFIGURATION: + break; + + default: + ERR_MSG("invalid idn %d", ioctl_data->idn); + err = -EINVAL; + goto out_release_mem; + } + break; + default: + ERR_MSG("invalid opcode %d", opcode); + err = -EINVAL; + goto out_release_mem; + } + + length = ioctl_data->buf_size; + + err = ufshcd_query_descriptor_retry(ufsf->hba, opcode, ioctl_data->idn, + index, selector, kernel_buf, + &length); + if (err) + goto out_release_mem; + +#if defined(CONFIG_SCSI_UFS_HPB) +copy_buffer: +#endif + if (opcode == UPIU_QUERY_OPCODE_READ_DESC) { + err = copy_to_user(buffer, ioctl_data, + sizeof(struct ufs_ioctl_query_data_hpb)); + if (err) + ERR_MSG("Failed copying back to user."); + + err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data_hpb), + kernel_buf, ioctl_data->buf_size); + if (err) + ERR_MSG("Fail: copy rsp_buffer to user space."); + } +out_release_mem: + kfree(kernel_buf); +out: + return err; +} + +inline bool ufsf_is_valid_lun(int lun) +{ + return lun < UFS_UPIU_MAX_GENERAL_LUN; +} + +inline int ufsf_get_ee_status(struct ufs_hba *hba, u32 *status) +{ + return ufsf_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_EE_STATUS, 0, status); +} + +/* + * Wrapper functions for ufshpb. 
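+ * Each wrapper checks ufsf->ufshpb_state (typically for HPB_PRESENT)
+ * before calling into the HPB core; when CONFIG_SCSI_UFS_HPB is
+ * disabled, the #else branch below provides empty stubs so the callers
+ * in ufshcd.c need no extra #ifdefs.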
+ */ +#if defined(CONFIG_SCSI_UFS_HPB) +inline int ufsf_hpb_prepare_pre_req(struct ufsf_feature *ufsf, + struct scsi_cmnd *cmd, int lun) +{ + if (ufsf->ufshpb_state == HPB_PRESENT) + return ufshpb_prepare_pre_req(ufsf, cmd, lun); + return -ENODEV; +} + +inline int ufsf_hpb_prepare_add_lrbp(struct ufsf_feature *ufsf, int add_tag) +{ + if (ufsf->ufshpb_state == HPB_PRESENT) + return ufshpb_prepare_add_lrbp(ufsf, add_tag); + return -ENODEV; +} + +inline void ufsf_hpb_end_pre_req(struct ufsf_feature *ufsf, + struct request *req) +{ + ufshpb_end_pre_req(ufsf, req); +} + +inline void ufsf_hpb_change_lun(struct ufsf_feature *ufsf, + struct ufshcd_lrb *lrbp) +{ + int ctx_lba = LI_EN_32(lrbp->cmd->cmnd + 2); + + if (ufsf->ufshpb_state == HPB_PRESENT && + ufsf->issue_ioctl == true && ctx_lba == READ10_DEBUG_LBA) { + lrbp->lun = READ10_DEBUG_LUN; + INFO_MSG("lun 0x%X lba 0x%X", lrbp->lun, ctx_lba); + } +} + +inline void ufsf_hpb_prep_fn(struct ufsf_feature *ufsf, + struct ufshcd_lrb *lrbp) +{ + if (ufsf->ufshpb_state == HPB_PRESENT + && ufsf->issue_ioctl == false) + ufshpb_prep_fn(ufsf, lrbp); +} + +inline void ufsf_hpb_noti_rb(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp) +{ + if (ufsf->ufshpb_state == HPB_PRESENT) + ufshpb_rsp_upiu(ufsf, lrbp); +} + +inline void ufsf_hpb_reset_lu(struct ufsf_feature *ufsf) +{ + ufsf->ufshpb_state = HPB_RESET; + schedule_work(&ufsf->ufshpb_reset_work); +} + +inline void ufsf_hpb_reset_host(struct ufsf_feature *ufsf) +{ + if (ufsf->ufshpb_state == HPB_PRESENT) + ufsf->ufshpb_state = HPB_RESET; +} + +inline void ufsf_hpb_init(struct ufsf_feature *ufsf) +{ + if (ufsf->hpb_dev_info.hpb_device && + ufsf->ufshpb_state == HPB_NEED_INIT) { + INIT_WORK(&ufsf->ufshpb_init_work, ufshpb_init_handler); + schedule_work(&ufsf->ufshpb_init_work); + } +} + +inline void ufsf_hpb_reset(struct ufsf_feature *ufsf) +{ + if (ufsf->hpb_dev_info.hpb_device && + ufsf->ufshpb_state == HPB_RESET) + schedule_work(&ufsf->ufshpb_reset_work); +} + +inline void ufsf_hpb_suspend(struct ufsf_feature *ufsf) +{ + if (ufsf->ufshpb_state == HPB_PRESENT) + ufshpb_suspend(ufsf); +} + +inline void ufsf_hpb_resume(struct ufsf_feature *ufsf) +{ + if (ufsf->ufshpb_state == HPB_PRESENT) + ufshpb_resume(ufsf); +} + +inline void ufsf_hpb_release(struct ufsf_feature *ufsf) +{ + ufshpb_release(ufsf, HPB_NEED_INIT); +} + +inline void ufsf_hpb_set_init_state(struct ufsf_feature *ufsf) +{ + ufsf->ufshpb_state = HPB_NEED_INIT; +} +#else +inline int ufsf_hpb_prepare_pre_req(struct ufsf_feature *ufsf, + struct scsi_cmnd *cmd, int lun) +{ + return 0; +} + +inline int ufsf_hpb_prepare_add_lrbp(struct ufsf_feature *ufsf, int add_tag) +{ + return 0; +} + +inline void ufsf_hpb_end_pre_req(struct ufsf_feature *ufsf, + struct request *req) {} +inline void ufsf_hpb_change_lun(struct ufsf_feature *ufsf, + struct ufshcd_lrb *lrbp) {} +inline void ufsf_hpb_prep_fn(struct ufsf_feature *ufsf, + struct ufshcd_lrb *lrbp) {} +inline void ufsf_hpb_noti_rb(struct ufsf_feature *ufsf, + struct ufshcd_lrb *lrbp) {} +inline void ufsf_hpb_reset_lu(struct ufsf_feature *ufsf) {} +inline void ufsf_hpb_reset_host(struct ufsf_feature *ufsf) {} +inline void ufsf_hpb_init(struct ufsf_feature *ufsf) {} +inline void ufsf_hpb_reset(struct ufsf_feature *ufsf) {} +inline void ufsf_hpb_suspend(struct ufsf_feature *ufsf) {} +inline void ufsf_hpb_resume(struct ufsf_feature *ufsf) {} +inline void ufsf_hpb_release(struct ufsf_feature *ufsf) {} +inline void ufsf_hpb_set_init_state(struct ufsf_feature *ufsf) {} +#endif + +/* + * Wrapper functions for 
ufstw. + */ + +#if defined(CONFIG_SCSI_UFS_TW) +inline void ufsf_tw_prep_fn(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp) +{ + ufstw_prep_fn(ufsf, lrbp); +} + +inline void ufsf_tw_init(struct ufsf_feature *ufsf) +{ + INIT_INFO("init start.. tw_state %d\n", + atomic_read(&ufsf->tw_state)); + + if (ufsf->tw_dev_info.tw_device && + atomic_read(&ufsf->tw_state) == TW_NEED_INIT) { + INIT_WORK(&ufsf->tw_init_work, ufstw_init_work_fn); + schedule_work(&ufsf->tw_init_work); + } +} + +inline void ufsf_tw_reset(struct ufsf_feature *ufsf) +{ + INIT_INFO("reset start.. tw_state %d\n", + atomic_read(&ufsf->tw_state)); + + if (ufsf->tw_dev_info.tw_device && + atomic_read(&ufsf->tw_state) == TW_RESET) + schedule_work(&ufsf->tw_reset_work); +} + +inline void ufsf_tw_suspend(struct ufsf_feature *ufsf) +{ + if (atomic_read(&ufsf->tw_state) == TW_PRESENT) + ufstw_suspend(ufsf); +} + +inline void ufsf_tw_resume(struct ufsf_feature *ufsf) +{ + if (atomic_read(&ufsf->tw_state) == TW_PRESENT) + ufstw_resume(ufsf); +} + +inline void ufsf_tw_release(struct ufsf_feature *ufsf) +{ + ufstw_release(&ufsf->tw_kref); +} + +inline void ufsf_tw_set_init_state(struct ufsf_feature *ufsf) +{ + atomic_set(&ufsf->tw_state, TW_NEED_INIT); +} + +inline void ufsf_tw_reset_lu(struct ufsf_feature *ufsf) +{ + INFO_MSG("run reset_lu.. tw_state(%d) -> TW_RESET", + atomic_read(&ufsf->tw_state)); + atomic_set(&ufsf->tw_state, TW_RESET); + if (ufsf->tw_dev_info.tw_device) + schedule_work(&ufsf->tw_reset_work); +} + +inline void ufsf_tw_reset_host(struct ufsf_feature *ufsf) +{ + INFO_MSG("run reset_host.. tw_state(%d) -> TW_RESET", + atomic_read(&ufsf->tw_state)); + if (atomic_read(&ufsf->tw_state) == TW_PRESENT) + atomic_set(&ufsf->tw_state, TW_RESET); +} + +inline void ufsf_tw_ee_handler(struct ufsf_feature *ufsf) +{ + u32 status = 0; + int err; + + if (ufsf->tw_debug && (atomic_read(&ufsf->tw_state) != TW_PRESENT)) { + ERR_MSG("tw_state %d", atomic_read(&ufsf->tw_state)); + return; + } + + if ((atomic_read(&ufsf->tw_state) == TW_PRESENT) + && (ufsf->tw_ee_mode == TW_EE_MODE_AUTO)) { + err = ufsf_get_ee_status(ufsf->hba, &status); + if (err) { + dev_err(ufsf->hba->dev, + "%s: failed to get tw ee status %d\n", + __func__, err); + return; + } + if (status & MASK_EE_TW) + ufstw_ee_handler(ufsf); + } +} +#else +inline void ufsf_tw_prep_fn(struct ufsf_feature *ufsf, + struct ufshcd_lrb *lrbp) {} +inline void ufsf_tw_init(struct ufsf_feature *ufsf) {} +inline void ufsf_tw_reset(struct ufsf_feature *ufsf) {} +inline void ufsf_tw_suspend(struct ufsf_feature *ufsf) {} +inline void ufsf_tw_resume(struct ufsf_feature *ufsf) {} +inline void ufsf_tw_release(struct ufsf_feature *ufsf) {} +inline void ufsf_tw_set_init_state(struct ufsf_feature *ufsf) {} +inline void ufsf_tw_reset_lu(struct ufsf_feature *ufsf) {} +inline void ufsf_tw_reset_host(struct ufsf_feature *ufsf) {} +inline void ufsf_tw_ee_handler(struct ufsf_feature *ufsf) {} +#endif diff --git a/drivers/scsi/ufs/ufsfeature.h b/drivers/scsi/ufs/ufsfeature.h new file mode 100644 index 000000000000..a0f67b098fd3 --- /dev/null +++ b/drivers/scsi/ufs/ufsfeature.h @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2017-2018 Samsung Electronics Co., Ltd. 
+ */ + +#ifndef _UFSFEATURE_H_ +#define _UFSFEATURE_H_ + +#include "ufs.h" +#include + +#if defined(CONFIG_SCSI_UFS_HPB) +#include "ufshpb.h" +#endif +#include + +#if defined(CONFIG_SCSI_UFS_TW) +#include "ufstw.h" +#endif + +/* Constant value*/ +#define SECTOR 512 +#define BLOCK 4096 +#define SECTORS_PER_BLOCK (BLOCK / SECTOR) +#define BITS_PER_DWORD 32 + +#define IOCTL_DEV_CTX_MAX_SIZE OS_PAGE_SIZE +#define OS_PAGE_SIZE 4096 +#define OS_PAGE_SHIFT 12 + +#define UFSF_QUERY_REQ_RETRIES 1 + +/* Description */ +#define UFSF_QUERY_DESC_DEVICE_MAX_SIZE 0x57 +#define UFSF_QUERY_DESC_CONFIGURAION_MAX_SIZE 0xE2 +#define UFSF_QUERY_DESC_UNIT_MAX_SIZE 0x2D +#define UFSF_QUERY_DESC_GEOMETRY_MAX_SIZE 0x58 + +#define UFSFEATURE_SELECTOR 0x01 + +/* Extended UFS Feature Support */ +#define UFSF_EFS_TURBO_WRITE 0x100 + +/* query_flag */ +#define MASK_QUERY_UPIU_FLAG_LOC 0xFF + +/* BIG -> LI */ +#define LI_EN_16(x) be16_to_cpu(*(__be16 *)(x)) +#define LI_EN_32(x) be32_to_cpu(*(__be32 *)(x)) +#define LI_EN_64(x) be64_to_cpu(*(__be64 *)(x)) + +/* LI -> BIG */ +#define GET_BYTE_0(num) (((num) >> 0) & 0xff) +#define GET_BYTE_1(num) (((num) >> 8) & 0xff) +#define GET_BYTE_2(num) (((num) >> 16) & 0xff) +#define GET_BYTE_3(num) (((num) >> 24) & 0xff) +#define GET_BYTE_4(num) (((num) >> 32) & 0xff) +#define GET_BYTE_5(num) (((num) >> 40) & 0xff) +#define GET_BYTE_6(num) (((num) >> 48) & 0xff) +#define GET_BYTE_7(num) (((num) >> 56) & 0xff) + +#define INFO_MSG(msg, args...) printk(KERN_INFO "%s:%d " msg "\n", \ + __func__, __LINE__, ##args) +#define INIT_INFO(msg, args...) INFO_MSG(msg, ##args) +#define RELEASE_INFO(msg, args...) INFO_MSG(msg, ##args) +#define SYSFS_INFO(msg, args...) INFO_MSG(msg, ##args) +#define ERR_MSG(msg, args...) printk(KERN_ERR "%s:%d " msg "\n", \ + __func__, __LINE__, ##args) +#define WARNING_MSG(msg, args...) printk(KERN_WARNING "%s:%d " msg "\n", \ + __func__, __LINE__, ##args) + +#define seq_scan_lu(lun) for (lun = 0; lun < UFS_UPIU_MAX_GENERAL_LUN; lun++) + +#define TMSG(ufsf, lun, msg, args...) \ + do { if (ufsf->sdev_ufs_lu[lun] && \ + ufsf->sdev_ufs_lu[lun]->request_queue) \ + blk_add_trace_msg( \ + ufsf->sdev_ufs_lu[lun]->request_queue, \ + msg, ##args); \ + } while (0) \ + +struct ufsf_lu_desc { + /* Common info */ + int lu_enable; /* 03h bLUEnable */ + int lu_queue_depth; /* 06h lu queue depth info*/ + int lu_logblk_size; /* 0Ah bLogicalBlockSize. default 0x0C = 4KB */ + u64 lu_logblk_cnt; /* 0Bh qLogicalBlockCount. 
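+				   (total logical blocks; together with
+				   lu_logblk_size this gives the LU capacity)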
*/ + +#if defined(CONFIG_SCSI_UFS_HPB) + u16 lu_max_active_hpb_rgns; /* 23h:24h wLUMaxActiveHPBRegions */ + u16 lu_hpb_pinned_rgn_startidx; /* 25h:26h wHPBPinnedRegionStartIdx */ + u16 lu_num_hpb_pinned_rgns; /* 27h:28h wNumHPBPinnedRegions */ + int lu_hpb_pinned_end_offset; +#endif +#if defined(CONFIG_SCSI_UFS_TW) + unsigned int tw_lu_buf_size; +#endif +}; + +struct ufsf_feature { + struct ufs_hba *hba; + int num_lu; + int slave_conf_cnt; + struct scsi_device *sdev_ufs_lu[UFS_UPIU_MAX_GENERAL_LUN]; +#if defined(CONFIG_SCSI_UFS_HPB) + struct ufshpb_dev_info hpb_dev_info; + struct ufshpb_lu *ufshpb_lup[UFS_UPIU_MAX_GENERAL_LUN]; + struct work_struct ufshpb_init_work; + struct work_struct ufshpb_reset_work; + struct work_struct ufshpb_eh_work; + wait_queue_head_t wait_hpb; + int ufshpb_state; + struct kref ufshpb_kref; + bool issue_ioctl; +#endif +#if defined(CONFIG_SCSI_UFS_TW) + struct ufstw_dev_info tw_dev_info; + struct ufstw_lu *tw_lup[UFS_UPIU_MAX_GENERAL_LUN]; + struct work_struct tw_init_work; + struct work_struct tw_reset_work; + wait_queue_head_t tw_wait; + atomic_t tw_state; + struct kref tw_kref; + + /* turbo write exception event control */ + bool tw_ee_mode; + + /* for debug */ + bool tw_debug; + int tw_debug_no; + atomic64_t tw_debug_ee_count; +#endif +}; + +struct ufs_hba; +struct ufshcd_lrb; + +void ufsf_device_check(struct ufs_hba *hba); +int ufsf_check_query(__u32 opcode); +int ufsf_query_ioctl(struct ufsf_feature *ufsf, unsigned int lun, + void __user *buffer, + struct ufs_ioctl_query_data_hpb *ioctl_data, + u8 selector); +int ufsf_query_flag_retry(struct ufs_hba *hba, enum query_opcode opcode, + enum flag_idn idn, u8 idx, bool *flag_res); +int ufsf_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode, + enum attr_idn idn, u8 idx, u32 *attr_val); +bool ufsf_is_valid_lun(int lun); +int ufsf_get_ee_status(struct ufs_hba *hba, u32 *status); + +/* for hpb */ +int ufsf_hpb_prepare_pre_req(struct ufsf_feature *ufsf, struct scsi_cmnd *cmd, + int lun); +int ufsf_hpb_prepare_add_lrbp(struct ufsf_feature *ufsf, int add_tag); +void ufsf_hpb_end_pre_req(struct ufsf_feature *ufsf, struct request *req); +void ufsf_hpb_change_lun(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp); +void ufsf_hpb_prep_fn(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp); +void ufsf_hpb_noti_rb(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp); +void ufsf_hpb_reset_lu(struct ufsf_feature *ufsf); +void ufsf_hpb_reset_host(struct ufsf_feature *ufsf); +void ufsf_hpb_init(struct ufsf_feature *ufsf); +void ufsf_hpb_reset(struct ufsf_feature *ufsf); +void ufsf_hpb_suspend(struct ufsf_feature *ufsf); +void ufsf_hpb_resume(struct ufsf_feature *ufsf); +void ufsf_hpb_release(struct ufsf_feature *ufsf); +void ufsf_hpb_set_init_state(struct ufsf_feature *ufsf); + +/* for tw*/ +void ufsf_tw_prep_fn(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp); +void ufsf_tw_init(struct ufsf_feature *ufsf); +void ufsf_tw_reset(struct ufsf_feature *ufsf); +int ufsf_tw_check_flush(struct ufsf_feature *ufsf); +void ufsf_tw_suspend(struct ufsf_feature *ufsf); +void ufsf_tw_resume(struct ufsf_feature *ufsf); +void ufsf_tw_release(struct ufsf_feature *ufsf); +void ufsf_tw_set_init_state(struct ufsf_feature *ufsf); +void ufsf_tw_reset_lu(struct ufsf_feature *ufsf); +void ufsf_tw_reset_host(struct ufsf_feature *ufsf); +void ufsf_tw_ee_handler(struct ufsf_feature *ufsf); +#endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index dc7c620bc507..13a012ce10ac 100644 --- 
a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -2266,7 +2266,12 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) * * Returns 0 in case of success, non-zero value in case of failure */ -static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +#if defined(CONFIG_SCSI_UFS_FEATURE) + int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +#else + static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +#endif + { struct ufshcd_sg_entry *prd; struct scatterlist *sg; @@ -2528,7 +2533,12 @@ static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) * @hba: per adapter instance * @lrbp: pointer to local reference block */ -static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +#if defined(CONFIG_SCSI_UFS_FEATURE) + int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +#else + static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +#endif + { u32 upiu_flags; int ret = 0; @@ -2540,9 +2550,21 @@ static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; if (likely(lrbp->cmd)) { +#if defined(CONFIG_SCSI_UFS_FEATURE) + ufsf_hpb_change_lun(&hba->ufsf, lrbp); + ufsf_tw_prep_fn(&hba->ufsf, lrbp); + ufsf_hpb_prep_fn(&hba->ufsf, lrbp); +#endif ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction); ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); +#if defined(CONFIG_SCSI_SKHPB) + if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SKHYNIX) { + if (hba->skhpb_state == SKHPB_PRESENT && hba->issue_ioctl == false) { + skhpb_prep_fn(hba, lrbp); + } + } +#endif } else { ret = -EINVAL; } @@ -2575,6 +2597,15 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) unsigned long flags; int tag; int err = 0; +#if defined(CONFIG_SCSI_UFS_FEATURE) && defined(CONFIG_SCSI_UFS_HPB) + u32 line = 0; + struct scsi_cmnd *pre_cmd; + struct ufshcd_lrb *add_lrbp; + int add_tag = -ENODEV; + int pre_req_err = -EBUSY; + int lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); + bool req_sent = false; +#endif hba = shost_priv(host); @@ -2637,6 +2668,37 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) clear_bit_unlock(tag, &hba->lrb_in_use); goto out; } + +#if defined(CONFIG_SCSI_UFS_FEATURE) && defined(CONFIG_SCSI_UFS_HPB) + /* Micron version 2.0 not support write buffer id 2 */ + if (hba->dev_info.wmanufacturerid != UFS_VENDOR_SAMSUNG) + goto send_orig_cmd; + + if (ufshcd_vops_has_ufshci_perf_heuristic(hba)) + goto send_orig_cmd; + + add_tag = ufsf_hpb_prepare_pre_req(&hba->ufsf, cmd, lun); + if (add_tag == -EAGAIN) { + clear_bit_unlock(tag, &hba->lrb_in_use); + err = SCSI_MLQUEUE_HOST_BUSY; + ufshcd_release(hba); + line = __LINE__; + goto out; + } + + if (add_tag < 0) { + hba->lrb[tag].hpb_ctx_id = MAX_HPB_CONTEXT_ID; + goto send_orig_cmd; + } + + add_lrbp = &hba->lrb[add_tag]; + + pre_req_err = ufsf_hpb_prepare_add_lrbp(&hba->ufsf, add_tag); + if (pre_req_err) + hba->lrb[tag].hpb_ctx_id = MAX_HPB_CONTEXT_ID; +send_orig_cmd: +#endif + WARN_ON(hba->clk_gating.state != CLKS_ON); lrbp = &hba->lrb[tag]; @@ -2672,6 +2734,17 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) ufs_mtk_biolog_queue_command(tag, lrbp->cmd); +#if defined(CONFIG_SCSI_UFS_FEATURE) && defined(CONFIG_SCSI_UFS_HPB) + if (!pre_req_err) { + ufshcd_vops_setup_xfer_req(hba, add_tag, + (add_lrbp->cmd ? 
true : false)); + ufshcd_send_command(hba, add_tag); + req_sent = true; + pre_req_err = -EBUSY; + atomic64_inc(&hba->ufsf.ufshpb_lup[add_lrbp->lun]->pre_req_cnt); + } +#endif + /* issue command to the controller */ spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false)); @@ -2681,6 +2754,17 @@ out_unlock: if (!err) ufs_mtk_biolog_send_command(tag); out: +#if defined(CONFIG_SCSI_UFS_FEATURE) && defined(CONFIG_SCSI_UFS_HPB) + if (!pre_req_err) { + pre_cmd = add_lrbp->cmd; + scsi_dma_unmap(pre_cmd); + add_lrbp->cmd = NULL; + clear_bit_unlock(add_tag, &hba->lrb_in_use); + ufshcd_release(hba); + ufsf_hpb_end_pre_req(&hba->ufsf, pre_cmd->request); + } +#endif + up_read(&hba->clk_scaling_lock); return err; } @@ -2863,8 +2947,13 @@ static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) * NOTE: Since there is only one available tag for device management commands, * it is expected you hold the hba->dev_cmd.lock mutex. */ -static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, - enum dev_cmd_type cmd_type, int timeout) +#if defined(CONFIG_SCSI_UFS_FEATURE) + int ufshcd_exec_dev_cmd(struct ufs_hba *hba, + enum dev_cmd_type cmd_type, int timeout) +#else + static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, + enum dev_cmd_type cmd_type, int timeout) +#endif { struct ufshcd_lrb *lrbp; int err; @@ -2934,8 +3023,13 @@ static inline void ufshcd_init_query(struct ufs_hba *hba, (*request)->upiu_req.selector = selector; } +#if defined(CONFIG_SCSI_SKHPB) +int ufshcd_query_flag_retry(struct ufs_hba *hba, + enum query_opcode opcode, enum flag_idn idn, bool *flag_res) +#else static int ufshcd_query_flag_retry(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, bool *flag_res) +#endif { int ret; int retries; @@ -3538,7 +3632,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, * Unit descriptors are only available for general purpose LUs (LUN id * from 0 to 7) and RPMB Well known LU. 
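 * Other well-known LUs (such as BOOT and DEVICE) have no unit
 * descriptor, which is why any other LUN falls back to -EOPNOTSUPP
 * below.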
*/ - if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) + if (!ufs_is_valid_unit_desc_lun(lun)) return -EOPNOTSUPP; return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, @@ -4463,7 +4557,6 @@ void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep) dev_err(hba->dev, "%s: Controller disable failed\n", __func__); } EXPORT_SYMBOL_GPL(ufshcd_hba_stop); - /** * ufshcd_hba_enable - initialize the controller * @hba: per adapter instance @@ -4966,6 +5059,18 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) if (schedule_work(&hba->eeh_work)) pm_runtime_get_noresume(hba->dev); } +#if defined(CONFIG_SCSI_UFS_FEATURE) + if (scsi_status == SAM_STAT_GOOD) + ufsf_hpb_noti_rb(&hba->ufsf, lrbp); +#endif +#if defined(CONFIG_SCSI_SKHPB) + if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SKHYNIX) { + if (hba->skhpb_state == SKHPB_PRESENT && + scsi_status == SAM_STAT_GOOD) + skhpb_rsp_upiu(hba, lrbp); + } +#endif + break; case UPIU_TRANSACTION_REJECT_UPIU: /* TODO: handle Reject UPIU Response */ @@ -6308,6 +6413,18 @@ out: hba->req_abort_count = 0; ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err); if (!err) { +#if defined(CONFIG_SCSI_UFS_FEATURE) + ufsf_hpb_reset_lu(&hba->ufsf); + ufsf_tw_reset_lu(&hba->ufsf); +#endif +#if defined(CONFIG_SCSI_SKHPB) + if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SKHYNIX) { + if (hba->skhpb_state == SKHPB_PRESENT) + hba->skhpb_state = SKHPB_RESET; + schedule_delayed_work(&hba->skhpb_init_work, + msecs_to_jiffies(10)); + } +#endif err = SUCCESS; } else { dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); @@ -6529,6 +6646,10 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) */ spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_hba_stop(hba, false); +#if defined(CONFIG_SCSI_UFS_FEATURE) + ufsf_hpb_reset_host(&hba->ufsf); + ufsf_tw_reset_host(&hba->ufsf); +#endif hba->silence_err_logs = true; ufshcd_complete_requests(hba); hba->silence_err_logs = false; @@ -7368,6 +7489,16 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async) } ufshcd_print_info(hba, UFS_INFO_PWR); } +#if defined(CONFIG_SCSI_UFS_FEATURE) + ufsf_device_check(hba); + ufsf_hpb_init(&hba->ufsf); + ufsf_tw_init(&hba->ufsf); +#endif + scsi_scan_host(hba->host); +#if defined(CONFIG_SCSI_SKHPB) + if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SKHYNIX) + schedule_delayed_work(&hba->skhpb_init_work, 0); +#endif /* * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec) @@ -8408,6 +8539,14 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE; req_link_state = UIC_LINK_OFF_STATE; } +#if defined(CONFIG_SCSI_UFS_FEATURE) + ufsf_hpb_suspend(&hba->ufsf); + ufsf_tw_suspend(&hba->ufsf); +#endif +#if defined(CONFIG_SCSI_SKHPB) + if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SKHYNIX) + skhpb_suspend(hba); +#endif ret = ufshcd_crypto_suspend(hba, pm_op); if (ret) @@ -8456,6 +8595,13 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) ufshcd_disable_auto_bkops(hba); } } +#if defined(CONFIG_SCSI_UFS_FEATURE) && defined(CONFIG_SCSI_UFS_TW) + if (ufstw_need_flush(&hba->ufsf)) { + ret = -EAGAIN; + pm_runtime_mark_last_busy(hba->dev); + goto enable_gating; + } +#endif if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) && ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) || @@ -8625,6 +8771,13 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) if (hba->clk_scaling.is_allowed) ufshcd_resume_clkscaling(hba); +#if 
defined(CONFIG_SCSI_UFS_FEATURE) + ufsf_hpb_resume(&hba->ufsf); + ufsf_tw_resume(&hba->ufsf); +#endif +#if defined(CONFIG_SCSI_SKHPB) + skhpb_resume(hba); +#endif /* Enable Auto-Hibernate if configured */ ufshcd_auto_hibern8_enable(hba); @@ -8898,6 +9051,14 @@ EXPORT_SYMBOL(ufshcd_shutdown); */ void ufshcd_remove(struct ufs_hba *hba) { +#if defined(CONFIG_SCSI_UFS_FEATURE) + ufsf_hpb_release(&hba->ufsf); + ufsf_tw_release(&hba->ufsf); +#endif +#if defined(CONFIG_SCSI_SKHPB) +if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SKHYNIX) + skhpb_release(hba, SKHPB_NEED_INIT); +#endif ufs_bsg_remove(hba); ufs_sysfs_remove_nodes(hba->dev); scsi_remove_host(hba->host); @@ -9155,6 +9316,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) * ufshcd_probe_hba(). */ ufshcd_set_ufs_dev_active(hba); +#if defined(CONFIG_SCSI_UFS_FEATURE) + ufsf_hpb_set_init_state(&hba->ufsf); + ufsf_tw_set_init_state(&hba->ufsf); +#endif +#if defined(CONFIG_SCSI_SKHPB) /* initialize hpb structures */ + ufshcd_init_hpb(hba); +#endif async_schedule(ufshcd_async_scan, hba); ufs_sysfs_add_nodes(hba->dev); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index a6607c54725c..01b72f9b5093 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -71,6 +71,13 @@ #include "ufs.h" #include "ufshci.h" +#if defined(CONFIG_SCSI_UFS_FEATURE) +#include "ufsfeature.h" +#endif +#if defined(CONFIG_SCSI_SKHPB) +#include "ufshpb_skh.h" +#endif + #define UFSHCD "ufshcd" #define UFSHCD_DRIVER_VERSION "0.2" @@ -239,6 +246,10 @@ struct ufshcd_lrb { u64 data_unit_num; bool req_abort_skip; +#if defined(CONFIG_SCSI_UFS_FEATURE) && defined(CONFIG_SCSI_UFS_HPB) + int hpb_ctx_id; +#endif + }; /** @@ -747,6 +758,7 @@ struct ufs_hba { u16 ee_ctrl_mask; u16 hba_enable_delay_us; bool is_powered; + struct semaphore eh_sem; /* Work Queues */ struct work_struct eh_work; @@ -830,6 +842,24 @@ struct ufs_hba { struct device bsg_dev; struct request_queue *bsg_queue; +#if defined(CONFIG_SCSI_UFS_FEATURE) + struct ufsf_feature ufsf; +#endif +#if defined(CONFIG_SCSI_SKHPB) + /* HPB support */ + u32 skhpb_feat; + int skhpb_state; + int skhpb_max_regions; + struct delayed_work skhpb_init_work; + bool issue_ioctl; + struct skhpb_lu *skhpb_lup[UFS_UPIU_MAX_GENERAL_LUN]; + struct work_struct skhpb_eh_work; + u32 skhpb_quirk; + u8 hpb_control_mode; +#define SKHPB_U8_MAX 0xFF + u8 skhpb_quicklist_lu_enable[UFS_UPIU_MAX_GENERAL_LUN]; + struct scsi_device *sdev_ufs_lu[UFS_UPIU_MAX_GENERAL_LUN]; +#endif #ifdef CONFIG_SCSI_UFS_CRYPTO /* crypto */ @@ -1073,6 +1103,17 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, int ufshcd_hold(struct ufs_hba *hba, bool async); void ufshcd_release(struct ufs_hba *hba); +#if defined(CONFIG_SCSI_UFS_FEATURE) +int ufshcd_exec_dev_cmd(struct ufs_hba *hba, + enum dev_cmd_type cmd_type, int timeout); +int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); +int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); +#endif + +#if defined(CONFIG_SCSI_SKHPB) +int ufshcd_query_flag_retry(struct ufs_hba *hba, + enum query_opcode opcode, enum flag_idn idn, bool *flag_res); +#endif int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, int *desc_length); diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c new file mode 100644 index 000000000000..2e6fa5354662 --- /dev/null +++ b/drivers/scsi/ufs/ufshpb.c @@ -0,0 +1,3762 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2017-2018 Samsung Electronics 
Co., Ltd. + */ + +#include "ufshcd.h" +#include "ufshpb.h" + +#define UFSHCD_REQ_SENSE_SIZE 18 + +/* + * define global constants + */ +static int sects_per_blk_shift; +static int bits_per_dword_shift; +static int bits_per_dword_mask; +static int bits_per_byte_shift; + +static int ufshpb_create_sysfs(struct ufsf_feature *ufsf, + struct ufshpb_lu *hpb); +static int ufshpb_remove_sysfs(struct ufshpb_lu *hpb); + +static inline void +ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx, + int *srgn_idx, int *offset) +{ + int rgn_offset; + + *rgn_idx = lpn >> hpb->entries_per_rgn_shift; + rgn_offset = lpn & hpb->entries_per_rgn_mask; + *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift; + *offset = rgn_offset & hpb->entries_per_srgn_mask; +} + +inline int ufshpb_valid_srgn(struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + return rgn->rgn_state != HPBREGION_INACTIVE && + srgn->srgn_state == HPBSUBREGION_CLEAN; +} + +/* Must be held hpb_lock */ +static bool ufshpb_ppn_dirty_check(struct ufshpb_lu *hpb, unsigned long lpn, + int transfer_len) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + unsigned long cur_lpn = lpn; + int rgn_idx, srgn_idx, srgn_offset, find_size; + int scan_cnt = transfer_len; + + do { + ufshpb_get_pos_from_lpn(hpb, cur_lpn, &rgn_idx, &srgn_idx, + &srgn_offset); + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + if (!ufshpb_valid_srgn(rgn, srgn)) + return true; + + if (!srgn->mctx || !srgn->mctx->ppn_dirty) + return true; + + if (hpb->entries_per_srgn < srgn_offset + scan_cnt) { + find_size = hpb->entries_per_srgn - srgn_offset; + scan_cnt -= find_size; + } else { + find_size = srgn_offset + scan_cnt; + scan_cnt = 0; + } + + srgn_offset = + find_next_bit((unsigned long *)srgn->mctx->ppn_dirty, + hpb->entries_per_srgn, srgn_offset); + + if (srgn_offset < hpb->entries_per_srgn) + return srgn_offset < find_size; + + cur_lpn += find_size; + } while (scan_cnt); + + return false; +} + +static void ufshpb_set_read16_cmd(struct ufshpb_lu *hpb, + struct ufshcd_lrb *lrbp, + unsigned long long ppn, + unsigned int transfer_len) +{ + unsigned char *cdb = lrbp->cmd->cmnd; + + cdb[0] = READ_16; + cdb[2] = lrbp->cmd->cmnd[2]; + cdb[3] = lrbp->cmd->cmnd[3]; + cdb[4] = lrbp->cmd->cmnd[4]; + cdb[5] = lrbp->cmd->cmnd[5]; + cdb[6] = GET_BYTE_7(ppn); + cdb[7] = GET_BYTE_6(ppn); + cdb[8] = GET_BYTE_5(ppn); + cdb[9] = GET_BYTE_4(ppn); + cdb[10] = GET_BYTE_3(ppn); + cdb[11] = GET_BYTE_2(ppn); + cdb[12] = GET_BYTE_1(ppn); + cdb[13] = GET_BYTE_0(ppn); + + if (lrbp->hpb_ctx_id < MAX_HPB_CONTEXT_ID) + cdb[14] = (1 << 7) | lrbp->hpb_ctx_id; + else + cdb[14] = UFSHPB_GROUP_NUMBER; + + cdb[15] = transfer_len; + + lrbp->cmd->cmd_len = MAX_CDB_SIZE; +} + +/* called with hpb_lock (irq) */ +static inline void +ufshpb_set_dirty_bits(struct ufshpb_lu *hpb, struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn, int dword, int offset, + unsigned int cnt) +{ + const unsigned long mask = ((1UL << cnt) - 1) & 0xffffffff; + + if (rgn->rgn_state == HPBREGION_INACTIVE) + return; + + BUG_ON(!srgn->mctx); + srgn->mctx->ppn_dirty[dword] |= (mask << offset); +} + +static inline void ufshpb_get_bit_offset(struct ufshpb_lu *hpb, int srgn_offset, + int *dword, int *offset) +{ + *dword = srgn_offset >> bits_per_dword_shift; + *offset = srgn_offset & bits_per_dword_mask; +} + +static void ufshpb_set_dirty(struct ufshpb_lu *hpb, struct ufshcd_lrb *lrbp, + int rgn_idx, int srgn_idx, int srgn_offset) +{ + struct ufshpb_region *rgn; + struct 
ufshpb_subregion *srgn; + int cnt, bit_cnt, bit_dword, bit_offset; + + cnt = blk_rq_sectors(lrbp->cmd->request) >> sects_per_blk_shift; + ufshpb_get_bit_offset(hpb, srgn_offset, &bit_dword, &bit_offset); + + do { + bit_cnt = min(cnt, BITS_PER_DWORD - bit_offset); + + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + ufshpb_set_dirty_bits(hpb, rgn, srgn, bit_dword, bit_offset, + bit_cnt); + + bit_offset = 0; + bit_dword++; + + if (bit_dword == hpb->dwords_per_srgn) { + bit_dword = 0; + srgn_idx++; + + if (srgn_idx == hpb->srgns_per_rgn) { + srgn_idx = 0; + rgn_idx++; + } + } + cnt -= bit_cnt; + } while (cnt); + + BUG_ON(cnt < 0); +} + +static inline bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd) +{ + if (cmd->cmnd[0] == READ_10 || cmd->cmnd[0] == READ_16) + return true; + + return false; +} + +static inline bool ufshpb_is_write_discard_lrbp(struct ufshcd_lrb *lrbp) +{ + if (lrbp->cmd->cmnd[0] == WRITE_10 || lrbp->cmd->cmnd[0] == WRITE_16 || + lrbp->cmd->cmnd[0] == UNMAP) + return true; + + return false; +} + +static unsigned long long ufshpb_get_ppn(struct ufshpb_map_ctx *mctx, int pos, + int *error) +{ + unsigned long long *ppn_table; + struct page *page = NULL; + int index, offset; + + index = pos / HPB_ENTREIS_PER_OS_PAGE; + offset = pos % HPB_ENTREIS_PER_OS_PAGE; + + page = mctx->m_page[index]; + if (!page) { + *error = -ENOMEM; + ERR_MSG("mctx %p cannot get m_page", mctx); + return 0; + } + + ppn_table = page_address(page); + if (!ppn_table) { + *error = -ENOMEM; + ERR_MSG("mctx %p cannot get ppn_table vm", mctx); + return 0; + } + + return ppn_table[offset]; +} + +static inline int ufshpb_lu_get(struct ufshpb_lu *hpb) +{ + if (!hpb || hpb->ufsf->ufshpb_state != HPB_PRESENT) + return -ENODEV; + + kref_get(&hpb->ufsf->ufshpb_kref); + return 0; +} + +static inline void ufshpb_schedule_error_handler(struct kref *kref) +{ + struct ufsf_feature *ufsf; + + ufsf = container_of(kref, struct ufsf_feature, ufshpb_kref); + schedule_work(&ufsf->ufshpb_eh_work); +} + +static inline void ufshpb_lu_put(struct ufshpb_lu *hpb) +{ + kref_put(&hpb->ufsf->ufshpb_kref, ufshpb_schedule_error_handler); +} + +static void ufshpb_failed(struct ufshpb_lu *hpb, const char *f) +{ + ERR_MSG("ufshpb_driver failed. function (%s)", f); + hpb->ufsf->ufshpb_state = HPB_FAILED; + ufshpb_lu_put(hpb); +} + +static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb, + struct ufshpb_req *pre_req) +{ + list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free); + hpb->num_inflight_pre_req--; +} + +static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb) +{ + struct ufshpb_req *pre_req; + + if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) { + HPB_DEBUG(hpb, "pre_req throttle. 
inflight %d throttle %d", + hpb->num_inflight_pre_req, hpb->throttle_pre_req); + return NULL; + } + + pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free, + struct ufshpb_req, + list_req); + if (!pre_req) { + HPB_DEBUG(hpb, "There is no pre_req"); + return NULL; + } + + list_del_init(&pre_req->list_req); + hpb->num_inflight_pre_req++; + + return pre_req; +} + +static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error) +{ + struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data; + struct ufshpb_lu *hpb = pre_req->hpb; + unsigned long flags; + struct scsi_sense_hdr sshdr; + + if (error) { + ERR_MSG("error number %d", error); + scsi_normalize_sense(pre_req->sense, SCSI_SENSE_BUFFERSIZE, + &sshdr); + ERR_MSG("code %x sense_key %x asc %x ascq %x", + sshdr.response_code, + sshdr.sense_key, sshdr.asc, sshdr.ascq); + ERR_MSG("byte4 %x byte5 %x byte6 %x additional_len %x", + sshdr.byte4, sshdr.byte5, + sshdr.byte6, sshdr.additional_length); + } + + spin_lock_irqsave(&hpb->hpb_lock, flags); + ufshpb_put_pre_req(pre_req->hpb, pre_req); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + + ufshpb_lu_put(pre_req->hpb); +} + +static int ufshpb_prep_entry(struct ufshpb_req *pre_req, + struct page *page) +{ + struct ufshpb_lu *hpb = pre_req->hpb; + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + unsigned long long *addr; + unsigned long long entry_ppn = 0; + unsigned long lpn = pre_req->wb.lpn; + int rgn_idx, srgn_idx, srgn_offset; + int i, error = 0; + unsigned long flags; + + addr = page_address(page); + + spin_lock_irqsave(&hpb->hpb_lock, flags); + for (i = 0; i < pre_req->wb.len; i++, lpn++) { + ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, + &srgn_offset); + + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + if (!ufshpb_valid_srgn(rgn, srgn)) + goto mctx_error; + + if (!srgn->mctx) + goto mctx_error; + + entry_ppn = ufshpb_get_ppn(srgn->mctx, srgn_offset, &error); + if (error) + goto mctx_error; + + addr[i] = entry_ppn; + } + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + return 0; +mctx_error: + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + return -ENOMEM; +} + +static int ufshpb_pre_req_add_bio_page(struct request_queue *q, + struct ufshpb_req *pre_req) +{ + struct page *page = pre_req->wb.m_page; + struct bio *bio = pre_req->bio; + int ret; + + BUG_ON(!page); + + bio_reset(bio); + + ret = ufshpb_prep_entry(pre_req, page); + if (ret) + return ret; + + ret = bio_add_pc_page(q, bio, page, OS_PAGE_SIZE, 0); + if (ret != OS_PAGE_SIZE) { + ERR_MSG("bio_add_pc_page fail: %d", ret); + return -ENOMEM; + } + + return 0; +} + +static void ufshpb_init_cmd_errh(struct scsi_cmnd *cmd) +{ + cmd->serial_number = 0; + scsi_set_resid(cmd, 0); + memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + if (cmd->cmd_len == 0) + cmd->cmd_len = scsi_command_size(cmd->cmnd); +} + +static void ufshpb_pre_req_done(struct scsi_cmnd *cmd) +{ + blk_complete_request(cmd->request); +} + +static inline unsigned long ufshpb_get_lpn(struct request *rq) +{ + return blk_rq_pos(rq) / SECTORS_PER_BLOCK; +} + +static inline unsigned int ufshpb_get_len(struct request *rq) +{ + return blk_rq_sectors(rq) / SECTORS_PER_BLOCK; +} + +static inline unsigned int ufshpb_is_unaligned(struct request *rq) +{ + return blk_rq_sectors(rq) % SECTORS_PER_BLOCK; +} + +static inline int ufshpb_issue_ctx_id_ticket(struct ufshpb_lu *hpb) +{ + int hpb_ctx_id; + + hpb->ctx_id_ticket++; + if (hpb->ctx_id_ticket >= MAX_HPB_CONTEXT_ID) + hpb->ctx_id_ticket = 0; + hpb_ctx_id = 
hpb->ctx_id_ticket; + + return hpb_ctx_id; +} + +static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb, + unsigned long lpn, unsigned int len, + int hpb_ctx_id) +{ + int len_byte = len * HPB_ENTRY_SIZE; + + cdb[0] = UFSHPB_WRITE_BUFFER; + cdb[1] = UFSHPB_WRITE_BUFFER_ID; + cdb[2] = GET_BYTE_3(lpn); + cdb[3] = GET_BYTE_2(lpn); + cdb[4] = GET_BYTE_1(lpn); + cdb[5] = GET_BYTE_0(lpn); + cdb[6] = (1 << 7) | hpb_ctx_id; + cdb[7] = GET_BYTE_1(len_byte); + cdb[8] = GET_BYTE_0(len_byte); + cdb[9] = 0x00; /* Control = 0x00 */ +} + +static void ufshpb_mimic_scsi_release_buffers(struct scsi_cmnd *cmd) +{ + if (cmd->sdb.table.nents) + sg_free_table_chained(&cmd->sdb.table, false); + + memset(&cmd->sdb, 0, sizeof(cmd->sdb)); + + if (scsi_prot_sg_count(cmd)) + sg_free_table_chained(&cmd->prot_sdb->table, false); +} + +static inline void ufshpb_mimic_scsi_dispatch_cmd(struct scsi_cmnd *cmd) +{ + atomic_inc(&cmd->device->iorequest_cnt); + + scsi_log_send(cmd); + + cmd->scsi_done = ufshpb_pre_req_done; +} + +static int ufshpb_mimic_scsi_request_fn(struct ufshpb_lu *hpb, + struct request *req) +{ + struct request_queue *q = req->q; + struct scsi_device *sdev = q->queuedata; + struct Scsi_Host *shost = sdev->host; + struct scsi_cmnd *cmd; + unsigned long flags; + unsigned int busy; + int ret = 0; + + spin_lock_irqsave(q->queue_lock, flags); + req->rq_flags |= RQF_STARTED; + + ret = q->prep_rq_fn(q, req); + if (unlikely(ret != BLKPREP_OK)) { + HPB_DEBUG(hpb, "scsi_prep_fn is fail"); + ret = -EIO; + goto prep_err; + } + cmd = req->special; + if (unlikely(!cmd)) + BUG(); + + busy = atomic_inc_return(&sdev->device_busy) - 1; + if (busy >= sdev->queue_depth) { + ret = -EAGAIN; + goto finish_cmd; + } + + /* lh_pre_req_free list is dummy head for blk_dequeue_request() */ + list_add_tail(&req->queuelist, &hpb->lh_pre_req_dummy); + ret = blk_queue_start_tag(q, req); + if (ret) { + list_del_init(&req->queuelist); + ret = -EAGAIN; + goto finish_cmd; + } + spin_unlock_irqrestore(q->queue_lock, flags); + + /* + * UFS device has multi luns, so starget is not used. + * In case of UFS, starget->can_queue <= 0. + */ + if (unlikely(scsi_target(sdev)->can_queue > 0)) + atomic_inc(&scsi_target(sdev)->target_busy); + atomic_inc(&shost->host_busy); + + ufshpb_init_cmd_errh(cmd); + + ufshpb_mimic_scsi_dispatch_cmd(cmd); + + return ret; +finish_cmd: + ufshpb_mimic_scsi_release_buffers(cmd); + scsi_put_command(cmd); + put_device(&sdev->sdev_gendev); + req->special = NULL; + atomic_dec(&sdev->device_busy); +prep_err: + spin_unlock_irqrestore(q->queue_lock, flags); + return ret; +} + +static int ufshpb_set_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd, + struct ufshpb_req *pre_req, int hpb_ctx_id) +{ + struct scsi_device *sdev = cmd->device; + struct request_queue *q = sdev->request_queue; + struct request *req; + struct scsi_request *rq; + struct scsi_cmnd *scmd; + struct bio *bio = pre_req->bio; + int ret = 0; + + pre_req->hpb = hpb; + pre_req->wb.lpn = ufshpb_get_lpn(cmd->request); + pre_req->wb.len = ufshpb_get_len(cmd->request); + + ret = ufshpb_pre_req_add_bio_page(q, pre_req); + if (ret) + return ret; + + req = pre_req->req; + + /* + * blk_init_rl() -> alloc_request_size(). + * q->init_rq_fn = scsi_old_init_rq behavior. + */ + scmd = (struct scsi_cmnd *)(req + 1); + memset(scmd, 0, sizeof(*scmd)); + scmd->sense_buffer = pre_req->sense; + scmd->req.sense = scmd->sense_buffer; + + /* blk_get_request behavior */ + blk_rq_init(q, req); + q->initialize_rq_fn(req); + + /* 1. 
request setup */
+ blk_rq_append_bio(req, &bio);
+ req->cmd_flags = REQ_OP_WRITE | REQ_SYNC | REQ_OP_SCSI_OUT;
+ req->rq_flags = RQF_QUIET | RQF_PREEMPT;
+ req->timeout = msecs_to_jiffies(30000);
+ req->end_io_data = (void *)pre_req;
+ req->end_io = ufshpb_pre_req_compl_fn;
+
+ /* 2. scsi_request setup */
+ rq = scsi_req(req);
+ ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
+ hpb_ctx_id);
+ rq->cmd_len = scsi_command_size(rq->cmd);
+
+ ret = ufshpb_mimic_scsi_request_fn(hpb, req);
+
+ return ret;
+}
+
+static inline bool ufshpb_is_support_chunk(int transfer_len)
+{
+ return transfer_len <= HPB_MULTI_CHUNK_HIGH;
+}
+
+static int ufshpb_check_pre_req_cond(struct ufshpb_lu *hpb,
+ struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd->request;
+ unsigned long flags;
+ unsigned int transfer_len;
+ unsigned int lpn;
+
+ if (!ufshpb_is_read_cmd(cmd))
+ return -EINVAL;
+
+ if (ufshpb_is_unaligned(rq))
+ return -EINVAL;
+
+ transfer_len = ufshpb_get_len(rq);
+ if (!transfer_len)
+ return -EINVAL;
+
+ if (!ufshpb_is_support_chunk(transfer_len))
+ return -EINVAL;
+
+ /*
+ * By default, the WRITE BUFFER command supports transfer lengths from
+ * 36KB (len=9) to 512KB (len=128). The transfer_len range can be
+ * changed through sysfs.
+ */
+ if (transfer_len < hpb->pre_req_min_tr_len ||
+ transfer_len > hpb->pre_req_max_tr_len)
+ return -EINVAL;
+
+ /*
+ * When the request comes from an ioctl, the address in rq will be -1.
+ * In that case, ufshpb_ppn_dirty_check() would compare the address
+ * against 0xFFFFFFFF_FFFFFFFF (64-bit) or 0xFFFFFFFF (32-bit) and
+ * cause unexpected behavior, so skip the pre-request here.
+ */
+ if ((long)blk_rq_pos(cmd->request) == -1)
+ return -EINVAL;
+
+ lpn = ufshpb_get_lpn(cmd->request);
+
+ spin_lock_irqsave(&hpb->hpb_lock, flags);
+ if (ufshpb_ppn_dirty_check(hpb, lpn, transfer_len)) {
+ spin_unlock_irqrestore(&hpb->hpb_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&hpb->hpb_lock, flags);
+
+ return 0;
+}
+
+void ufshpb_end_pre_req(struct ufsf_feature *ufsf, struct request *req)
+{
+ struct scsi_cmnd *scmd = (struct scsi_cmnd *)(req + 1);
+
+ set_host_byte(scmd, DID_OK);
+
+ scmd->scsi_done(scmd);
+}
+
+int ufshpb_prepare_pre_req(struct ufsf_feature *ufsf, struct scsi_cmnd *cmd,
+ u8 lun)
+{
+ struct ufs_hba *hba = ufsf->hba;
+ struct ufshpb_lu *hpb;
+ struct ufshpb_req *pre_req;
+ struct ufshcd_lrb *add_lrbp;
+ struct ufshcd_lrb *orig_lrbp = &hba->lrb[cmd->request->tag];
+ struct scsi_cmnd *pre_cmd;
+ unsigned long flags;
+ int add_tag, hpb_ctx_id;
+ int ret = 0;
+
+ /* WKLU could not be HPB-LU */
+ if (!ufsf_is_valid_lun(lun))
+ return -ENODEV;
+
+ hpb = ufsf->ufshpb_lup[lun];
+ ret = ufshpb_lu_get(hpb);
+ if (ret)
+ return ret;
+
+ if (hpb->force_disable) {
+ ret = -ENODEV;
+ goto put_hpb;
+ }
+
+ ret = ufshpb_check_pre_req_cond(hpb, cmd);
+ if (ret)
+ goto put_hpb;
+
+ spin_lock_irqsave(&hpb->hpb_lock, flags);
+ pre_req = ufshpb_get_pre_req(hpb);
+ if (!pre_req) {
+ spin_unlock_irqrestore(&hpb->hpb_lock, flags);
+ ret = -ENOMEM;
+ goto put_hpb;
+ }
+
+ hpb_ctx_id = ufshpb_issue_ctx_id_ticket(hpb);
+ spin_unlock_irqrestore(&hpb->hpb_lock, flags);
+
+ ret = ufshpb_set_pre_req(hpb, cmd, pre_req, hpb_ctx_id);
+ if (ret)
+ goto put_pre_req;
+
+ add_tag = pre_req->req->tag;
+ if (test_and_set_bit_lock(add_tag, &hba->lrb_in_use)) {
+ ufshpb_end_pre_req(ufsf, pre_req->req);
+ return -EIO;
+ }
+
+ add_lrbp = &hba->lrb[add_tag];
+ WARN_ON(add_lrbp->cmd);
+
+ pre_cmd = pre_req->req->special;
+ add_lrbp->cmd = pre_cmd;
+ add_lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
+
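The 10-byte HPB WRITE BUFFER CDB built by ufshpb_set_write_buf_cmd() above can be pictured with a small stand-alone sketch. The opcode, buffer-ID, and GET_BYTE_n definitions below are assumptions (they live in headers outside this hunk); the byte layout mirrors the function:

#include <stdio.h>

#define GET_BYTE_0(x) ((unsigned char)((x) & 0xff))
#define GET_BYTE_1(x) ((unsigned char)(((x) >> 8) & 0xff))
#define GET_BYTE_2(x) ((unsigned char)(((x) >> 16) & 0xff))
#define GET_BYTE_3(x) ((unsigned char)(((x) >> 24) & 0xff))
#define UFSHPB_WRITE_BUFFER	0xfa	/* assumed opcode */
#define UFSHPB_WRITE_BUFFER_ID	0x02	/* assumed buffer ID */
#define HPB_ENTRY_SIZE		8	/* assumed: 8-byte PPN entry */

int main(void)
{
	unsigned long lpn = 0x12345;
	unsigned int len = 16, len_byte = len * HPB_ENTRY_SIZE;
	int hpb_ctx_id = 5, i;
	unsigned char cdb[10] = {
		UFSHPB_WRITE_BUFFER, UFSHPB_WRITE_BUFFER_ID,
		GET_BYTE_3(lpn), GET_BYTE_2(lpn),	/* LPN, big-endian */
		GET_BYTE_1(lpn), GET_BYTE_0(lpn),
		(unsigned char)((1 << 7) | hpb_ctx_id),	/* context ID + valid bit */
		GET_BYTE_1(len_byte), GET_BYTE_0(len_byte),
		0x00,					/* CONTROL */
	};

	for (i = 0; i < 10; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}

SCSI CDB fields are big-endian, which is why the bytes are emitted most-significant first.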
add_lrbp->sense_buffer = pre_cmd->sense_buffer; + add_lrbp->task_tag = add_tag; + add_lrbp->lun = lun; + add_lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false; + add_lrbp->req_abort_skip = false; + + /* MTK patch: reset crypto_enable */ + add_lrbp->crypto_enable = false; + + orig_lrbp->hpb_ctx_id = hpb_ctx_id; + + return add_tag; +put_pre_req: + spin_lock_irqsave(&hpb->hpb_lock, flags); + ufshpb_put_pre_req(hpb, pre_req); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); +put_hpb: + ufshpb_lu_put(hpb); + return ret; +} + +int ufshpb_prepare_add_lrbp(struct ufsf_feature *ufsf, int add_tag) +{ + struct ufs_hba *hba = ufsf->hba; + struct ufshcd_lrb *add_lrbp; + struct scsi_cmnd *pre_cmd; + int err = 0; + + add_lrbp = &hba->lrb[add_tag]; + + pre_cmd = add_lrbp->cmd; + + err = ufshcd_hold(hba, true); + if (err) + goto hold_err; + + ufshcd_comp_scsi_upiu(hba, add_lrbp); + + err = ufshcd_map_sg(hba, add_lrbp); + if (err) + goto map_err; + + return 0; +map_err: + ufshcd_release(hba); +hold_err: + add_lrbp->cmd = NULL; + clear_bit_unlock(add_tag, &hba->lrb_in_use); + ufsf_hpb_end_pre_req(&hba->ufsf, pre_cmd->request); + return -EIO; +} + +/* routine : READ10 -> HPB_READ */ +void ufshpb_prep_fn(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp) +{ + struct ufshpb_lu *hpb; + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + struct request *rq; + unsigned long long ppn = 0; + unsigned long lpn, flags; + int transfer_len = TRANSFER_LEN; + int rgn_idx, srgn_idx, srgn_offset, ret, error = 0; + + /* WKLU could not be HPB-LU */ + if (!lrbp || !ufsf_is_valid_lun(lrbp->lun)) + return; + + if (!ufshpb_is_write_discard_lrbp(lrbp) && + !ufshpb_is_read_cmd(lrbp->cmd)) + return; + + rq = lrbp->cmd->request; + hpb = ufsf->ufshpb_lup[lrbp->lun]; + ret = ufshpb_lu_get(hpb); + if (ret) + return; + + if (hpb->force_disable) { + if (ufshpb_is_read_cmd(lrbp->cmd)) + TMSG(ufsf, hpb->lun, "%llu + %u READ_10", + (unsigned long long) blk_rq_pos(rq), + (unsigned int) blk_rq_sectors(rq)); + goto put_hpb; + } + + /* + * When the request is from ioctrl, the address in rq will be -1. + * At this case, the ufshpb_ppn_dirty_check() will check the address + * with 0xFFFFFFFF_FFFFFFFF (64bit) or 0xFFFFFFFF(32bit). This will + * cause unexpected behavier. So skip here. + */ + if ((long)blk_rq_pos(rq) == -1) + goto put_hpb; + + lpn = ufshpb_get_lpn(rq); + ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset); + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + /* + * If cmd type is WRITE, bitmap set to dirty. 
+ */ + if (ufshpb_is_write_discard_lrbp(lrbp)) { + spin_lock_irqsave(&hpb->hpb_lock, flags); + if (rgn->rgn_state == HPBREGION_INACTIVE) { + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + goto put_hpb; + } + ufshpb_set_dirty(hpb, lrbp, rgn_idx, srgn_idx, srgn_offset); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + goto put_hpb; + } + + if (!ufshpb_is_read_cmd(lrbp->cmd)) + goto put_hpb; + + if (ufshpb_is_unaligned(rq)) { + TMSG_CMD(hpb, "READ_10 not aligned 4KB", rq, rgn_idx, srgn_idx); + goto put_hpb; + } + + transfer_len = ufshpb_get_len(rq); + if (!transfer_len) + goto put_hpb; + + if (!ufshpb_is_support_chunk(transfer_len)) { + TMSG_CMD(hpb, "READ_10 doesn't support chunk size", + rq, rgn_idx, srgn_idx); + goto put_hpb; + } + + spin_lock_irqsave(&hpb->hpb_lock, flags); + if (ufshpb_ppn_dirty_check(hpb, lpn, transfer_len)) { + atomic64_inc(&hpb->miss); + TMSG_CMD(hpb, "READ_10 E_D", rq, rgn_idx, srgn_idx); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + goto put_hpb; + } + + ppn = ufshpb_get_ppn(srgn->mctx, srgn_offset, &error); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + if (error) { + ERR_MSG("get_ppn failed.. err %d region %d subregion %d", + error, rgn_idx, srgn_idx); + ufshpb_lu_put(hpb); + goto wakeup_ee_worker; + } + + ufshpb_set_read16_cmd(hpb, lrbp, ppn, transfer_len); + TMSG(ufsf, hpb->lun, "%llu + %u HPB_READ %d - %d context_id %d", + (unsigned long long) blk_rq_pos(lrbp->cmd->request), + (unsigned int) blk_rq_sectors(lrbp->cmd->request), rgn_idx, + srgn_idx, lrbp->hpb_ctx_id); + + atomic64_inc(&hpb->hit); +put_hpb: + ufshpb_lu_put(hpb); + return; +wakeup_ee_worker: + ufshpb_failed(hpb, __func__); +} + +static inline void ufshpb_put_map_req(struct ufshpb_lu *hpb, + struct ufshpb_req *map_req) +{ + list_add_tail(&map_req->list_req, &hpb->lh_map_req_free); + hpb->num_inflight_map_req--; +} + +static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb) +{ + struct ufshpb_req *map_req; + + if (hpb->num_inflight_map_req >= hpb->throttle_map_req) { + HPB_DEBUG(hpb, "map_req throttle. 
inflight %d throttle %d", + hpb->num_inflight_map_req, hpb->throttle_map_req); + return NULL; + } + + map_req = list_first_entry_or_null(&hpb->lh_map_req_free, + struct ufshpb_req, list_req); + if (!map_req) { + HPB_DEBUG(hpb, "There is no map_req"); + return NULL; + } + + list_del_init(&map_req->list_req); + hpb->num_inflight_map_req++; + + return map_req; +} + +static int ufshpb_clean_dirty_bitmap(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_region *rgn; + + BUG_ON(!srgn->mctx); + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + + if (rgn->rgn_state == HPBREGION_INACTIVE) { + HPB_DEBUG(hpb, "%d - %d evicted", srgn->rgn_idx, + srgn->srgn_idx); + return -EINVAL; + } + + memset(srgn->mctx->ppn_dirty, 0x00, + hpb->entries_per_srgn >> bits_per_byte_shift); + + return 0; +} + +static void ufshpb_clean_active_subregion(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_region *rgn; + + BUG_ON(!srgn->mctx); + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + + if (rgn->rgn_state == HPBREGION_INACTIVE) { + HPB_DEBUG(hpb, "%d - %d evicted", srgn->rgn_idx, + srgn->srgn_idx); + return; + } + srgn->srgn_state = HPBSUBREGION_CLEAN; +} + +static void ufshpb_error_active_subregion(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_region *rgn; + + BUG_ON(!srgn->mctx); + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + + if (rgn->rgn_state == HPBREGION_INACTIVE) { + ERR_MSG("%d - %d evicted", srgn->rgn_idx, srgn->srgn_idx); + return; + } + srgn->srgn_state = HPBSUBREGION_DIRTY; +} + +static void ufshpb_check_ppn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx, + struct ufshpb_map_ctx *mctx, const char *str) +{ + int error = 0; + unsigned long long val[2]; + + BUG_ON(!mctx); + + val[0] = ufshpb_get_ppn(mctx, 0, &error); + if (!error) + val[1] = ufshpb_get_ppn(mctx, hpb->entries_per_srgn - 1, + &error); + if (error) + val[0] = val[1] = 0; + + HPB_DEBUG(hpb, "%s READ BUFFER %d - %d ( %llx ~ %llx )", str, rgn_idx, + srgn_idx, val[0], val[1]); +} + +static void ufshpb_map_compl_process(struct ufshpb_req *map_req) +{ + struct ufshpb_lu *hpb = map_req->hpb; + struct ufshpb_subregion *srgn; + unsigned long flags; + + srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl + + map_req->rb.srgn_idx; + + if (hpb->debug) + ufshpb_check_ppn(hpb, srgn->rgn_idx, srgn->srgn_idx, srgn->mctx, + "COMPL"); + + TMSG(hpb->ufsf, hpb->lun, "Noti: C RB %d - %d", map_req->rb.rgn_idx, + map_req->rb.srgn_idx); + + spin_lock_irqsave(&hpb->hpb_lock, flags); + ufshpb_clean_active_subregion(hpb, srgn); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); +} + +static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx, + int srgn_idx) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + list_del_init(&rgn->list_inact_rgn); + + if (list_empty(&srgn->list_act_srgn)) + list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn); +} + +static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + int srgn_idx; + + rgn = hpb->rgn_tbl + rgn_idx; + + for (srgn_idx = 0; srgn_idx < rgn->srgn_cnt; srgn_idx++) { + srgn = rgn->srgn_tbl + srgn_idx; + + list_del_init(&srgn->list_act_srgn); + } + + if (list_empty(&rgn->list_inact_rgn)) + list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn); +} + +static int ufshpb_map_req_error(struct ufshpb_req *map_req) +{ + struct ufshpb_lu *hpb = map_req->hpb; + struct ufshpb_region *rgn; + struct 
ufshpb_subregion *srgn; + struct scsi_sense_hdr sshdr; + unsigned long flags; + + rgn = hpb->rgn_tbl + map_req->rb.rgn_idx; + srgn = rgn->srgn_tbl + map_req->rb.srgn_idx; + + scsi_normalize_sense(map_req->sense, SCSI_SENSE_BUFFERSIZE, &sshdr); + + ERR_MSG("code %x sense_key %x asc %x ascq %x", sshdr.response_code, + sshdr.sense_key, sshdr.asc, sshdr.ascq); + ERR_MSG("byte4 %x byte5 %x byte6 %x additional_len %x", sshdr.byte4, + sshdr.byte5, sshdr.byte6, sshdr.additional_length); + + if (sshdr.sense_key != ILLEGAL_REQUEST) + return 0; + + spin_lock_irqsave(&hpb->hpb_lock, flags); + if (rgn->rgn_state == HPBREGION_PINNED) { + if (sshdr.asc == 0x06 && sshdr.ascq == 0x01) { + HPB_DEBUG(hpb, "retry pinned rb %d - %d", + map_req->rb.rgn_idx, map_req->rb.srgn_idx); + + list_add_tail(&map_req->list_req, + &hpb->lh_map_req_retry); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + + schedule_delayed_work(&hpb->ufshpb_retry_work, + msecs_to_jiffies(RETRY_DELAY_MS)); + return -EAGAIN; + } + HPB_DEBUG(hpb, "pinned rb %d - %d(dirty)", + map_req->rb.rgn_idx, map_req->rb.srgn_idx); + + ufshpb_error_active_subregion(hpb, srgn); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + } else { + ufshpb_error_active_subregion(hpb, srgn); + + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_update_inactive_info(hpb, map_req->rb.rgn_idx); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + HPB_DEBUG(hpb, "Non-pinned rb %d will be inactive", + map_req->rb.rgn_idx); + + schedule_work(&hpb->ufshpb_task_workq); + } + + return 0; +} + +static inline void ufshpb_mimic_blk_pm_put_request(struct request *rq) +{ + if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending) + pm_runtime_mark_last_busy(rq->q->dev); +} + +/* routine : map_req compl */ +static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error) +{ + struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data; + struct ufshpb_lu *hpb = map_req->hpb; + unsigned long flags; + int ret; + +#ifdef CONFIG_PM + ufshpb_mimic_blk_pm_put_request(req); +#endif + if (hpb->ufsf->ufshpb_state != HPB_PRESENT) + goto free_map_req; + + if (error) { + ERR_MSG("COMP_NOTI: RB number %d ( %d - %d )", error, + map_req->rb.rgn_idx, map_req->rb.srgn_idx); + + ret = ufshpb_map_req_error(map_req); + if (ret) + goto retry_map_req; + } else + ufshpb_map_compl_process(map_req); + +free_map_req: + spin_lock_irqsave(&hpb->hpb_lock, flags); + ufshpb_put_map_req(map_req->hpb, map_req); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); +retry_map_req: + scsi_device_put(hpb->ufsf->sdev_ufs_lu[hpb->lun]); + ufshpb_lu_put(hpb); +} + +static inline int ufshpb_get_scsi_device(struct ufs_hba *hba, + struct scsi_device *sdev) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(hba->host->host_lock, flags); + ret = scsi_device_get(sdev); + if (!ret && !scsi_device_online(sdev)) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + scsi_device_put(sdev); + WARNING_MSG("scsi_device_get failed.. 
ret %d", ret); + return -ENODEV; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return 0; +} + +static int ufshpb_execute_dev_ctx_req(struct ufshpb_lu *hpb, unsigned char *cdb, + void *buf, int len) +{ + struct scsi_sense_hdr sshdr; + struct scsi_device *sdev; + struct ufsf_feature *ufsf = hpb->ufsf; + int ret = 0; + + sdev = ufsf->sdev_ufs_lu[hpb->lun]; + if (!sdev) { + WARNING_MSG("cannot find scsi_device"); + return -ENODEV; + } + + ret = ufshpb_get_scsi_device(ufsf->hba, sdev); + if (ret) + return ret; + + ufsf->issue_ioctl = true; + + ret = scsi_execute(sdev, cdb, DMA_FROM_DEVICE, buf, len, NULL, &sshdr, + msecs_to_jiffies(30000), 3, 0, 0, NULL); + + ufsf->issue_ioctl = false; + + scsi_device_put(sdev); + + return ret; +} + +static inline void ufshpb_set_read_dev_ctx(unsigned char *cdb, int lba, int len) +{ + cdb[0] = READ_10; + cdb[1] = 0x02; + cdb[2] = GET_BYTE_3(lba); + cdb[3] = GET_BYTE_2(lba); + cdb[4] = GET_BYTE_1(lba); + cdb[5] = GET_BYTE_0(lba); + cdb[6] = GET_BYTE_2(len); + cdb[7] = GET_BYTE_1(len); + cdb[8] = GET_BYTE_0(len); +} + +int ufshpb_issue_req_dev_ctx(struct ufshpb_lu *hpb, unsigned char *buf, + int buf_len) +{ + unsigned char cdb[10] = { 0 }; + int cmd_len = buf_len >> OS_PAGE_SHIFT; + int ret = 0; + + ufshpb_set_read_dev_ctx(cdb, READ10_DEBUG_LBA, cmd_len); + + ret = ufshpb_execute_dev_ctx_req(hpb, cdb, buf, buf_len); + + if (ret < 0) + ERR_MSG("failed with err %d", ret); + + return ret; +} + +static inline void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx, + int srgn_idx, int srgn_mem_size) +{ + cdb[0] = UFSHPB_READ_BUFFER; + cdb[1] = UFSHPB_READ_BUFFER_ID; + cdb[2] = GET_BYTE_1(rgn_idx); + cdb[3] = GET_BYTE_0(rgn_idx); + cdb[4] = GET_BYTE_1(srgn_idx); + cdb[5] = GET_BYTE_0(srgn_idx); + cdb[6] = GET_BYTE_2(srgn_mem_size); + cdb[7] = GET_BYTE_1(srgn_mem_size); + cdb[8] = GET_BYTE_0(srgn_mem_size); + cdb[9] = 0x00; +} + +static int ufshpb_map_req_add_bio_page(struct ufshpb_lu *hpb, + struct request_queue *q, struct bio *bio, + struct ufshpb_map_ctx *mctx) +{ + struct page *page = NULL; + int i, ret = 0; + + bio_reset(bio); + + for (i = 0; i < hpb->mpages_per_srgn; i++) { + /* virt_to_page(p + (OS_PAGE_SIZE * i)); */ + page = mctx->m_page[i]; + if (!page) + return -ENOMEM; + + ret = bio_add_pc_page(q, bio, page, hpb->mpage_bytes, 0); + + if (ret != hpb->mpage_bytes) { + ERR_MSG("bio_add_pc_page fail: %d", ret); + return -ENOMEM; + } + } + + return 0; +} + +static int ufshpb_execute_map_req(struct ufshpb_lu *hpb, + struct scsi_device *sdev, + struct ufshpb_req *map_req) +{ + struct request_queue *q = sdev->request_queue; + struct request *req; + struct scsi_request *rq; + struct scsi_cmnd *scmd; + struct bio *bio = map_req->bio; + int ret; + + ret = ufshpb_map_req_add_bio_page(hpb, q, bio, map_req->rb.mctx); + if (ret) + return ret; + + req = map_req->req; + + /* + * blk_init_rl() -> alloc_request_size(). + * q->init_rq_fn = scsi_old_init_rq behavior. + */ + scmd = (struct scsi_cmnd *)(req + 1); + memset(scmd, 0, sizeof(*scmd)); + scmd->sense_buffer = map_req->sense; + scmd->req.sense = scmd->sense_buffer; + + /* blk_get_request behavior */ + blk_rq_init(q, req); + q->initialize_rq_fn(req); + + /* 1. request setup */ + blk_rq_append_bio(req, &bio); /* req->__data_len is setted */ + req->cmd_flags = REQ_OP_READ | REQ_OP_SCSI_IN; + req->rq_flags = RQF_QUIET | RQF_PREEMPT; + req->timeout = msecs_to_jiffies(30000); + req->end_io_data = (void *)map_req; + + /* 2. 
scsi_request setup */ + rq = scsi_req(req); + ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx, + map_req->rb.srgn_idx, hpb->srgn_mem_size); + rq->cmd_len = scsi_command_size(rq->cmd); + + if (hpb->debug) + ufshpb_check_ppn(hpb, map_req->rb.rgn_idx, map_req->rb.srgn_idx, + map_req->rb.mctx, "ISSUE"); + + TMSG(hpb->ufsf, hpb->lun, "Noti: I RB %d - %d", map_req->rb.rgn_idx, + map_req->rb.srgn_idx); + + blk_execute_rq_nowait(q, NULL, req, 1, ufshpb_map_req_compl_fn); + + atomic64_inc(&hpb->map_req_cnt); + + return 0; +} + +static int ufshpb_issue_map_req(struct ufshpb_lu *hpb, + struct ufshpb_req *map_req) +{ + struct scsi_device *sdev; + struct ufsf_feature *ufsf = hpb->ufsf; + int ret = 0; + + sdev = ufsf->sdev_ufs_lu[hpb->lun]; + if (!sdev) { + WARNING_MSG("cannot find scsi_device"); + return -ENODEV; + } + + ret = ufshpb_get_scsi_device(ufsf->hba, sdev); + if (ret) + return ret; + + ret = ufshpb_execute_map_req(hpb, sdev, map_req); + if (ret) + scsi_device_put(sdev); + + return ret; +} + +static inline void ufshpb_set_map_req(struct ufshpb_lu *hpb, int rgn_idx, + int srgn_idx, struct ufshpb_map_ctx *mctx, + struct ufshpb_req *map_req) +{ + map_req->hpb = hpb; + map_req->rb.rgn_idx = rgn_idx; + map_req->rb.srgn_idx = srgn_idx; + map_req->rb.mctx = mctx; + map_req->rb.lun = hpb->lun; +} + +static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb, + int *err) +{ + struct ufshpb_map_ctx *mctx; + + mctx = list_first_entry_or_null(&hpb->lh_map_ctx_free, + struct ufshpb_map_ctx, list_table); + if (mctx) { + list_del_init(&mctx->list_table); + hpb->debug_free_table--; + return mctx; + } + *err = -ENOMEM; + return NULL; +} + +static inline void ufshpb_add_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + rgn->rgn_state = HPBREGION_ACTIVE; + list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn); + atomic64_inc(&lru_info->active_cnt); +} + +static inline int ufshpb_add_region(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct victim_select_info *lru_info; + int srgn_idx; + int err = 0; + + lru_info = &hpb->lru_info; + + for (srgn_idx = 0; srgn_idx < rgn->srgn_cnt; srgn_idx++) { + struct ufshpb_subregion *srgn; + + srgn = rgn->srgn_tbl + srgn_idx; + + srgn->mctx = ufshpb_get_map_ctx(hpb, &err); + if (!srgn->mctx) { + HPB_DEBUG(hpb, "get mctx err %d srgn %d free_table %d", + err, srgn_idx, hpb->debug_free_table); + goto out; + } + + srgn->srgn_state = HPBSUBREGION_DIRTY; + } + HPB_DEBUG(hpb, "\x1b[44m\x1b[32m E->active region: %d \x1b[0m", + rgn->rgn_idx); + TMSG(hpb->ufsf, hpb->lun, "Noti: ACT RG: %d", rgn->rgn_idx); + + ufshpb_add_lru_info(lru_info, rgn); +out: + return err; +} + +static inline void ufshpb_put_map_ctx(struct ufshpb_lu *hpb, + struct ufshpb_map_ctx *mctx) +{ + list_add(&mctx->list_table, &hpb->lh_map_ctx_free); + hpb->debug_free_table++; +} + +static inline void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn, + int state) +{ + if (state == HPBSUBREGION_UNUSED) { + ufshpb_put_map_ctx(hpb, srgn->mctx); + srgn->mctx = NULL; + } + + srgn->srgn_state = state; +} + +static inline void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + list_del_init(&rgn->list_lru_rgn); + rgn->rgn_state = HPBREGION_INACTIVE; + atomic64_dec(&lru_info->active_cnt); +} + +static void __ufshpb_evict_region(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct victim_select_info *lru_info; + struct ufshpb_subregion *srgn; + int srgn_idx; + + lru_info = 
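The region cache management here is plain LRU: ufshpb_hit_lru_info() moves a hit region to the list tail, and the eviction victim is taken from the head, skipping regions that still have issued subregions. An array-based sketch of the same policy (the driver itself uses an intrusive list_head):

#include <stdio.h>

#define NR 4

int main(void)
{
	int lru[NR] = {10, 11, 12, 13};	/* head..tail, assumed region indices */
	int hit = 11, i, j;

	for (i = 0; i < NR; i++) {
		if (lru[i] != hit)
			continue;
		for (j = i; j < NR - 1; j++)	/* list_move_tail() equivalent */
			lru[j] = lru[j + 1];
		lru[NR - 1] = hit;
		break;
	}
	printf("victim=%d (head), most recent=%d (tail)\n", lru[0], lru[NR - 1]);
	return 0;
}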
&hpb->lru_info; + + HPB_DEBUG(hpb, "\x1b[41m\x1b[33m C->EVICT region: %d \x1b[0m", + rgn->rgn_idx); + TMSG(hpb->ufsf, hpb->lun, "Noti: EVIC RG: %d", rgn->rgn_idx); + + ufshpb_cleanup_lru_info(lru_info, rgn); + + for (srgn_idx = 0; srgn_idx < rgn->srgn_cnt; srgn_idx++) { + srgn = rgn->srgn_tbl + srgn_idx; + + ufshpb_purge_active_subregion(hpb, srgn, HPBSUBREGION_UNUSED); + } +} + +static void ufshpb_hit_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + switch (lru_info->selection_type) { + case LRU: + list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn); + break; + default: + break; + } +} + +/* + * Must be held hpb_lock before call this func. + */ +static int ufshpb_check_issue_state_srgns(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct ufshpb_subregion *srgn; + int srgn_idx; + + for (srgn_idx = 0; srgn_idx < rgn->srgn_cnt; srgn_idx++) { + srgn = rgn->srgn_tbl + srgn_idx; + + if (srgn->srgn_state == HPBSUBREGION_ISSUED) + return -EPERM; + } + return 0; +} + +static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb) +{ + struct victim_select_info *lru_info = &hpb->lru_info; + struct ufshpb_region *rgn; + struct ufshpb_region *victim_rgn = NULL; + + switch (lru_info->selection_type) { + case LRU: + list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) { + if (!rgn) + break; + + if (ufshpb_check_issue_state_srgns(hpb, rgn)) + continue; + + victim_rgn = rgn; + break; + } + break; + default: + break; + } + + return victim_rgn; +} + +static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) +{ + unsigned long flags; + + spin_lock_irqsave(&hpb->hpb_lock, flags); + if (rgn->rgn_state == HPBREGION_PINNED) { + /* + * Pinned active-block should not drop-out. + * But if so, it would treat error as critical, + * and it will run ufshpb_eh_work + */ + WARNING_MSG("pinned active-block drop-out error"); + goto out; + } + + if (!list_empty(&rgn->list_lru_rgn)) { + if (ufshpb_check_issue_state_srgns(hpb, rgn)) + goto evict_fail; + + __ufshpb_evict_region(hpb, rgn); + } +out: + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + return 0; +evict_fail: + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + return -EPERM; +} + +static inline struct +ufshpb_rsp_field *ufshpb_get_hpb_rsp(struct ufshcd_lrb *lrbp) +{ + return (struct ufshpb_rsp_field *)&lrbp->ucd_rsp_ptr->sr.sense_data_len; +} + +static int ufshpb_prepare_map_req(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_req *map_req; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->hpb_lock, flags); + + if (srgn->srgn_state == HPBSUBREGION_ISSUED) { + ret = -EAGAIN; + goto unlock_out; + } + + map_req = ufshpb_get_map_req(hpb); + if (!map_req) { + ret = -ENOMEM; + goto unlock_out; + } + + srgn->srgn_state = HPBSUBREGION_ISSUED; + + ret = ufshpb_clean_dirty_bitmap(hpb, srgn); + if (ret) { + ufshpb_put_map_req(hpb, map_req); + goto unlock_out; + } + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + + ufshpb_set_map_req(hpb, srgn->rgn_idx, srgn->srgn_idx, + srgn->mctx, map_req); + + ret = ufshpb_lu_get(hpb); + if (ret) { + WARNING_MSG("warning: ufshpb_lu_get failed.. %d", ret); + spin_lock_irqsave(&hpb->hpb_lock, flags); + ufshpb_put_map_req(hpb, map_req); + goto unlock_out; + } + + ret = ufshpb_issue_map_req(hpb, map_req); + if (ret) { + ERR_MSG("issue map_req failed. 
[%d-%d] err %d", + srgn->rgn_idx, srgn->srgn_idx, ret); + ufshpb_lu_put(hpb); + goto wakeup_ee_worker; + } + return ret; +unlock_out: + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + return ret; +wakeup_ee_worker: + ufshpb_failed(hpb, __func__); + return ret; +} + +static int ufshpb_load_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) +{ + struct ufshpb_region *victim_rgn; + struct victim_select_info *lru_info = &hpb->lru_info; + unsigned long flags; + int ret = 0; + + /* + * if already region is added to lru_list, + * just initiate the information of lru. + * because the region already has the map ctx. + * (!list_empty(&rgn->list_region) == region->state=active...) + */ + spin_lock_irqsave(&hpb->hpb_lock, flags); + if (!list_empty(&rgn->list_lru_rgn)) { + ufshpb_hit_lru_info(lru_info, rgn); + goto out; + } + + if (rgn->rgn_state == HPBREGION_INACTIVE) { + if (atomic64_read(&lru_info->active_cnt) + == lru_info->max_lru_active_cnt) { + victim_rgn = ufshpb_victim_lru_info(hpb); + if (!victim_rgn) { + HPB_DEBUG(hpb, "UFSHPB victim_rgn is NULL"); + ret = -ENOMEM; + goto out; + } + TMSG(hpb->ufsf, hpb->lun, "Noti: VT RG %d", + victim_rgn->rgn_idx); + HPB_DEBUG(hpb, "LRU MAX(=%ld). victim choose %d", + atomic64_read(&lru_info->active_cnt), + victim_rgn->rgn_idx); + + __ufshpb_evict_region(hpb, victim_rgn); + } + + ret = ufshpb_add_region(hpb, rgn); + if (ret) { + ERR_MSG("UFSHPB memory allocation failed"); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + goto wake_up_ee_worker; + } + } +out: + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + return ret; +wake_up_ee_worker: + ufshpb_failed(hpb, __func__); + return ret; +} + +static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb, + struct ufshpb_rsp_field *rsp_field) +{ + int num, rgn_idx, srgn_idx; + + /* + * If active rgn = inactive rgn, choose inactive rgn. 
+ * So, active process -> inactive process + */ + spin_lock(&hpb->rsp_list_lock); + for (num = 0; num < rsp_field->active_rgn_cnt; num++) { + rgn_idx = be16_to_cpu(rsp_field->hpb_active_field[num].active_rgn); + srgn_idx = be16_to_cpu(rsp_field->hpb_active_field[num].active_srgn); + + HPB_DEBUG(hpb, "act num: %d, region: %d, subregion: %d", + num + 1, rgn_idx, srgn_idx); + ufshpb_update_active_info(hpb, rgn_idx, srgn_idx); + atomic64_inc(&hpb->rb_active_cnt); + } + + for (num = 0; num < rsp_field->inactive_rgn_cnt; num++) { + rgn_idx = be16_to_cpu(rsp_field->hpb_inactive_field[num]); + HPB_DEBUG(hpb, "inact num: %d, region: %d", num + 1, rgn_idx); + ufshpb_update_inactive_info(hpb, rgn_idx); + atomic64_inc(&hpb->rb_inactive_cnt); + } + spin_unlock(&hpb->rsp_list_lock); + + TMSG(hpb->ufsf, hpb->lun, "Noti: #ACT %u, #INACT %u", + rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt); + + schedule_work(&hpb->ufshpb_task_workq); +} + +static inline int ufshpb_may_field_valid(struct ufshcd_lrb *lrbp, + struct ufshpb_rsp_field *rsp_field) +{ + if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN || + rsp_field->desc_type != DEV_DES_TYPE || + rsp_field->additional_len != DEV_ADDITIONAL_LEN || + rsp_field->hpb_type == HPB_RSP_NONE || + rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM || + rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM || + (!rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt)) + return -EINVAL; + + if (!ufsf_is_valid_lun(lrbp->lun)) { + ERR_MSG("LU(%d) is not supported", lrbp->lun); + return -EINVAL; + } + + return 0; +} + +static bool ufshpb_is_empty_rsp_lists(struct ufshpb_lu *hpb) +{ + bool ret = true; + unsigned long flags; + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn)) + ret = false; + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + return ret; +} + +/* routine : isr (ufs) */ +void ufshpb_rsp_upiu(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp) +{ + struct ufshpb_lu *hpb; + struct ufshpb_rsp_field *rsp_field; + struct ufshpb_rsp_field sense_data; + int data_seg_len, ret; + + data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) + & MASK_RSP_UPIU_DATA_SEG_LEN; + + if (!data_seg_len) { + bool do_workq = false; + + if (!ufsf_is_valid_lun(lrbp->lun)) + return; + + hpb = ufsf->ufshpb_lup[lrbp->lun]; + ret = ufshpb_lu_get(hpb); + if (ret) + return; + + do_workq = !ufshpb_is_empty_rsp_lists(hpb); + if (do_workq) + schedule_work(&hpb->ufshpb_task_workq); + + goto put_hpb; + } + + memcpy(&sense_data, &lrbp->ucd_rsp_ptr->sr.sense_data_len, + sizeof(struct ufshpb_rsp_field)); + rsp_field = &sense_data; + + if (ufshpb_may_field_valid(lrbp, rsp_field)) { + WARN_ON(rsp_field->additional_len != DEV_ADDITIONAL_LEN); + return; + } + + hpb = ufsf->ufshpb_lup[lrbp->lun]; + ret = ufshpb_lu_get(hpb); + if (ret) { + WARNING_MSG("warning: ufshpb_lu_get failed %d..", ret); + return; + } + + if (hpb->force_map_req_disable) + goto put_hpb; + + HPB_DEBUG(hpb, "**** HPB Noti %u LUN %u Seg-Len %u, #ACT %u, #INACT %u", + rsp_field->hpb_type, lrbp->lun, + be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & + MASK_RSP_UPIU_DATA_SEG_LEN, rsp_field->active_rgn_cnt, + rsp_field->inactive_rgn_cnt); + atomic64_inc(&hpb->rb_noti_cnt); + + switch (rsp_field->hpb_type) { + case HPB_RSP_REQ_REGION_UPDATE: + WARN_ON(data_seg_len != DEV_DATA_SEG_LEN); + ufshpb_rsp_req_region_update(hpb, rsp_field); + goto put_hpb; + default: + HPB_DEBUG(hpb, "hpb_type is not available : %d", + rsp_field->hpb_type); + goto 
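The region/subregion indices decoded above arrive big-endian in the sense-data area of the response UPIU, hence the be16_to_cpu() calls. A user-space sketch of the same decode; the 4-byte field layout is an assumption inferred from the code above:

#include <stdio.h>
#include <stdint.h>

static uint16_t be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);	/* be16_to_cpu() equivalent */
}

int main(void)
{
	/* one hpb_active_field entry: region 42, subregion 3 */
	uint8_t field[4] = {0x00, 0x2a, 0x00, 0x03};

	printf("activate region %u subregion %u\n",
	       (unsigned)be16(field), (unsigned)be16(field + 2));
	return 0;
}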
put_hpb; + } + +put_hpb: + ufshpb_lu_put(hpb); +} + +static int ufshpb_execute_map_req_wait(struct ufshpb_lu *hpb, + unsigned char *cmd, + struct ufshpb_subregion *srgn) +{ + struct ufsf_feature *ufsf = hpb->ufsf; + struct scsi_device *sdev; + struct request_queue *q; + struct request *req; + struct scsi_request *rq; + struct bio *bio; + struct scsi_sense_hdr sshdr = {0}; + unsigned long flags; + int ret = 0; + + sdev = ufsf->sdev_ufs_lu[hpb->lun]; + if (!sdev) { + WARNING_MSG("cannot find scsi_device"); + return -ENODEV; + } + + q = sdev->request_queue; + + ret = ufshpb_get_scsi_device(ufsf->hba, sdev); + if (ret) + return ret; + + req = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL); + if (IS_ERR(req)) { + WARNING_MSG("cannot get request"); + ret = -EIO; + goto sdev_put_out; + } + + bio = bio_kmalloc(GFP_KERNEL, hpb->mpages_per_srgn); + if (!bio) { + ret = -ENOMEM; + goto req_put_out; + } + + ret = ufshpb_map_req_add_bio_page(hpb, q, bio, srgn->mctx); + if (ret) + goto mem_free_out; + + /* 1. request setup*/ + blk_rq_append_bio(req, &bio); /* req->__data_len */ + req->timeout = msecs_to_jiffies(30000); + req->cmd_flags |= REQ_OP_READ; + req->rq_flags |= RQF_QUIET | RQF_PREEMPT; + + /* 2. scsi_request setup */ + rq = scsi_req(req); + rq->cmd_len = scsi_command_size(cmd); + memcpy(rq->cmd, cmd, rq->cmd_len); + + blk_execute_rq(q, NULL, req, 1); + if (rq->result) { + ret = -EIO; + scsi_normalize_sense(rq->sense, SCSI_SENSE_BUFFERSIZE, &sshdr); + ERR_MSG("code %x sense_key %x asc %x ascq %x", + sshdr.response_code, sshdr.sense_key, sshdr.asc, + sshdr.ascq); + ERR_MSG("byte4 %x byte5 %x byte6 %x additional_len %x", + sshdr.byte4, sshdr.byte5, sshdr.byte6, + sshdr.additional_length); + spin_lock_irqsave(&hpb->hpb_lock, flags); + ufshpb_error_active_subregion(hpb, srgn); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + } +mem_free_out: + bio_put(bio); +req_put_out: + blk_put_request(req); +sdev_put_out: + scsi_device_put(sdev); + return ret; +} + +static int ufshpb_issue_map_req_from_list(struct ufshpb_lu *hpb) +{ + struct ufshpb_subregion *srgn; + unsigned long flags; + int ret; + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + + while ((srgn = list_first_entry_or_null(&hpb->lh_pinned_srgn, + struct ufshpb_subregion, + list_act_srgn))) { + unsigned char cmd[10] = { 0 }; + + list_del_init(&srgn->list_act_srgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + ufshpb_set_read_buf_cmd(cmd, srgn->rgn_idx, srgn->srgn_idx, + hpb->srgn_mem_size); + + if (hpb->debug) + ufshpb_check_ppn(hpb, srgn->rgn_idx, srgn->srgn_idx, + srgn->mctx, "ISSUE"); + + TMSG(hpb->ufsf, hpb->lun, "Noti: I RB %d - %d", + srgn->rgn_idx, srgn->srgn_idx); + + ret = ufshpb_execute_map_req_wait(hpb, cmd, srgn); + if (ret < 0) { + ERR_MSG("region %d sub %d failed with err %d", + srgn->rgn_idx, srgn->srgn_idx, ret); + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + if (list_empty(&srgn->list_act_srgn)) + list_add(&srgn->list_act_srgn, + &hpb->lh_pinned_srgn); + continue; + } + + TMSG(hpb->ufsf, hpb->lun, "Noti: C RB %d - %d", + srgn->rgn_idx, srgn->srgn_idx); + + if (hpb->debug) + ufshpb_check_ppn(hpb, srgn->rgn_idx, srgn->srgn_idx, + srgn->mctx, "COMPL"); + + spin_lock_irqsave(&hpb->hpb_lock, flags); + ufshpb_clean_active_subregion(hpb, srgn); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + } + + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + return 0; +} + +static void ufshpb_work_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb; + int ret; 
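For comparison with the WRITE BUFFER sketch earlier, the HPB READ BUFFER CDB issued by the map-request path carries 16-bit region/subregion indices and a 24-bit allocation length; the opcode and buffer-ID values are again assumptions:

#include <stdio.h>

#define UFSHPB_READ_BUFFER	0xf9	/* assumed opcode */
#define UFSHPB_READ_BUFFER_ID	0x01	/* assumed buffer ID */
#define BYTE_OF(x, n) ((unsigned char)(((x) >> (8 * (n))) & 0xff))

int main(void)
{
	int rgn_idx = 42, srgn_idx = 3, srgn_mem_size = 4096 * 8, i;
	unsigned char cdb[10] = {
		UFSHPB_READ_BUFFER, UFSHPB_READ_BUFFER_ID,
		BYTE_OF(rgn_idx, 1), BYTE_OF(rgn_idx, 0),
		BYTE_OF(srgn_idx, 1), BYTE_OF(srgn_idx, 0),
		BYTE_OF(srgn_mem_size, 2), BYTE_OF(srgn_mem_size, 1),
		BYTE_OF(srgn_mem_size, 0),
		0x00,				/* CONTROL */
	};

	for (i = 0; i < 10; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}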
+ + hpb = container_of(work, struct ufshpb_lu, ufshpb_work); + HPB_DEBUG(hpb, "worker start for pinned region"); + + if (!list_empty(&hpb->lh_pinned_srgn)) { + ret = ufshpb_issue_map_req_from_list(hpb); + /* + * if its function failed at init time, + * ufshpb-device will request map-req, + * so it is not critical-error, and just finish work-handler + */ + if (ret) + HPB_DEBUG(hpb, "failed map-issue. ret %d", ret); + } + + HPB_DEBUG(hpb, "worker end"); +} + +static int ufshpb_check_pm(struct ufshpb_lu *hpb) +{ + struct ufs_hba *hba = hpb->ufsf->hba; + + if (hba->pm_op_in_progress || + hba->curr_dev_pwr_mode != UFS_ACTIVE_PWR_MODE) { + INFO_MSG("hba current power state %d pm_progress %d", + hba->curr_dev_pwr_mode, + hba->pm_op_in_progress); + return -ENODEV; + } + return 0; +} + +static void ufshpb_retry_work_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb; + struct delayed_work *dwork = to_delayed_work(work); + struct ufshpb_req *map_req, *next; + unsigned long flags; + int ret = 0; + + LIST_HEAD(retry_list); + + hpb = container_of(dwork, struct ufshpb_lu, ufshpb_retry_work); + + if (ufshpb_check_pm(hpb)) + return; + + ret = ufshpb_lu_get(hpb); + if (ret) { + WARNING_MSG("warning: ufshpb_lu_get failed %d..", ret); + return; + } + + HPB_DEBUG(hpb, "retry worker start"); + + spin_lock_irqsave(&hpb->hpb_lock, flags); + list_splice_init(&hpb->lh_map_req_retry, &retry_list); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + + list_for_each_entry_safe(map_req, next, &retry_list, list_req) { + list_del_init(&map_req->list_req); + + ret = ufshpb_lu_get(hpb); + if (ret) { + WARNING_MSG("warning: ufshpb_lu_get failed %d..", ret); + spin_lock_irqsave(&hpb->hpb_lock, flags); + ufshpb_put_map_req(hpb, map_req); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + continue; + } + + ret = ufshpb_issue_map_req(hpb, map_req); + if (ret) { + ERR_MSG("issue map_req failed. 
[%d-%d] err %d", + map_req->rb.rgn_idx, map_req->rb.srgn_idx, ret); + ufshpb_lu_put(hpb); + goto wakeup_ee_worker; + } + } + HPB_DEBUG(hpb, "worker end"); + ufshpb_lu_put(hpb); + return; +wakeup_ee_worker: + ufshpb_lu_put(hpb); + ufshpb_failed(hpb, __func__); +} + +static void ufshpb_add_starved_list(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct list_head *starved_list) +{ + struct ufshpb_subregion *srgn; + int srgn_idx; + + if (!list_empty(&rgn->list_inact_rgn)) + return; + + for (srgn_idx = 0; srgn_idx < rgn->srgn_cnt; srgn_idx++) { + srgn = rgn->srgn_tbl + srgn_idx; + + if (!list_empty(&srgn->list_act_srgn)) + return; + } + + list_add_tail(&rgn->list_inact_rgn, starved_list); +} + +static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn; + unsigned long flags; + int ret; + LIST_HEAD(starved_list); + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn, + struct ufshpb_region, + list_inact_rgn))) { + list_del_init(&rgn->list_inact_rgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + ret = ufshpb_evict_region(hpb, rgn); + if (ret) { + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_add_starved_list(hpb, rgn, &starved_list); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + } + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + } + + list_splice(&starved_list, &hpb->lh_inact_rgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_add_active_list(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + if (!list_empty(&rgn->list_inact_rgn)) + return; + + if (!list_empty(&srgn->list_act_srgn)) { + list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn); + return; + } + + list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn); +} + +static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn, + struct ufshpb_subregion, + list_act_srgn))) { + list_del_init(&srgn->list_act_srgn); + + if (hpb->force_map_req_disable) { + HPB_DEBUG(hpb, "map_req disabled"); + continue; + } + + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + + ret = ufshpb_load_region(hpb, rgn); + if (ret) + break; + + ret = ufshpb_prepare_map_req(hpb, srgn); + if (ret) + break; + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + } + + if (ret) { + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_add_active_list(hpb, rgn, srgn); + } + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_task_workq_fn(struct work_struct *work) +{ + struct ufshpb_lu *hpb; + int ret; + + hpb = container_of(work, struct ufshpb_lu, ufshpb_task_workq); + ret = ufshpb_lu_get(hpb); + if (ret) { + WARNING_MSG("warning: ufshpb_lu_get failed %d..", ret); + return; + } + + ufshpb_run_inactive_region_list(hpb); + ufshpb_run_active_subregion_list(hpb); + + ufshpb_lu_put(hpb); +} + +static void ufshpb_init_constant(void) +{ + sects_per_blk_shift = ffs(BLOCK) - ffs(SECTOR); + INIT_INFO("sects_per_blk_shift: %u %u", sects_per_blk_shift, + ffs(SECTORS_PER_BLOCK) - 1); + + bits_per_dword_shift = ffs(BITS_PER_DWORD) - 1; + bits_per_dword_mask = BITS_PER_DWORD - 1; + INIT_INFO("bits_per_dword %u shift %u mask 0x%X", BITS_PER_DWORD, + bits_per_dword_shift, bits_per_dword_mask); + + 
bits_per_byte_shift = ffs(BITS_PER_BYTE) - 1; + INIT_INFO("bits_per_byte %u shift %u", BITS_PER_BYTE, + bits_per_byte_shift); +} + +static inline void ufshpb_map_req_mempool_remove(struct ufshpb_lu *hpb) +{ + int i; + + for (i = 0; i < hpb->qd; i++) { + kfree(hpb->map_req[i].req); + bio_put(hpb->map_req[i].bio); + } + + kfree(hpb->map_req); +} + +static inline void ufshpb_pre_req_mempool_remove(struct ufshpb_lu *hpb) +{ + int i; + + for (i = 0; i < hpb->qd; i++) { + kfree(hpb->pre_req[i].req); + bio_put(hpb->pre_req[i].bio); + __free_page(hpb->pre_req[i].wb.m_page); + } + + kfree(hpb->pre_req); +} + +static void ufshpb_table_mempool_remove(struct ufshpb_lu *hpb) +{ + struct ufshpb_map_ctx *mctx, *next; + int i; + + /* + * the mctx in the lh_map_ctx_free has been allocated completely. + */ + list_for_each_entry_safe(mctx, next, &hpb->lh_map_ctx_free, + list_table) { + for (i = 0; i < hpb->mpages_per_srgn; i++) + __free_page(mctx->m_page[i]); + + vfree(mctx->ppn_dirty); + kfree(mctx->m_page); + kfree(mctx); + hpb->alloc_mctx--; + } +} + +/* + * this function doesn't need to hold lock due to be called in init. + * (hpb_lock, rsp_list_lock, etc..) + */ +static int ufshpb_init_pinned_active_region(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct ufshpb_subregion *srgn; + int srgn_idx, j; + int err = 0; + + for (srgn_idx = 0; srgn_idx < rgn->srgn_cnt; srgn_idx++) { + srgn = rgn->srgn_tbl + srgn_idx; + + srgn->mctx = ufshpb_get_map_ctx(hpb, &err); + if (err) { + ERR_MSG("get mctx err %d srgn %d free_table %d", + err, srgn_idx, hpb->debug_free_table); + goto release; + } + + srgn->srgn_state = HPBSUBREGION_ISSUED; + ufshpb_clean_dirty_bitmap(hpb, srgn); + list_add_tail(&srgn->list_act_srgn, &hpb->lh_pinned_srgn); + } + + rgn->rgn_state = HPBREGION_PINNED; + + return 0; + +release: + for (j = 0; j < srgn_idx; j++) { + srgn = rgn->srgn_tbl + j; + ufshpb_put_map_ctx(hpb, srgn->mctx); + } + + return err; +} + +static inline bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx) +{ + if (hpb->lu_pinned_end_offset != -1 && + rgn_idx >= hpb->lu_pinned_rgn_startidx && + rgn_idx <= hpb->lu_pinned_end_offset) + return true; + + return false; +} + +static inline void ufshpb_init_jobs(struct ufshpb_lu *hpb) +{ + INIT_WORK(&hpb->ufshpb_work, ufshpb_work_handler); + INIT_DELAYED_WORK(&hpb->ufshpb_retry_work, ufshpb_retry_work_handler); + INIT_WORK(&hpb->ufshpb_task_workq, ufshpb_task_workq_fn); +} + +static inline void ufshpb_cancel_jobs(struct ufshpb_lu *hpb) +{ + cancel_work_sync(&hpb->ufshpb_work); + cancel_delayed_work_sync(&hpb->ufshpb_retry_work); + cancel_work_sync(&hpb->ufshpb_task_workq); +} + +static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + int srgn_idx; + + for (srgn_idx = 0; srgn_idx < rgn->srgn_cnt; srgn_idx++) { + struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx; + + INIT_LIST_HEAD(&srgn->list_act_srgn); + + srgn->rgn_idx = rgn->rgn_idx; + srgn->srgn_idx = srgn_idx; + srgn->srgn_state = HPBSUBREGION_UNUSED; + } +} + +static inline int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + int srgn_cnt) +{ + rgn->srgn_tbl = + kzalloc(sizeof(struct ufshpb_subregion) * srgn_cnt, GFP_KERNEL); + if (!rgn->srgn_tbl) + return -ENOMEM; + + rgn->srgn_cnt = srgn_cnt; + return 0; +} + +static int ufshpb_table_mempool_init(struct ufshpb_lu *hpb) +{ + struct ufshpb_map_ctx *mctx = NULL; + int i, j, k; + + INIT_LIST_HEAD(&hpb->lh_map_ctx_free); + + hpb->alloc_mctx = hpb->lu_max_active_rgns * 
hpb->srgns_per_rgn;
+
+ for (i = 0; i < hpb->alloc_mctx; i++) {
+ mctx = kmalloc(sizeof(struct ufshpb_map_ctx), GFP_KERNEL);
+ if (!mctx)
+ goto release_mem;
+
+ mctx->m_page =
+ kzalloc(sizeof(struct page *) * hpb->mpages_per_srgn,
+ GFP_KERNEL);
+ if (!mctx->m_page)
+ goto release_mem;
+
+ mctx->ppn_dirty =
+ vzalloc(hpb->entries_per_srgn >> bits_per_byte_shift);
+ if (!mctx->ppn_dirty)
+ goto release_mem;
+
+ for (j = 0; j < hpb->mpages_per_srgn; j++) {
+ mctx->m_page[j] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!mctx->m_page[j]) {
+ /* pages come from alloc_page(), so release with __free_page() */
+ for (k = 0; k < j; k++)
+ __free_page(mctx->m_page[k]);
+ goto release_mem;
+ }
+ }
+
+ INIT_LIST_HEAD(&mctx->list_table);
+ list_add(&mctx->list_table, &hpb->lh_map_ctx_free);
+
+ hpb->debug_free_table++;
+ }
+
+ INIT_INFO("The number of mctx = %d. debug_free_table %d",
+ hpb->alloc_mctx, hpb->debug_free_table);
+ return 0;
+release_mem:
+ /*
+ * mctxs already added to lh_map_ctx_free will be removed
+ * by the caller.
+ */
+ if (mctx) {
+ kfree(mctx->m_page);
+ vfree(mctx->ppn_dirty);
+ kfree(mctx);
+ }
+ return -ENOMEM;
+}
+
+static int
+ufshpb_map_req_mempool_init(struct ufshpb_lu *hpb)
+{
+ struct scsi_device *sdev;
+ struct request_queue *q;
+ struct ufshpb_req *map_req = NULL;
+ int qd = hpb->qd;
+ int i, j;
+
+ sdev = hpb->ufsf->sdev_ufs_lu[hpb->lun];
+ q = sdev->request_queue;
+
+ INIT_LIST_HEAD(&hpb->lh_map_req_free);
+ INIT_LIST_HEAD(&hpb->lh_map_req_retry);
+
+ hpb->map_req = kzalloc(sizeof(struct ufshpb_req) * qd, GFP_KERNEL);
+ if (!hpb->map_req)
+ goto release_mem;
+
+ /*
+ * q->cmd_size: sizeof(struct scsi_cmnd) + shost->hostt->cmd_size
+ */
+ for (i = 0; i < qd; i++) {
+ map_req = hpb->map_req + i;
+ INIT_LIST_HEAD(&map_req->list_req);
+ map_req->req = kzalloc(sizeof(struct request) + q->cmd_size,
+ GFP_KERNEL);
+ if (!map_req->req) {
+ for (j = 0; j < i; j++)
+ kfree(hpb->map_req[j].req);
+ goto release_mem;
+ }
+
+ map_req->bio = bio_kmalloc(GFP_KERNEL, hpb->mpages_per_srgn);
+ if (!map_req->bio) {
+ kfree(hpb->map_req[i].req);
+ for (j = 0; j < i; j++) {
+ kfree(hpb->map_req[j].req);
+ bio_put(hpb->map_req[j].bio);
+ }
+ goto release_mem;
+ }
+ list_add_tail(&map_req->list_req, &hpb->lh_map_req_free);
+ }
+
+ return 0;
+release_mem:
+ kfree(hpb->map_req);
+ return -ENOMEM;
+}
+
+static int
+ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
+{
+ struct scsi_device *sdev;
+ struct request_queue *q;
+ struct ufshpb_req *pre_req = NULL;
+ int qd = hpb->qd;
+ int i, j;
+
+ INIT_LIST_HEAD(&hpb->lh_pre_req_free);
+ INIT_LIST_HEAD(&hpb->lh_pre_req_dummy);
+
+ sdev = hpb->ufsf->sdev_ufs_lu[hpb->lun];
+ q = sdev->request_queue;
+
+ hpb->pre_req = kzalloc(sizeof(struct ufshpb_req) * qd, GFP_KERNEL);
+ if (!hpb->pre_req)
+ goto release_mem;
+
+ /*
+ * q->cmd_size: sizeof(struct scsi_cmnd) + shost->hostt->cmd_size
+ */
+ for (i = 0; i < qd; i++) {
+ pre_req = hpb->pre_req + i;
+ INIT_LIST_HEAD(&pre_req->list_req);
+ pre_req->req = kzalloc(sizeof(struct request) + q->cmd_size,
+ GFP_KERNEL);
+ if (!pre_req->req) {
+ for (j = 0; j < i; j++)
+ kfree(hpb->pre_req[j].req);
+ goto release_mem;
+ }
+
+ pre_req->bio = bio_kmalloc(GFP_KERNEL, 1);
+ if (!pre_req->bio) {
+ kfree(hpb->pre_req[i].req);
+ for (j = 0; j < i; j++) {
+ kfree(hpb->pre_req[j].req);
+ bio_put(hpb->pre_req[j].bio);
+ }
+ goto release_mem;
+ }
+
+ pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pre_req->wb.m_page) {
+ kfree(hpb->pre_req[i].req);
+ bio_put(hpb->pre_req[i].bio);
+ for (j = 0; j < i; j++) {
+ kfree(hpb->pre_req[j].req);
+ bio_put(hpb->pre_req[j].bio);
+
__free_page(hpb->pre_req[j].wb.m_page); + } + goto release_mem; + } + list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free); + } + + return 0; +release_mem: + kfree(hpb->pre_req); + return -ENOMEM; +} + +static void ufshpb_find_lu_qd(struct ufshpb_lu *hpb) +{ + struct scsi_device *sdev; + struct ufs_hba *hba; + + sdev = hpb->ufsf->sdev_ufs_lu[hpb->lun]; + hba = hpb->ufsf->hba; + + /* + * ufshcd_slave_alloc(sdev) -> ufshcd_set_queue_depth(sdev) + * a lu-queue-depth compared with lu_info and hba->nutrs + * is selected in ufshcd_set_queue_depth() + */ + hpb->qd = sdev->queue_depth; + INIT_INFO("lu %d queue_depth %d", hpb->lun, hpb->qd); + if (!hpb->qd) { + hpb->qd = hba->nutrs; + INIT_INFO("lu_queue_depth is 0. we use device's queue info."); + INIT_INFO("hba->nutrs = %d", hba->nutrs); + } + + hpb->throttle_map_req = hpb->qd; + hpb->throttle_pre_req = hpb->qd; + hpb->num_inflight_map_req = 0; + hpb->num_inflight_pre_req = 0; +} + +static void ufshpb_init_lu_constant(struct ufshpb_dev_info *hpb_dev_info, + struct ufshpb_lu *hpb) +{ + unsigned long long rgn_unit_size, rgn_mem_size; + int entries_per_rgn; + + hpb->debug = false; + + ufshpb_find_lu_qd(hpb); + + /* for pre_req */ + hpb->pre_req_min_tr_len = HPB_MULTI_CHUNK_LOW; + hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH; + hpb->ctx_id_ticket = 0; + + /* From descriptors */ + rgn_unit_size = (unsigned long long) + SECTOR * (0x01 << hpb_dev_info->hpb_rgn_size); + rgn_mem_size = rgn_unit_size / BLOCK * HPB_ENTRY_SIZE; + + hpb->srgn_unit_size = (unsigned long long) + SECTOR * (0x01 << hpb_dev_info->hpb_srgn_size); + hpb->srgn_mem_size = + hpb->srgn_unit_size / BLOCK * HPB_ENTRY_SIZE; + + hpb->hpb_ver = hpb_dev_info->hpb_ver; + + /* relation : lu <-> region <-> sub region <-> entry */ + entries_per_rgn = rgn_mem_size / HPB_ENTRY_SIZE; + hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE; +#if BITS_PER_LONG == 32 + hpb->srgns_per_rgn = div_u64(rgn_mem_size, hpb->srgn_mem_size); + + /* + * regions_per_lu = (lu_num_blocks * 4096) / region_unit_size + * = (lu_num_blocks * HPB_ENTRY_SIZE) / region_mem_size + */ + hpb->rgns_per_lu = + div_u64(((unsigned long long)hpb->lu_num_blocks + + (rgn_mem_size / HPB_ENTRY_SIZE) - 1), + (rgn_mem_size / HPB_ENTRY_SIZE)); + hpb->srgns_per_lu = + div_u64(((unsigned long long)hpb->lu_num_blocks + + (hpb->srgn_mem_size / HPB_ENTRY_SIZE) - 1), + (hpb->srgn_mem_size / HPB_ENTRY_SIZE)); +#else + hpb->srgns_per_rgn = rgn_mem_size / hpb->srgn_mem_size; + + /* + * regions_per_lu = (lu_num_blocks * 4096) / region_unit_size + * = (lu_num_blocks * HPB_ENTRY_SIZE) / region_mem_size + */ + hpb->rgns_per_lu = + ((unsigned long long)hpb->lu_num_blocks + + (rgn_mem_size / HPB_ENTRY_SIZE) - 1) + / (rgn_mem_size / HPB_ENTRY_SIZE); + hpb->srgns_per_lu = + ((unsigned long long)hpb->lu_num_blocks + + (hpb->srgn_mem_size / HPB_ENTRY_SIZE) - 1) + / (hpb->srgn_mem_size / HPB_ENTRY_SIZE); +#endif + + /* mempool info */ + hpb->mpage_bytes = OS_PAGE_SIZE; + hpb->mpages_per_srgn = hpb->srgn_mem_size / hpb->mpage_bytes; + + /* Bitmask Info. */ + hpb->dwords_per_srgn = hpb->entries_per_srgn / BITS_PER_DWORD; + hpb->entries_per_rgn_shift = ffs(entries_per_rgn) - 1; + hpb->entries_per_rgn_mask = entries_per_rgn - 1; + hpb->entries_per_srgn_shift = ffs(hpb->entries_per_srgn) - 1; + hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1; + + INIT_INFO("===== From Device Descriptor! 
====="); + INIT_INFO("hpb_region_size = %d, hpb_subregion_size = %d", + hpb_dev_info->hpb_rgn_size, + hpb_dev_info->hpb_srgn_size); + INIT_INFO("===== Constant Values(LU) ====="); + INIT_INFO("region_unit_size = %lld, region_mem_size %lld", + rgn_unit_size, rgn_mem_size); + INIT_INFO("subregion_unit_size = %lld, subregion_mem_size %d", + hpb->srgn_unit_size, hpb->srgn_mem_size); + + INIT_INFO("lu_num_blocks = %d", hpb->lu_num_blocks); + INIT_INFO("regions_per_lu = %d, subregions_per_lu = %d", + hpb->rgns_per_lu, hpb->srgns_per_lu); + + INIT_INFO("subregions_per_region = %d", hpb->srgns_per_rgn); + INIT_INFO("entries_per_region %u shift %u mask 0x%X", + entries_per_rgn, hpb->entries_per_rgn_shift, + hpb->entries_per_rgn_mask); + INIT_INFO("entries_per_subregion %u shift %u mask 0x%X", + hpb->entries_per_srgn, hpb->entries_per_srgn_shift, + hpb->entries_per_srgn_mask); + INIT_INFO("mpages_per_subregion : %d", hpb->mpages_per_srgn); + INIT_INFO("===================================\n"); +} + +static int ufshpb_lu_hpb_init(struct ufsf_feature *ufsf, u8 lun) +{ + struct ufshpb_lu *hpb = ufsf->ufshpb_lup[lun]; + struct ufshpb_region *rgn_table, *rgn; + struct ufshpb_subregion *srgn; + int rgn_idx, srgn_idx, total_srgn_cnt, srgn_cnt, i, ret = 0; + bool do_work_handler = false; + + ufshpb_init_lu_constant(&ufsf->hpb_dev_info, hpb); + + rgn_table = kzalloc(sizeof(struct ufshpb_region) * hpb->rgns_per_lu, + GFP_KERNEL); + if (!rgn_table) { + ret = -ENOMEM; + goto out; + } + + INIT_INFO("active_region_table bytes: %lu", + (sizeof(struct ufshpb_region) * hpb->rgns_per_lu)); + + hpb->rgn_tbl = rgn_table; + + spin_lock_init(&hpb->hpb_lock); + spin_lock_init(&hpb->rsp_list_lock); + + /* init lru information */ + INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn); + hpb->lru_info.selection_type = LRU; + + INIT_LIST_HEAD(&hpb->lh_pinned_srgn); + INIT_LIST_HEAD(&hpb->lh_act_srgn); + INIT_LIST_HEAD(&hpb->lh_inact_rgn); + + INIT_LIST_HEAD(&hpb->lh_map_ctx_free); + + ufshpb_init_jobs(hpb); + + ret = ufshpb_map_req_mempool_init(hpb); + if (ret) { + ERR_MSG("map_req_mempool init fail!"); + goto release_rgn_table; + } + + ret = ufshpb_pre_req_mempool_init(hpb); + if (ret) { + ERR_MSG("pre_req_mempool init fail!"); + goto release_map_req_mempool; + } + + ret = ufshpb_table_mempool_init(hpb); + if (ret) { + ERR_MSG("ppn table mempool init fail!"); + ufshpb_table_mempool_remove(hpb); + goto release_pre_req_mempool; + } + + total_srgn_cnt = hpb->srgns_per_lu; + INIT_INFO("total_subregion_count: %d", total_srgn_cnt); + for (rgn_idx = 0, srgn_cnt = 0; rgn_idx < hpb->rgns_per_lu; + rgn_idx++, total_srgn_cnt -= srgn_cnt) { + rgn = rgn_table + rgn_idx; + rgn->rgn_idx = rgn_idx; + + INIT_LIST_HEAD(&rgn->list_inact_rgn); + + /* init lru region information*/ + INIT_LIST_HEAD(&rgn->list_lru_rgn); + + srgn_cnt = min(total_srgn_cnt, hpb->srgns_per_rgn); + + ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt); + if (ret) + goto release_srgns; + ufshpb_init_subregion_tbl(hpb, rgn); + + if (ufshpb_is_pinned_region(hpb, rgn_idx)) { + ret = ufshpb_init_pinned_active_region(hpb, rgn); + if (ret) + goto release_srgns; + + do_work_handler = true; + } else { + rgn->rgn_state = HPBREGION_INACTIVE; + } + } + + if (total_srgn_cnt != 0) { + ERR_MSG("error total_subregion_count: %d", + total_srgn_cnt); + goto release_srgns; + } + + if (do_work_handler) + schedule_work(&hpb->ufshpb_work); + + /* + * even if creating sysfs failed, ufshpb could run normally. 
+ * so we don't deal with error handling + */ + ufshpb_create_sysfs(ufsf, hpb); + + return 0; +release_srgns: + for (i = 0; i < rgn_idx; i++) { + rgn = rgn_table + i; + if (rgn->srgn_tbl) { + for (srgn_idx = 0; srgn_idx < rgn->srgn_cnt; + srgn_idx++) { + srgn = rgn->srgn_tbl + srgn_idx; + if (srgn->mctx) + ufshpb_put_map_ctx(hpb, srgn->mctx); + } + kfree(rgn->srgn_tbl); + } + } + + ufshpb_table_mempool_remove(hpb); +release_pre_req_mempool: + ufshpb_pre_req_mempool_remove(hpb); +release_map_req_mempool: + ufshpb_map_req_mempool_remove(hpb); +release_rgn_table: + kfree(rgn_table); +out: + return ret; +} + +static inline int ufshpb_version_check(struct ufshpb_dev_info *hpb_dev_info) +{ + if (hpb_dev_info->hpb_ver < UFSHPB_VER) { + INIT_INFO("Driver = %.2x %.2x, Device = %.2x %.2x", + GET_BYTE_1(UFSHPB_VER), GET_BYTE_0(UFSHPB_VER), + GET_BYTE_1(hpb_dev_info->hpb_ver), + GET_BYTE_0(hpb_dev_info->hpb_ver)); + return -ENODEV; + } + return 0; +} + +void ufshpb_get_dev_info(struct ufshpb_dev_info *hpb_dev_info, u8 *desc_buf) +{ + int ret; + + hpb_dev_info->hpb_device = false; + + if (desc_buf[DEVICE_DESC_PARAM_UFS_FEAT] & UFS_FEATURE_SUPPORT_HPB_BIT) + INIT_INFO("bUFSFeaturesSupport: HPB is set"); + else { + INIT_INFO("bUFSFeaturesSupport: HPB not support"); + return; + } + + hpb_dev_info->hpb_ver = LI_EN_16(desc_buf + DEVICE_DESC_PARAM_HPB_VER); + + ret = ufshpb_version_check(hpb_dev_info); + if (!ret) + hpb_dev_info->hpb_device = true; +} + +void ufshpb_get_geo_info(struct ufshpb_dev_info *hpb_dev_info, u8 *geo_buf) +{ + hpb_dev_info->hpb_number_lu = geo_buf[GEOMETRY_DESC_HPB_NUMBER_LU]; + if (hpb_dev_info->hpb_number_lu == 0) { + ERR_MSG("Don't have a lu for hpb."); + hpb_dev_info->hpb_device = false; + return; + } + + hpb_dev_info->hpb_rgn_size = geo_buf[GEOMETRY_DESC_HPB_REGION_SIZE]; + hpb_dev_info->hpb_srgn_size = geo_buf[GEOMETRY_DESC_HPB_SUBREGION_SIZE]; + hpb_dev_info->hpb_device_max_active_rgns = + LI_EN_16(geo_buf + GEOMETRY_DESC_HPB_DEVICE_MAX_ACTIVE_REGIONS); + + INIT_INFO("[48] bHPBRegionSiz %u", hpb_dev_info->hpb_rgn_size); + INIT_INFO("[49] bHPBNumberLU %u", hpb_dev_info->hpb_number_lu); + INIT_INFO("[4A] bHPBSubRegionSize %u", hpb_dev_info->hpb_srgn_size); + INIT_INFO("[4B:4C] wDeviceMaxActiveHPBRegions %u", + hpb_dev_info->hpb_device_max_active_rgns); + + ufshpb_init_constant(); +} + +int ufshpb_get_lu_info(struct ufsf_feature *ufsf, u8 lun, u8 *unit_buf) +{ + struct ufsf_lu_desc lu_desc; + struct ufshpb_lu *hpb; + + lu_desc.lu_enable = unit_buf[UNIT_DESC_PARAM_LU_ENABLE]; + lu_desc.lu_queue_depth = unit_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]; + lu_desc.lu_logblk_size = unit_buf[UNIT_DESC_PARAM_LOGICAL_BLK_SIZE]; + lu_desc.lu_logblk_cnt = + LI_EN_64(unit_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT); + lu_desc.lu_max_active_hpb_rgns = + LI_EN_16(unit_buf + UNIT_DESC_HPB_LU_MAX_ACTIVE_REGIONS); + lu_desc.lu_hpb_pinned_rgn_startidx = + LI_EN_16(unit_buf + UNIT_DESC_HPB_LU_PIN_REGION_START_OFFSET); + lu_desc.lu_num_hpb_pinned_rgns = + LI_EN_16(unit_buf + UNIT_DESC_HPB_LU_NUM_PIN_REGIONS); + + if (lu_desc.lu_num_hpb_pinned_rgns > 0) { + lu_desc.lu_hpb_pinned_end_offset = + lu_desc.lu_hpb_pinned_rgn_startidx + + lu_desc.lu_num_hpb_pinned_rgns - 1; + } else + lu_desc.lu_hpb_pinned_end_offset = -1; + + INIT_INFO("LUN(%d) [0A] bLogicalBlockSize %d", + lun, lu_desc.lu_logblk_size); + INIT_INFO("LUN(%d) [0B] qLogicalBlockCount %llu", + lun, lu_desc.lu_logblk_cnt); + INIT_INFO("LUN(%d) [03] bLuEnable %d", lun, lu_desc.lu_enable); + INIT_INFO("LUN(%d) [06] bLuQueueDepth %d", lun, 
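The LI_EN_16/LI_EN_64 reads above are assumed to be byte-wise big-endian loads (UFS descriptors are big-endian); the pinned-region range then follows directly from the start index and count, as in the lu_hpb_pinned_end_offset computation. A sketch:

#include <stdio.h>
#include <stdint.h>

static uint16_t li_en_16(const uint8_t *p)	/* assumed helper shape */
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	uint8_t start_buf[2] = {0x00, 0x04};	/* wHPBPinnedRegionStartIdx = 4 */
	uint8_t num_buf[2] = {0x00, 0x08};	/* wNumHPBPinnedRegions = 8 */
	int start = li_en_16(start_buf);
	int num = li_en_16(num_buf);

	/* end offset = start + num - 1, or -1 when nothing is pinned */
	printf("pinned regions %d..%d\n", start, num ? start + num - 1 : -1);
	return 0;
}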
lu_desc.lu_queue_depth); + INIT_INFO("LUN(%d) [23:24] wLUMaxActiveHPBRegions %d", + lun, lu_desc.lu_max_active_hpb_rgns); + INIT_INFO("LUN(%d) [25:26] wHPBPinnedRegionStartIdx %d", + lun, lu_desc.lu_hpb_pinned_rgn_startidx); + INIT_INFO("LUN(%d) [27:28] wNumHPBPinnedRegions %d", + lun, lu_desc.lu_num_hpb_pinned_rgns); + INIT_INFO("LUN(%d) PINNED Start %d End %d", + lun, lu_desc.lu_hpb_pinned_rgn_startidx, + lu_desc.lu_hpb_pinned_end_offset); + + ufsf->ufshpb_lup[lun] = NULL; + + if (lu_desc.lu_enable == 0x02) { + ufsf->ufshpb_lup[lun] = kzalloc(sizeof(struct ufshpb_lu), + GFP_KERNEL); + if (!ufsf->ufshpb_lup[lun]) + return -ENOMEM; + + hpb = ufsf->ufshpb_lup[lun]; + hpb->ufsf = ufsf; + hpb->lun = lun; + hpb->lu_num_blocks = lu_desc.lu_logblk_cnt; + hpb->lu_max_active_rgns = lu_desc.lu_max_active_hpb_rgns; + hpb->lru_info.max_lru_active_cnt = + lu_desc.lu_max_active_hpb_rgns - + lu_desc.lu_num_hpb_pinned_rgns; + hpb->lu_pinned_rgn_startidx = + lu_desc.lu_hpb_pinned_rgn_startidx; + hpb->lu_pinned_end_offset = lu_desc.lu_hpb_pinned_end_offset; + } else { + INIT_INFO("===== LU %d is hpb-disabled.", lun); + return -ENODEV; + } + + return 0; +} + +static void ufshpb_error_handler(struct work_struct *work) +{ + struct ufsf_feature *ufsf; + + ufsf = container_of(work, struct ufsf_feature, ufshpb_eh_work); + + WARNING_MSG("driver has failed. but UFSHCD can run without UFSHPB"); + WARNING_MSG("UFSHPB will be removed from the kernel"); + + ufshpb_release(ufsf, HPB_FAILED); +} + +static int ufshpb_init(struct ufsf_feature *ufsf) +{ + int lun, ret; + int hpb_enabled_lun = 0; + + seq_scan_lu(lun) { + if (!ufsf->ufshpb_lup[lun]) + continue; + + /* + * HPB need info about request queue in order to issue + * RB-CMD for pinned region. + */ + if (!ufsf->sdev_ufs_lu[lun]) { + WARNING_MSG("warn: lun %d don't have scsi_device", lun); + continue; + } + + ret = ufshpb_lu_hpb_init(ufsf, lun); + if (ret) { + if (ret == -ENODEV) + continue; + else + goto out_free_mem; + } + hpb_enabled_lun++; + } + + if (hpb_enabled_lun == 0) { + ERR_MSG("No UFSHPB LU to init"); + ret = -ENODEV; + goto out_free_mem; + } + + INIT_WORK(&ufsf->ufshpb_reset_work, ufshpb_reset_handler); + INIT_WORK(&ufsf->ufshpb_eh_work, ufshpb_error_handler); + + kref_init(&ufsf->ufshpb_kref); + ufsf->ufshpb_state = HPB_PRESENT; + ufsf->issue_ioctl = false; + + seq_scan_lu(lun) + if (ufsf->ufshpb_lup[lun]) + INFO_MSG("UFSHPB LU %d working", lun); + + return 0; +out_free_mem: + seq_scan_lu(lun) + kfree(ufsf->ufshpb_lup[lun]); + + ufsf->ufshpb_state = HPB_NOT_SUPPORTED; + return ret; +} + +static void ufshpb_purge_active_region(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + unsigned long flags; + int rgn_idx, srgn_idx, state; + + RELEASE_INFO("Start"); + spin_lock_irqsave(&hpb->hpb_lock, flags); + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + rgn = hpb->rgn_tbl + rgn_idx; + + if (rgn->rgn_state == HPBREGION_INACTIVE) + continue; + + if (rgn->rgn_state == HPBREGION_PINNED) + state = HPBSUBREGION_DIRTY; + else if (rgn->rgn_state == HPBREGION_ACTIVE) { + state = HPBSUBREGION_UNUSED; + ufshpb_cleanup_lru_info(&hpb->lru_info, rgn); + } else + continue; + + for (srgn_idx = 0; srgn_idx < rgn->srgn_cnt; srgn_idx++) { + srgn = rgn->srgn_tbl + srgn_idx; + + ufshpb_purge_active_subregion(hpb, srgn, state); + } + } + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + + RELEASE_INFO("END"); +} + +static void ufshpb_drop_retry_list(struct ufshpb_lu *hpb) +{ + struct ufshpb_req *map_req, *next; + unsigned long 
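+	/*
+	 * A sketch of what ufshpb_purge_active_region() above does per
+	 * region state, assuming ufshpb_purge_active_subregion() releases
+	 * the map ctx for UNUSED subregions like its skhpb counterpart:
+	 *
+	 *   HPBREGION_PINNED   -> subregions set HPBSUBREGION_DIRTY,
+	 *                         map ctx kept for re-reading
+	 *   HPBREGION_ACTIVE   -> removed from the LRU, subregions set
+	 *                         HPBSUBREGION_UNUSED
+	 *   HPBREGION_INACTIVE -> skipped
+	 */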
flags; + + if (list_empty(&hpb->lh_map_req_retry)) + return; + + spin_lock_irqsave(&hpb->hpb_lock, flags); + list_for_each_entry_safe(map_req, next, &hpb->lh_map_req_retry, + list_req) { + INFO_MSG("drop map_req %p ( %d - %d )", map_req, + map_req->rb.rgn_idx, map_req->rb.srgn_idx); + + list_del_init(&map_req->list_req); + + ufshpb_put_map_req(hpb, map_req); + } + spin_unlock_irqrestore(&hpb->hpb_lock, flags); +} + +static void ufshpb_drop_rsp_lists(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn, *next_rgn; + struct ufshpb_subregion *srgn, *next_srgn; + unsigned long flags; + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn, + list_inact_rgn) { + list_del_init(&rgn->list_inact_rgn); + } + + list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn, + list_act_srgn) { + list_del_init(&srgn->list_act_srgn); + } + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + int srgn_idx; + + for (srgn_idx = 0; srgn_idx < rgn->srgn_cnt; srgn_idx++) { + struct ufshpb_subregion *srgn; + + srgn = rgn->srgn_tbl + srgn_idx; + srgn->srgn_state = HPBSUBREGION_UNUSED; + + ufshpb_put_map_ctx(hpb, srgn->mctx); + } + + kfree(rgn->srgn_tbl); +} + +static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb) +{ + int rgn_idx; + + RELEASE_INFO("Start"); + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + struct ufshpb_region *rgn; + + rgn = hpb->rgn_tbl + rgn_idx; + if (rgn->rgn_state == HPBREGION_PINNED || + rgn->rgn_state == HPBREGION_ACTIVE) { + rgn->rgn_state = HPBREGION_INACTIVE; + + ufshpb_destroy_subregion_tbl(hpb, rgn); + } + } + + ufshpb_table_mempool_remove(hpb); + kfree(hpb->rgn_tbl); + + RELEASE_INFO("End"); +} + +void ufshpb_release(struct ufsf_feature *ufsf, int state) +{ + struct ufshpb_lu *hpb; + int lun; + + RELEASE_INFO("start release"); + ufsf->ufshpb_state = HPB_FAILED; + + RELEASE_INFO("kref count %d", + atomic_read(&ufsf->ufshpb_kref.refcount.refs)); + + seq_scan_lu(lun) { + hpb = ufsf->ufshpb_lup[lun]; + + RELEASE_INFO("lun %d %p", lun, hpb); + + ufsf->ufshpb_lup[lun] = NULL; + + if (!hpb) + continue; + + ufshpb_cancel_jobs(hpb); + + ufshpb_destroy_region_tbl(hpb); + if (hpb->alloc_mctx != 0) + WARNING_MSG("warning: alloc_mctx %d", hpb->alloc_mctx); + + ufshpb_map_req_mempool_remove(hpb); + + ufshpb_pre_req_mempool_remove(hpb); + + ufshpb_remove_sysfs(hpb); + + kfree(hpb); + } + + ufsf->ufshpb_state = state; + + RELEASE_INFO("end release"); +} + +static void ufshpb_reset(struct ufsf_feature *ufsf) +{ + struct ufshpb_lu *hpb; + int lun; + + seq_scan_lu(lun) { + hpb = ufsf->ufshpb_lup[lun]; + if (hpb) { + INFO_MSG("UFSHPB lun %d reset", lun); + ufshpb_cancel_jobs(hpb); + ufshpb_drop_retry_list(hpb); + ufshpb_drop_rsp_lists(hpb); + ufshpb_purge_active_region(hpb); + } + } + + ufsf->ufshpb_state = HPB_PRESENT; +} + +static inline int ufshpb_wait_kref_init_value(struct ufsf_feature *ufsf) +{ + return (atomic_read(&ufsf->ufshpb_kref.refcount.refs) == 1); +} + +void ufshpb_reset_handler(struct work_struct *work) +{ + struct ufsf_feature *ufsf; + int ret; + + ufsf = container_of(work, struct ufsf_feature, ufshpb_reset_work); + + init_waitqueue_head(&ufsf->wait_hpb); + + ret = wait_event_timeout(ufsf->wait_hpb, + ufshpb_wait_kref_init_value(ufsf), + msecs_to_jiffies(15000)); + if (ret == 0) + ERR_MSG("UFSHPB kref is not init_value(=1). 
kref count = %d", + atomic_read(&ufsf->ufshpb_kref.refcount.refs)); + + INIT_INFO("HPB_RESET_START"); + + ufshpb_reset(ufsf); +} + +static inline int ufshpb_probe_lun_done(struct ufsf_feature *ufsf) +{ + return (ufsf->num_lu == ufsf->slave_conf_cnt); +} + +void ufshpb_init_handler(struct work_struct *work) +{ + struct ufsf_feature *ufsf; + int ret; + + ufsf = container_of(work, struct ufsf_feature, ufshpb_init_work); + + init_waitqueue_head(&ufsf->wait_hpb); + + ret = wait_event_timeout(ufsf->wait_hpb, + ufshpb_probe_lun_done(ufsf), + msecs_to_jiffies(10000)); + if (ret == 0) + ERR_MSG("Probing LU is not fully complete."); + + INIT_INFO("HPB_INIT_START"); + + ret = ufshpb_init(ufsf); + if (ret) + ERR_MSG("UFSHPB driver init failed. err %d", ret); +} + +void ufshpb_suspend(struct ufsf_feature *ufsf) +{ + struct ufshpb_lu *hpb; + int lun; + + seq_scan_lu(lun) { + hpb = ufsf->ufshpb_lup[lun]; + if (hpb) { + INFO_MSG("ufshpb_lu %d goto suspend", lun); + ufshpb_cancel_jobs(hpb); + } + } +} + +void ufshpb_resume(struct ufsf_feature *ufsf) +{ + struct ufshpb_lu *hpb; + int lun; + + seq_scan_lu(lun) { + hpb = ufsf->ufshpb_lup[lun]; + if (hpb) { + bool do_workq = false; + bool do_retry_work = false; + + do_workq = !ufshpb_is_empty_rsp_lists(hpb); + do_retry_work = + !list_empty_careful(&hpb->lh_map_req_retry); + + INFO_MSG("ufshpb_lu %d resume. do_workq %d retry %d", + lun, do_workq, do_retry_work); + + if (do_workq) + schedule_work(&hpb->ufshpb_task_workq); + if (do_retry_work) + schedule_delayed_work(&hpb->ufshpb_retry_work, + msecs_to_jiffies(100)); + } + } +} + +static void ufshpb_stat_init(struct ufshpb_lu *hpb) +{ + atomic64_set(&hpb->hit, 0); + atomic64_set(&hpb->miss, 0); + atomic64_set(&hpb->rb_noti_cnt, 0); + atomic64_set(&hpb->rb_active_cnt, 0); + atomic64_set(&hpb->rb_inactive_cnt, 0); + atomic64_set(&hpb->map_req_cnt, 0); + atomic64_set(&hpb->pre_req_cnt, 0); +} + +/* SYSFS functions */ +static ssize_t ufshpb_sysfs_prep_disable_show(struct ufshpb_lu *hpb, char *buf) +{ + int ret; + + ret = snprintf(buf, PAGE_SIZE, "force_hpb_read_disable %d\n", + hpb->force_disable); + + if (ret < 0) + return ret; + + SYSFS_INFO("%s", buf); + + return ret; +} + +static ssize_t ufshpb_sysfs_prep_disable_store(struct ufshpb_lu *hpb, + const char *buf, size_t cnt) +{ + unsigned long value = 0; + + if (kstrtoul(buf, 0, &value)) + return -EINVAL; + + if (value > 1) + return -EINVAL; + + if (value == 1) + hpb->force_disable = true; + else if (value == 0) + hpb->force_disable = false; + + SYSFS_INFO("force_hpb_read_disable %d", hpb->force_disable); + + return cnt; +} + +static ssize_t ufshpb_sysfs_map_disable_show(struct ufshpb_lu *hpb, char *buf) +{ + int ret; + + ret = snprintf(buf, PAGE_SIZE, "force_map_req_disable %d\n", + hpb->force_map_req_disable); + + if (ret < 0) + return ret; + + SYSFS_INFO("%s", buf); + + return ret; +} + +static ssize_t ufshpb_sysfs_map_disable_store(struct ufshpb_lu *hpb, + const char *buf, size_t cnt) +{ + unsigned long value = 0; + + if (kstrtoul(buf, 0, &value)) + return -EINVAL; + + if (value > 1) + return -EINVAL; + + if (value == 1) + hpb->force_map_req_disable = true; + else if (value == 0) + hpb->force_map_req_disable = false; + + SYSFS_INFO("force_map_req_disable %d", hpb->force_map_req_disable); + + return cnt; +} + +static ssize_t ufshpb_sysfs_throttle_map_req_show(struct ufshpb_lu *hpb, + char *buf) +{ + int ret; + + ret = snprintf(buf, PAGE_SIZE, "throttle_map_req %d\n", + hpb->throttle_map_req); + + if (ret < 0) + return ret; + + SYSFS_INFO("%s", buf); + + return 
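+	/*
+	 * Usage sketch for the sysfs nodes in this block; the parent path
+	 * below is an assumption (the nodes are registered as
+	 * "ufshpb_lu%d" under the hba device kobject, so the full path is
+	 * platform dependent):
+	 *
+	 *   cat .../ufshpb_lu0/throttle_map_req
+	 *   echo 16 > .../ufshpb_lu0/throttle_map_req
+	 */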
ret;
+}
+
+static ssize_t ufshpb_sysfs_throttle_map_req_store(struct ufshpb_lu *hpb,
+						   const char *buf, size_t cnt)
+{
+	unsigned long throttle_map_req = 0;
+
+	if (kstrtoul(buf, 0, &throttle_map_req))
+		return -EINVAL;
+
+	hpb->throttle_map_req = (int)throttle_map_req;
+
+	SYSFS_INFO("throttle_map_req %d", hpb->throttle_map_req);
+
+	return cnt;
+}
+
+static ssize_t ufshpb_sysfs_throttle_pre_req_show(struct ufshpb_lu *hpb,
+						  char *buf)
+{
+	int ret;
+
+	ret = snprintf(buf, PAGE_SIZE, "throttle_pre_req %d\n",
+		       hpb->throttle_pre_req);
+
+	if (ret < 0)
+		return ret;
+
+	SYSFS_INFO("%s", buf);
+
+	return ret;
+}
+
+static ssize_t ufshpb_sysfs_throttle_pre_req_store(struct ufshpb_lu *hpb,
+						   const char *buf, size_t cnt)
+{
+	unsigned long throttle_pre_req = 0;
+
+	if (kstrtoul(buf, 0, &throttle_pre_req))
+		return -EINVAL;
+
+	hpb->throttle_pre_req = (int)throttle_pre_req;
+
+	SYSFS_INFO("throttle_pre_req %d", hpb->throttle_pre_req);
+
+	return cnt;
+}
+
+static ssize_t ufshpb_sysfs_pre_req_min_tr_len_show(struct ufshpb_lu *hpb,
+						    char *buf)
+{
+	int ret;
+
+	ret = snprintf(buf, PAGE_SIZE, "%d", hpb->pre_req_min_tr_len);
+
+	if (ret < 0)
+		return ret;
+
+	SYSFS_INFO("pre_req min transfer len %d", hpb->pre_req_min_tr_len);
+
+	return ret;
+}
+
+static ssize_t ufshpb_sysfs_pre_req_min_tr_len_store(struct ufshpb_lu *hpb,
+						     const char *buf,
+						     size_t count)
+{
+	unsigned long val = 0;
+
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (hpb->pre_req_max_tr_len < val || val < HPB_MULTI_CHUNK_LOW)
+		SYSFS_INFO("value is wrong. pre_req transfer len %d ~ %d\n",
+			   HPB_MULTI_CHUNK_LOW, hpb->pre_req_max_tr_len);
+	else
+		hpb->pre_req_min_tr_len = val;
+
+	SYSFS_INFO("pre_req min transfer len %d", hpb->pre_req_min_tr_len);
+
+	return count;
+}
+
+static ssize_t ufshpb_sysfs_pre_req_max_tr_len_show(struct ufshpb_lu *hpb,
+						    char *buf)
+{
+	int ret;
+
+	ret = snprintf(buf, PAGE_SIZE, "%d", hpb->pre_req_max_tr_len);
+
+	if (ret < 0)
+		return ret;
+
+	SYSFS_INFO("pre_req max transfer len %d", hpb->pre_req_max_tr_len);
+
+	return ret;
+}
+
+static ssize_t ufshpb_sysfs_pre_req_max_tr_len_store(struct ufshpb_lu *hpb,
+						     const char *buf,
+						     size_t count)
+{
+	unsigned long val = 0;
+
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (hpb->pre_req_min_tr_len > val || val > HPB_MULTI_CHUNK_HIGH)
+		SYSFS_INFO("value is wrong. 
pre_req transfer len %d ~ %d\n", + hpb->pre_req_min_tr_len, HPB_MULTI_CHUNK_HIGH); + else + hpb->pre_req_max_tr_len = val; + + SYSFS_INFO("pre_req max transfer len %d", hpb->pre_req_max_tr_len); + + return count; +} + +static ssize_t ufshpb_sysfs_debug_show(struct ufshpb_lu *hpb, char *buf) +{ + int ret; + + ret = snprintf(buf, PAGE_SIZE, "debug %d\n", hpb->debug); + + if (ret < 0) + return ret; + + SYSFS_INFO("%s", buf); + + return ret; +} + +static ssize_t ufshpb_sysfs_debug_store(struct ufshpb_lu *hpb, + const char *buf, size_t cnt) +{ + unsigned long debug = 0; + + if (kstrtoul(buf, 0, &debug)) + return -EINVAL; + + if (debug >= 1) + hpb->debug = 1; + else + hpb->debug = 0; + + SYSFS_INFO("debug %d", hpb->debug); + + return cnt; +} + +static ssize_t ufshpb_sysfs_version_show(struct ufshpb_lu *hpb, char *buf) +{ + int ret; + + ret = snprintf(buf, PAGE_SIZE, + "HPB version %.2x %.2x D/D version %.2x %.2x\n", + GET_BYTE_1(hpb->hpb_ver), GET_BYTE_0(hpb->hpb_ver), + GET_BYTE_1(UFSHPB_DD_VER), GET_BYTE_0(UFSHPB_DD_VER)); + + if (ret < 0) + return ret; + + SYSFS_INFO("%s", buf); + + return ret; +} + +static ssize_t ufshpb_sysfs_hit_show(struct ufshpb_lu *hpb, char *buf) +{ + long long hit_cnt; + int ret; + + hit_cnt = atomic64_read(&hpb->hit); + + ret = snprintf(buf, PAGE_SIZE, "hit_count %lld\n", hit_cnt); + + if (ret < 0) + return ret; + + SYSFS_INFO("%s", buf); + + return ret; +} + +static ssize_t ufshpb_sysfs_miss_show(struct ufshpb_lu *hpb, char *buf) +{ + long long miss_cnt; + int ret; + + miss_cnt = atomic64_read(&hpb->miss); + + ret = snprintf(buf, PAGE_SIZE, "miss_count %lld\n", miss_cnt); + + if (ret < 0) + return ret; + + SYSFS_INFO("%s", buf); + + return ret; +} + +static ssize_t ufshpb_sysfs_map_req_show(struct ufshpb_lu *hpb, char *buf) +{ + long long rb_noti_cnt, rb_active_cnt, rb_inactive_cnt, map_req_cnt; + int ret; + + rb_noti_cnt = atomic64_read(&hpb->rb_noti_cnt); + rb_active_cnt = atomic64_read(&hpb->rb_active_cnt); + rb_inactive_cnt = atomic64_read(&hpb->rb_inactive_cnt); + map_req_cnt = atomic64_read(&hpb->map_req_cnt); + + ret = snprintf(buf, PAGE_SIZE, + "rb_noti %lld ACT %lld INACT %lld map_req_count %lld\n", + rb_noti_cnt, rb_active_cnt, rb_inactive_cnt, + map_req_cnt); + + if (ret < 0) + return ret; + + SYSFS_INFO("%s", buf); + + return ret; +} + +static ssize_t ufshpb_sysfs_pre_req_show(struct ufshpb_lu *hpb, char *buf) +{ + long long pre_req_cnt; + int ret; + + pre_req_cnt = atomic64_read(&hpb->pre_req_cnt); + + ret = snprintf(buf, PAGE_SIZE, "pre_req_count %lld\n", pre_req_cnt); + + if (ret < 0) + return ret; + + SYSFS_INFO("%s", buf); + + return ret; +} + +static ssize_t ufshpb_sysfs_region_stat_show(struct ufshpb_lu *hpb, char *buf) +{ + int ret, pin_cnt = 0, act_cnt = 0, inact_cnt = 0, rgn_idx; + enum HPBREGION_STATE state; + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + state = hpb->rgn_tbl[rgn_idx].rgn_state; + if (state == HPBREGION_PINNED) + pin_cnt++; + else if (state == HPBREGION_ACTIVE) + act_cnt++; + else if (state == HPBREGION_INACTIVE) + inact_cnt++; + } + + ret = snprintf(buf, PAGE_SIZE, + "Total %d pinned %d active %d inactive %d\n", + hpb->rgns_per_lu, pin_cnt, act_cnt, inact_cnt); + + if (ret < 0) + return ret; + + SYSFS_INFO("%s", buf); + + return ret; +} + +static ssize_t ufshpb_sysfs_count_reset_store(struct ufshpb_lu *hpb, + const char *buf, size_t cnt) +{ + unsigned long debug; + + if (kstrtoul(buf, 0, &debug)) + return -EINVAL; + + SYSFS_INFO("Stat Init"); + + ufshpb_stat_init(hpb); + + return cnt; +} + +static ssize_t 
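+/*
+ * The counters above are raw totals; a hit ratio has to be computed in
+ * userspace, e.g. (illustrative numbers) hit_count = 900 and
+ * miss_count = 100 give 900 / (900 + 100) = 90% of HPB-eligible reads
+ * served from cached L2P entries.
+ */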
ufshpb_sysfs_info_lba_store(struct ufshpb_lu *hpb, + const char *buf, size_t cnt) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + unsigned long long ppn = 0; + unsigned long value = 0, lpn, flags; + int rgn_idx = 0, srgn_idx = 0, srgn_offset = 0, error = 0; + + if (kstrtoul(buf, 0, &value)) { + ERR_MSG("kstrtoul error"); + return -EINVAL; + } + + if (value > hpb->lu_num_blocks * SECTORS_PER_BLOCK) { + ERR_MSG("value %lu > lu_num_blocks %d error", + value, hpb->lu_num_blocks); + return -EINVAL; + } + + lpn = value / SECTORS_PER_BLOCK; + + ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset); + + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + spin_lock_irqsave(&hpb->hpb_lock, flags); + SYSFS_INFO("lba %lu lpn %lu region %d state %d subregion %d state %d", + value, lpn, rgn_idx, rgn->rgn_state, srgn_idx, + srgn->srgn_state); + + if (!ufshpb_valid_srgn(rgn, srgn)) { + SYSFS_INFO("[region %d subregion %d] has not valid hpb info.", + rgn_idx, srgn_idx); + goto out; + } + + if (!srgn->mctx) { + SYSFS_INFO("mctx is NULL"); + goto out; + } + + ppn = ufshpb_get_ppn(srgn->mctx, srgn_offset, &error); + if (error) { + SYSFS_INFO("getting ppn is fail from a page."); + goto out; + } + + SYSFS_INFO("ppn %llx is_dirty %d", ppn, + ufshpb_ppn_dirty_check(hpb, lpn, 1)); +out: + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + return cnt; +} + +static ssize_t ufshpb_sysfs_info_region_store(struct ufshpb_lu *hpb, + const char *buf, size_t cnt) +{ + unsigned long rgn_idx = 0; + int srgn_idx; + + if (kstrtoul(buf, 0, &rgn_idx)) + return -EINVAL; + + if (rgn_idx >= hpb->rgns_per_lu) + ERR_MSG("error region %ld max %d", rgn_idx, hpb->rgns_per_lu); + else { + SYSFS_INFO("(region state : PINNED=%d ACTIVE=%d INACTIVE=%d)", + HPBREGION_PINNED, HPBREGION_ACTIVE, + HPBREGION_INACTIVE); + + SYSFS_INFO("region %ld state %d", rgn_idx, + hpb->rgn_tbl[rgn_idx].rgn_state); + + for (srgn_idx = 0; srgn_idx < hpb->rgn_tbl[rgn_idx].srgn_cnt; + srgn_idx++) { + SYSFS_INFO("--- subregion %d state %d", srgn_idx, + hpb->rgn_tbl[rgn_idx].srgn_tbl[srgn_idx].srgn_state); + } + } + + return cnt; +} + +static ssize_t ufshpb_sysfs_ufshpb_release_store(struct ufshpb_lu *hpb, + const char *buf, size_t cnt) +{ + unsigned long value = 0; + + SYSFS_INFO("start release function"); + + if (kstrtoul(buf, 0, &value)) { + ERR_MSG("kstrtoul error"); + return -EINVAL; + } + + if (value == 0xab) { + SYSFS_INFO("magic number %lu release start", value); + goto err_out; + } else + SYSFS_INFO("wrong magic number %lu", value); + + return cnt; +err_out: + SYSFS_INFO("ref_cnt %d", + atomic_read(&hpb->ufsf->ufshpb_kref.refcount.refs)); + ufshpb_failed(hpb, __func__); + + return cnt; +} + +static struct ufshpb_sysfs_entry ufshpb_sysfs_entries[] = { + __ATTR(hpb_read_disable, 0644, + ufshpb_sysfs_prep_disable_show, ufshpb_sysfs_prep_disable_store), + __ATTR(map_cmd_disable, 0644, + ufshpb_sysfs_map_disable_show, ufshpb_sysfs_map_disable_store), + __ATTR(throttle_map_req, 0644, + ufshpb_sysfs_throttle_map_req_show, + ufshpb_sysfs_throttle_map_req_store), + __ATTR(throttle_pre_req, 0644, + ufshpb_sysfs_throttle_pre_req_show, + ufshpb_sysfs_throttle_pre_req_store), + __ATTR(pre_req_min_tr_len, 0644, + ufshpb_sysfs_pre_req_min_tr_len_show, + ufshpb_sysfs_pre_req_min_tr_len_store), + __ATTR(pre_req_max_tr_len, 0644, + ufshpb_sysfs_pre_req_max_tr_len_show, + ufshpb_sysfs_pre_req_max_tr_len_store), + __ATTR(debug, 0644, + ufshpb_sysfs_debug_show, ufshpb_sysfs_debug_store), + __ATTR(hpb_version, 0444, 
ufshpb_sysfs_version_show, NULL), + __ATTR(hit_count, 0444, ufshpb_sysfs_hit_show, NULL), + __ATTR(miss_count, 0444, ufshpb_sysfs_miss_show, NULL), + __ATTR(map_req_count, 0444, ufshpb_sysfs_map_req_show, NULL), + __ATTR(pre_req_count, 0444, ufshpb_sysfs_pre_req_show, NULL), + __ATTR(region_stat_count, 0444, ufshpb_sysfs_region_stat_show, NULL), + __ATTR(count_reset, 0200, NULL, ufshpb_sysfs_count_reset_store), + __ATTR(get_info_from_lba, 0200, NULL, ufshpb_sysfs_info_lba_store), + __ATTR(get_info_from_region, 0200, NULL, + ufshpb_sysfs_info_region_store), + __ATTR(release, 0200, NULL, ufshpb_sysfs_ufshpb_release_store), + __ATTR_NULL +}; + +static ssize_t ufshpb_attr_show(struct kobject *kobj, struct attribute *attr, + char *page) +{ + struct ufshpb_sysfs_entry *entry; + struct ufshpb_lu *hpb; + ssize_t error; + + entry = container_of(attr, struct ufshpb_sysfs_entry, attr); + hpb = container_of(kobj, struct ufshpb_lu, kobj); + + if (!entry->show) + return -EIO; + + mutex_lock(&hpb->sysfs_lock); + error = entry->show(hpb, page); + mutex_unlock(&hpb->sysfs_lock); + return error; +} + +static ssize_t ufshpb_attr_store(struct kobject *kobj, struct attribute *attr, + const char *page, size_t len) +{ + struct ufshpb_sysfs_entry *entry; + struct ufshpb_lu *hpb; + ssize_t error; + + entry = container_of(attr, struct ufshpb_sysfs_entry, attr); + hpb = container_of(kobj, struct ufshpb_lu, kobj); + + if (!entry->store) + return -EIO; + + mutex_lock(&hpb->sysfs_lock); + error = entry->store(hpb, page, len); + mutex_unlock(&hpb->sysfs_lock); + return error; +} + +static const struct sysfs_ops ufshpb_sysfs_ops = { + .show = ufshpb_attr_show, + .store = ufshpb_attr_store, +}; + +static struct kobj_type ufshpb_ktype = { + .sysfs_ops = &ufshpb_sysfs_ops, + .release = NULL, +}; + +static int ufshpb_create_sysfs(struct ufsf_feature *ufsf, struct ufshpb_lu *hpb) +{ + struct device *dev = ufsf->hba->dev; + struct ufshpb_sysfs_entry *entry; + int err; + + hpb->sysfs_entries = ufshpb_sysfs_entries; + + ufshpb_stat_init(hpb); + + kobject_init(&hpb->kobj, &ufshpb_ktype); + mutex_init(&hpb->sysfs_lock); + + INIT_INFO("ufshpb creates sysfs lu %d %p dev->kobj %p", hpb->lun, + &hpb->kobj, &dev->kobj); + + err = kobject_add(&hpb->kobj, kobject_get(&dev->kobj), + "ufshpb_lu%d", hpb->lun); + if (!err) { + for (entry = hpb->sysfs_entries; entry->attr.name != NULL; + entry++) { + INIT_INFO("ufshpb_lu%d sysfs attr creates: %s", + hpb->lun, entry->attr.name); + if (sysfs_create_file(&hpb->kobj, &entry->attr)) + break; + } + INIT_INFO("ufshpb_lu%d sysfs adds uevent", hpb->lun); + kobject_uevent(&hpb->kobj, KOBJ_ADD); + } + + return err; +} + +static int ufshpb_remove_sysfs(struct ufshpb_lu *hpb) +{ + struct ufshpb_sysfs_entry *entry; + + for (entry = hpb->sysfs_entries; entry->attr.name != NULL; + entry++) { + INIT_INFO("ufshpb_lu%d sysfs attr removes: %s", + hpb->lun, entry->attr.name); + sysfs_remove_file(&hpb->kobj, &entry->attr); + } + kobject_uevent(&hpb->kobj, KOBJ_REMOVE); + + INIT_INFO("ufshpb removes sysfs lu %d %p ", hpb->lun, &hpb->kobj); + kobject_del(&hpb->kobj); + + return 0; +} diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h new file mode 100644 index 000000000000..19a5c9310b58 --- /dev/null +++ b/drivers/scsi/ufs/ufshpb.h @@ -0,0 +1,292 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2017-2018 Samsung Electronics Co., Ltd. 
+ */ + +#ifndef _UFSHPB_H_ +#define _UFSHPB_H_ + +#include +#include +#include +#include +#include +#include + +#include "../../../block/blk.h" +#include "../scsi_priv.h" + +/* Version info*/ +#define UFSHPB_VER 0x0200 +#define UFSHPB_DD_VER 0x0208 + +/* Constant value*/ +#define MAX_ACTIVE_NUM 2 +#define MAX_INACTIVE_NUM 2 + +#define HPB_ENTRY_SIZE 0x08 +#define HPB_ENTREIS_PER_OS_PAGE (OS_PAGE_SIZE / HPB_ENTRY_SIZE) + +#define RETRY_DELAY_MS 5000 + +/* HPB Support Chunk Size */ +#define HPB_MULTI_CHUNK_LOW 9 +#define HPB_MULTI_CHUNK_HIGH 128 +#define MAX_HPB_CONTEXT_ID 0x7f + +/* Description */ +#define UFS_FEATURE_SUPPORT_HPB_BIT 0x80 + +/* Response UPIU types */ +#define HPB_RSP_NONE 0x00 +#define HPB_RSP_REQ_REGION_UPDATE 0x01 + +/* Vender defined OPCODE */ +#define UFSHPB_READ_BUFFER 0xF9 +#define UFSHPB_WRITE_BUFFER 0xFA + +#define UFSHPB_GROUP_NUMBER 0x11 +#define UFSHPB_READ_BUFFER_ID 0x01 +#define UFSHPB_WRITE_BUFFER_ID 0x02 +#define TRANSFER_LEN 0x01 + +#define DEV_DATA_SEG_LEN 0x14 +#define DEV_SENSE_SEG_LEN 0x12 +#define DEV_DES_TYPE 0x80 +#define DEV_ADDITIONAL_LEN 0x10 + +/* For read10 debug */ +#define READ10_DEBUG_LUN 0x7F +#define READ10_DEBUG_LBA 0x48504230 + +/* + * UFSHPB DEBUG + */ + +#define HPB_DEBUG(hpb, msg, args...) \ + do { if (hpb->debug) \ + printk(KERN_ERR "%s:%d " msg "\n", \ + __func__, __LINE__, ##args); \ + } while (0) + +#define TMSG_CMD(hpb, msg, rq, rgn, srgn) \ + do { if (hpb->ufsf->sdev_ufs_lu[hpb->lun] && \ + hpb->ufsf->sdev_ufs_lu[hpb->lun]->request_queue) \ + blk_add_trace_msg( \ + hpb->ufsf->sdev_ufs_lu[hpb->lun]->request_queue,\ + "%llu + %u " msg " %d - %d", \ + (unsigned long long) blk_rq_pos(rq), \ + (unsigned int) blk_rq_sectors(rq), rgn, srgn); \ + } while (0) + +enum UFSHPB_STATE { + HPB_PRESENT = 1, + HPB_NOT_SUPPORTED = -1, + HPB_FAILED = -2, + HPB_NEED_INIT = 0, + HPB_RESET = -3, +}; + +enum HPBREGION_STATE { + HPBREGION_INACTIVE, HPBREGION_ACTIVE, HPBREGION_PINNED, +}; + +enum HPBSUBREGION_STATE { + HPBSUBREGION_UNUSED, + HPBSUBREGION_DIRTY, + HPBSUBREGION_CLEAN, + HPBSUBREGION_ISSUED, +}; + +struct ufshpb_dev_info { + bool hpb_device; + int hpb_number_lu; + int hpb_ver; + int hpb_rgn_size; + int hpb_srgn_size; + int hpb_device_max_active_rgns; +}; + +struct ufshpb_active_field { + __be16 active_rgn; + __be16 active_srgn; +}; + +struct ufshpb_rsp_field { + __be16 sense_data_len; + u8 desc_type; + u8 additional_len; + u8 hpb_type; + u8 reserved; + u8 active_rgn_cnt; + u8 inactive_rgn_cnt; + struct ufshpb_active_field hpb_active_field[2]; + __be16 hpb_inactive_field[2]; +}; + +struct ufshpb_map_ctx { + struct page **m_page; + unsigned int *ppn_dirty; + + struct list_head list_table; +}; + +struct ufshpb_subregion { + struct ufshpb_map_ctx *mctx; + enum HPBSUBREGION_STATE srgn_state; + int rgn_idx; + int srgn_idx; + + /* below information is used by rsp_list */ + struct list_head list_act_srgn; +}; + +struct ufshpb_region { + struct ufshpb_subregion *srgn_tbl; + enum HPBREGION_STATE rgn_state; + int rgn_idx; + int srgn_cnt; + + /* below information is used by rsp_list */ + struct list_head list_inact_rgn; + + /* below information is used by lru */ + struct list_head list_lru_rgn; +}; + +struct ufshpb_req { + struct request *req; + struct bio *bio; + struct ufshpb_lu *hpb; + struct list_head list_req; + void (*end_io)(struct request *rq, int err); + void *end_io_data; + char sense[SCSI_SENSE_BUFFERSIZE]; + + union { + struct { + struct ufshpb_map_ctx *mctx; + unsigned int rgn_idx; + unsigned int srgn_idx; + unsigned int lun; + } rb; + 
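+		/*
+		 * rb above is used for READ_BUFFER map requests and wb
+		 * below for WRITE_BUFFER pre-requests, as their use in
+		 * ufshpb.c suggests; only one of the two is live per
+		 * ufshpb_req, hence the union.
+		 */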
struct { + struct page *m_page; + unsigned int len; + unsigned long lpn; + } wb; + }; +}; + +enum selection_type { + LRU = 1, +}; + +struct victim_select_info { + int selection_type; + struct list_head lh_lru_rgn; + int max_lru_active_cnt; /* supported hpb #region - pinned #region */ + atomic64_t active_cnt; +}; + +struct ufshpb_lu { + struct ufsf_feature *ufsf; + u8 lun; + int qd; + struct ufshpb_region *rgn_tbl; + + spinlock_t hpb_lock; + + struct ufshpb_req *map_req; + int num_inflight_map_req; + int throttle_map_req; + struct list_head lh_map_req_free; + struct list_head lh_map_req_retry; + struct list_head lh_map_ctx_free; + + spinlock_t rsp_list_lock; + struct list_head lh_pinned_srgn; + struct list_head lh_act_srgn; + struct list_head lh_inact_rgn; + + struct kobject kobj; + struct mutex sysfs_lock; + struct ufshpb_sysfs_entry *sysfs_entries; + + struct ufshpb_req *pre_req; + int num_inflight_pre_req; + int throttle_pre_req; + struct list_head lh_pre_req_free; + struct list_head lh_pre_req_dummy; /* dummy for blk_start_requests() */ + int ctx_id_ticket; + int pre_req_min_tr_len; + int pre_req_max_tr_len; + + struct work_struct ufshpb_work; + struct delayed_work ufshpb_retry_work; + struct work_struct ufshpb_task_workq; + + /* for selecting victim */ + struct victim_select_info lru_info; + + int hpb_ver; + int lu_max_active_rgns; + int lu_pinned_rgn_startidx; + int lu_pinned_end_offset; + int lu_num_pinned_rgns; + int srgns_per_lu; + int rgns_per_lu; + int srgns_per_rgn; + int srgn_mem_size; + int entries_per_rgn_shift; + int entries_per_rgn_mask; + int entries_per_srgn; + int entries_per_srgn_shift; + int entries_per_srgn_mask; + int dwords_per_srgn; + unsigned long long srgn_unit_size; + int mpage_bytes; + int mpages_per_srgn; + int lu_num_blocks; + + /* for debug */ + int alloc_mctx; + int debug_free_table; + bool force_disable; + bool force_map_req_disable; + bool debug; + atomic64_t hit; + atomic64_t miss; + atomic64_t rb_noti_cnt; + atomic64_t rb_active_cnt; + atomic64_t rb_inactive_cnt; + atomic64_t map_req_cnt; + atomic64_t pre_req_cnt; +}; + +struct ufshpb_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct ufshpb_lu *hpb, char *buf); + ssize_t (*store)(struct ufshpb_lu *hpb, const char *, size_t); +}; + +struct ufs_hba; +struct ufshcd_lrb; + +int ufshpb_prepare_pre_req(struct ufsf_feature *ufsf, struct scsi_cmnd *cmd, + u8 lun); +int ufshpb_prepare_add_lrbp(struct ufsf_feature *ufsf, int add_tag); +void ufshpb_end_pre_req(struct ufsf_feature *ufsf, struct request *req); +void ufshpb_get_dev_info(struct ufshpb_dev_info *hpb_dev_info, u8 *desc_buf); +void ufshpb_get_geo_info(struct ufshpb_dev_info *hpb_dev_info, u8 *geo_buf); +int ufshpb_get_lu_info(struct ufsf_feature *ufsf, u8 lun, u8 *unit_buf); +void ufshpb_init_handler(struct work_struct *work); +void ufshpb_reset_handler(struct work_struct *work); +void ufshpb_prep_fn(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp); +void ufshpb_rsp_upiu(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp); +void ufshpb_release(struct ufsf_feature *ufsf, int state); +int ufshpb_issue_req_dev_ctx(struct ufshpb_lu *hpb, unsigned char *buf, + int buf_length); +void ufshpb_resume(struct ufsf_feature *ufsf); +void ufshpb_suspend(struct ufsf_feature *ufsf); +#endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufshpb_skh.c b/drivers/scsi/ufs/ufshpb_skh.c new file mode 100644 index 000000000000..0137a72d1836 --- /dev/null +++ b/drivers/scsi/ufs/ufshpb_skh.c @@ -0,0 +1,3266 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * 
Copyright (C) 2017-2018 Samsung Electronics Co., Ltd. + * Modified work Copyright (C) 2018, Google, Inc. + * Modified work Copyright (C) 2019 SK hynix + */ + +#include +#include +#include +#include +#include + +#include "../../../block/blk.h" + +#include "ufs.h" +#include "ufshcd.h" +#include "ufshpb_skh.h" +#include + +u32 skhpb_debug_mask = SKHPB_LOG_ERR | SKHPB_LOG_INFO; +//u32 skhpb_debug_mask = SKHPB_LOG_ERR | SKHPB_LOG_INFO | SKHPB_LOG_DEBUG | SKHPB_LOG_HEX; +int debug_map_req = SKHPB_MAP_RSP_DISABLE; + +/* + * debug variables + */ +static int skhpb_alloc_mctx; + +/* + * define global constants + */ +static int skhpb_sects_per_blk_shift; +static int skhpb_bits_per_dword_shift; +static int skhpb_bits_per_dword_mask; + +static int skhpb_create_sysfs(struct ufs_hba *hba, struct skhpb_lu *hpb); +static int skhpb_check_lru_evict(struct skhpb_lu *hpb, struct skhpb_region *cb); +static void skhpb_error_handler(struct work_struct *work); +static void skhpb_evict_region(struct skhpb_lu *hpb, + struct skhpb_region *cb); +static void skhpb_purge_active_block(struct skhpb_lu *hpb); +static int skhpb_set_map_req(struct skhpb_lu *hpb, + int region, int subregion, struct skhpb_map_ctx *mctx, + struct skhpb_rsp_info *rsp_info, + enum SKHPB_BUFFER_MODE flag); +static void skhpb_rsp_map_cmd_req(struct skhpb_lu *hpb, + struct skhpb_rsp_info *rsp_info); +static void skhpb_map_loading_trigger(struct skhpb_lu *hpb, + bool only_pinned, bool do_work_handler); + +static inline void skhpb_purge_active_page(struct skhpb_lu *hpb, + struct skhpb_subregion *cp, int state); + +static void skhpb_hit_lru_info(struct skhpb_victim_select_info *lru_info, + struct skhpb_region *cb); + +static inline void skhpb_get_bit_offset( + struct skhpb_lu *hpb, int subregion_offset, + int *dword, int *offset) +{ + *dword = subregion_offset >> skhpb_bits_per_dword_shift; + *offset = subregion_offset & skhpb_bits_per_dword_mask; +} + +/* called with hpb_lock (irq) */ +static bool skhpb_ppn_dirty_check(struct skhpb_lu *hpb, + struct skhpb_subregion *cp, int subregion_offset) +{ + bool is_dirty = false; + unsigned int bit_dword, bit_offset; + + if (!cp->mctx->ppn_dirty) + return true; + + skhpb_get_bit_offset(hpb, subregion_offset, + &bit_dword, &bit_offset); + is_dirty = cp->mctx->ppn_dirty[bit_dword] & (1 << bit_offset) ? 
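+		/*
+		 * Example of the dword/offset split computed by
+		 * skhpb_get_bit_offset() above, assuming 32-bit dwords
+		 * (skhpb_bits_per_dword_shift = 5, mask = 0x1f):
+		 * subregion_offset = 70 gives dword = 70 >> 5 = 2 and
+		 * offset = 70 & 0x1f = 6, i.e. bit 6 of ppn_dirty[2].
+		 */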
true : false;
+
+	return is_dirty;
+}
+
+static void skhpb_ppn_prep(struct skhpb_lu *hpb,
+			   struct ufshcd_lrb *lrbp, skhpb_t ppn,
+			   unsigned int sector_len)
+{
+	unsigned char cmd[16] = { 0 };
+
+	if (hpb->hba->skhpb_quirk & SKHPB_QUIRK_USE_READ_16_FOR_ENCRYPTION)
+		cmd[0] = READ_16;
+	else
+		cmd[0] = SKHPB_READ;
+	cmd[2] = lrbp->cmd->cmnd[2];
+	cmd[3] = lrbp->cmd->cmnd[3];
+	cmd[4] = lrbp->cmd->cmnd[4];
+	cmd[5] = lrbp->cmd->cmnd[5];
+	put_unaligned(ppn, (u64 *)&cmd[6]);
+	cmd[14] = (u8)(sector_len >> skhpb_sects_per_blk_shift); /* transfer length */
+	if (hpb->hba->skhpb_quirk & SKHPB_QUIRK_USE_READ_16_FOR_ENCRYPTION)
+		cmd[15] = 0x01; /* control */
+	else
+		cmd[15] = 0x00; /* control */
+
+	memcpy(lrbp->cmd->cmnd, cmd, MAX_CDB_SIZE);
+	memcpy(lrbp->ucd_req_ptr->sc.cdb, cmd, MAX_CDB_SIZE);
+
+	/* To verify the values within the READ command: */
+	/* SKHPB_DRIVER_HEXDUMP("[HPB] HPB READ ", 16, 1, cmd, sizeof(cmd), 1); */
+}
+
+static inline void skhpb_set_dirty_bits(struct skhpb_lu *hpb,
+		struct skhpb_region *cb, struct skhpb_subregion *cp,
+		int dword, int offset, unsigned int count)
+{
+	const unsigned long mask = ((1UL << count) - 1) & 0xffffffff;
+
+	if (cb->region_state == SKHPB_REGION_INACTIVE)
+		return;
+
+	BUG_ON(!cp->mctx);
+	cp->mctx->ppn_dirty[dword] |= (mask << offset);
+}
+
+/* called with hpb_lock (irq) */
+static void skhpb_set_dirty(struct skhpb_lu *hpb,
+		struct ufshcd_lrb *lrbp, int region,
+		int subregion, int subregion_offset)
+{
+	struct skhpb_region *cb;
+	struct skhpb_subregion *cp;
+	int count;
+	int bit_count, bit_dword, bit_offset;
+
+	count = blk_rq_sectors(lrbp->cmd->request) >> skhpb_sects_per_blk_shift;
+	skhpb_get_bit_offset(hpb, subregion_offset,
+			     &bit_dword, &bit_offset);
+
+	do {
+		bit_count = min(count, SKHPB_BITS_PER_DWORD - bit_offset);
+
+		cb = hpb->region_tbl + region;
+		cp = cb->subregion_tbl + subregion;
+
+		skhpb_set_dirty_bits(hpb, cb, cp,
+				     bit_dword, bit_offset, bit_count);
+
+		bit_offset = 0;
+		bit_dword++;
+
+		if (bit_dword == hpb->dwords_per_subregion) {
+			bit_dword = 0;
+			subregion++;
+
+			if (subregion == hpb->subregions_per_region) {
+				subregion = 0;
+				region++;
+			}
+		}
+
+		count -= bit_count;
+	} while (count);
+}
+
+#if 0
+static inline bool skhpb_is_encrypted_lrbp(struct ufshcd_lrb *lrbp)
+{
+	return (lrbp->utr_descriptor_ptr->header.dword_0 & UTRD_CRYPTO_ENABLE);
+}
+#endif
+
+static inline enum SKHPB_CMD skhpb_get_cmd(struct ufshcd_lrb *lrbp)
+{
+	unsigned char cmd = lrbp->cmd->cmnd[0];
+
+	if (cmd == READ_10 || cmd == READ_16)
+		return SKHPB_CMD_READ;
+	if (cmd == WRITE_10 || cmd == WRITE_16)
+		return SKHPB_CMD_WRITE;
+	if (cmd == UNMAP)
+		return SKHPB_CMD_DISCARD;
+	return SKHPB_CMD_OTHERS;
+}
+
+static inline void skhpb_get_pos_from_lpn(struct skhpb_lu *hpb,
+		unsigned int lpn, int *region, int *subregion, int *offset)
+{
+	int region_offset;
+
+	*region = lpn >> hpb->entries_per_region_shift;
+	region_offset = lpn & hpb->entries_per_region_mask;
+	*subregion = region_offset >> hpb->entries_per_subregion_shift;
+	*offset = region_offset & hpb->entries_per_subregion_mask;
+}
+
+static inline bool skhpb_check_region_subregion_validity(struct skhpb_lu *hpb,
+		int region, int subregion)
+{
+	struct skhpb_region *cb;
+
+	if (region >= hpb->regions_per_lu) {
+		SKHPB_DRIVER_E("[HCM] Out of REGION range - region[%d]:MAX[%d]\n",
+			       region, hpb->regions_per_lu);
+		return false;
+	}
+	cb = hpb->region_tbl + region;
+	if (subregion >= cb->subregion_count) {
+		SKHPB_DRIVER_E("[HCM] Out of SUBREGION range - subregion[%d]:MAX[%d]\n",
+			       subregion,
cb->subregion_count); + return false; + } + return true; +} + + +static inline skhpb_t skhpb_get_ppn(struct skhpb_map_ctx *mctx, int pos) +{ + skhpb_t *ppn_table; + int index, offset; + + index = pos / SKHPB_ENTREIS_PER_OS_PAGE; + offset = pos % SKHPB_ENTREIS_PER_OS_PAGE; + + ppn_table = page_address(mctx->m_page[index]); + return ppn_table[offset]; +} + + +#if defined(SKHPB_READ_LARGE_CHUNK_SUPPORT) +static bool skhpb_subregion_dirty_check(struct skhpb_lu *hpb, struct skhpb_subregion *cp, + int subregion_offset, int reqBlkCnt) +{ + unsigned int bit_dword, bit_offset; + unsigned int tmp; + int checkCnt; + + if (!cp->mctx) + return true; + + if (!cp->mctx->ppn_dirty) + return true; + + skhpb_get_bit_offset(hpb, subregion_offset, + &bit_dword, &bit_offset); + + while (true) { + checkCnt = SKHPB_BITS_PER_DWORD - bit_offset; + if (cp->mctx->ppn_dirty[bit_dword]) { + tmp = cp->mctx->ppn_dirty[bit_dword] << bit_offset; + if (SKHPB_BITS_PER_DWORD - reqBlkCnt > 0) + tmp = tmp >> (SKHPB_BITS_PER_DWORD - reqBlkCnt); + if (tmp) + return true; + } + reqBlkCnt -= checkCnt; + if (reqBlkCnt <= 0) + break; + bit_dword++; + if (bit_dword >= hpb->ppn_dirties_per_subregion) + break; + bit_offset = 0; + } + return false; +} + +static bool skhpb_lc_dirty_check(struct skhpb_lu *hpb, unsigned int lpn, unsigned int rq_sectors) +{ + int reg; + int subReg; + struct skhpb_region *cb; + struct skhpb_subregion *cp; + unsigned long cur_lpn = lpn; + int subRegOffset; + int reqBlkCnt = rq_sectors >> skhpb_sects_per_blk_shift; + + do { + skhpb_get_pos_from_lpn(hpb, cur_lpn, ®, &subReg, &subRegOffset); + if (!skhpb_check_region_subregion_validity(hpb, reg, subReg)) + return true; + cb = hpb->region_tbl + reg; + cp = cb->subregion_tbl + subReg; + + if (cb->region_state == SKHPB_REGION_INACTIVE || + cp->subregion_state != SKHPB_SUBREGION_CLEAN) { + atomic64_inc(&hpb->lc_reg_subreg_miss); + return true; + } + + if (skhpb_subregion_dirty_check(hpb, cp, subRegOffset, reqBlkCnt)) { + //SKHPB_DRIVER_D("[NORMAL READ] DIRTY: Region(%d), SubRegion(%d) \n", reg, subReg); + atomic64_inc(&hpb->lc_entry_dirty_miss); + return true; + } + + if (hpb->entries_per_subregion < subRegOffset + reqBlkCnt) { + reqBlkCnt -= (hpb->entries_per_subregion - subRegOffset); + } else { + reqBlkCnt = 0; + } + + cur_lpn += reqBlkCnt; + } while (reqBlkCnt); + + return false; +} +#endif + +void skhpb_prep_fn(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct skhpb_lu *hpb; + struct skhpb_region *cb; + struct skhpb_subregion *cp; + unsigned int lpn; + skhpb_t ppn = 0; + int region, subregion, subregion_offset; + const struct request *rq; + unsigned long long rq_pos; + unsigned int rq_sectors; + unsigned char cmd; + unsigned long flags; + + /* WKLU could not be HPB-LU */ + if (lrbp->lun >= UFS_UPIU_MAX_GENERAL_LUN) + return; + + hpb = hba->skhpb_lup[lrbp->lun]; + if (!hpb || !hpb->lu_hpb_enable) + return; + + if (hpb->force_hpb_read_disable) + return; + + cmd = skhpb_get_cmd(lrbp); + if (cmd == SKHPB_CMD_OTHERS) + return; + /* + * TODO: check if ICE is not supported or not. 
+ * + * if (cmd==SKHPB_CMD_READ && skhpb_is_encrypted_lrbp(lrbp)) + * return; + */ + rq = lrbp->cmd->request; + rq_pos = blk_rq_pos(rq); + rq_sectors = blk_rq_sectors(rq); + + lpn = rq_pos / SKHPB_SECTORS_PER_BLOCK; + skhpb_get_pos_from_lpn( + hpb, lpn, ®ion, &subregion, &subregion_offset); + if (!skhpb_check_region_subregion_validity(hpb, region, subregion)) + return; + cb = hpb->region_tbl + region; + cp = cb->subregion_tbl + subregion; + + if (cmd == SKHPB_CMD_WRITE || + cmd == SKHPB_CMD_DISCARD) { + if (cb->region_state == SKHPB_REGION_INACTIVE) { + return; + } + spin_lock_irqsave(&hpb->hpb_lock, flags); + skhpb_set_dirty(hpb, lrbp, region, subregion, + subregion_offset); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + return; + } + +#if defined(SKHPB_READ_LARGE_CHUNK_SUPPORT) + if (((rq_sectors & (SKHPB_SECTORS_PER_BLOCK - 1)) != 0) || + rq_sectors > + (SKHPB_READ_LARGE_CHUNK_MAX_BLOCK_COUNT << skhpb_sects_per_blk_shift)) { +#else + if (rq_sectors != SKHPB_SECTORS_PER_BLOCK) { +#endif + atomic64_inc(&hpb->size_miss); + return; + } + + spin_lock_irqsave(&hpb->hpb_lock, flags); + if (cb->region_state == SKHPB_REGION_INACTIVE) { + atomic64_inc(&hpb->region_miss); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + return; + } else if (cp->subregion_state != SKHPB_SUBREGION_CLEAN) { + atomic64_inc(&hpb->subregion_miss); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + return; + } + if (rq_sectors <= SKHPB_SECTORS_PER_BLOCK) { + if (skhpb_ppn_dirty_check(hpb, cp, subregion_offset)) { + atomic64_inc(&hpb->entry_dirty_miss); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + return; + } + } +#if defined(SKHPB_READ_LARGE_CHUNK_SUPPORT) + else { + if (skhpb_lc_dirty_check(hpb, lpn, rq_sectors)) { + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + return; + } + atomic64_inc(&hpb->lc_hit); + } +#endif + ppn = skhpb_get_ppn(cp->mctx, subregion_offset); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + + //SKHPB_DRIVER_D("XXX ufs ppn %016llx, lba %u\n", (unsigned long long)ppn, lpn); + skhpb_ppn_prep(hpb, lrbp, ppn, rq_sectors); + atomic64_inc(&hpb->hit); + return; +} + +static int skhpb_clean_dirty_bitmap( + struct skhpb_lu *hpb, struct skhpb_subregion *cp) +{ + struct skhpb_region *cb; + + cb = hpb->region_tbl + cp->region; + + /* if mctx is null, active block had been evicted out */ + if (cb->region_state == SKHPB_REGION_INACTIVE || !cp->mctx) { + SKHPB_DRIVER_D("%d - %d error already evicted\n", + cp->region, cp->subregion); + return -EINVAL; + } + + memset(cp->mctx->ppn_dirty, 0x00, + hpb->entries_per_subregion / BITS_PER_BYTE); + return 0; +} + +static void skhpb_clean_active_subregion( + struct skhpb_lu *hpb, struct skhpb_subregion *cp) +{ + struct skhpb_region *cb; + + cb = hpb->region_tbl + cp->region; + + /* if mctx is null, active block had been evicted out */ + if (cb->region_state == SKHPB_REGION_INACTIVE || !cp->mctx) { + SKHPB_DRIVER_D("%d - %d clean already evicted\n", + cp->region, cp->subregion); + return; + } + cp->subregion_state = SKHPB_SUBREGION_CLEAN; +} + +static void skhpb_error_active_subregion( + struct skhpb_lu *hpb, struct skhpb_subregion *cp) +{ + struct skhpb_region *cb; + + cb = hpb->region_tbl + cp->region; + + /* if mctx is null, active block had been evicted out */ + if (cb->region_state == SKHPB_REGION_INACTIVE || !cp->mctx) { + SKHPB_DRIVER_E("%d - %d evicted\n", cp->region, cp->subregion); + return; + } + cp->subregion_state = SKHPB_SUBREGION_DIRTY; +} + +static void skhpb_map_compl_process(struct skhpb_lu *hpb, + struct skhpb_map_req *map_req) +{ + 
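+	/*
+	 * Worked example of skhpb_get_pos_from_lpn() as used by
+	 * skhpb_prep_fn() above, assuming 4 KiB HPB entries with 32 MiB
+	 * regions and 4 MiB subregions (shift 13 / mask 8191 and
+	 * shift 10 / mask 1023 - assumed geometry):
+	 *
+	 *   lpn = 173555
+	 *   region           = 173555 >> 13 = 21
+	 *   region_offset    = 173555 & 8191 = 1523
+	 *   subregion        = 1523 >> 10 = 1
+	 *   subregion_offset = 1523 & 1023 = 499
+	 */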
unsigned long flags; + + SKHPB_MAP_REQ_TIME(map_req, map_req->RSP_end, 1); + SKHPB_DRIVER_D("SKHPB RB COMPL BUFFER %d - %d\n", map_req->region, map_req->subregion); + + spin_lock_irqsave(&hpb->hpb_lock, flags); + skhpb_clean_active_subregion(hpb, + hpb->region_tbl[map_req->region].subregion_tbl + + map_req->subregion); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); +} + +static void skhpb_map_inactive_compl_process(struct skhpb_lu *hpb, + struct skhpb_map_req *map_req) +{ + SKHPB_DRIVER_D("SKHPB WB COMPL BUFFER %d\n", map_req->region); + SKHPB_MAP_REQ_TIME(map_req, map_req->RSP_end, 1); +} + +/* + * Must held rsp_list_lock before enter this function + */ +static struct skhpb_rsp_info *skhpb_get_req_info(struct skhpb_lu *hpb) +{ + struct skhpb_rsp_info *rsp_info = + list_first_entry_or_null(&hpb->lh_rsp_info_free, + struct skhpb_rsp_info, + list_rsp_info); + if (!rsp_info) { + SKHPB_DRIVER_E("there is no rsp_info"); + return NULL; + } + list_del(&rsp_info->list_rsp_info); + memset(rsp_info, 0x00, sizeof(struct skhpb_rsp_info)); + + INIT_LIST_HEAD(&rsp_info->list_rsp_info); + + return rsp_info; +} + +static void skhpb_map_req_compl_fn(struct request *req, blk_status_t error) +{ + struct skhpb_map_req *map_req = req->end_io_data; + struct ufs_hba *hba; + struct skhpb_lu *hpb; + struct scsi_sense_hdr sshdr = {0}; + struct skhpb_region *cb; + struct scsi_request *scsireq = scsi_req(req); + unsigned long flags; + + /* shut up "bio leak" warning */ + memcpy(map_req->sense, scsireq->sense, SCSI_SENSE_BUFFERSIZE); + req->bio = NULL; + __blk_put_request(req->q, req); + + hpb = map_req->hpb; + hba = hpb->hba; + cb = hpb->region_tbl + map_req->region; + + if (hba->skhpb_state != SKHPB_PRESENT) + goto free_map_req; + + if (!error) { + skhpb_map_compl_process(hpb, map_req); + goto free_map_req; + } + + SKHPB_DRIVER_E("error number %d ( %d - %d )\n", + error, map_req->region, map_req->subregion); + scsi_normalize_sense(map_req->sense, + SCSI_SENSE_BUFFERSIZE, &sshdr); + SKHPB_DRIVER_E("code %x sense_key %x asc %x ascq %x\n", + sshdr.response_code, + sshdr.sense_key, sshdr.asc, sshdr.ascq); + SKHPB_DRIVER_E("byte4 %x byte5 %x byte6 %x additional_len %x\n", + sshdr.byte4, sshdr.byte5, + sshdr.byte6, sshdr.additional_length); + atomic64_inc(&hpb->rb_fail); + + if (sshdr.sense_key == ILLEGAL_REQUEST) { + if (sshdr.asc == 0x00 && sshdr.ascq == 0x16) { + /* OPERATION IN PROGRESS */ + SKHPB_DRIVER_E("retry rb %d - %d", + map_req->region, map_req->subregion); + + spin_lock_irqsave(&hpb->map_list_lock, flags); + INIT_LIST_HEAD(&map_req->list_map_req); + list_add_tail(&map_req->list_map_req, &hpb->lh_map_req_retry); + spin_unlock_irqrestore(&hpb->map_list_lock, flags); + + schedule_delayed_work(&hpb->skhpb_map_req_retry_work, + msecs_to_jiffies(5000)); + return; + } + } + // Only change subregion status at here. + // Do not put map_ctx, it will re-use when it is activated again. 
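+	/*
+	 * Note on the retry path above: sense ILLEGAL_REQUEST with
+	 * asc/ascq 0x00/0x16 ("operation in progress") requeues the
+	 * map_req onto lh_map_req_retry and reschedules
+	 * skhpb_map_req_retry_work after 5000 ms; any other error only
+	 * marks the subregion dirty below so it is re-read later.
+	 */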
+ spin_lock_irqsave(&hpb->hpb_lock, flags); + skhpb_error_active_subregion(hpb, cb->subregion_tbl + map_req->subregion); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); +free_map_req: + spin_lock_irqsave(&hpb->map_list_lock, flags); + INIT_LIST_HEAD(&map_req->list_map_req); + list_add_tail(&map_req->list_map_req, &hpb->lh_map_req_free); + spin_unlock_irqrestore(&hpb->map_list_lock, flags); + atomic64_dec(&hpb->alloc_map_req_cnt); +} + +static void skhpb_map_inactive_req_compl_fn( + struct request *req, blk_status_t error) +{ + struct skhpb_map_req *map_req = req->end_io_data; + struct ufs_hba *hba; + struct skhpb_lu *hpb; + struct scsi_sense_hdr sshdr; + struct skhpb_region *cb; + struct scsi_request *scsireq = scsi_req(req); + unsigned long flags; + + /* shut up "bio leak" warning */ + memcpy(map_req->sense, scsireq->sense, SCSI_SENSE_BUFFERSIZE); + req->bio = NULL; + __blk_put_request(req->q, req); + + hpb = map_req->hpb; + hba = hpb->hba; + cb = hpb->region_tbl + map_req->region; + + if (hba->skhpb_state != SKHPB_PRESENT) + goto free_map_req; + + if (!error) { + skhpb_map_inactive_compl_process(hpb, map_req); + goto free_map_req; + } + + SKHPB_DRIVER_E("error number %d ( %d - %d )", + error, map_req->region, map_req->subregion); + scsi_normalize_sense(map_req->sense, SCSI_SENSE_BUFFERSIZE, + &sshdr); + SKHPB_DRIVER_E("code %x sense_key %x asc %x ascq %x", + sshdr.response_code, + sshdr.sense_key, sshdr.asc, sshdr.ascq); + SKHPB_DRIVER_E("byte4 %x byte5 %x byte6 %x additional_len %x", + sshdr.byte4, sshdr.byte5, + sshdr.byte6, sshdr.additional_length); + atomic64_inc(&hpb->rb_fail); + + if (sshdr.sense_key == ILLEGAL_REQUEST) { + if (cb->is_pinned) { + SKHPB_DRIVER_E("WRITE_BUFFER is not allowed on pinned area: region#%d", + cb->region); + } else { + if (sshdr.asc == 0x00 && sshdr.ascq == 0x16) { + /* OPERATION IN PROGRESS */ + SKHPB_DRIVER_E("retry wb %d", map_req->region); + + spin_lock(&hpb->map_list_lock); + INIT_LIST_HEAD(&map_req->list_map_req); + list_add_tail(&map_req->list_map_req, &hpb->lh_map_req_retry); + spin_unlock(&hpb->map_list_lock); + + schedule_delayed_work(&hpb->skhpb_map_req_retry_work, + msecs_to_jiffies(5000)); + return; + } + } + } + // Only change subregion status at here. + // Do not put map_ctx, it will re-use when it is activated again. 
+ spin_lock_irqsave(&hpb->hpb_lock, flags); + skhpb_error_active_subregion(hpb, cb->subregion_tbl + map_req->subregion); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); +free_map_req: + spin_lock(&hpb->map_list_lock); + INIT_LIST_HEAD(&map_req->list_map_req); + list_add_tail(&map_req->list_map_req, &hpb->lh_map_req_free); + spin_unlock(&hpb->map_list_lock); + atomic64_dec(&hpb->alloc_map_req_cnt); +} + +static int skhpb_execute_req_dev_ctx(struct skhpb_lu *hpb, + unsigned char *cmd, void *buf, int length) +{ + unsigned long flags; + struct scsi_sense_hdr sshdr = {0}; + struct scsi_device *sdp; + struct ufs_hba *hba = hpb->hba; + int ret = 0; + + spin_lock_irqsave(hba->host->host_lock, flags); + sdp = hba->sdev_ufs_lu[hpb->lun]; + if (sdp) { + ret = scsi_device_get(sdp); + if (!ret && !scsi_device_online(sdp)) { + ret = -ENODEV; + scsi_device_put(sdp); + } else if (!ret) { + hba->issue_ioctl = true; + } + } else { + ret = -ENODEV; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (ret) + return ret; + + ret = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE, + buf, length, &sshdr, + msecs_to_jiffies(30000), 3, NULL); + spin_lock_irqsave(hba->host->host_lock, flags); + hba->issue_ioctl = false; + spin_unlock_irqrestore(hba->host->host_lock, flags); + scsi_device_put(sdp); + return ret; +} + +static inline void skhpb_set_read_dev_ctx_cmd(unsigned char *cmd, int lba, + int length) +{ + cmd[0] = READ_10; + cmd[1] = 0x02; + cmd[2] = SKHPB_GET_BYTE_3(lba); + cmd[3] = SKHPB_GET_BYTE_2(lba); + cmd[4] = SKHPB_GET_BYTE_1(lba); + cmd[5] = SKHPB_GET_BYTE_0(lba); + cmd[6] = SKHPB_GET_BYTE_2(length); + cmd[7] = SKHPB_GET_BYTE_1(length); + cmd[8] = SKHPB_GET_BYTE_0(length); +} + +int skhpb_issue_req_dev_ctx(struct skhpb_lu *hpb, unsigned char *buf, + int buf_length) +{ + unsigned char cmd[10] = { 0 }; + int cmd_len = buf_length >> PAGE_SHIFT; + int ret = 0; + + skhpb_set_read_dev_ctx_cmd(cmd, 0x48504230, cmd_len); + + ret = skhpb_execute_req_dev_ctx(hpb, cmd, buf, buf_length); + if (ret < 0) + SKHPB_DRIVER_E("failed with err %d\n", ret); + return ret; +} + +static inline void skhpb_set_write_buf_cmd(unsigned char *cmd, int region) +{ + cmd[0] = SKHPB_WRITE_BUFFER; + cmd[1] = 0x01; + cmd[2] = SKHPB_GET_BYTE_1(region); + cmd[3] = SKHPB_GET_BYTE_0(region); + cmd[4] = 0x00; + cmd[5] = 0x00; + cmd[6] = 0x00; + cmd[7] = 0x00; + cmd[8] = 0x00; + cmd[9] = 0x00; + + //To verify the values within WRITE_BUFFER command + SKHPB_DRIVER_HEXDUMP("[HPB] WRITE BUFFER ", 16, 1, cmd, 10, 1); +} + +static inline void skhpb_set_read_buf_cmd(unsigned char *cmd, + int region, int subregion, int subregion_mem_size) +{ + cmd[0] = SKHPB_READ_BUFFER; + cmd[1] = 0x01; + cmd[2] = SKHPB_GET_BYTE_1(region); + cmd[3] = SKHPB_GET_BYTE_0(region); + cmd[4] = SKHPB_GET_BYTE_1(subregion); + cmd[5] = SKHPB_GET_BYTE_0(subregion); + cmd[6] = SKHPB_GET_BYTE_2(subregion_mem_size); + cmd[7] = SKHPB_GET_BYTE_1(subregion_mem_size); + cmd[8] = SKHPB_GET_BYTE_0(subregion_mem_size); + cmd[9] = 0x00; + + //To verify the values within READ_BUFFER command + SKHPB_DRIVER_HEXDUMP("[HPB] READ BUFFER ", 16, 1, cmd, 10, 1); +} + +static int skhpb_add_bio_page(struct skhpb_lu *hpb, + struct request_queue *q, struct bio *bio, struct bio_vec *bvec, + struct skhpb_map_ctx *mctx) +{ + struct page *page = NULL; + int i, ret; + + bio_init(bio, bvec, hpb->mpages_per_subregion); + + for (i = 0; i < hpb->mpages_per_subregion; i++) { + page = mctx->m_page[i]; + if (!page) + return -ENOMEM; + + ret = bio_add_pc_page(q, bio, page, hpb->mpage_bytes, 0); + if (ret != 
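+	/*
+	 * Byte layout produced by skhpb_set_read_buf_cmd() above for,
+	 * e.g., region 0x0105, subregion 0x0002 and subregion_mem_size
+	 * 0x4000 (illustrative values; "op" stands for the
+	 * SKHPB_READ_BUFFER opcode):
+	 *
+	 *   cmd[] = { op, 0x01, 0x01, 0x05, 0x00, 0x02,
+	 *             0x00, 0x40, 0x00, 0x00 }
+	 */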
hpb->mpage_bytes) { + SKHPB_DRIVER_E("error ret %d\n", ret); + return -EINVAL; + } + } + return 0; +} + +static int skhpb_map_req_issue( + struct skhpb_lu *hpb, struct skhpb_map_req *map_req) +{ + struct request_queue *q = hpb->hba->sdev_ufs_lu[hpb->lun]->request_queue; + struct request *req; + struct scsi_request *scsireq; + unsigned char cmd[10] = { 0 }; + int ret; + unsigned long flags; + + if (map_req->rwbuffer_flag == W_BUFFER) + skhpb_set_write_buf_cmd(cmd, map_req->region); + else + skhpb_set_read_buf_cmd(cmd, map_req->region, map_req->subregion, + map_req->subregion_mem_size); + if (map_req->rwbuffer_flag == W_BUFFER) + req = blk_get_request(q, REQ_OP_SCSI_OUT, __GFP_RECLAIM); + else + req = blk_get_request(q, REQ_OP_SCSI_IN, __GFP_RECLAIM); + if (IS_ERR(req)) { + int rv = PTR_ERR(req); + + if (map_req->rwbuffer_flag == W_BUFFER) + SKHPB_DRIVER_E("blk_get_request errno %d, \ + retry #%d, WRITE BUFFER %d\n", + rv, map_req->retry_cnt, map_req->region); + else + SKHPB_DRIVER_E("blk_get_request errno %d, \ + retry #%d, READ BUFFER %d:%d\n", + rv, map_req->retry_cnt, + map_req->region, map_req->subregion); + + if (map_req->retry_cnt == 10) { + /* give up */ + return rv; + } + + spin_lock_irqsave(&hpb->map_list_lock, flags); + list_add_tail(&map_req->list_map_req, &hpb->lh_map_req_retry); + spin_unlock_irqrestore(&hpb->map_list_lock, flags); + + schedule_delayed_work(&hpb->skhpb_map_req_retry_work, msecs_to_jiffies(10)); + + return 0; + } + scsireq = scsi_req(req); + + scsireq->cmd_len = COMMAND_SIZE(cmd[0]); + BUG_ON(scsireq->cmd_len > sizeof(scsireq->__cmd)); + scsireq->cmd = scsireq->__cmd; + memcpy(scsireq->cmd, cmd, scsireq->cmd_len); + + req->rq_flags |= RQF_QUIET | RQF_PREEMPT; + req->timeout = msecs_to_jiffies(30000); + req->end_io_data = map_req; + + if (map_req->rwbuffer_flag == R_BUFFER) { + ret = skhpb_add_bio_page( + hpb, q, &map_req->bio, map_req->bvec, map_req->mctx); + if (ret) { + SKHPB_DRIVER_E("skhpb_add_bio_page_error %d\n", ret); + goto out_put_request; + } + map_req->pbio = &map_req->bio; + blk_rq_append_bio(req, &map_req->pbio); + } + SKHPB_DRIVER_D("issue map_request: %d - %d\n", + map_req->region, map_req->subregion); + + SKHPB_MAP_REQ_TIME(map_req, map_req->RSP_issue, 0); + if (hpb->hpb_control_mode == HOST_CTRL_MODE) { + if (map_req->rwbuffer_flag == W_BUFFER) + blk_execute_rq_nowait( + q, NULL, req, 0, skhpb_map_inactive_req_compl_fn); + else + blk_execute_rq_nowait(q, NULL, req, 0, skhpb_map_req_compl_fn); + } else + blk_execute_rq_nowait(q, NULL, req, 1, skhpb_map_req_compl_fn); + + if (map_req->rwbuffer_flag == W_BUFFER) + atomic64_inc(&hpb->w_map_req_cnt); + else + atomic64_inc(&hpb->map_req_cnt); + + return 0; + +out_put_request: + blk_put_request(req); + return ret; +} + +static int skhpb_set_map_req(struct skhpb_lu *hpb, + int region, int subregion, struct skhpb_map_ctx *mctx, + struct skhpb_rsp_info *rsp_info, + enum SKHPB_BUFFER_MODE flag) +{ + bool last = hpb->region_tbl[region].subregion_tbl[subregion].last; + struct skhpb_map_req *map_req; + unsigned long flags; + + spin_lock_irqsave(&hpb->map_list_lock, flags); + map_req = list_first_entry_or_null(&hpb->lh_map_req_free, + struct skhpb_map_req, + list_map_req); + if (!map_req) { + SKHPB_DRIVER_E("There is no map_req\n"); + spin_unlock_irqrestore(&hpb->map_list_lock, flags); + return -ENOMEM; + } + list_del(&map_req->list_map_req); + spin_unlock_irqrestore(&hpb->map_list_lock, flags); + atomic64_inc(&hpb->alloc_map_req_cnt); + + memset(map_req, 0x00, sizeof(struct skhpb_map_req)); + + map_req->hpb = 
hpb; + map_req->region = region; + map_req->subregion = subregion; + map_req->subregion_mem_size = + last ? hpb->last_subregion_mem_size : hpb->subregion_mem_size; + map_req->mctx = mctx; + map_req->lun = hpb->lun; + map_req->RSP_start = rsp_info->RSP_start; + if (flag == W_BUFFER) { + map_req->rwbuffer_flag = W_BUFFER; + } else + map_req->rwbuffer_flag = R_BUFFER; + + if (skhpb_map_req_issue(hpb, map_req)) { + SKHPB_DRIVER_E("issue Failed!!!\n"); + return -ENOMEM; + } + + return 0; +} + +static struct skhpb_map_ctx *skhpb_get_map_ctx(struct skhpb_lu *hpb) +{ + struct skhpb_map_ctx *mctx; + + mctx = list_first_entry_or_null(&hpb->lh_map_ctx, + struct skhpb_map_ctx, list_table); + if (mctx) { + list_del_init(&mctx->list_table); + hpb->debug_free_table--; + return mctx; + } + return ERR_PTR(-ENOMEM); +} + +static inline void skhpb_add_lru_info(struct skhpb_victim_select_info *lru_info, + struct skhpb_region *cb) +{ + cb->region_state = SKHPB_REGION_ACTIVE; + list_add_tail(&cb->list_region, &lru_info->lru); + atomic64_inc(&lru_info->active_count); +} + +static inline int skhpb_add_region(struct skhpb_lu *hpb, + struct skhpb_region *cb) +{ + struct skhpb_victim_select_info *lru_info; + int subregion; + int err = 0; + + lru_info = &hpb->lru_info; + + //SKHPB_DRIVER_D("E->active region: %d", cb->region); + + for (subregion = 0; subregion < cb->subregion_count; subregion++) { + struct skhpb_subregion *cp; + + cp = cb->subregion_tbl + subregion; + cp->mctx = skhpb_get_map_ctx(hpb); + if (IS_ERR(cp->mctx)) { + err = PTR_ERR(cp->mctx); + goto out; + } + cp->subregion_state = SKHPB_SUBREGION_DIRTY; + } + if (!cb->is_pinned) + skhpb_add_lru_info(lru_info, cb); + + atomic64_inc(&hpb->region_add); +out: + if (err) + SKHPB_DRIVER_E("get mctx failed. err %d subregion %d free_table %d\n", + err, subregion, hpb->debug_free_table); + return err; +} + +static inline void skhpb_put_map_ctx( + struct skhpb_lu *hpb, struct skhpb_map_ctx *mctx) +{ + list_add(&mctx->list_table, &hpb->lh_map_ctx); + hpb->debug_free_table++; +} + +static inline void skhpb_purge_active_page(struct skhpb_lu *hpb, + struct skhpb_subregion *cp, int state) +{ + if (state == SKHPB_SUBREGION_UNUSED) { + skhpb_put_map_ctx(hpb, cp->mctx); + cp->mctx = NULL; + } + cp->subregion_state = state; +} + +static inline void skhpb_cleanup_lru_info( + struct skhpb_victim_select_info *lru_info, + struct skhpb_region *cb) +{ + list_del_init(&cb->list_region); + cb->region_state = SKHPB_REGION_INACTIVE; + cb->hit_count = 0; + atomic64_dec(&lru_info->active_count); +} + +static inline void skhpb_evict_region(struct skhpb_lu *hpb, + struct skhpb_region *cb) +{ + struct skhpb_victim_select_info *lru_info; + struct skhpb_subregion *cp; + int subregion; + + // If the maximum value is exceeded at the time of region addition, + // it may have already been processed. 
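+	/*
+	 * Eviction flow sketch (see skhpb_check_lru_evict() below): when
+	 * active_count reaches max_lru_active_count, a victim is chosen
+	 * by skhpb_victim_lru_info() - the list head for TYPE_LRU, the
+	 * lowest hit_count entry for TYPE_LFU - and evicted here before
+	 * the new region is added.
+	 */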
+ if (cb->region_state == SKHPB_REGION_INACTIVE) { + SKHPB_DRIVER_D("Region:%d was already inactivated.\n", + cb->region); + return; + } + lru_info = &hpb->lru_info; + + //SKHPB_DRIVER_D("C->EVICT region: %d\n", cb->region); + + skhpb_cleanup_lru_info(lru_info, cb); + atomic64_inc(&hpb->region_evict); + for (subregion = 0; subregion < cb->subregion_count; subregion++) { + cp = cb->subregion_tbl + subregion; + + skhpb_purge_active_page(hpb, cp, SKHPB_SUBREGION_UNUSED); + } +} + +static void skhpb_hit_lru_info(struct skhpb_victim_select_info *lru_info, + struct skhpb_region *cb) +{ + switch (lru_info->selection_type) { + case TYPE_LRU: + list_move_tail(&cb->list_region, &lru_info->lru); + break; + case TYPE_LFU: + if (cb->hit_count != 0xffffffff) + cb->hit_count++; + + list_move_tail(&cb->list_region, &lru_info->lru); + break; + default: + break; + } +} + +static struct skhpb_region *skhpb_victim_lru_info( + struct skhpb_victim_select_info *lru_info) +{ + struct skhpb_region *cb; + struct skhpb_region *victim_cb = NULL; + u32 hit_count = 0xffffffff; + + switch (lru_info->selection_type) { + case TYPE_LRU: + victim_cb = list_first_entry(&lru_info->lru, + struct skhpb_region, list_region); + break; + case TYPE_LFU: + list_for_each_entry(cb, &lru_info->lru, list_region) { + if (hit_count > cb->hit_count) { + hit_count = cb->hit_count; + victim_cb = cb; + } + } + break; + default: + break; + } + return victim_cb; +} + +static int skhpb_check_lru_evict(struct skhpb_lu *hpb, struct skhpb_region *cb) +{ + struct skhpb_victim_select_info *lru_info = &hpb->lru_info; + struct skhpb_region *victim_cb; + unsigned long flags; + + if (cb->is_pinned) + return 0; + + spin_lock_irqsave(&hpb->hpb_lock, flags); + if (!list_empty(&cb->list_region)) { + skhpb_hit_lru_info(lru_info, cb); + goto out; + } + + if (cb->region_state != SKHPB_REGION_INACTIVE) + goto out; + + if (atomic64_read(&lru_info->active_count) == lru_info->max_lru_active_count) { + + victim_cb = skhpb_victim_lru_info(lru_info); + + if (!victim_cb) { + SKHPB_DRIVER_E("SKHPB victim_cb is NULL\n"); + goto unlock_error; + } + SKHPB_DRIVER_D("max lru case. victim : %d\n", victim_cb->region); + skhpb_evict_region(hpb, victim_cb); + } + if (skhpb_add_region(hpb, cb)) { + SKHPB_DRIVER_E("SKHPB memory allocation failed\n"); + goto unlock_error; + } + + +out: + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + return 0; +unlock_error: + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + return -ENOMEM; +} + +static int skhpb_evict_load_region(struct skhpb_lu *hpb, + struct skhpb_rsp_info *rsp_info) +{ + struct skhpb_region *cb; + int region, iter; + unsigned long flags; + + for (iter = 0; iter < rsp_info->inactive_cnt; iter++) { + region = rsp_info->inactive_list.region[iter]; + cb = hpb->region_tbl + region; + + if (cb->is_pinned) { + /* + * Pinned active-block should not drop-out. 
+			 * If one does, treat it as a critical error and
+			 * run skhpb_eh_work.
+			 */
+			SKHPB_DRIVER_E("SKHPB pinned active-block drop-out error\n");
+			return -ENOMEM;
+		}
+		if (list_empty(&cb->list_region))
+			continue;
+
+		spin_lock_irqsave(&hpb->hpb_lock, flags);
+		skhpb_evict_region(hpb, cb);
+		spin_unlock_irqrestore(&hpb->hpb_lock, flags);
+	}
+
+	for (iter = 0; iter < rsp_info->active_cnt; iter++) {
+		region = rsp_info->active_list.region[iter];
+		cb = hpb->region_tbl + region;
+		if (skhpb_check_lru_evict(hpb, cb))
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static inline struct skhpb_rsp_field *skhpb_get_hpb_rsp(
+		struct ufshcd_lrb *lrbp)
+{
+	return (struct skhpb_rsp_field *)&lrbp->ucd_rsp_ptr->sr.sense_data_len;
+}
+
+static void skhpb_rsp_map_cmd_req(struct skhpb_lu *hpb,
+		struct skhpb_rsp_info *rsp_info)
+{
+	struct skhpb_region *cb;
+	struct skhpb_subregion *cp;
+	int region, subregion;
+	int iter;
+	int ret;
+	unsigned long flags;
+
+	/*
+	 * Before issuing READ BUFFER commands for the newly activated
+	 * blocks, prepare the memory from the memory pool.
+	 */
+	ret = skhpb_evict_load_region(hpb, rsp_info);
+	if (ret) {
+		SKHPB_DRIVER_E("region evict/load failed. ret %d\n", ret);
+		goto wakeup_ee_worker;
+	}
+	for (iter = 0; iter < rsp_info->active_cnt; iter++) {
+		region = rsp_info->active_list.region[iter];
+		subregion = rsp_info->active_list.subregion[iter];
+		cb = hpb->region_tbl + region;
+
+		if (region >= hpb->regions_per_lu ||
+		    subregion >= cb->subregion_count) {
+			SKHPB_DRIVER_E("skhpb issue-map %d - %d range error\n",
+				       region, subregion);
+			goto wakeup_ee_worker;
+		}
+
+		cp = cb->subregion_tbl + subregion;
+
+		/*
+		 * If subregion_state is already SKHPB_SUBREGION_ISSUED,
+		 * the active page has already been added to the list,
+		 * so just skip this entry.
+		 */
+		spin_lock_irqsave(&hpb->hpb_lock, flags);
+		if (cp->subregion_state == SKHPB_SUBREGION_ISSUED) {
+			spin_unlock_irqrestore(&hpb->hpb_lock, flags);
+			continue;
+		}
+
+		cp->subregion_state = SKHPB_SUBREGION_ISSUED;
+
+		ret = skhpb_clean_dirty_bitmap(hpb, cp);
+
+		spin_unlock_irqrestore(&hpb->hpb_lock, flags);
+
+		if (ret)
+			continue;
+
+		if (!hpb->hba->sdev_ufs_lu[hpb->lun] ||
+		    !hpb->hba->sdev_ufs_lu[hpb->lun]->request_queue)
+			return;
+		ret = skhpb_set_map_req(hpb, region, subregion,
+					cp->mctx, rsp_info, R_BUFFER);
+		SKHPB_DRIVER_D("SEND READ_BUFFER - Region:%d, SubRegion:%d\n",
+			       region, subregion);
+		if (ret) {
+			SKHPB_DRIVER_E("skhpb_set_map_req error %d\n", ret);
+			goto wakeup_ee_worker;
+		}
+	}
+	return;
+
+wakeup_ee_worker:
+	hpb->hba->skhpb_state = SKHPB_FAILED;
+	schedule_work(&hpb->hba->skhpb_eh_work);
+}
+
+/* routine: runs in the UFS ISR (command completion) path */
+void skhpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+	struct skhpb_lu *hpb;
+	struct skhpb_rsp_field *rsp_field;
+	struct skhpb_rsp_field sense_data;
+	struct skhpb_rsp_info *rsp_info;
+	int data_seg_len, num, blk_idx, update_alert;
+
+	update_alert = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
+		& MASK_RSP_UPIU_HPB_UPDATE_ALERT;
+	data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
+		& MASK_RSP_UPIU_DATA_SEG_LEN;
+	if (!update_alert || !data_seg_len) {
+		bool do_tasklet = false;
+
+		if (lrbp->lun >= UFS_UPIU_MAX_GENERAL_LUN)
+			return;
+
+		hpb = hba->skhpb_lup[lrbp->lun];
+		if (!hpb)
+			return;
+
+		spin_lock(&hpb->rsp_list_lock);
+		do_tasklet = !list_empty(&hpb->lh_rsp_info);
+		spin_unlock(&hpb->rsp_list_lock);
+
+		if (do_tasklet)
+			schedule_work(&hpb->skhpb_rsp_work);
+		return;
+	}
+
+	memcpy(&sense_data, &lrbp->ucd_rsp_ptr->sr.sense_data_len,
+	       sizeof(struct 
skhpb_rsp_field)); + rsp_field = &sense_data; + if ((get_unaligned_be16(rsp_field->sense_data_len + 0) + != SKHPB_DEV_SENSE_SEG_LEN) || + rsp_field->desc_type != SKHPB_DEV_DES_TYPE || + rsp_field->additional_len != SKHPB_DEV_ADDITIONAL_LEN || + rsp_field->hpb_type == SKHPB_RSP_NONE || + rsp_field->active_region_cnt > SKHPB_MAX_ACTIVE_NUM || + rsp_field->inactive_region_cnt > SKHPB_MAX_INACTIVE_NUM) + return; + + if (rsp_field->lun >= UFS_UPIU_MAX_GENERAL_LUN) { + SKHPB_DRIVER_E("lun is not general = %d", rsp_field->lun); + return; + } + hpb = hba->skhpb_lup[rsp_field->lun]; + if (!hpb) { + SKHPB_DRIVER_E("UFS-LU%d is not SKHPB LU\n", rsp_field->lun); + return; + } + if (hpb->force_map_req_disable) + return; + SKHPB_DRIVER_D("HPB-Info Noti: %d LUN: %d Seg-Len %d, Req_type = %d\n", + rsp_field->hpb_type, rsp_field->lun, + data_seg_len, rsp_field->hpb_type); + if (!hpb->lu_hpb_enable) { + SKHPB_DRIVER_E("LU(%d) not HPB-LU\n", rsp_field->lun); + return; + } + //To verify the values within RESPONSE UPIU. + SKHPB_DRIVER_HEXDUMP("[HPB] RESP UPIU ", 16, 1, + lrbp->ucd_rsp_ptr, sizeof(struct utp_upiu_rsp), 1); + //If wLUMaxActiveHPBRegions == wNumHPBPinnedRegions + if (hpb->lru_info.max_lru_active_count == 0) { + SKHPB_DRIVER_D("max_lru_active_count is 0"); + return; + } + + switch (rsp_field->hpb_type) { + case SKHPB_RSP_REQ_REGION_UPDATE: + atomic64_inc(&hpb->rb_noti_cnt); + WARN_ON(data_seg_len != SKHPB_DEV_DATA_SEG_LEN); + + spin_lock(&hpb->rsp_list_lock); + rsp_info = skhpb_get_req_info(hpb); + spin_unlock(&hpb->rsp_list_lock); + if (!rsp_info) + return; + rsp_info->type = SKHPB_RSP_REQ_REGION_UPDATE; + + for (num = 0; num < rsp_field->active_region_cnt; num++) { + blk_idx = num * SKHPB_PER_ACTIVE_INFO_BYTES; + rsp_info->active_list.region[num] = + get_unaligned_be16(rsp_field->hpb_active_field + blk_idx); + rsp_info->active_list.subregion[num] = + get_unaligned_be16(rsp_field->hpb_active_field + + blk_idx + 2); + SKHPB_DRIVER_D("active num: %d, #block: %d, page#: %d\n", + num + 1, + rsp_info->active_list.region[num], + rsp_info->active_list.subregion[num]); + } + rsp_info->active_cnt = num; + + for (num = 0; num < rsp_field->inactive_region_cnt; num++) { + blk_idx = num * SKHPB_PER_INACTIVE_INFO_BYTES; + rsp_info->inactive_list.region[num] = + get_unaligned_be16(rsp_field->hpb_inactive_field + blk_idx); + SKHPB_DRIVER_D("inactive num: %d, #block: %d\n", + num + 1, rsp_info->inactive_list.region[num]); + } + rsp_info->inactive_cnt = num; + + SKHPB_DRIVER_D("active cnt: %d, inactive cnt: %d\n", + rsp_info->active_cnt, rsp_info->inactive_cnt); + SKHPB_DRIVER_D("add_list %p -> %p\n", + rsp_info, &hpb->lh_rsp_info); + + spin_lock(&hpb->rsp_list_lock); + list_add_tail(&rsp_info->list_rsp_info, &hpb->lh_rsp_info); + spin_unlock(&hpb->rsp_list_lock); + + schedule_work(&hpb->skhpb_rsp_work); + break; + + case SKHPB_RSP_HPB_RESET: + for (num = 0 ; num < UFS_UPIU_MAX_GENERAL_LUN ; num++) { + hpb = hba->skhpb_lup[num]; + if (!hpb || !hpb->lu_hpb_enable) + continue; + atomic64_inc(&hpb->reset_noti_cnt); + + spin_lock(&hpb->rsp_list_lock); + rsp_info = skhpb_get_req_info(hpb); + spin_unlock(&hpb->rsp_list_lock); + if (!rsp_info) + return; + + rsp_info->type = SKHPB_RSP_HPB_RESET; + + spin_lock(&hpb->rsp_list_lock); + list_add_tail(&rsp_info->list_rsp_info, &hpb->lh_rsp_info); + spin_unlock(&hpb->rsp_list_lock); + + schedule_work(&hpb->skhpb_rsp_work); + } + break; + + default: + SKHPB_DRIVER_E("hpb_type is not available : %d\n", + rsp_field->hpb_type); + break; + } +} + +static int skhpb_read_desc(struct 
ufs_hba *hba, + u8 desc_id, u8 desc_index, u8 *desc_buf, u32 size) +{ + int err = 0; + + err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, + desc_id, desc_index, 0, desc_buf, &size); + if (err) { + SKHPB_DRIVER_E("reading Device Desc failed. err = %d\n", err); + } + return err; +} + +static int skhpb_read_device_desc( + struct ufs_hba *hba, u8 *desc_buf, u32 size) +{ + return skhpb_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, desc_buf, size); +} + +static int skhpb_read_geo_desc(struct ufs_hba *hba, u8 *desc_buf, u32 size) +{ + return skhpb_read_desc(hba, QUERY_DESC_IDN_GEOMETRY, 0, + desc_buf, size); +} + +static int skhpb_read_unit_desc(struct ufs_hba *hba, int lun, + u8 *desc_buf, u32 size) +{ + return skhpb_read_desc(hba, QUERY_DESC_IDN_UNIT, + lun, desc_buf, size); +} + +static inline void skhpb_add_subregion_to_req_list(struct skhpb_lu *hpb, + struct skhpb_subregion *cp) +{ + list_add_tail(&cp->list_subregion, &hpb->lh_subregion_req); + cp->subregion_state = SKHPB_SUBREGION_ISSUED; +} + +static int skhpb_execute_req(struct skhpb_lu *hpb, unsigned char *cmd, + struct skhpb_subregion *cp) +{ + struct ufs_hba *hba = hpb->hba; + struct scsi_device *sdp; + struct request_queue *q; + struct request *req; + struct scsi_request *scsireq; + struct bio bio; + struct bio *pbio = &bio; + struct scsi_sense_hdr sshdr = {0}; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(hba->host->host_lock, flags); + sdp = hba->sdev_ufs_lu[hpb->lun]; + if (sdp) { + ret = scsi_device_get(sdp); + if (!ret && !scsi_device_online(sdp)) { + ret = -ENODEV; + scsi_device_put(sdp); + } + } else { + ret = -ENODEV; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (ret) + return ret; + + q = sdp->request_queue; + + req = blk_get_request(q, REQ_OP_SCSI_IN, __GFP_RECLAIM); + if (IS_ERR(req)) { + ret = PTR_ERR(req); + goto out_put; + } + scsireq = scsi_req(req); + + scsireq->cmd_len = COMMAND_SIZE(cmd[0]); + if (scsireq->cmd_len > sizeof(scsireq->__cmd)) { + scsireq->cmd = kmalloc(scsireq->cmd_len, __GFP_RECLAIM); + if (!scsireq->cmd) { + ret = -ENOMEM; + goto out_put_request; + } + } else { + scsireq->cmd = scsireq->__cmd; + } + memcpy(scsireq->cmd, cmd, scsireq->cmd_len); + + scsireq->retries = 3; + req->timeout = msecs_to_jiffies(10000); + req->rq_flags |= RQF_QUIET | RQF_PREEMPT; + + ret = skhpb_add_bio_page(hpb, q, &bio, hpb->bvec, cp->mctx); + if (ret) + goto out_put_scsi_req; + blk_rq_append_bio(req, &pbio); + + blk_execute_rq(q, NULL, req, 1); + + if (scsireq->result) { + scsi_normalize_sense(scsireq->sense, SCSI_SENSE_BUFFERSIZE, &sshdr); + SKHPB_DRIVER_E("code %x sense_key %x asc %x ascq %x", + sshdr.response_code, sshdr.sense_key, sshdr.asc, + sshdr.ascq); + SKHPB_DRIVER_E("byte4 %x byte5 %x byte6 %x additional_len %x", + sshdr.byte4, sshdr.byte5, sshdr.byte6, + sshdr.additional_length); + spin_lock_irqsave(&hpb->hpb_lock, flags); + skhpb_error_active_subregion(hpb, cp); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + ret = -EIO; + } else { + spin_lock_irqsave(&hpb->hpb_lock, flags); + ret = skhpb_clean_dirty_bitmap(hpb, cp); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + if (ret) { + SKHPB_DRIVER_E("skhpb_clean_dirty_bitmap error %d", ret); + } + ret = 0; + } + +out_put_scsi_req: + scsi_req_free_cmd(scsireq); +out_put_request: + blk_put_request(req); +out_put: + scsi_device_put(sdp); + return ret; +} + +static int skhpb_issue_map_req_from_list(struct skhpb_lu *hpb) +{ + struct skhpb_subregion *cp, *next_cp; + int ret; + unsigned long flags; + + LIST_HEAD(req_list); 
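+	/*
+	 * Splice the pending sub-region list onto a private list so the
+	 * blocking READ BUFFER requests below can be issued without
+	 * holding hpb_lock across the whole loop.
+	 */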
+ + spin_lock_irqsave(&hpb->hpb_lock, flags); + list_splice_init(&hpb->lh_subregion_req, &req_list); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + + list_for_each_entry_safe(cp, next_cp, &req_list, list_subregion) { + int subregion_mem_size = + cp->last ? hpb->last_subregion_mem_size : hpb->subregion_mem_size; + unsigned char cmd[10] = { 0 }; + + skhpb_set_read_buf_cmd(cmd, cp->region, cp->subregion, + subregion_mem_size); + + SKHPB_DRIVER_D("issue map_request: %d - %d/%d\n", + cp->region, cp->subregion, subregion_mem_size); + + ret = skhpb_execute_req(hpb, cmd, cp); + if (ret < 0) { + SKHPB_DRIVER_E("region %d sub %d failed with err %d", + cp->region, cp->subregion, ret); + continue; + } + + spin_lock_irqsave(&hpb->hpb_lock, flags); + skhpb_clean_active_subregion(hpb, cp); + list_del_init(&cp->list_subregion); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + } + + return 0; +} + +static void skhpb_pinned_work_handler(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct skhpb_lu *hpb = + container_of(dwork, struct skhpb_lu, skhpb_pinned_work); + int ret; + + SKHPB_DRIVER_D("worker start\n"); + + if (!list_empty(&hpb->lh_subregion_req)) { + pm_runtime_get_sync(SKHPB_DEV(hpb)); + ret = skhpb_issue_map_req_from_list(hpb); + /* + * if its function failed at init time, + * skhpb-device will request map-req, + * so it is not critical-error, and just finish work-handler + */ + if (ret) + SKHPB_DRIVER_E("failed map-issue. ret %d\n", ret); + pm_runtime_mark_last_busy(SKHPB_DEV(hpb)); + pm_runtime_put_noidle(SKHPB_DEV(hpb)); + } + + SKHPB_DRIVER_D("worker end\n"); +} + +static void skhpb_map_req_retry_work_handler(struct work_struct *work) +{ + struct skhpb_lu *hpb; + struct delayed_work *dwork = to_delayed_work(work); + struct skhpb_map_req *map_req; + int ret = 0; + unsigned long flags; + + LIST_HEAD(retry_list); + + hpb = container_of(dwork, struct skhpb_lu, skhpb_map_req_retry_work); + SKHPB_DRIVER_D("retry worker start"); + + spin_lock_irqsave(&hpb->map_list_lock, flags); + list_splice_init(&hpb->lh_map_req_retry, &retry_list); + spin_unlock_irqrestore(&hpb->map_list_lock, flags); + + while (1) { + map_req = list_first_entry_or_null(&retry_list, + struct skhpb_map_req, list_map_req); + if (!map_req) { + SKHPB_DRIVER_D("There is no map_req"); + break; + } + list_del(&map_req->list_map_req); + + map_req->retry_cnt++; + + ret = skhpb_map_req_issue(hpb, map_req); + if (ret) { + SKHPB_DRIVER_E("skhpb_map_req_issue error %d", ret); + goto wakeup_ee_worker; + } + } + SKHPB_DRIVER_D("worker end"); + return; + +wakeup_ee_worker: + hpb->hba->skhpb_state = SKHPB_FAILED; + schedule_work(&hpb->hba->skhpb_eh_work); +} + +static void skhpb_delayed_rsp_work_handler(struct work_struct *work) +{ + struct skhpb_lu *hpb = container_of(work, struct skhpb_lu, skhpb_rsp_work); + struct skhpb_rsp_info *rsp_info; + unsigned long flags; + + SKHPB_DRIVER_D("rsp_work enter"); + + while (1) { + if (hpb->hba->clk_gating.is_suspended) { + SKHPB_DRIVER_D("rsp_work break"); + break; + } + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + rsp_info = list_first_entry_or_null(&hpb->lh_rsp_info, + struct skhpb_rsp_info, list_rsp_info); + if (!rsp_info) { + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + break; + } + + SKHPB_RSP_TIME(rsp_info->RSP_start); + + list_del_init(&rsp_info->list_rsp_info); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + switch (rsp_info->type) { + case SKHPB_RSP_REQ_REGION_UPDATE: + skhpb_rsp_map_cmd_req(hpb, rsp_info); + break; + + case 
SKHPB_RSP_HPB_RESET: + skhpb_purge_active_block(hpb); + skhpb_map_loading_trigger(hpb, true, false); + break; + + default: + break; + } + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + list_add_tail(&rsp_info->list_rsp_info, &hpb->lh_rsp_info_free); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + } + SKHPB_DRIVER_D("rsp_work end"); +} + +static void skhpb_init_constant(void) +{ + skhpb_sects_per_blk_shift = ffs(SKHPB_BLOCK) - ffs(SKHPB_SECTOR); + SKHPB_DRIVER_D("skhpb_sects_per_blk_shift: %u %u\n", + skhpb_sects_per_blk_shift, ffs(SKHPB_SECTORS_PER_BLOCK) - 1); + + skhpb_bits_per_dword_shift = ffs(SKHPB_BITS_PER_DWORD) - 1; + skhpb_bits_per_dword_mask = SKHPB_BITS_PER_DWORD - 1; + SKHPB_DRIVER_D("bits_per_dword %u shift %u mask 0x%X\n", + SKHPB_BITS_PER_DWORD, skhpb_bits_per_dword_shift, skhpb_bits_per_dword_mask); +} + +static void skhpb_table_mempool_remove(struct skhpb_lu *hpb) +{ + struct skhpb_map_ctx *mctx, *next; + int i; + + /* + * the mctx in the lh_map_ctx has been allocated completely. + */ + list_for_each_entry_safe(mctx, next, &hpb->lh_map_ctx, list_table) { + list_del(&mctx->list_table); + + for (i = 0; i < hpb->mpages_per_subregion; i++) + __free_page(mctx->m_page[i]); + + kvfree(mctx->ppn_dirty); + kfree(mctx->m_page); + kfree(mctx); + skhpb_alloc_mctx--; + } +} + +static int skhpb_init_pinned_active_block(struct skhpb_lu *hpb, + struct skhpb_region *cb) +{ + struct skhpb_subregion *cp; + int subregion, j; + int err = 0; + unsigned long flags; + + for (subregion = 0 ; subregion < cb->subregion_count ; subregion++) { + cp = cb->subregion_tbl + subregion; + + cp->mctx = skhpb_get_map_ctx(hpb); + if (IS_ERR(cp->mctx)) { + err = PTR_ERR(cp->mctx); + goto release; + } + spin_lock_irqsave(&hpb->hpb_lock, flags); + skhpb_add_subregion_to_req_list(hpb, cp); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + } + return 0; + +release: + for (j = 0 ; j < subregion ; j++) { + cp = cb->subregion_tbl + j; + skhpb_put_map_ctx(hpb, cp->mctx); + } + return err; +} + +static inline bool skhpb_is_pinned( + struct skhpb_lu_desc *lu_desc, int region) +{ + if (lu_desc->lu_hpb_pinned_end_offset != -1 && + region >= lu_desc->hpb_pinned_region_startidx && + region <= lu_desc->lu_hpb_pinned_end_offset) + return true; + + return false; +} + +static inline void skhpb_init_jobs(struct skhpb_lu *hpb) +{ + INIT_DELAYED_WORK(&hpb->skhpb_pinned_work, skhpb_pinned_work_handler); + INIT_DELAYED_WORK(&hpb->skhpb_map_req_retry_work, skhpb_map_req_retry_work_handler); + INIT_WORK(&hpb->skhpb_rsp_work, skhpb_delayed_rsp_work_handler); +} + +static inline void skhpb_cancel_jobs(struct skhpb_lu *hpb) +{ + cancel_delayed_work_sync(&hpb->skhpb_pinned_work); + cancel_work_sync(&hpb->skhpb_rsp_work); + cancel_delayed_work_sync(&hpb->skhpb_map_req_retry_work); +} + +static void skhpb_init_subregion_tbl(struct skhpb_lu *hpb, + struct skhpb_region *cb, bool last_region) +{ + int subregion; + + for (subregion = 0 ; subregion < cb->subregion_count ; subregion++) { + struct skhpb_subregion *cp = cb->subregion_tbl + subregion; + + cp->region = cb->region; + cp->subregion = subregion; + cp->subregion_state = SKHPB_SUBREGION_UNUSED; + cp->last = (last_region && subregion == cb->subregion_count - 1); + } +} + +static inline int skhpb_alloc_subregion_tbl(struct skhpb_lu *hpb, + struct skhpb_region *cb, int subregion_count) +{ + cb->subregion_tbl = kzalloc(sizeof(struct skhpb_subregion) * subregion_count, + GFP_KERNEL); + if (!cb->subregion_tbl) + return -ENOMEM; + + cb->subregion_count = subregion_count; + + return 
0; +} + +static int skhpb_table_mempool_init(struct skhpb_lu *hpb, + int num_regions, int subregions_per_region, + int entry_count, int entry_byte) +{ + int i, j; + struct skhpb_map_ctx *mctx = NULL; + + for (i = 0 ; i < num_regions * subregions_per_region ; i++) { + mctx = kzalloc(sizeof(struct skhpb_map_ctx), GFP_KERNEL); + if (!mctx) + goto release_mem; + + mctx->m_page = kzalloc(sizeof(struct page *) * + hpb->mpages_per_subregion, GFP_KERNEL); + if (!mctx->m_page) + goto release_mem; + + mctx->ppn_dirty = kvzalloc(entry_count / BITS_PER_BYTE, GFP_KERNEL); + if (!mctx->ppn_dirty) + goto release_mem; + + for (j = 0; j < hpb->mpages_per_subregion; j++) { + mctx->m_page[j] = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!mctx->m_page[j]) + goto release_mem; + } + /* SKSKHPB_DRIVER_D("[%d] mctx->m_page %p get_order %d\n", i, + mctx->m_page, get_order(hpb->mpages_per_subregion)); */ + + INIT_LIST_HEAD(&mctx->list_table); + list_add(&mctx->list_table, &hpb->lh_map_ctx); + + hpb->debug_free_table++; + } + + skhpb_alloc_mctx = num_regions * subregions_per_region; + /* SKSKHPB_DRIVER_D("number of mctx %d %d %d. debug_free_table %d\n", + num_regions * subregions_per_region, num_regions, + subregions_per_region, hpb->debug_free_table); */ + return 0; + +release_mem: + /* + * mctxs already added in lh_map_ctx will be removed + * in the caller function. + */ + if (!mctx) + goto out; + + if (mctx->m_page) { + for (j = 0; j < hpb->mpages_per_subregion; j++) + if (mctx->m_page[j]) + __free_page(mctx->m_page[j]); + kfree(mctx->m_page); + } + kvfree(mctx->ppn_dirty); + kfree(mctx); +out: + return -ENOMEM; +} + +static int skhpb_req_mempool_init(struct ufs_hba *hba, + struct skhpb_lu *hpb, int queue_depth) +{ + struct skhpb_rsp_info *rsp_info = NULL; + struct skhpb_map_req *map_req = NULL; + int i; + + if (!queue_depth) { + queue_depth = hba->nutrs; + SKHPB_DRIVER_E("lu_queue_depth is 0. 
Using device queue depth instead.\n");
+		SKHPB_DRIVER_E("hba->nutrs = %d\n", hba->nutrs);
+	}
+	INIT_LIST_HEAD(&hpb->lh_rsp_info_free);
+	INIT_LIST_HEAD(&hpb->lh_map_req_free);
+	INIT_LIST_HEAD(&hpb->lh_map_req_retry);
+
+	hpb->rsp_info = vzalloc(queue_depth * sizeof(struct skhpb_rsp_info));
+	if (!hpb->rsp_info)
+		goto release_mem;
+
+	hpb->map_req = vzalloc(queue_depth * sizeof(struct skhpb_map_req));
+	if (!hpb->map_req)
+		goto release_mem;
+
+	for (i = 0; i < queue_depth; i++) {
+		rsp_info = hpb->rsp_info + i;
+		INIT_LIST_HEAD(&rsp_info->list_rsp_info);
+		list_add_tail(&rsp_info->list_rsp_info, &hpb->lh_rsp_info_free);
+	}
+
+	for (i = 0; i < queue_depth; i++) {
+		map_req = hpb->map_req + i;
+		INIT_LIST_HEAD(&map_req->list_map_req);
+		list_add_tail(&map_req->list_map_req, &hpb->lh_map_req_free);
+	}
+
+	return 0;
+
+release_mem:
+	return -ENOMEM;
+}
+
+static void skhpb_init_lu_constant(struct skhpb_lu *hpb,
+		struct skhpb_lu_desc *lu_desc,
+		struct skhpb_func_desc *func_desc)
+{
+	unsigned long long region_unit_size, region_mem_size;
+	unsigned long long last_subregion_unit_size;
+	int entries_per_region;
+
+	/* From descriptors */
+	region_unit_size = SKHPB_SECTOR * (1ULL << func_desc->hpb_region_size);
+	region_mem_size = region_unit_size / SKHPB_BLOCK * SKHPB_ENTRY_SIZE;
+
+	hpb->subregion_unit_size = SKHPB_SECTOR * (1ULL << func_desc->hpb_subregion_size);
+	hpb->subregion_mem_size = hpb->subregion_unit_size /
+		SKHPB_BLOCK * SKHPB_ENTRY_SIZE;
+	hpb->identical_size =
+		(func_desc->hpb_region_size == func_desc->hpb_subregion_size);
+
+	last_subregion_unit_size =
+		lu_desc->lu_logblk_cnt *
+		(1ULL << lu_desc->lu_logblk_size) % hpb->subregion_unit_size;
+	hpb->last_subregion_mem_size =
+		last_subregion_unit_size / SKHPB_BLOCK * SKHPB_ENTRY_SIZE;
+	if (hpb->last_subregion_mem_size == 0)
+		hpb->last_subregion_mem_size = hpb->subregion_mem_size;
+	hpb->hpb_ver = func_desc->hpb_ver;
+	hpb->lu_max_active_regions = lu_desc->lu_max_active_hpb_regions;
+	hpb->lru_info.max_lru_active_count =
+		lu_desc->lu_max_active_hpb_regions
+		- lu_desc->lu_num_hpb_pinned_regions;
+
+	/* relation : lu <-> region <-> sub region <-> entry */
+	hpb->lu_num_blocks = lu_desc->lu_logblk_cnt;
+	entries_per_region = region_mem_size / SKHPB_ENTRY_SIZE;
+	hpb->entries_per_subregion = hpb->subregion_mem_size / SKHPB_ENTRY_SIZE;
+#if BITS_PER_LONG == 32
+	hpb->subregions_per_region = div_u64(region_mem_size, hpb->subregion_mem_size);
+#else
+	hpb->subregions_per_region = region_mem_size / hpb->subregion_mem_size;
+#endif
+
+	hpb->hpb_control_mode = func_desc->hpb_control_mode;
+#if defined(SKHPB_READ_LARGE_CHUNK_SUPPORT)
+	hpb->ppn_dirties_per_subregion =
+		hpb->entries_per_subregion / BITS_PER_PPN_DIRTY;
+	/* SKHPB_DRIVER_D("ppn_dirties_per_subregion:%d, %d\n",
+		hpb->ppn_dirties_per_subregion, BITS_PER_PPN_DIRTY); */
+#endif
+	/*
+	 * 1. regions_per_lu
+	 *    = (lu_num_blocks * 4096) / region_unit_size
+	 *    = (lu_num_blocks * SKHPB_ENTRY_SIZE) / region_mem_size
+	 *    = lu_num_blocks / (region_mem_size / SKHPB_ENTRY_SIZE)
+	 *
+	 * 2. Shortcut: regions_per_lu = lu_num_blocks / subregion_mem_size;
+	 *    this only holds when SKHPB_ENTRY_SIZE == subregions_per_region,
+	 *    otherwise it is wrong.
+ */ +#if BITS_PER_LONG == 32 + hpb->regions_per_lu = div_u64((hpb->lu_num_blocks + + (region_mem_size / SKHPB_ENTRY_SIZE) - 1), + (region_mem_size / SKHPB_ENTRY_SIZE)); + hpb->subregions_per_lu = div_u64((hpb->lu_num_blocks + + (hpb->subregion_mem_size / SKHPB_ENTRY_SIZE) - 1), + (hpb->subregion_mem_size / SKHPB_ENTRY_SIZE)); +#else + hpb->regions_per_lu = (hpb->lu_num_blocks + + (region_mem_size / SKHPB_ENTRY_SIZE) - 1) + / (region_mem_size / SKHPB_ENTRY_SIZE); + hpb->subregions_per_lu = (hpb->lu_num_blocks + + (hpb->subregion_mem_size / SKHPB_ENTRY_SIZE) - 1) + / (hpb->subregion_mem_size / SKHPB_ENTRY_SIZE); +#endif + + /* mempool info */ + hpb->mpage_bytes = PAGE_SIZE; + hpb->mpages_per_subregion = hpb->subregion_mem_size / hpb->mpage_bytes; + + /* Bitmask Info. */ + hpb->dwords_per_subregion = hpb->entries_per_subregion / SKHPB_BITS_PER_DWORD; + hpb->entries_per_region_shift = ffs(entries_per_region) - 1; + hpb->entries_per_region_mask = entries_per_region - 1; + hpb->entries_per_subregion_shift = ffs(hpb->entries_per_subregion) - 1; + hpb->entries_per_subregion_mask = hpb->entries_per_subregion - 1; + + SKHPB_DRIVER_I("===== Device Descriptor =====\n"); + SKHPB_DRIVER_I("hpb_region_size = %d, hpb_subregion_size = %d\n", + func_desc->hpb_region_size, + func_desc->hpb_subregion_size); + SKHPB_DRIVER_I("===== Constant Values =====\n"); + SKHPB_DRIVER_I("region_unit_size = %llu, region_mem_size %llu\n", + region_unit_size, region_mem_size); + SKHPB_DRIVER_I("subregion_unit_size = %llu, subregion_mem_size %d\n", + hpb->subregion_unit_size, hpb->subregion_mem_size); + SKHPB_DRIVER_I("last_subregion_mem_size = %d\n", hpb->last_subregion_mem_size); + SKHPB_DRIVER_I("lu_num_blks = %llu, reg_per_lu = %d, subreg_per_lu = %d\n", + hpb->lu_num_blocks, hpb->regions_per_lu, + hpb->subregions_per_lu); + SKHPB_DRIVER_I("subregions_per_region = %d\n", + hpb->subregions_per_region); + SKHPB_DRIVER_I("entries_per_region %u shift %u mask 0x%X\n", + entries_per_region, hpb->entries_per_region_shift, + hpb->entries_per_region_mask); + SKHPB_DRIVER_I("entries_per_subregion %u shift %u mask 0x%X\n", + hpb->entries_per_subregion, + hpb->entries_per_subregion_shift, + hpb->entries_per_subregion_mask); + SKHPB_DRIVER_I("mpages_per_subregion : %d\n", + hpb->mpages_per_subregion); + SKHPB_DRIVER_I("===================================\n"); +} + +static int skhpb_lu_hpb_init(struct ufs_hba *hba, struct skhpb_lu *hpb, + struct skhpb_func_desc *func_desc, + struct skhpb_lu_desc *lu_desc, u8 lun, + bool *do_work_lun) +{ + struct skhpb_region *cb; + struct skhpb_subregion *cp; + int region, subregion; + int total_subregion_count, subregion_count; + int ret, j; + + *do_work_lun = false; + + spin_lock_init(&hpb->hpb_lock); + spin_lock_init(&hpb->rsp_list_lock); + spin_lock_init(&hpb->map_list_lock); + + /* init lru information */ + INIT_LIST_HEAD(&hpb->lru_info.lru); + hpb->lru_info.selection_type = TYPE_LRU; + + INIT_LIST_HEAD(&hpb->lh_subregion_req); + INIT_LIST_HEAD(&hpb->lh_rsp_info); + INIT_LIST_HEAD(&hpb->lh_map_ctx); + + hpb->lu_hpb_enable = true; + + skhpb_init_lu_constant(hpb, lu_desc, func_desc); + + hpb->region_tbl = vzalloc(sizeof(struct skhpb_region) * hpb->regions_per_lu); + if (!hpb->region_tbl) + return -ENOMEM; + + SKHPB_DRIVER_D("active_block_table bytes: %lu\n", + (sizeof(struct skhpb_region) * hpb->regions_per_lu)); + + ret = skhpb_table_mempool_init(hpb, + lu_desc->lu_max_active_hpb_regions, + hpb->subregions_per_region, + hpb->entries_per_subregion, SKHPB_ENTRY_SIZE); + if (ret) { + 
SKHPB_DRIVER_E("ppn table mempool init fail!\n"); + goto release_mempool; + } + + ret = skhpb_req_mempool_init(hba, hpb, lu_desc->lu_queue_depth); + if (ret) { + SKHPB_DRIVER_E("rsp_info_mempool init fail!\n"); + goto release_mempool; + } + + total_subregion_count = hpb->subregions_per_lu; + + skhpb_init_jobs(hpb); + + SKHPB_DRIVER_D("total_subregion_count: %d\n", total_subregion_count); + for (region = 0, subregion_count = 0, + total_subregion_count = hpb->subregions_per_lu; + region < hpb->regions_per_lu; + region++, total_subregion_count -= subregion_count) { + cb = hpb->region_tbl + region; + cb->region = region; + + /* init lru region information*/ + INIT_LIST_HEAD(&cb->list_region); + cb->hit_count = 0; + + subregion_count = min(total_subregion_count, + hpb->subregions_per_region); + /* SKSKHPB_DRIVER_D("total: %d subregion_count: %d\n", + total_subregion_count, subregion_count); */ + + ret = skhpb_alloc_subregion_tbl(hpb, cb, subregion_count); + if (ret) + goto release_region_cp; + skhpb_init_subregion_tbl(hpb, cb, region == hpb->regions_per_lu - 1); + + if (skhpb_is_pinned(lu_desc, region)) { + SKHPB_DRIVER_D("region: %d PINNED %d ~ %d\n", + region, lu_desc->hpb_pinned_region_startidx, + lu_desc->lu_hpb_pinned_end_offset); + ret = skhpb_init_pinned_active_block(hpb, cb); + if (ret) + goto release_region_cp; + *do_work_lun = true; + cb->is_pinned = true; + cb->region_state = SKHPB_REGION_ACTIVE; + } else { + /* SKSKHPB_DRIVER_D("region: %d inactive\n", cb->region); */ + cb->is_pinned = false; + cb->region_state = SKHPB_REGION_INACTIVE; + } + } + if (total_subregion_count != 0) { + SKHPB_DRIVER_E("error total_subregion_count: %d\n", + total_subregion_count); + goto release_region_cp; + } + hpb->hba = hba; + hpb->lun = lun; + /* + * even if creating sysfs failed, skhpb could run normally. + * so we don't deal with error handling + */ + skhpb_create_sysfs(hba, hpb); + return 0; + +release_region_cp: + for (j = 0 ; j < region ; j++) { + cb = hpb->region_tbl + j; + if (cb->subregion_tbl) { + for (subregion = 0; subregion < cb->subregion_count; + subregion++) { + cp = cb->subregion_tbl + subregion; + + if (cp->mctx) + skhpb_put_map_ctx(hpb, cp->mctx); + } + kfree(cb->subregion_tbl); + } + } + +release_mempool: + skhpb_table_mempool_remove(hpb); + *do_work_lun = false; + return ret; +} + +static int skhpb_get_hpb_lu_desc(struct ufs_hba *hba, + struct skhpb_lu_desc *lu_desc, int lun) +{ + int ret; + u8 logical_buf[SKHPB_QUERY_DESC_UNIT_MAX_SIZE] = { 0 }; + + ret = skhpb_read_unit_desc(hba, lun, logical_buf, + SKHPB_QUERY_DESC_UNIT_MAX_SIZE); + if (ret) { + SKHPB_DRIVER_E("read unit desc failed. 
ret %d\n", ret); + return ret; + } + + lu_desc->lu_queue_depth = logical_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]; + + // 2^log, ex) 0x0C = 4KB + lu_desc->lu_logblk_size = logical_buf[UNIT_DESC_PARAM_LOGICAL_BLK_SIZE]; + lu_desc->lu_logblk_cnt = + get_unaligned_be64(&logical_buf[UNIT_DESC_PARAM_LOGICAL_BLK_COUNT]); + + if (logical_buf[UNIT_DESC_PARAM_LU_ENABLE] == LU_HPB_ENABLE) + lu_desc->lu_hpb_enable = true; + else + lu_desc->lu_hpb_enable = false; + + lu_desc->lu_max_active_hpb_regions = + get_unaligned_be16(logical_buf + + UNIT_DESC_HPB_LU_MAX_ACTIVE_REGIONS); + lu_desc->hpb_pinned_region_startidx = + get_unaligned_be16(logical_buf + + UNIT_DESC_HPB_LU_PIN_REGION_START_OFFSET); + lu_desc->lu_num_hpb_pinned_regions = + get_unaligned_be16(logical_buf + + UNIT_DESC_HPB_LU_NUM_PIN_REGIONS); + + if (lu_desc->lu_hpb_enable) { + SKHPB_DRIVER_D("LUN(%d) [0A] bLogicalBlockSize %d\n", + lun, lu_desc->lu_logblk_size); + SKHPB_DRIVER_D("LUN(%d) [0B] qLogicalBlockCount %llu\n", + lun, lu_desc->lu_logblk_cnt); + SKHPB_DRIVER_D("LUN(%d) [03] bLuEnable %d\n", + lun, logical_buf[UNIT_DESC_PARAM_LU_ENABLE]); + SKHPB_DRIVER_D("LUN(%d) [06] bLuQueueDepth %d\n", + lun, lu_desc->lu_queue_depth); + SKHPB_DRIVER_D("LUN(%d) [23:24] wLUMaxActiveHPBRegions %d\n", + lun, lu_desc->lu_max_active_hpb_regions); + SKHPB_DRIVER_D("LUN(%d) [25:26] wHPBPinnedRegionStartIdx %d\n", + lun, lu_desc->hpb_pinned_region_startidx); + SKHPB_DRIVER_D("LUN(%d) [27:28] wNumHPBPinnedRegions %d\n", + lun, lu_desc->lu_num_hpb_pinned_regions); + } + + if (lu_desc->lu_num_hpb_pinned_regions > 0) { + lu_desc->lu_hpb_pinned_end_offset = + lu_desc->hpb_pinned_region_startidx + + lu_desc->lu_num_hpb_pinned_regions - 1; + } else + lu_desc->lu_hpb_pinned_end_offset = -1; + + if (lu_desc->lu_hpb_enable) + SKHPB_DRIVER_I("Enable, LU: %d, MAX_REGION: %d, PIN: %d - %d\n", + lun, + lu_desc->lu_max_active_hpb_regions, + lu_desc->hpb_pinned_region_startidx, + lu_desc->lu_num_hpb_pinned_regions); + return 0; +} + +static void skhpb_quirk_setup(struct ufs_hba *hba, + struct skhpb_func_desc *desc) +{ + if (hba->dev_quirks & SKHPB_QUIRK_PURGE_HINT_INFO_WHEN_SLEEP) { + hba->skhpb_quirk |= SKHPB_QUIRK_PURGE_HINT_INFO_WHEN_SLEEP; + SKHPB_DRIVER_I("QUIRK set PURGE_HINT_INFO_WHEN_SLEEP\n"); + } + if (desc->hpb_ver >= 0x0101) { + hba->skhpb_quirk |= SKHPB_QUIRK_USE_READ_16_FOR_ENCRYPTION; + SKHPB_DRIVER_I("QUIRK set USE_READ_16_FOR_ENCRYPTION\n"); + } + if (desc->hpb_ver == 0x0102) { + hba->skhpb_quirk |= SKHPB_QUIRK_ALWAYS_DEVICE_CONTROL_MODE; + SKHPB_DRIVER_I("QUIRK set ALWAYS_DEVICE_CONTROL_MODE\n"); + } +} + +static int skhpb_read_dev_desc_support(struct ufs_hba *hba, + struct skhpb_func_desc *desc) +{ + u8 desc_buf[SKHPB_QUERY_DESC_DEVICE_MAX_SIZE]; + int err; + + err = skhpb_read_device_desc(hba, desc_buf, + SKHPB_QUERY_DESC_DEVICE_MAX_SIZE); + if (err) + return err; + + if (desc_buf[DEVICE_DESC_PARAM_UFS_FEAT] & + SKHPB_UFS_FEATURE_SUPPORT_HPB_BIT) { + hba->skhpb_feat |= SKHPB_UFS_FEATURE_SUPPORT_HPB_BIT; + SKHPB_DRIVER_I("FeaturesSupport= support\n"); + } else { + SKHPB_DRIVER_I("FeaturesSupport= not support\n"); + return -ENODEV; + } + + desc->lu_cnt = desc_buf[DEVICE_DESC_PARAM_NUM_LU]; + SKHPB_DRIVER_D("Dev LU count= %d\n", desc->lu_cnt); + + desc->spec_ver = + (u16)SKHPB_SHIFT_BYTE_1(desc_buf[DEVICE_DESC_PARAM_SPEC_VER]) | + (u16)SKHPB_SHIFT_BYTE_0(desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1]); + SKHPB_DRIVER_I("Dev Spec Ver= %x.%x\n", + SKHPB_GET_BYTE_1(desc->spec_ver), + SKHPB_GET_BYTE_0(desc->spec_ver)); + + desc->hpb_ver = + 
(u16)SKHPB_SHIFT_BYTE_1(desc_buf[DEVICE_DESC_PARAM_HPB_VER]) | + (u16)SKHPB_SHIFT_BYTE_0(desc_buf[DEVICE_DESC_PARAM_HPB_VER + 1]); + SKHPB_DRIVER_I("Dev Ver= %x.%x.%x, DD Ver= %x.%x.%x\n", + (desc->hpb_ver >> 8) & 0xf, + (desc->hpb_ver >> 4) & 0xf, + (desc->hpb_ver >> 0) & 0xf, + SKHPB_GET_BYTE_2(SKHPB_DD_VER), + SKHPB_GET_BYTE_1(SKHPB_DD_VER), + SKHPB_GET_BYTE_0(SKHPB_DD_VER)); + + skhpb_quirk_setup(hba, desc); + + if (hba->skhpb_quirk & SKHPB_QUIRK_ALWAYS_DEVICE_CONTROL_MODE) + desc->hpb_control_mode = DEV_CTRL_MODE; + else + desc->hpb_control_mode = (u8)desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL]; + + + SKHPB_DRIVER_I("HPB Control Mode = %s", + (desc->hpb_control_mode)?"DEV MODE":"HOST MODE"); + if (desc->hpb_control_mode == HOST_CTRL_MODE) { + SKHPB_DRIVER_E("Driver does not support Host Control Mode"); + return -ENODEV; + } + hba->hpb_control_mode = desc->hpb_control_mode; + return 0; +} + +static int skhpb_read_geo_desc_support(struct ufs_hba *hba, + struct skhpb_func_desc *desc) +{ + int err; + u8 geometry_buf[SKHPB_QUERY_DESC_GEOMETRY_MAX_SIZE]; + + err = skhpb_read_geo_desc(hba, geometry_buf, + SKHPB_QUERY_DESC_GEOMETRY_MAX_SIZE); + if (err) + return err; + + desc->hpb_region_size = geometry_buf[GEOMETRY_DESC_HPB_REGION_SIZE]; + desc->hpb_number_lu = geometry_buf[GEOMETRY_DESC_HPB_NUMBER_LU]; + desc->hpb_subregion_size = + geometry_buf[GEOMETRY_DESC_HPB_SUBREGION_SIZE]; + desc->hpb_device_max_active_regions = + get_unaligned_be16(geometry_buf + + GEOMETRY_DESC_HPB_DEVICE_MAX_ACTIVE_REGIONS); + + SKHPB_DRIVER_D("[48] bHPBRegionSize %u\n", desc->hpb_region_size); + SKHPB_DRIVER_D("[49] bHPBNumberLU %u\n", desc->hpb_number_lu); + SKHPB_DRIVER_D("[4A] bHPBSubRegionSize %u\n", desc->hpb_subregion_size); + SKHPB_DRIVER_D("[4B:4C] wDeviceMaxActiveHPBRegions %u\n", + desc->hpb_device_max_active_regions); + + if (desc->hpb_number_lu == 0) { + SKHPB_DRIVER_E("HPB is not supported\n"); + return -ENODEV; + } + /* for activation */ + hba->skhpb_max_regions = desc->hpb_device_max_active_regions; + return 0; +} + +int skhpb_control_validation(struct ufs_hba *hba, + struct skhpb_config_desc *config) +{ + unsigned int num_regions = 0; + int lun; + + if (!(hba->skhpb_feat & SKHPB_UFS_FEATURE_SUPPORT_HPB_BIT)) + return -ENOTSUPP; + + for (lun = 0 ; lun < UFS_UPIU_MAX_GENERAL_LUN ; lun++) { + unsigned char *unit = config->unit[lun]; + + if (unit[SKHPB_CONF_LU_ENABLE] >= LU_SET_MAX) + return -EINVAL; + + /* total should not exceed max_active_regions */ + num_regions += unit[SKHPB_CONF_ACTIVE_REGIONS] << 8; + num_regions += unit[SKHPB_CONF_ACTIVE_REGIONS + 1]; + if (num_regions > hba->skhpb_max_regions) + return -EINVAL; + } + return 0; +} + +static int skhpb_init(struct ufs_hba *hba) +{ + struct skhpb_func_desc func_desc; + int ret, retries; + u8 lun; + int hpb_dev = 0; + bool do_work; + + pm_runtime_get_sync(hba->dev); + ret = skhpb_read_dev_desc_support(hba, &func_desc); + if (ret) + goto out_state; + + ret = skhpb_read_geo_desc_support(hba, &func_desc); + if (ret) + goto out_state; + + for (retries = 0; retries < 20; retries++) { + if (!hba->lrb_in_use) { + ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, + QUERY_FLAG_IDN_HPB_RESET, NULL); + if (!ret) { + SKHPB_DRIVER_I("Query fHPBReset is successfully sent retries = %d\n", retries); + break; + } + } else + msleep(200); + } + if (ret == 0) { + bool fHPBReset = true; + + ufshcd_query_flag_retry(hba, + UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_HPB_RESET, + &fHPBReset); + if (fHPBReset) + SKHPB_DRIVER_I("fHPBReset still set\n"); + } + + 
skhpb_init_constant(); + + do_work = false; + for (lun = 0 ; lun < UFS_UPIU_MAX_GENERAL_LUN ; lun++) { + struct skhpb_lu_desc lu_desc = {0}; + bool do_work_lun = false; + + ret = skhpb_get_hpb_lu_desc(hba, &lu_desc, lun); + if (ret) + goto out_state; + + if (lu_desc.lu_hpb_enable == false) + continue; + + hba->skhpb_lup[lun] = kzalloc(sizeof(struct skhpb_lu), + GFP_KERNEL); + if (!hba->skhpb_lup[lun]) { + ret = -ENOMEM; + goto out_free_mem; + } + + ret = skhpb_lu_hpb_init(hba, hba->skhpb_lup[lun], + &func_desc, &lu_desc, lun, &do_work_lun); + if (ret) { + if (ret == -ENODEV) + continue; + else + goto out_free_mem; + } + do_work |= do_work_lun; + hba->skhpb_quicklist_lu_enable[hpb_dev] = lun; + SKHPB_DRIVER_D("skhpb_quicklist_lu_enable[%d] = %d\n", hpb_dev, lun); + hpb_dev++; + } + + if (hpb_dev) + goto done; + + goto out_free_mem; + +done: + INIT_WORK(&hba->skhpb_eh_work, skhpb_error_handler); + hba->skhpb_state = SKHPB_PRESENT; + hba->issue_ioctl = false; + pm_runtime_mark_last_busy(hba->dev); + pm_runtime_put_noidle(hba->dev); + + if (do_work) { + for (lun = 0; lun < UFS_UPIU_MAX_GENERAL_LUN; lun++) { + struct skhpb_lu *hpb = hba->skhpb_lup[lun]; + + if (hpb) + schedule_delayed_work(&hpb->skhpb_pinned_work, 0); + } + } + + return 0; + +out_free_mem: + skhpb_release(hba, SKHPB_NOT_SUPPORTED); +out_state: + hba->skhpb_state = SKHPB_NOT_SUPPORTED; + pm_runtime_mark_last_busy(hba->dev); + pm_runtime_put_noidle(hba->dev); + return ret; +} + +static void skhpb_map_loading_trigger(struct skhpb_lu *hpb, + bool only_pinned, bool do_work_handler) +{ + int region, subregion; + unsigned long flags; + + if (do_work_handler) + goto work_out; + flush_delayed_work(&hpb->skhpb_pinned_work); + for (region = 0 ; region < hpb->regions_per_lu ; region++) { + struct skhpb_region *cb; + + cb = hpb->region_tbl + region; + + if (cb->region_state != SKHPB_REGION_ACTIVE && + !cb->is_pinned) + continue; + + if ((only_pinned && cb->is_pinned) || + !only_pinned) { + spin_lock_irqsave(&hpb->hpb_lock, flags); + for (subregion = 0; subregion < cb->subregion_count; + subregion++) + skhpb_add_subregion_to_req_list(hpb, + cb->subregion_tbl + subregion); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + do_work_handler = true; + } + } +work_out: + if (do_work_handler) + schedule_delayed_work(&hpb->skhpb_pinned_work, 0); +} + +static void skhpb_purge_active_block(struct skhpb_lu *hpb) +{ + int region, subregion; + int state; + struct skhpb_region *cb; + struct skhpb_subregion *cp; + unsigned long flags; + + spin_lock_irqsave(&hpb->hpb_lock, flags); + for (region = 0 ; region < hpb->regions_per_lu ; region++) { + cb = hpb->region_tbl + region; + + if (cb->region_state == SKHPB_REGION_INACTIVE) { + continue; + } + + if (cb->is_pinned) { + state = SKHPB_SUBREGION_DIRTY; + } else if (cb->region_state == SKHPB_REGION_ACTIVE) { + state = SKHPB_SUBREGION_UNUSED; + skhpb_cleanup_lru_info(&hpb->lru_info, cb); + } else { + SKHPB_DRIVER_E("Unsupported state of region\n"); + continue; + } + + SKHPB_DRIVER_D("region %d state %d dft %d\n", + region, state, + hpb->debug_free_table); + for (subregion = 0 ; subregion < cb->subregion_count; + subregion++) { + cp = cb->subregion_tbl + subregion; + + skhpb_purge_active_page(hpb, cp, state); + } + SKHPB_DRIVER_D("region %d state %d dft %d\n", + region, state, hpb->debug_free_table); + } + spin_unlock_irqrestore(&hpb->hpb_lock, flags); +} + +static void skhpb_retrieve_rsp_info(struct skhpb_lu *hpb) +{ + struct skhpb_rsp_info *rsp_info; + unsigned long flags; + + while (1) { + 
spin_lock_irqsave(&hpb->rsp_list_lock, flags); + rsp_info = list_first_entry_or_null(&hpb->lh_rsp_info, + struct skhpb_rsp_info, list_rsp_info); + if (!rsp_info) { + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + break; + } + list_move_tail(&rsp_info->list_rsp_info, + &hpb->lh_rsp_info_free); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + SKHPB_DRIVER_D("add_list %p -> %p", + &hpb->lh_rsp_info_free, rsp_info); + } +} + +static void skhpb_probe(struct ufs_hba *hba) +{ + struct skhpb_lu *hpb; + int lu; + + for (lu = 0 ; lu < UFS_UPIU_MAX_GENERAL_LUN ; lu++) { + hpb = hba->skhpb_lup[lu]; + + if (hpb && hpb->lu_hpb_enable) { + skhpb_cancel_jobs(hpb); + skhpb_retrieve_rsp_info(hpb); + skhpb_purge_active_block(hpb); + SKHPB_DRIVER_I("SKHPB lun %d reset\n", lu); +// tasklet_init(&hpb->hpb_work_handler, +// skhpb_work_handler_fn, (unsigned long)hpb); + } + } + hba->skhpb_state = SKHPB_PRESENT; +} + +static void skhpb_destroy_subregion_tbl(struct skhpb_lu *hpb, + struct skhpb_region *cb) +{ + int subregion; + + for (subregion = 0 ; subregion < cb->subregion_count ; subregion++) { + struct skhpb_subregion *cp; + + cp = cb->subregion_tbl + subregion; + cp->subregion_state = SKHPB_SUBREGION_UNUSED; + skhpb_put_map_ctx(hpb, cp->mctx); + } + + kfree(cb->subregion_tbl); +} + +static void skhpb_destroy_region_tbl(struct skhpb_lu *hpb) +{ + int region; + + if (!hpb->region_tbl) + return; + + for (region = 0 ; region < hpb->regions_per_lu ; region++) { + struct skhpb_region *cb; + + cb = hpb->region_tbl + region; + if (cb->region_state == SKHPB_REGION_ACTIVE) { + cb->region_state = SKHPB_REGION_INACTIVE; + + skhpb_destroy_subregion_tbl(hpb, cb); + } + } + vfree(hpb->region_tbl); +} + +void skhpb_suspend(struct ufs_hba *hba) +{ + int lun; + unsigned long flags; + struct skhpb_lu *hpb; + struct skhpb_rsp_info *rsp_info; + struct skhpb_map_req *map_req; + + if (hba->skhpb_quirk & SKHPB_QUIRK_PURGE_HINT_INFO_WHEN_SLEEP) { + for (lun = 0 ; lun < UFS_UPIU_MAX_GENERAL_LUN ; lun++) { + hpb = hba->skhpb_lup[lun]; + if (!hpb) + continue; + + while (1) { + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + /* break if lh_rsp_info list_head not init yet. */ + if (!hpb->lh_rsp_info.next) { + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + break; + } + rsp_info = list_first_entry_or_null(&hpb->lh_rsp_info, + struct skhpb_rsp_info, list_rsp_info); + if (!rsp_info) { + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + break; + } + list_del_init(&rsp_info->list_rsp_info); + list_add_tail(&rsp_info->list_rsp_info, &hpb->lh_rsp_info_free); + atomic64_inc(&hpb->canceled_resp); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + } + while (1) { + spin_lock_irqsave(&hpb->map_list_lock, flags); + /* break if lh_map_req_retry list_head not init yet. 
*/ + if (!hpb->lh_map_req_retry.next) { + spin_unlock_irqrestore(&hpb->map_list_lock, flags); + break; + } + map_req = list_first_entry_or_null(&hpb->lh_map_req_retry, + struct skhpb_map_req, list_map_req); + if (!map_req) { + spin_unlock_irqrestore(&hpb->map_list_lock, flags); + break; + } + list_del_init(&map_req->list_map_req); + list_add_tail(&map_req->list_map_req, &hpb->lh_map_req_free); + atomic64_inc(&hpb->canceled_map_req); + spin_unlock_irqrestore(&hpb->map_list_lock, flags); + } + } + } + for (lun = 0 ; lun < UFS_UPIU_MAX_GENERAL_LUN ; lun++) { + hpb = hba->skhpb_lup[lun]; + if (!hpb) + continue; + skhpb_cancel_jobs(hpb); + } +} + +void skhpb_resume(struct ufs_hba *hba) +{ + int lun; + struct skhpb_lu *hpb; + unsigned long flags; + + for (lun = 0 ; lun < UFS_UPIU_MAX_GENERAL_LUN ; lun++) { + hpb = hba->skhpb_lup[lun]; + if (!hpb) + continue; + spin_lock_irqsave(&hpb->map_list_lock, flags); + if (!list_empty(&hpb->lh_map_req_retry)) + schedule_delayed_work(&hpb->skhpb_map_req_retry_work, msecs_to_jiffies(1000)); + spin_unlock_irqrestore(&hpb->map_list_lock, flags); + } +} + +void skhpb_release(struct ufs_hba *hba, int state) +{ + int lun; + + hba->skhpb_state = SKHPB_FAILED; + + for (lun = 0 ; lun < UFS_UPIU_MAX_GENERAL_LUN ; lun++) { + struct skhpb_lu *hpb = hba->skhpb_lup[lun]; + + if (!hpb) + continue; + + hba->skhpb_lup[lun] = NULL; + + if (!hpb->lu_hpb_enable) + continue; + + hpb->lu_hpb_enable = false; + + skhpb_cancel_jobs(hpb); + + skhpb_destroy_region_tbl(hpb); + skhpb_table_mempool_remove(hpb); + + vfree(hpb->rsp_info); + vfree(hpb->map_req); + + kobject_uevent(&hpb->kobj, KOBJ_REMOVE); + kobject_del(&hpb->kobj); // TODO --count & del? + + kfree(hpb); + } + + if (skhpb_alloc_mctx != 0) + SKHPB_DRIVER_E("warning: skhpb_alloc_mctx %d", skhpb_alloc_mctx); + + hba->skhpb_state = state; +} + +void skhpb_init_handler(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct ufs_hba *hba = + container_of(dwork, struct ufs_hba, skhpb_init_work); + struct Scsi_Host *shost = hba->host; + bool async_scan; + + if (hba->skhpb_state == SKHPB_NOT_SUPPORTED) + return; + + spin_lock_irq(shost->host_lock); + async_scan = shost->async_scan; + spin_unlock_irq(shost->host_lock); + if (async_scan) { + schedule_delayed_work(dwork, msecs_to_jiffies(100)); + return; + } + + if (hba->skhpb_state == SKHPB_NEED_INIT) { + int err = skhpb_init(hba); + + if (hba->skhpb_state == SKHPB_NOT_SUPPORTED) + SKHPB_DRIVER_E("Run without HPB - err=%d\n", err); + } else if (hba->skhpb_state == SKHPB_RESET) { + skhpb_probe(hba); + } +} + +void ufshcd_init_hpb(struct ufs_hba *hba) +{ + int lun; + + hba->skhpb_feat = 0; + hba->skhpb_quirk = 0; + hba->skhpb_state = SKHPB_NEED_INIT; + for (lun = 0 ; lun < UFS_UPIU_MAX_GENERAL_LUN ; lun++) { + hba->skhpb_lup[lun] = NULL; + hba->sdev_ufs_lu[lun] = NULL; + hba->skhpb_quicklist_lu_enable[lun] = SKHPB_U8_MAX; + } + + INIT_DELAYED_WORK(&hba->skhpb_init_work, skhpb_init_handler); +} + +static void skhpb_error_handler(struct work_struct *work) +{ + struct ufs_hba *hba; + + hba = container_of(work, struct ufs_hba, skhpb_eh_work); + + SKHPB_DRIVER_E("SKHPB driver runs without SKHPB\n"); + SKHPB_DRIVER_E("SKHPB will be removed from the kernel\n"); + + skhpb_release(hba, SKHPB_FAILED); +} + +static void skhpb_stat_init(struct skhpb_lu *hpb) +{ + atomic64_set(&hpb->hit, 0); + atomic64_set(&hpb->size_miss, 0); + atomic64_set(&hpb->region_miss, 0); + atomic64_set(&hpb->subregion_miss, 0); + atomic64_set(&hpb->entry_dirty_miss, 0); + 
atomic64_set(&hpb->w_map_req_cnt, 0); +#if defined(SKHPB_READ_LARGE_CHUNK_SUPPORT) + atomic64_set(&hpb->lc_entry_dirty_miss, 0); + atomic64_set(&hpb->lc_reg_subreg_miss, 0); + atomic64_set(&hpb->lc_hit, 0); +#endif + atomic64_set(&hpb->rb_noti_cnt, 0); + atomic64_set(&hpb->reset_noti_cnt, 0); + atomic64_set(&hpb->map_req_cnt, 0); + atomic64_set(&hpb->region_evict, 0); + atomic64_set(&hpb->region_add, 0); + atomic64_set(&hpb->rb_fail, 0); + atomic64_set(&hpb->canceled_resp, 0); + atomic64_set(&hpb->canceled_map_req, 0); + atomic64_set(&hpb->alloc_map_req_cnt, 0); +} + + +static ssize_t skhpb_sysfs_info_from_region_store(struct skhpb_lu *hpb, + const char *buf, size_t count) +{ + unsigned long long value = 0; + int region, subregion; + struct skhpb_region *cb; + struct skhpb_subregion *cp; + + if (kstrtoull(buf, 0, &value)) { + SKHPB_DRIVER_E("kstrtoul error\n"); + return -EINVAL; + } + + if (value >= hpb->regions_per_lu) { + SKHPB_DRIVER_E("value %llu >= regions_per_lu %d error\n", + value, hpb->regions_per_lu); + return -EINVAL; + } + + region = (int)value; + cb = hpb->region_tbl + region; + + SKHPB_DRIVER_I("get_info_from_region[%d]=", region); + SKHPB_DRIVER_I("region %u state %s", region, + ((cb->region_state == SKHPB_REGION_INACTIVE) ? "INACTIVE" : + ((cb->region_state == SKHPB_REGION_ACTIVE) ? "ACTIVE" : "INVALID")) + ); + for (subregion = 0; subregion < cb->subregion_count; subregion++) { + cp = cb->subregion_tbl + subregion; + SKHPB_DRIVER_I("subregion %u state %s", subregion, + ((cp->subregion_state == SKHPB_SUBREGION_UNUSED) ? "UNUSED" : + ((cp->subregion_state == SKHPB_SUBREGION_DIRTY) ? "DIRTY" : + ((cp->subregion_state == SKHPB_SUBREGION_CLEAN) ? "CLEAN" : + (cp->subregion_state == SKHPB_SUBREGION_ISSUED) ? + "ISSUED" : "INVALID"))) + ); + } + return count; +} + +static ssize_t skhpb_sysfs_info_from_lba_store(struct skhpb_lu *hpb, + const char *buf, size_t count) +{ + skhpb_t ppn; + unsigned long long value = 0; + unsigned int lpn; + int region, subregion, subregion_offset; + struct skhpb_region *cb; + struct skhpb_subregion *cp; + unsigned long flags; + int dirty; + + if (kstrtoull(buf, 0, &value)) { + SKHPB_DRIVER_E("kstrtoul error\n"); + return -EINVAL; + } + + if (value > hpb->lu_num_blocks * SKHPB_SECTORS_PER_BLOCK) { + SKHPB_DRIVER_E("value %llu > lu_num_blocks %llu error\n", + value, hpb->lu_num_blocks); + return -EINVAL; + } + lpn = value / SKHPB_SECTORS_PER_BLOCK; + + skhpb_get_pos_from_lpn(hpb, lpn, ®ion, &subregion, + &subregion_offset); + if (!skhpb_check_region_subregion_validity(hpb, region, subregion)) + return -EINVAL; + cb = hpb->region_tbl + region; + cp = cb->subregion_tbl + subregion; + + if (cb->region_state != SKHPB_REGION_INACTIVE) { + ppn = skhpb_get_ppn(cp->mctx, subregion_offset); + spin_lock_irqsave(&hpb->hpb_lock, flags); + dirty = skhpb_ppn_dirty_check(hpb, cp, subregion_offset); + spin_unlock_irqrestore(&hpb->hpb_lock, flags); + } else { + ppn = 0; + dirty = -1; + } + SKHPB_DRIVER_I("get_info_from_lba[%llu]=", value); + SKHPB_DRIVER_I("sector %llu region %d state %s subregion %d state %s", + value, region, + ((cb->region_state == SKHPB_REGION_INACTIVE) ? "INACTIVE" : + ((cb->region_state == SKHPB_REGION_ACTIVE) ? "ACTIVE" : "INVALID")), + subregion, + ((cp->subregion_state == SKHPB_SUBREGION_UNUSED) ? "UNUSED" : + ((cp->subregion_state == SKHPB_SUBREGION_DIRTY) ? "DIRTY" : + ((cp->subregion_state == SKHPB_SUBREGION_CLEAN) ? "CLEAN" : + (cp->subregion_state == SKHPB_SUBREGION_ISSUED) ? 
+ "ISSUED":"INVALID"))) + ); + SKHPB_DRIVER_I("sector %llu lpn %u ppn %llx dirty %d", + value, lpn, ppn, dirty); + return count; +} + +static ssize_t skhpb_sysfs_map_req_show(struct skhpb_lu *hpb, char *buf) +{ + return snprintf(buf, PAGE_SIZE, + "map_req_count[RB_NOTI RESET_NOTI MAP_REQ]= %lld %lld %lld\n", + (long long)atomic64_read(&hpb->rb_noti_cnt), + (long long)atomic64_read(&hpb->reset_noti_cnt), + (long long)atomic64_read(&hpb->map_req_cnt)); +} + +static ssize_t skhpb_sysfs_count_reset_store(struct skhpb_lu *hpb, + const char *buf, size_t count) +{ + unsigned long debug; + + if (kstrtoul(buf, 0, &debug)) + return -EINVAL; + + skhpb_stat_init(hpb); + + return count; +} + +static ssize_t skhpb_sysfs_add_evict_show(struct skhpb_lu *hpb, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "add_evict_count[ADD EVICT]= %lld %lld\n", + (long long)atomic64_read(&hpb->region_add), + (long long)atomic64_read(&hpb->region_evict)); +} + +static ssize_t skhpb_sysfs_statistics_show(struct skhpb_lu *hpb, char *buf) +{ + long long size_miss, region_miss, subregion_miss, entry_dirty_miss, + hit, miss_all, rb_fail, canceled_resp, canceled_map_req; +#if defined(SKHPB_READ_LARGE_CHUNK_SUPPORT) + long long lc_dirty_miss = 0, lc_state_miss = 0, lc_hit = 0; +#endif + int count = 0; + + hit = atomic64_read(&hpb->hit); + size_miss = atomic64_read(&hpb->size_miss); + region_miss = atomic64_read(&hpb->region_miss); + subregion_miss = atomic64_read(&hpb->subregion_miss); + entry_dirty_miss = atomic64_read(&hpb->entry_dirty_miss); + rb_fail = atomic64_read(&hpb->rb_fail); + canceled_resp = atomic64_read(&hpb->canceled_resp); + canceled_map_req = atomic64_read(&hpb->canceled_map_req); + +#if defined(SKHPB_READ_LARGE_CHUNK_SUPPORT) + lc_dirty_miss = atomic64_read(&hpb->lc_entry_dirty_miss); + lc_state_miss = atomic64_read(&hpb->lc_reg_subreg_miss); + lc_hit = atomic64_read(&hpb->lc_hit); + entry_dirty_miss += lc_dirty_miss; + subregion_miss += lc_state_miss; +#endif + miss_all = size_miss + region_miss + subregion_miss + entry_dirty_miss; + count += snprintf(buf + count, PAGE_SIZE, + "Total: %lld\nHit_Counts: %lld\n", + hit + miss_all, hit); + count += snprintf(buf + count, PAGE_SIZE, + "Miss_Counts[ALL SIZE REG SUBREG DIRTY]= %lld %lld %lld %lld %lld\n", + miss_all, size_miss, region_miss, subregion_miss, entry_dirty_miss); + +#if defined(SKHPB_READ_LARGE_CHUNK_SUPPORT) + count += snprintf(buf + count, PAGE_SIZE, + "LARG_CHNK Hit_Count: %lld, miss_count[ALL DIRTY REG_SUBREG]= %lld %lld %lld\n", + lc_hit, lc_dirty_miss + lc_state_miss, lc_dirty_miss, lc_state_miss); +#endif + if (hpb->hba->skhpb_quirk & SKHPB_QUIRK_PURGE_HINT_INFO_WHEN_SLEEP) + count += snprintf(buf + count, PAGE_SIZE, + "READ_BUFFER miss_count[RB_FAIL CANCEL_RESP CANCEL_MAP]= %lld %lld %lld\n", + rb_fail, canceled_resp, canceled_map_req); + else + count += snprintf(buf + count, PAGE_SIZE, + "READ_BUFFER miss_count[RB_FAIL]= %lld\n", rb_fail); + return count; +} + +static ssize_t skhpb_sysfs_version_show(struct skhpb_lu *hpb, char *buf) +{ + return snprintf(buf, PAGE_SIZE, + "HPBversion[HPB DD]= %x.%x.%x %x.%x.%x\n", + (hpb->hpb_ver >> 8) & 0xf, + (hpb->hpb_ver >> 4) & 0xf, + (hpb->hpb_ver >> 0) & 0xf, + SKHPB_GET_BYTE_2(SKHPB_DD_VER), + SKHPB_GET_BYTE_1(SKHPB_DD_VER), + SKHPB_GET_BYTE_0(SKHPB_DD_VER)); +} + +static ssize_t skhpb_sysfs_active_count_show( + struct skhpb_lu *hpb, char *buf) +{ + struct skhpb_region *cb; + int region; + int pinned_cnt = 0; + + for (region = 0 ; region < hpb->regions_per_lu ; region++) { + cb = hpb->region_tbl + 
region;
+
+		if (cb->is_pinned && cb->region_state == SKHPB_REGION_ACTIVE)
+			pinned_cnt++;
+	}
+	return snprintf(buf, PAGE_SIZE,
+		"active_count[ACTIVE_CNT PINNED_CNT]= %ld %d\n",
+		atomic64_read(&hpb->lru_info.active_count), pinned_cnt);
+}
+
+static ssize_t skhpb_sysfs_hpb_disable_show(
+		struct skhpb_lu *hpb, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE,
+		"[0] BOTH_enabled= %d\
+		\n[1] ONLY_HPB_BUFFER_disabled= %d\
+		\n[2] ONLY_HPB_READ_disabled= %d\
+		\n[3] BOTH_disabled= %d\n",
+		(hpb->force_map_req_disable == 0 &&
+		 hpb->force_hpb_read_disable == 0) ? 1 : 0,
+		hpb->force_map_req_disable,
+		hpb->force_hpb_read_disable,
+		(hpb->force_map_req_disable == 1 &&
+		 hpb->force_hpb_read_disable == 1) ? 1 : 0);
+}
+
+static ssize_t skhpb_sysfs_hpb_disable_store(struct skhpb_lu *hpb,
+		const char *buf, size_t count)
+{
+	unsigned long value = 0;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	if (value > 3) {
+		SKHPB_DRIVER_E("Error, Only [0-3] is valid:\
+		\n[0] BOTH_enable\
+		\n[1] ONLY_HPB_BUFFER_disable\
+		\n[2] ONLY_HPB_READ_disable\
+		\n[3] BOTH_disable\n");
+		return -EINVAL;
+	}
+
+	hpb->force_map_req_disable = value & 0x1;
+	hpb->force_hpb_read_disable = value & 0x2;
+	return count;
+}
+
+static ssize_t skhpb_sysfs_hpb_reset_store(struct skhpb_lu *hpb,
+		const char *buf, size_t count)
+{
+	unsigned long value = 0;
+	unsigned int doorbell;
+	struct skhpb_rsp_info *rsp_info;
+	unsigned long flags;
+	int ret = 0, retries, lun;
+	struct skhpb_lu *hpb_lu;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+	if (!value)
+		return count;
+
+	for (retries = 1; retries <= 20; retries++) {
+		pm_runtime_get_sync(SKHPB_DEV(hpb));
+		ufshcd_hold(hpb->hba, false);
+		doorbell = ufshcd_readl(hpb->hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+		ufshcd_release(hpb->hba);
+		pm_runtime_mark_last_busy(SKHPB_DEV(hpb));
+		pm_runtime_put_noidle(SKHPB_DEV(hpb));
+
+		if (!doorbell) {
+			pm_runtime_get_sync(SKHPB_DEV(hpb));
+			ret = ufshcd_query_flag_retry(hpb->hba,
+				UPIU_QUERY_OPCODE_SET_FLAG,
+				QUERY_FLAG_IDN_HPB_RESET,
+				NULL);
+			pm_runtime_mark_last_busy(SKHPB_DEV(hpb));
+			pm_runtime_put_noidle(SKHPB_DEV(hpb));
+			if (!ret) {
+				SKHPB_DRIVER_I("Query fHPBReset sent successfully\n");
+				break;
+			}
+		}
+		SKHPB_DRIVER_I("fHPBReset not set, will retry #%d, DOORBELL: 0x%x\n",
+			       retries, doorbell);
+		msleep(200);
+	}
+	if (ret == 0) {
+		bool fHPBReset = true;
+
+		pm_runtime_get_sync(SKHPB_DEV(hpb));
+		ufshcd_query_flag_retry(hpb->hba,
+			UPIU_QUERY_OPCODE_READ_FLAG,
+			QUERY_FLAG_IDN_HPB_RESET,
+			&fHPBReset);
+		pm_runtime_mark_last_busy(SKHPB_DEV(hpb));
+		pm_runtime_put_noidle(SKHPB_DEV(hpb));
+		if (fHPBReset)
+			SKHPB_DRIVER_E("fHPBReset is still in progress at device, but keep going\n");
+	} else {
+		SKHPB_DRIVER_E("Failed to set fHPBReset flag\n");
+		goto out;
+	}
+	flush_delayed_work(&hpb->skhpb_pinned_work);
+	for (lun = 0 ; lun < UFS_UPIU_MAX_GENERAL_LUN ; lun++) {
+		hpb_lu = hpb->hba->skhpb_lup[lun];
+		if (!hpb_lu || !hpb_lu->lu_hpb_enable)
+			continue;
+
+		skhpb_stat_init(hpb_lu);
+
+		spin_lock_irqsave(&hpb_lu->rsp_list_lock, flags);
+		rsp_info = skhpb_get_req_info(hpb_lu);
+		spin_unlock_irqrestore(&hpb_lu->rsp_list_lock, flags);
+		if (!rsp_info)
+			goto out;
+		rsp_info->type = SKHPB_RSP_HPB_RESET;
+		SKHPB_RSP_TIME(rsp_info->RSP_start);
+		spin_lock_irqsave(&hpb_lu->rsp_list_lock, flags);
+		list_add_tail(&rsp_info->list_rsp_info, &hpb_lu->lh_rsp_info);
+		spin_unlock_irqrestore(&hpb_lu->rsp_list_lock, flags);
+		SKHPB_DRIVER_I("Host HPB reset start LU%d - fHPBReset\n", lun);
+
schedule_work(&hpb_lu->skhpb_rsp_work); + } +out: + return count; +} + +static ssize_t skhpb_sysfs_debug_log_show( + struct skhpb_lu *hpb, char *buf) +{ + int value = skhpb_debug_mask; + + if (value == (SKHPB_LOG_OFF)) { + value = SKHPB_LOG_LEVEL_OFF; + } else if (value == (SKHPB_LOG_ERR)) { + value = SKHPB_LOG_LEVEL_ERR; + } else if (value == (SKHPB_LOG_ERR | SKHPB_LOG_INFO)) { + value = SKHPB_LOG_LEVEL_INFO; + } else if (value == (SKHPB_LOG_ERR | SKHPB_LOG_INFO | SKHPB_LOG_DEBUG)) { + value = SKHPB_LOG_LEVEL_DEBUG; + } else if (value == (SKHPB_LOG_ERR | SKHPB_LOG_INFO | SKHPB_LOG_DEBUG | SKHPB_LOG_HEX)) { + value = SKHPB_LOG_LEVEL_HEX; + } + + return snprintf(buf, PAGE_SIZE, "[0] : LOG_LEVEL_OFF\ + \n[1] : LOG_LEVEL_ERR\ + \n[2] : LOG_LEVEL_INFO\ + \n[3] : LOG_LEVEL_DEBUG\ + \n[4] : LOG_LEVEL_HEX\ + \n-----------------------\ + \nLog-Level = %d\n", value); +} + +static ssize_t skhpb_sysfs_debug_log_store(struct skhpb_lu *hpb, + const char *buf, size_t count) +{ + unsigned long value = 0; + + if (kstrtoul(buf, 0, &value)) + return -EINVAL; + + if (value > SKHPB_LOG_LEVEL_HEX) { + SKHPB_DRIVER_E("Error, Only [0-4] is valid:\ + \n[0] : LOG_LEVEL_OFF\ + \n[1] : LOG_LEVEL_ERR\ + \n[2] : LOG_LEVEL_INFO\ + \n[3] : LOG_LEVEL_DEBUG\ + \n[4] : LOG_LEVEL_HEX\n"); + return -EINVAL; + } + + if (value == SKHPB_LOG_LEVEL_OFF) { + skhpb_debug_mask = SKHPB_LOG_OFF; + } else if (value == SKHPB_LOG_LEVEL_ERR) { + skhpb_debug_mask = SKHPB_LOG_ERR; + } else if (value == SKHPB_LOG_LEVEL_INFO) { + skhpb_debug_mask = SKHPB_LOG_ERR | SKHPB_LOG_INFO; + } else if (value == SKHPB_LOG_LEVEL_DEBUG) { + skhpb_debug_mask = SKHPB_LOG_ERR | SKHPB_LOG_INFO | SKHPB_LOG_DEBUG; + } else if (value == SKHPB_LOG_LEVEL_HEX) { + skhpb_debug_mask = SKHPB_LOG_ERR | SKHPB_LOG_INFO | SKHPB_LOG_DEBUG | SKHPB_LOG_HEX; + } + + return count; +} + +static ssize_t skhpb_sysfs_rsp_time_show(struct skhpb_lu *hpb, char *buf) +{ + return snprintf(buf, PAGE_SIZE, + "map_req_time: %s\n", (debug_map_req ? 
"Enable":"Disable")); +} + +static ssize_t skhpb_sysfs_rsp_time_store(struct skhpb_lu *hpb, + const char *buf, size_t count) +{ + unsigned long value = 0; + + if (kstrtoul(buf, 0, &value)) + return -EINVAL; + + if (value > 1) + return count; + + debug_map_req = value; + return count; +} + +static struct skhpb_sysfs_entry skhpb_sysfs_entries[] = { + __ATTR(hpb_disable, 0644, + skhpb_sysfs_hpb_disable_show, + skhpb_sysfs_hpb_disable_store), + __ATTR(HPBVersion, 0444, skhpb_sysfs_version_show, NULL), + __ATTR(statistics, 0444, skhpb_sysfs_statistics_show, NULL), + __ATTR(active_count, 0444, skhpb_sysfs_active_count_show, NULL), + __ATTR(add_evict_count, 0444, skhpb_sysfs_add_evict_show, NULL), + __ATTR(count_reset, 0200, NULL, skhpb_sysfs_count_reset_store), + __ATTR(map_req_count, 0444, skhpb_sysfs_map_req_show, NULL), + __ATTR(get_info_from_region, 0200, NULL, + skhpb_sysfs_info_from_region_store), + __ATTR(get_info_from_lba, 0200, NULL, skhpb_sysfs_info_from_lba_store), + __ATTR(hpb_reset, 0200, NULL, skhpb_sysfs_hpb_reset_store), + __ATTR(debug_log, 0644, + skhpb_sysfs_debug_log_show, + skhpb_sysfs_debug_log_store), + __ATTR(response_time, 0644, + skhpb_sysfs_rsp_time_show, + skhpb_sysfs_rsp_time_store), + __ATTR_NULL +}; + +static ssize_t skhpb_attr_show(struct kobject *kobj, + struct attribute *attr, char *page) +{ + struct skhpb_sysfs_entry *entry; + struct skhpb_lu *hpb; + ssize_t error; + + entry = container_of(attr, + struct skhpb_sysfs_entry, attr); + hpb = container_of(kobj, struct skhpb_lu, kobj); + + if (!entry->show) + return -EIO; + + mutex_lock(&hpb->sysfs_lock); + error = entry->show(hpb, page); + mutex_unlock(&hpb->sysfs_lock); + return error; +} + +static ssize_t skhpb_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *page, size_t length) +{ + struct skhpb_sysfs_entry *entry; + struct skhpb_lu *hpb; + ssize_t error; + + entry = container_of(attr, struct skhpb_sysfs_entry, attr); + hpb = container_of(kobj, struct skhpb_lu, kobj); + + if (!entry->store) + return -EIO; + + mutex_lock(&hpb->sysfs_lock); + error = entry->store(hpb, page, length); + mutex_unlock(&hpb->sysfs_lock); + return error; +} + +static const struct sysfs_ops skhpb_sysfs_ops = { + .show = skhpb_attr_show, + .store = skhpb_attr_store, +}; + +static struct kobj_type skhpb_ktype = { + .sysfs_ops = &skhpb_sysfs_ops, + .release = NULL, +}; + +static int skhpb_create_sysfs(struct ufs_hba *hba, + struct skhpb_lu *hpb) +{ + struct device *dev = hba->dev; + struct skhpb_sysfs_entry *entry; + int err; + + hpb->sysfs_entries = skhpb_sysfs_entries; + + skhpb_stat_init(hpb); + + kobject_init(&hpb->kobj, &skhpb_ktype); + mutex_init(&hpb->sysfs_lock); + + err = kobject_add(&hpb->kobj, kobject_get(&dev->kobj), + "ufshpb_lu%d", hpb->lun); + if (!err) { + for (entry = hpb->sysfs_entries; + entry->attr.name != NULL ; entry++) { + if (sysfs_create_file(&hpb->kobj, &entry->attr)) + break; + } + kobject_uevent(&hpb->kobj, KOBJ_ADD); + } + return err; +} diff --git a/drivers/scsi/ufs/ufshpb_skh.h b/drivers/scsi/ufs/ufshpb_skh.h new file mode 100644 index 000000000000..fd3a617e708f --- /dev/null +++ b/drivers/scsi/ufs/ufshpb_skh.h @@ -0,0 +1,482 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2017-2018 Samsung Electronics Co., Ltd. + * Modified work Copyright (C) 2018, Google, Inc. 
+ * Modified work Copyright (C) 2019 SK hynix
+ */
+
+#ifndef _SKHPB_H_
+#define _SKHPB_H_
+
+#include
+#include
+#include
+
+/* Version info */
+#define SKHPB_DD_VER 0x010506
+
+/* QUIRKs */
+/* Use READ16 instead of the HPB_READ command.
+ * This is a workaround to counter the QCT ICE issue. */
+#define SKHPB_QUIRK_USE_READ_16_FOR_ENCRYPTION (1 << 0)
+
+/* This quirk makes the HPB driver always work in Device Control Mode,
+ * to cover the old Configuration descriptor format which interprets
+ * the bHPBControl field as RESERVED. */
+#define SKHPB_QUIRK_ALWAYS_DEVICE_CONTROL_MODE (1 << 1)
+
+/* Discard SubRegion activation hint information that has been processed
+ * when the host enters RPM/SPM sleep.
+ * This bit must not be set in ufs_quirks.h. */
+#define SKHPB_QUIRK_PURGE_HINT_INFO_WHEN_SLEEP (1 << 20)
+
+/* Constant values */
+#define SKHPB_SECTOR 512
+#define SKHPB_BLOCK 4096
+#define SKHPB_SECTORS_PER_BLOCK (SKHPB_BLOCK / SKHPB_SECTOR)
+#define SKHPB_BITS_PER_DWORD 32
+#define SKHPB_MAX_ACTIVE_NUM 2
+#define SKHPB_MAX_INACTIVE_NUM 2
+
+#define SKHPB_ENTRY_SIZE 0x08
+#define SKHPB_ENTREIS_PER_OS_PAGE (PAGE_SIZE / SKHPB_ENTRY_SIZE)
+
+/* Description */
+#define SKHPB_UFS_FEATURE_SUPPORT_HPB_BIT 0x80
+
+#define SKHPB_QUERY_DESC_DEVICE_MAX_SIZE 0x43
+#define SKHPB_QUERY_DESC_CONFIGURAION_MAX_SIZE 0xE6
+#define SKHPB_QUERY_DESC_UNIT_MAX_SIZE 0x29
+#define SKHPB_QUERY_DESC_GEOMETRY_MAX_SIZE 0x4D
+
+/* Configuration for HPB */
+#define SKHPB_CONF_LU_ENABLE 0x00
+#define SKHPB_CONF_ACTIVE_REGIONS 0x10
+#define SKHPB_CONF_PINNED_START 0x12
+#define SKHPB_CONF_PINNED_NUM 0x14
+
+/* Parameter Macros */
+#define SKHPB_DEV(h) ((h)->hba->dev)
+#define SKHPB_MAX_BVEC_SIZE 128
+
+/* Used for HPB activation */
+#define SKHPB_CONFIG_LEN 0xd0
+
+#define SKHPB_READ_LARGE_CHUNK_SUPPORT
+#define SKHPB_READ_LARGE_CHUNK_MAX_BLOCK_COUNT (128) /* TRANSFER LENGTH: 8 bits */
+
+typedef u64 skhpb_t;
+
+enum skhpb_lu_set {
+	LU_DISABLE = 0x00,
+	LU_ENABLE = 0x01,
+	LU_HPB_ENABLE = 0x02,
+	LU_SET_MAX,
+};
+
+struct skhpb_config_desc {
+	unsigned char conf_dev_desc[16];
+	unsigned char unit[UFS_UPIU_MAX_GENERAL_LUN][24];
+};
+
+/* Response UPIU types */
+#define SKHPB_RSP_NONE 0x00
+#define SKHPB_RSP_REQ_REGION_UPDATE 0x01
+#define SKHPB_RSP_HPB_RESET 0x02
+#define SKHPB_PER_ACTIVE_INFO_BYTES 4
+#define SKHPB_PER_INACTIVE_INFO_BYTES 2
+
+/* Vendor-defined OPCODEs */
+#define SKHPB_READ 0xF8
+#define SKHPB_READ_BUFFER 0xF9
+#define SKHPB_WRITE_BUFFER 0xFA
+
+#define SKHPB_DEV_DATA_SEG_LEN 0x14
+#define SKHPB_DEV_SENSE_SEG_LEN 0x12
+#define SKHPB_DEV_DES_TYPE 0x80
+#define SKHPB_DEV_ADDITIONAL_LEN 0x10
+
+/* BYTE SHIFT */
+#define SKHPB_ZERO_BYTE_SHIFT 0
+#define SKHPB_ONE_BYTE_SHIFT 8
+#define SKHPB_TWO_BYTE_SHIFT 16
+#define SKHPB_THREE_BYTE_SHIFT 24
+
+#define SKHPB_SHIFT_BYTE_0(num) ((num) << SKHPB_ZERO_BYTE_SHIFT)
+#define SKHPB_SHIFT_BYTE_1(num) ((num) << SKHPB_ONE_BYTE_SHIFT)
+
+#define SKHPB_GET_BYTE_0(num) (((num) >> SKHPB_ZERO_BYTE_SHIFT) & 0xff)
+#define SKHPB_GET_BYTE_1(num) (((num) >> SKHPB_ONE_BYTE_SHIFT) & 0xff)
+#define SKHPB_GET_BYTE_2(num) (((num) >> SKHPB_TWO_BYTE_SHIFT) & 0xff)
+#define SKHPB_GET_BYTE_3(num) (((num) >> SKHPB_THREE_BYTE_SHIFT) & 0xff)
+
+#define REGION_UNIT_SIZE(bit_offset) (0x01 << (bit_offset))
+
+enum SKHPB_STATE {
+	SKHPB_PRESENT = 1,
+	SKHPB_NOT_SUPPORTED = -1,
+	SKHPB_FAILED = -2,
+	SKHPB_NEED_INIT = 0,
+	SKHPB_RESET = -3,
+};
+
+enum SKHPB_BUFFER_MODE {
+	R_BUFFER = 0,
+	W_BUFFER = 1,
+};
+
+enum SKHPB_CMD {
+	SKHPB_CMD_READ = 0,
+	SKHPB_CMD_WRITE = 1,
+	SKHPB_CMD_DISCARD = 2,
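+	/*
+	 * Note: these classes presumably drive the per-command dirty-map
+	 * bookkeeping: WRITE and DISCARD invalidate cached L2P entries,
+	 * READ consults them.
+	 */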
SKHPB_CMD_OTHERS = 3, +}; + +enum SKHPB_REGION_STATE { + SKHPB_REGION_INACTIVE, + SKHPB_REGION_ACTIVE, +}; + +enum SKHPB_SUBREGION_STATE { + SKHPB_SUBREGION_UNUSED, + SKHPB_SUBREGION_DIRTY, + SKHPB_SUBREGION_CLEAN, + SKHPB_SUBREGION_ISSUED, +}; + +enum SKHPB_CONTROL_MODE { + HOST_CTRL_MODE = 0, + DEV_CTRL_MODE = 1, +}; + +enum SKHPB_RST_TIME { + SKHPB_MAP_RSP_DISABLE = 0, + SKHPB_MAP_RSP_ENABLE = 1, +}; + +struct skhpb_func_desc { + /*** Device Descriptor ***/ + /* 06h bNumberLU */ + int lu_cnt; + /* 10h wSpecVersion */ + u16 spec_ver; + /* 40h HPB Version */ + u16 hpb_ver; + /* 42h HPB control mode */ + u8 hpb_control_mode; + + /*** Geometry Descriptor ***/ + /* 48h bHPBRegionSize (UNIT: 512KB) */ + u8 hpb_region_size; + /* 49h bHPBNumberLU */ + u8 hpb_number_lu; + /* 4Ah bHPBSubRegionSize */ + u8 hpb_subregion_size; + /* 4B:4Ch wDeviceMaxActiveHPBRegions */ + u16 hpb_device_max_active_regions; +}; + +struct skhpb_lu_desc { + /*** Unit Descriptor ****/ + /* 03h bLUEnable */ + int lu_enable; + /* 06h lu queue depth info*/ + int lu_queue_depth; + /* 0Ah bLogicalBlockSize. default 0x0C = 4KB */ + int lu_logblk_size; + /* 0Bh qLogicalBlockCount. same as the read_capacity ret val. */ + u64 lu_logblk_cnt; + + /* 23h:24h wLUMaxActiveHPBRegions */ + u16 lu_max_active_hpb_regions; + /* 25h:26h wHPBPinnedRegionStartIdx */ + u16 hpb_pinned_region_startidx; + /* 27h:28h wNumHPBPinnedRegions */ + u16 lu_num_hpb_pinned_regions; + + + /* if 03h value is 02h, hpb_enable is set. */ + bool lu_hpb_enable; + + int lu_hpb_pinned_end_offset; +}; + +struct skhpb_rsp_active_list { + u16 region[SKHPB_MAX_ACTIVE_NUM]; + u16 subregion[SKHPB_MAX_ACTIVE_NUM]; +}; + +struct skhpb_rsp_inactive_list { + u16 region[SKHPB_MAX_INACTIVE_NUM]; +}; + +struct skhpb_rsp_update_entry { + unsigned int lpn; + skhpb_t ppn; +}; + +struct skhpb_rsp_info { + int type; + int active_cnt; + int inactive_cnt; + struct skhpb_rsp_active_list active_list; + struct skhpb_rsp_inactive_list inactive_list; + + __u64 RSP_start; + __u64 RSP_tasklet_enter; + + struct list_head list_rsp_info; +}; + +struct skhpb_rsp_field { + u8 sense_data_len[2]; + u8 desc_type; + u8 additional_len; + u8 hpb_type; + u8 lun; + u8 active_region_cnt; + u8 inactive_region_cnt; + u8 hpb_active_field[8]; + u8 hpb_inactive_field[4]; +}; + +struct skhpb_map_ctx { + struct page **m_page; + unsigned int *ppn_dirty; + + struct list_head list_table; +}; + +struct skhpb_subregion { + struct skhpb_map_ctx *mctx; + enum SKHPB_SUBREGION_STATE subregion_state; + int region; + int subregion; + bool last; + struct list_head list_subregion; +}; + +struct skhpb_region { + struct skhpb_subregion *subregion_tbl; + enum SKHPB_REGION_STATE region_state; + bool is_pinned; + int region; + int subregion_count; + /*below information is used by lru*/ + struct list_head list_region; + int hit_count; +}; + +struct skhpb_map_req { + struct skhpb_lu *hpb; + struct skhpb_map_ctx *mctx; + struct bio bio; + struct bio *pbio; + struct bio_vec bvec[SKHPB_MAX_BVEC_SIZE]; + void (*end_io)(struct request *rq, int err); + void *end_io_data; + int region; + int subregion; + int subregion_mem_size; + int lun; + int retry_cnt; + + /* for debug : RSP Profiling */ + __u64 RSP_start; // get the request from device + __u64 RSP_issue; // issue scsi cmd + __u64 RSP_end; // complete the request + + char sense[SCSI_SENSE_BUFFERSIZE]; + + struct list_head list_map_req; + int rwbuffer_flag; +}; + +enum SKHPB_SELECTION_TYPE { + TYPE_LRU = 1, + TYPE_LFU = 2, +}; + +struct skhpb_victim_select_info { + int selection_type; + 
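+	/*
+	 * Note: with TYPE_LRU, the least-recently-used region on this list
+	 * is presumably evicted once active_count would exceed
+	 * max_lru_active_count; pinned regions never join the list.
+	 */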
struct list_head lru; + int max_lru_active_count; // supported hpb #region - pinned #region + atomic64_t active_count; +}; + +struct skhpb_lu { + struct skhpb_region *region_tbl; + struct skhpb_rsp_info *rsp_info; + struct skhpb_map_req *map_req; + + struct list_head lh_map_ctx; + struct list_head lh_subregion_req; + struct list_head lh_rsp_info; + + struct list_head lh_rsp_info_free; + struct list_head lh_map_req_free; + struct list_head lh_map_req_retry; + int debug_free_table; + + bool lu_hpb_enable; + + struct delayed_work skhpb_pinned_work; + struct delayed_work skhpb_map_req_retry_work; + struct work_struct skhpb_rsp_work; + struct bio_vec bvec[SKHPB_MAX_BVEC_SIZE]; + + int subregions_per_lu; + int regions_per_lu; + int subregion_mem_size; + int last_subregion_mem_size; + + /* for selecting victim */ + struct skhpb_victim_select_info lru_info; + + int hpb_ver; + int lu_max_active_regions; + + int entries_per_subregion; + int entries_per_subregion_shift; + int entries_per_subregion_mask; + + int entries_per_region_shift; + int entries_per_region_mask; + int subregions_per_region; + + int dwords_per_subregion; + unsigned long long subregion_unit_size; + bool identical_size; + +#define BITS_PER_PPN_DIRTY (BITS_PER_BYTE * sizeof(unsigned int)) + int ppn_dirties_per_subregion; + + int mpage_bytes; + int mpages_per_subregion; + + /* for debug constant variables */ + unsigned long long lu_num_blocks; + + u8 lun; + + struct ufs_hba *hba; + + spinlock_t hpb_lock; + spinlock_t rsp_list_lock; + spinlock_t map_list_lock; + + struct kobject kobj; + struct mutex sysfs_lock; + struct skhpb_sysfs_entry *sysfs_entries; + + bool hpb_control_mode; + + /* for debug */ + bool force_hpb_read_disable; + bool force_map_req_disable; + bool read_buf_debug; + atomic64_t hit; + atomic64_t size_miss; + atomic64_t region_miss; + atomic64_t subregion_miss; + atomic64_t entry_dirty_miss; + atomic64_t rb_noti_cnt; + atomic64_t rb_fail; + atomic64_t reset_noti_cnt; + atomic64_t w_map_req_cnt; +#if defined(SKHPB_READ_LARGE_CHUNK_SUPPORT) + atomic64_t lc_entry_dirty_miss; + atomic64_t lc_reg_subreg_miss; + atomic64_t lc_hit; +#endif + atomic64_t map_req_cnt; + atomic64_t region_add; + atomic64_t region_evict; + atomic64_t canceled_resp; + atomic64_t canceled_map_req; + atomic64_t alloc_map_req_cnt; +}; + +struct skhpb_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct skhpb_lu *hpb, char *buf); + ssize_t (*store)(struct skhpb_lu *hpb, const char *, size_t); +}; + +struct ufshcd_lrb; + +void ufshcd_init_hpb(struct ufs_hba *hba); +void skhpb_init_handler(struct work_struct *work); +void skhpb_prep_fn(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); +void skhpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); +void skhpb_suspend(struct ufs_hba *hba); +void skhpb_resume(struct ufs_hba *hba); +void skhpb_release(struct ufs_hba *hba, int state); +int skhpb_issue_req_dev_ctx(struct skhpb_lu *hpb, unsigned char *buf, + int buf_length); +int skhpb_control_validation(struct ufs_hba *hba, + struct skhpb_config_desc *config); + +extern u32 skhpb_debug_mask; +extern int debug_map_req; +enum SKHPB_LOG_LEVEL { + SKHPB_LOG_LEVEL_OFF = 0, + SKHPB_LOG_LEVEL_ERR = 1, + SKHPB_LOG_LEVEL_INFO = 2, + SKHPB_LOG_LEVEL_DEBUG = 3, + SKHPB_LOG_LEVEL_HEX = 4, +}; +enum SKHPB_LOG_MASK { + SKHPB_LOG_OFF = SKHPB_LOG_LEVEL_OFF, /* 0 */ + SKHPB_LOG_ERR = (1U << SKHPB_LOG_LEVEL_ERR), /* 2 */ + SKHPB_LOG_INFO = (1U << SKHPB_LOG_LEVEL_INFO), /* 4 */ + SKHPB_LOG_DEBUG = (1U << SKHPB_LOG_LEVEL_DEBUG), /* 8 */ + SKHPB_LOG_HEX = (1U << 
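+	/*
+	 * Note: level N maps to mask bit (1 << N), and the debug_log sysfs
+	 * store ORs in all masks up to the chosen level, e.g. level 3 sets
+	 * skhpb_debug_mask = ERR|INFO|DEBUG = 2|4|8 = 14.
+	 */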
SKHPB_LOG_LEVEL_HEX), /* 16 */ +}; +#define SKHPB_DRIVER_E(fmt, args...) \ + do { \ + if (likely(skhpb_debug_mask & SKHPB_LOG_ERR)) \ + pr_err("[HPB E][%s:%d] " fmt, __func__, __LINE__, ##args); \ + } while (0) + +#define SKHPB_DRIVER_I(fmt, args...) \ + do { \ + if (unlikely(skhpb_debug_mask & SKHPB_LOG_INFO)) \ + pr_err("[HPB][%s:%d] " fmt, __func__, __LINE__, ##args); \ + } while (0) + +#define SKHPB_DRIVER_D(fmt, args...) \ + do { \ + if (unlikely(skhpb_debug_mask & SKHPB_LOG_DEBUG)) \ + printk(KERN_DEBUG "[HPB][%s:%d] " fmt, __func__, __LINE__, ##args); \ + } while (0) + +#define SKHPB_DRIVER_HEXDUMP(fmt, args...) \ + do { \ + if (unlikely(skhpb_debug_mask & SKHPB_LOG_HEX)) { \ + print_hex_dump(KERN_DEBUG, fmt, DUMP_PREFIX_ADDRESS, ##args); \ + } \ + } while (0) + +#define SKHPB_MAP_REQ_TIME(map_req, val, print) \ + do { \ + if (unlikely(debug_map_req)) { \ + val = ktime_to_us(ktime_get()); \ + if (print) { \ + SKHPB_DRIVER_I("SKHPB COMPL BUFFER %d - %d\n", \ + map_req->region, map_req->subregion); \ + SKHPB_DRIVER_I("start~issue = %lluus, issue~end = %lluus\n", \ + map_req->RSP_issue - map_req->RSP_start, \ + map_req->RSP_end - map_req->RSP_issue); \ + } \ + } \ + } while (0) + +#define SKHPB_RSP_TIME(val) \ + do { \ + if (unlikely(debug_map_req)) { \ + val = ktime_to_us(ktime_get()); \ + } \ + } while (0) + +#endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufstw.c b/drivers/scsi/ufs/ufstw.c new file mode 100644 index 000000000000..2e00455348c0 --- /dev/null +++ b/drivers/scsi/ufs/ufstw.c @@ -0,0 +1,1714 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2017-2018 Samsung Electronics Co., Ltd. + */ + + +#include "ufshcd.h" +#include "ufstw.h" +#include "ufs_quirks.h" + +#define UFS_MTK_TW_AWAYS_ON + +static int ufstw_create_sysfs(struct ufsf_feature *ufsf, struct ufstw_lu *tw); +static int ufstw_clear_lu_flag(struct ufstw_lu *tw, u8 idn, bool *flag_res); +static int ufstw_read_lu_attr(struct ufstw_lu *tw, u8 idn, u32 *attr_val); + +static inline void ufstw_lu_get(struct ufstw_lu *tw) +{ + kref_get(&tw->ufsf->tw_kref); +} + +static inline void ufstw_lu_put(struct ufstw_lu *tw) +{ + kref_put(&tw->ufsf->tw_kref, ufstw_release); +} + +static inline bool ufstw_is_write_lrbp(struct ufshcd_lrb *lrbp) +{ + if (lrbp->cmd->cmnd[0] == WRITE_10 || lrbp->cmd->cmnd[0] == WRITE_16) + return true; + + return false; +} + +static int ufstw_switch_mode(struct ufstw_lu *tw, int tw_mode) +{ + int ret = 0; + + atomic_set(&tw->tw_mode, tw_mode); + if (tw->tw_enable) + ret = ufstw_clear_lu_flag(tw, QUERY_FLAG_IDN_TW_EN, + &tw->tw_enable); + return ret; +} + +static void ufstw_switch_disable_mode(struct ufstw_lu *tw) +{ + WARNING_MSG("dTurboWriteBUfferLifeTImeEst 0x%X", tw->tw_lifetime_est); + WARNING_MSG("tw-mode will change to disable-mode.."); + + mutex_lock(&tw->mode_lock); + ufstw_switch_mode(tw, TW_MODE_DISABLED); + mutex_unlock(&tw->mode_lock); +} + +static void ufstw_lifetime_work_fn(struct work_struct *work) +{ + struct ufstw_lu *tw; + + tw = container_of(work, struct ufstw_lu, tw_lifetime_work); + + ufstw_lu_get(tw); + + if (atomic_read(&tw->ufsf->tw_state) != TW_PRESENT) { + INFO_MSG("tw_state != TW_PRESENT (%d)", + atomic_read(&tw->ufsf->tw_state)); + goto out; + } + + if (ufstw_read_lu_attr(tw, QUERY_ATTR_IDN_TW_BUF_LIFETIME_EST, + &tw->tw_lifetime_est)) + goto out; + +#if defined(CONFIG_UFSTW_IGNORE_GUARANTEE_BIT) + if (tw->tw_lifetime_est & MASK_UFSTW_LIFETIME_NOT_GUARANTEE) { + WARNING_MSG("warn: lun %d - dTurboWriteBufferLifeTimeEst[31] == 1", tw->lun); + 
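+		/*
+		 * Note: MASK_UFSTW_LIFETIME_NOT_GUARANTEE (0x80000080) covers
+		 * bit 31 (TW 1.0.1) and bit 7 (TW 1.1.0) of
+		 * dTurboWriteBufferLifeTimeEst, per the comment in ufstw.h;
+		 * the remaining bits carry the 0x01..0x0B lifetime step.
+		 */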
WARNING_MSG("Device not guarantee the lifetime of Turbo Write Buffer"); + WARNING_MSG("but we will ignore them for PoC"); + } +#else + if (tw->tw_lifetime_est & MASK_UFSTW_LIFETIME_NOT_GUARANTEE) { + WARNING_MSG("warn: lun %d - dTurboWriteBufferLifeTimeEst[31] == 1", tw->lun); + WARNING_MSG("Device not guarantee the lifetime of Turbo Write Buffer"); + WARNING_MSG("So tw_mode change to disable_mode"); + goto tw_disable; + } +#endif + if ((tw->tw_lifetime_est & ~MASK_UFSTW_LIFETIME_NOT_GUARANTEE) + < UFSTW_MAX_LIFETIME_VALUE) + goto out; + else + goto tw_disable; +tw_disable: + ufstw_switch_disable_mode(tw); +out: + ufstw_lu_put(tw); +} + +void ufstw_prep_fn(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp) +{ + struct ufstw_lu *tw; + + if (!lrbp || !ufsf_is_valid_lun(lrbp->lun)) + return; + + if (!ufstw_is_write_lrbp(lrbp)) + return; + + tw = ufsf->tw_lup[lrbp->lun]; + if (!tw) + return; + + if (atomic_read(&tw->tw_mode) == TW_MODE_DISABLED) + return; + + if (!tw->tw_enable) + return; + + spin_lock_bh(&tw->lifetime_lock); + tw->stat_write_sec += blk_rq_sectors(lrbp->cmd->request); + + if (tw->stat_write_sec > UFSTW_LIFETIME_SECT) { + tw->stat_write_sec = 0; + spin_unlock_bh(&tw->lifetime_lock); + schedule_work(&tw->tw_lifetime_work); + return; + } + + blk_add_trace_msg(tw->ufsf->sdev_ufs_lu[tw->lun]->request_queue, + "%s:%d tw_lifetime_work %u", + __func__, __LINE__, tw->stat_write_sec); + spin_unlock_bh(&tw->lifetime_lock); +} + +static u8 ufstw_get_query_idx(struct ufstw_lu *tw) +{ + u8 idx; + + /* Share buffer type only use idx 0 */ + if (tw->ufsf->tw_dev_info.tw_buf_type == WB_SINGLE_SHARE_BUFFER_TYPE) + idx = 0; + else + idx = (u8)tw->lun; + + return idx; +} + +static int ufstw_read_lu_attr(struct ufstw_lu *tw, u8 idn, u32 *attr_val) +{ + struct ufs_hba *hba = tw->ufsf->hba; + int err; + u32 val; + u8 idx; + + pm_runtime_get_sync(hba->dev); + + ufstw_lu_get(tw); + + idx = ufstw_get_query_idx(tw); + err = ufsf_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, idn, + idx, &val); + if (err) { + ERR_MSG("read attr [0x%.2X] failed...err %d", idn, err); + ufstw_lu_put(tw); + pm_runtime_put_sync(hba->dev); + return err; + } + + *attr_val = val; + + blk_add_trace_msg(tw->ufsf->sdev_ufs_lu[tw->lun]->request_queue, + "%s:%d IDN %s (%d)", __func__, __LINE__, + idn == QUERY_ATTR_IDN_TW_FLUSH_STATUS ? "TW_FLUSH_STATUS" : + idn == QUERY_ATTR_IDN_TW_BUF_SIZE ? "TW_BUF_SIZE" : + idn == QUERY_ATTR_IDN_TW_BUF_LIFETIME_EST ? "TW_BUF_LIFETIME_EST" : + "UNKNOWN", idn); + + TW_DEBUG(tw->ufsf, "tw_attr LUN(%d) [0x%.2X] %u", tw->lun, idn, + *attr_val); + + ufstw_lu_put(tw); + pm_runtime_put_sync(hba->dev); + + return 0; +} + +static int ufstw_set_lu_flag(struct ufstw_lu *tw, u8 idn, bool *flag_res) +{ + struct ufs_hba *hba = tw->ufsf->hba; + int err; + u8 idx; + + pm_runtime_get_sync(hba->dev); + ufstw_lu_get(tw); + + idx = ufstw_get_query_idx(tw); + err = ufsf_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, idn, + idx, NULL); + if (err) { + ERR_MSG("set flag [0x%.2X] failed...err %d", idn, err); + ufstw_lu_put(tw); + pm_runtime_put_sync(hba->dev); + return err; + } + + *flag_res = true; + blk_add_trace_msg(tw->ufsf->sdev_ufs_lu[tw->lun]->request_queue, + "%s:%d IDN %s (%d)", __func__, __LINE__, + idn == QUERY_FLAG_IDN_TW_EN ? "TW_EN" : + idn == QUERY_FLAG_IDN_TW_BUF_FLUSH_EN ? "FLUSH_EN" : + idn == QUERY_FLAG_IDN_TW_FLUSH_DURING_HIBERN ? 
+ "HIBERN_EN" : "UNKNOWN", idn); + + TW_DEBUG(tw->ufsf, "tw_flag LUN(%d) [0x%.2X] %u", tw->lun, idn, + *flag_res); + + ufstw_lu_put(tw); + pm_runtime_put_sync(hba->dev); + + return 0; +} + +static int ufstw_clear_lu_flag(struct ufstw_lu *tw, u8 idn, bool *flag_res) +{ + struct ufs_hba *hba = tw->ufsf->hba; + int err; + u8 idx; + + pm_runtime_get_sync(hba->dev); + ufstw_lu_get(tw); + + idx = ufstw_get_query_idx(tw); + err = ufsf_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, idn, + idx, NULL); + if (err) { + ERR_MSG("clear flag [0x%.2X] failed...err%d", idn, err); + ufstw_lu_put(tw); + pm_runtime_put_sync(hba->dev); + return err; + } + + *flag_res = false; + + blk_add_trace_msg(tw->ufsf->sdev_ufs_lu[tw->lun]->request_queue, + "%s:%d IDN %s (%d)", __func__, __LINE__, + idn == QUERY_FLAG_IDN_TW_EN ? "TW_EN" : + idn == QUERY_FLAG_IDN_TW_BUF_FLUSH_EN ? "FLUSH_EN" : + idn == QUERY_FLAG_IDN_TW_FLUSH_DURING_HIBERN ? "HIBERN_EN" : + "UNKNOWN", idn); + + TW_DEBUG(tw->ufsf, "tw_flag LUN(%d) [0x%.2X] %u", tw->lun, idn, + *flag_res); + + ufstw_lu_put(tw); + pm_runtime_put_sync(hba->dev); + return 0; +} + +static inline int ufstw_read_lu_flag(struct ufstw_lu *tw, u8 idn, + bool *flag_res) +{ + struct ufs_hba *hba = tw->ufsf->hba; + int err; + bool val = 0; + u8 idx; + + pm_runtime_get_sync(hba->dev); + ufstw_lu_get(tw); + + idx = ufstw_get_query_idx(tw); + err = ufsf_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, idn, + idx, &val); + if (err) { + ERR_MSG("read flag [0x%.2X] failed...err%d", idn, err); + ufstw_lu_put(tw); + pm_runtime_put_sync(hba->dev); + return err; + } + + *flag_res = val; + + TW_DEBUG(tw->ufsf, "tw_flag LUN(%d) [0x%.2X] %u", tw->lun, idn, + *flag_res); + + ufstw_lu_put(tw); + pm_runtime_put_sync(hba->dev); + return 0; + +} + +/* device level (ufsf) */ +static int ufstw_auto_ee(struct ufsf_feature *ufsf) +{ + struct ufs_hba *hba = ufsf->hba; + u16 mask = MASK_EE_TW; + u32 val; + int err = 0; + + pm_runtime_get_sync(hba->dev); + + if (hba->ee_ctrl_mask & mask) + goto out; + + val = hba->ee_ctrl_mask | mask; + val &= 0xFFFF; /* 2 bytes */ + err = ufsf_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_EE_CONTROL, 0, &val); + if (err) { + ERR_MSG("failed to enable exception event err%d", err); + goto out; + } + + hba->ee_ctrl_mask |= mask; + ufsf->tw_ee_mode = true; + + TW_DEBUG(ufsf, "turbo_write_exception_event_enable"); +out: + pm_runtime_put_sync(hba->dev); + return err; +} + +/* device level (ufsf) */ +static int ufstw_disable_ee(struct ufsf_feature *ufsf) +{ + struct ufs_hba *hba = ufsf->hba; + u16 mask = MASK_EE_TW; + int err = 0; + u32 val; + + pm_runtime_get_sync(hba->dev); + + if (!(hba->ee_ctrl_mask & mask)) + goto out; + + val = hba->ee_ctrl_mask & ~mask; + val &= 0xFFFF; /* 2 bytes */ + err = ufsf_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_EE_CONTROL, 0, &val); + if (err) { + ERR_MSG("failed to disable exception event err%d", err); + goto out; + } + + hba->ee_ctrl_mask &= ~mask; + ufsf->tw_ee_mode = false; + + TW_DEBUG(ufsf, "turbo_write_exeception_event_disable"); +out: + pm_runtime_put_sync(hba->dev); + return err; +} + +static void ufstw_flush_work_fn(struct work_struct *dwork) +{ + struct ufs_hba *hba; + struct ufstw_lu *tw; + bool need_resched = false; + + tw = container_of(dwork, struct ufstw_lu, tw_flush_work.work); + + TW_DEBUG(tw->ufsf, "start flush worker"); + + ufstw_lu_get(tw); + if (atomic_read(&tw->ufsf->tw_state) != TW_PRESENT) { + ERR_MSG("tw_state != TW_PRESENT (%d)", + atomic_read(&tw->ufsf->tw_state)); + 
ufstw_lu_put(tw); + return; + } + + hba = tw->ufsf->hba; + if (tw->next_q && time_before(jiffies, tw->next_q)) { + if (schedule_delayed_work(&tw->tw_flush_work, + tw->next_q - jiffies)) + pm_runtime_get_noresume(hba->dev); + ufstw_lu_put(tw); + return; + } + + pm_runtime_get_sync(hba->dev); + if (ufstw_read_lu_attr(tw, QUERY_ATTR_IDN_TW_BUF_SIZE, + &tw->tw_available_buffer_size)) + goto error_put; + + mutex_lock(&tw->flush_lock); + + if (tw->tw_flush_during_hibern_enter && + tw->tw_available_buffer_size >= tw->flush_th_max) { + TW_DEBUG(tw->ufsf, "flush_disable QR (%d, %d)", + tw->lun, tw->tw_available_buffer_size); + + if (ufstw_clear_lu_flag(tw, + QUERY_FLAG_IDN_TW_FLUSH_DURING_HIBERN, + &tw->tw_flush_during_hibern_enter)) + goto error_unlock; + tw->next_q = 0; + need_resched = false; + } else if (tw->tw_available_buffer_size < tw->flush_th_max) { + if (tw->tw_flush_during_hibern_enter) { + need_resched = true; + } else if (tw->tw_available_buffer_size < tw->flush_th_min) { + TW_DEBUG(tw->ufsf, "flush_enable QR (%d, %d)", + tw->lun, tw->tw_available_buffer_size); + if (ufstw_set_lu_flag(tw, + QUERY_FLAG_IDN_TW_FLUSH_DURING_HIBERN, + &tw->tw_flush_during_hibern_enter)) + goto error_unlock; + need_resched = true; + } else { + need_resched = false; + } + } + mutex_unlock(&tw->flush_lock); + + pm_runtime_put_noidle(hba->dev); + pm_runtime_put(hba->dev); + + if (need_resched) { + tw->next_q = + jiffies + msecs_to_jiffies(UFSTW_FLUSH_CHECK_PERIOD_MS); + if (schedule_delayed_work(&tw->tw_flush_work, + msecs_to_jiffies(UFSTW_FLUSH_CHECK_PERIOD_MS))) + pm_runtime_get_noresume(hba->dev); + } + ufstw_lu_put(tw); + return; +error_unlock: + mutex_unlock(&tw->flush_lock); +error_put: + pm_runtime_put_noidle(hba->dev); + pm_runtime_put(hba->dev); + + if (tw->next_q) { + tw->next_q = + jiffies + msecs_to_jiffies(UFSTW_FLUSH_CHECK_PERIOD_MS); + if (schedule_delayed_work(&tw->tw_flush_work, + msecs_to_jiffies(UFSTW_FLUSH_CHECK_PERIOD_MS))) + pm_runtime_get_noresume(hba->dev); + } + ufstw_lu_put(tw); +} + +void ufstw_error_handler(struct ufsf_feature *ufsf) +{ + ERR_MSG("tw_state : %d -> %d", atomic_read(&ufsf->tw_state), TW_FAILED); + atomic_set(&ufsf->tw_state, TW_FAILED); + dump_stack(); + kref_put(&ufsf->tw_kref, ufstw_release); +} + +void ufstw_ee_handler(struct ufsf_feature *ufsf) +{ + struct ufs_hba *hba; + int lun; + + hba = ufsf->hba; + + if (ufsf->tw_debug) + atomic64_inc(&ufsf->tw_debug_ee_count); + + seq_scan_lu(lun) { + if (!ufsf->tw_lup[lun]) + continue; + + if (!ufsf->sdev_ufs_lu[lun]) { + WARNING_MSG("warn: lun %d don't have scsi_device", lun); + continue; + } + + ufstw_lu_get(ufsf->tw_lup[lun]); + if (!delayed_work_pending(&ufsf->tw_lup[lun]->tw_flush_work)) { + ufsf->tw_lup[lun]->next_q = jiffies; + if (schedule_delayed_work(&ufsf->tw_lup[lun]->tw_flush_work, + msecs_to_jiffies(0))) + pm_runtime_get_noresume(hba->dev); + } + ufstw_lu_put(ufsf->tw_lup[lun]); + } +} + +static void ufstw_flush_h8_work_fn(struct work_struct *dwork) +{ + struct ufs_hba *hba; + struct ufstw_lu *tw; + + tw = container_of(dwork, struct ufstw_lu, tw_flush_h8_work.work); + hba = tw->ufsf->hba; + + /* Exit runtime suspend and do flush in hibern 1 sec */ + pm_runtime_get_sync(hba->dev); + msleep(1000); + + /* Check again, if still need flush reschedule itself */ + if (ufstw_read_lu_attr(tw, QUERY_ATTR_IDN_TW_BUF_SIZE, + &tw->tw_available_buffer_size)) { + ERR_MSG("ERROR: get available tw buffer size error"); + goto out; + } + if (tw->tw_available_buffer_size < tw->flush_th_max) + 
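+		/*
+		 * Note: the thresholds are in tenths of the TW buffer (0..10),
+		 * cf. the "< %d0%%" message in ufstw_need_flush(); e.g. with
+		 * flush_th_max = 8 this work re-arms until at least 80% of the
+		 * buffer is available again.
+		 */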
schedule_delayed_work(&tw->tw_flush_h8_work, 0); + +out: + pm_runtime_put_sync(hba->dev); +} + +bool ufstw_need_flush(struct ufsf_feature *ufsf) +{ + struct ufs_hba *hba = ufsf->hba; + struct ufstw_lu *tw; + bool need_flush = false; + u8 idx; + + if (!ufsf->tw_lup[2]) + goto out; + + if (!ufsf->sdev_ufs_lu[2]) { + WARNING_MSG("warn: lun 2 don't have scsi_device"); + goto out; + } + + tw = ufsf->tw_lup[2]; + + if (atomic_read(&tw->tw_mode) == TW_MODE_DISABLED) + goto out; + + if (!tw->tw_enable) + goto out; + + if (atomic_read(&tw->ufsf->tw_state) != TW_PRESENT) + goto out; + + ufstw_lu_get(tw); + + /* + * No need check again, let ufstw_flush_h8_work_fn finish is enough. + * Only return need_flush to break runtime/system suspend. + */ + if (work_busy(&tw->tw_flush_h8_work.work)) { + need_flush = true; + goto out_put; + } + + idx = ufstw_get_query_idx(tw); + if (ufsf_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_TW_BUF_SIZE, + idx, + &tw->tw_available_buffer_size)) { + ERR_MSG("ERROR: get available tw buffer size error"); + goto out_put; + } + + /* No need flush */ + if (tw->tw_available_buffer_size >= tw->flush_th_max) + goto out_put; + + /* Need flush, check device flush method */ + if (hba->dev_quirks & UFS_DEVICE_QUIRK_WRITE_BOOSETER_FLUSH) { + /* Toshiba device recover WB by toggle fWriteBoosterEn */ + if (ufsf_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, + QUERY_FLAG_IDN_TW_EN, idx, NULL)) { + ERR_MSG("ERROR: disable tw error"); + goto out_put; + } + + if (ufsf_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, + QUERY_FLAG_IDN_TW_EN, idx, NULL)) { + ERR_MSG("ERROR: enable tw error"); + goto out_put; + } + } else { + /* Other device recover WB by hibernate */ + schedule_delayed_work(&tw->tw_flush_h8_work, 0); + need_flush = true; + } + +out_put: + if (need_flush) { + INFO_MSG("UFS TW available buffer size(%d) < %d0%%", + tw->tw_available_buffer_size, tw->flush_th_max); + } + + ufstw_lu_put(tw); +out: + return need_flush; +} + +static inline void ufstw_init_dev_jobs(struct ufsf_feature *ufsf) +{ + INIT_INFO("INIT_WORK(tw_reset_work)"); + INIT_WORK(&ufsf->tw_reset_work, ufstw_reset_work_fn); +} + +static inline void ufstw_init_lu_jobs(struct ufstw_lu *tw) +{ + INIT_INFO("INIT_DELAYED_WORK(tw_flush_work) ufstw_lu%d", tw->lun); + INIT_DELAYED_WORK(&tw->tw_flush_work, ufstw_flush_work_fn); + INIT_DELAYED_WORK(&tw->tw_flush_h8_work, ufstw_flush_h8_work_fn); + INIT_INFO("INIT_WORK(tw_lifetime_work)"); + INIT_WORK(&tw->tw_lifetime_work, ufstw_lifetime_work_fn); +} + +static inline void ufstw_cancel_lu_jobs(struct ufstw_lu *tw) +{ + int ret; + + ret = cancel_delayed_work_sync(&tw->tw_flush_work); + ret = cancel_work_sync(&tw->tw_lifetime_work); +} + +void ufstw_get_dev_info(struct ufstw_dev_info *tw_dev_info, u8 *desc_buf) +{ + struct ufsf_feature *ufsf; + struct ufs_hba *hba; + + ufsf = container_of(tw_dev_info, struct ufsf_feature, tw_dev_info); + hba = ufsf->hba; + + tw_dev_info->tw_device = false; + + if (UFSF_EFS_TURBO_WRITE + & LI_EN_32(&desc_buf[DEVICE_DESC_PARAM_EX_FEAT_SUP])) + INIT_INFO("bUFSExFeaturesSupport: TW support"); + else { + INIT_INFO("bUFSExFeaturesSupport: TW not support"); + return; + } + tw_dev_info->tw_buf_no_reduct = + desc_buf[DEVICE_DESC_PARAM_TW_RETURN_TO_USER]; + tw_dev_info->tw_buf_type = desc_buf[DEVICE_DESC_PARAM_TW_BUF_TYPE]; + + /* Set TW device if TW support */ + tw_dev_info->tw_device = true; + + INFO_MSG("tw_dev [53] bTurboWriteBufferNoUserSpaceReductionEn %u", + tw_dev_info->tw_buf_no_reduct); + INFO_MSG("tw_dev [54] 
bTurboWriteBufferType %u", + tw_dev_info->tw_buf_type); +} + +void ufstw_get_geo_info(struct ufstw_dev_info *tw_dev_info, u8 *geo_buf) +{ + tw_dev_info->tw_number_lu = geo_buf[GEOMETRY_DESC_TW_NUMBER_LU]; + if (tw_dev_info->tw_number_lu == 0) { + ERR_MSG("Turbo Write is not supported"); + tw_dev_info->tw_device = false; + return; + } + + INFO_MSG("tw_geo [4F:52] dTurboWriteBufferMaxNAllocUnits %u", + LI_EN_32(&geo_buf[GEOMETRY_DESC_TW_MAX_SIZE])); + INFO_MSG("tw_geo [53] bDeviceMaxTurboWriteLUs %u", + tw_dev_info->tw_number_lu); + INFO_MSG("tw_geo [54] bTurboWriteBufferCapAdjFac %u", + geo_buf[GEOMETRY_DESC_TW_CAP_ADJ_FAC]); + INFO_MSG("tw_geo [55] bSupportedTurboWriteBufferUserSpaceReductionTypes %u", + geo_buf[GEOMETRY_DESC_TW_SUPPORT_USER_REDUCTION_TYPES]); + INFO_MSG("tw_geo [56] bSupportedTurboWriteBufferTypes %u", + geo_buf[GEOMETRY_DESC_TW_SUPPORT_BUF_TYPE]); +} + +int ufstw_get_lu_info(struct ufsf_feature *ufsf, unsigned int lun, u8 *lu_buf) +{ + struct ufsf_lu_desc lu_desc; + struct ufstw_lu *tw; + + lu_desc.tw_lu_buf_size = + LI_EN_32(&lu_buf[UNIT_DESC_TW_LU_MAX_BUF_SIZE]); + + ufsf->tw_lup[lun] = NULL; + + /* MTK: tw_buf_type = 0(LU base), 1(Single share) */ + if ((lu_desc.tw_lu_buf_size) || + ((ufsf->tw_dev_info.tw_buf_type == WB_SINGLE_SHARE_BUFFER_TYPE) + && (lun == 2))) { + ufsf->tw_lup[lun] = + kzalloc(sizeof(struct ufstw_lu), GFP_KERNEL); + if (!ufsf->tw_lup[lun]) + return -ENOMEM; + + tw = ufsf->tw_lup[lun]; + tw->ufsf = ufsf; + tw->lun = lun; + INIT_INFO("tw_lu LUN(%d) [29:2C] dLUNumTurboWriteBufferAllocUnits %u", + lun, lu_desc.tw_lu_buf_size); + } else { + INIT_INFO("tw_lu LUN(%d) [29:2C] dLUNumTurboWriteBufferAllocUnits %u", + lun, lu_desc.tw_lu_buf_size); + INIT_INFO("===== LUN(%d) is TurboWrite-disabled.", lun); + return -ENODEV; + } + + return 0; +} + +static inline void ufstw_print_lu_flag_attr(struct ufstw_lu *tw) +{ + INFO_MSG("tw_flag LUN(%d) [%u] fTurboWriteEn %u", tw->lun, + QUERY_FLAG_IDN_TW_EN, tw->tw_enable); + INFO_MSG("tw_flag LUN(%d) [%u] fTurboWriteBufferFlushEn %u", tw->lun, + QUERY_FLAG_IDN_TW_BUF_FLUSH_EN, tw->tw_flush_enable); + INFO_MSG("tw_flag LUN(%d) [%u] fTurboWriteBufferFlushDuringHibernateEnter %u", + tw->lun, QUERY_FLAG_IDN_TW_FLUSH_DURING_HIBERN, + tw->tw_flush_during_hibern_enter); + + INFO_MSG("tw_attr LUN(%d) [%u] flush_status %u", tw->lun, + QUERY_ATTR_IDN_TW_FLUSH_STATUS, tw->tw_flush_status); + INFO_MSG("tw_attr LUN(%d) [%u] buffer_size %u", tw->lun, + QUERY_ATTR_IDN_TW_BUF_SIZE, tw->tw_available_buffer_size); + INFO_MSG("tw_attr LUN(%d) [%d] bufffer_lifetime %u(0x%X)", + tw->lun, QUERY_ATTR_IDN_TW_BUF_LIFETIME_EST, + tw->tw_lifetime_est, tw->tw_lifetime_est); +} + +static inline void ufstw_lu_update(struct ufstw_lu *tw) +{ + ufstw_lu_get(tw); + + /* Flag */ + if (ufstw_read_lu_flag(tw, QUERY_FLAG_IDN_TW_EN, &tw->tw_enable)) + goto error_put; + + if (ufstw_read_lu_flag(tw, QUERY_FLAG_IDN_TW_BUF_FLUSH_EN, + &tw->tw_flush_enable)) + goto error_put; + + if (ufstw_read_lu_flag(tw, QUERY_FLAG_IDN_TW_FLUSH_DURING_HIBERN, + &tw->tw_flush_during_hibern_enter)) + goto error_put; + + /* Attribute */ + if (ufstw_read_lu_attr(tw, QUERY_ATTR_IDN_TW_FLUSH_STATUS, + &tw->tw_flush_status)) + goto error_put; + + if (ufstw_read_lu_attr(tw, QUERY_ATTR_IDN_TW_BUF_SIZE, + &tw->tw_available_buffer_size)) + goto error_put; + + ufstw_read_lu_attr(tw, QUERY_ATTR_IDN_TW_BUF_LIFETIME_EST, + &tw->tw_lifetime_est); +error_put: + ufstw_lu_put(tw); +} + +static void ufstw_lu_init(struct ufsf_feature *ufsf, unsigned int lun) +{ + struct ufstw_lu *tw = 
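+	/*
+	 * Per-LU init sketch: take a kref, set up locks and workers, create
+	 * the sysfs nodes, read back the flags/attributes, then pick
+	 * TW_MODE_MANUAL or disable mode from the lifetime estimate.
+	 */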
ufsf->tw_lup[lun]; + + ufstw_lu_get(tw); + tw->ufsf = ufsf; + + mutex_init(&tw->flush_lock); + mutex_init(&tw->mode_lock); + spin_lock_init(&tw->lifetime_lock); + + tw->stat_write_sec = 0; + atomic_set(&tw->active_cnt, 0); + + tw->flush_th_min = UFSTW_FLUSH_WORKER_TH_MIN; + tw->flush_th_max = UFSTW_FLUSH_WORKER_TH_MAX; + + /* for Debug */ + ufstw_init_lu_jobs(tw); + + if (ufstw_create_sysfs(ufsf, tw)) + INIT_INFO("sysfs init fail. but tw could run normally."); + + /* Read Flag, Attribute */ + ufstw_lu_update(tw); + +#if defined(CONFIG_UFSTW_IGNORE_GUARANTEE_BIT) + if (tw->tw_lifetime_est & MASK_UFSTW_LIFETIME_NOT_GUARANTEE) { + WARNING_MSG("warn: lun %d - dTurboWriteBufferLifeTimeEst[31] == 1", lun); + WARNING_MSG("Device not guarantee the lifetime of Turbo Write Buffer"); + WARNING_MSG("but we will ignore them for PoC"); + } +#else + if (tw->tw_lifetime_est & MASK_UFSTW_LIFETIME_NOT_GUARANTEE) { + WARNING_MSG("warn: lun %d - dTurboWriteBufferLifeTimeEst[31] == 1", lun); + WARNING_MSG("Device not guarantee the lifetime of Turbo Write Buffer"); + WARNING_MSG("So tw_mode change to disable_mode"); + goto tw_disable; + } +#endif + if ((tw->tw_lifetime_est & ~MASK_UFSTW_LIFETIME_NOT_GUARANTEE) + < UFSTW_MAX_LIFETIME_VALUE) { + atomic_set(&tw->tw_mode, TW_MODE_MANUAL); + goto out; + } else + goto tw_disable; + +tw_disable: + ufstw_switch_disable_mode(tw); +out: + ufstw_print_lu_flag_attr(tw); + ufstw_lu_put(tw); +} + +void ufstw_init(struct ufsf_feature *ufsf) +{ + unsigned int lun; + unsigned int tw_enabled_lun = 0; +#ifdef UFS_MTK_TW_AWAYS_ON + int tw_lun = 0; + struct ufstw_lu *tw; +#endif + + kref_init(&ufsf->tw_kref); + +#ifdef UFS_MTK_TW_AWAYS_ON + /* MTK: TW only enable in LU2, skip scan */ + lun = 2; + if (ufsf->tw_lup[lun]) { + ufstw_lu_init(ufsf, lun); + tw_lun = lun; + INIT_INFO("UFSTW LU %d working", lun); + tw_enabled_lun++; + } +#else + seq_scan_lu(lun) { + if (!ufsf->tw_lup[lun]) + continue; + + if (!ufsf->sdev_ufs_lu[lun]) { + WARNING_MSG("warn: lun %d don't have scsi_device", lun); + continue; + } + + ufstw_lu_init(ufsf, lun); + + ufsf->sdev_ufs_lu[lun]->request_queue->turbo_write_dev = true; + + INIT_INFO("UFSTW LU %d working", lun); + tw_enabled_lun++; + } +#endif + + if (tw_enabled_lun == 0) { + ERR_MSG("ERROR: tw_enabled_lun == 0. So TW disabled."); + goto out_free_mem; + } + + if (tw_enabled_lun > ufsf->tw_dev_info.tw_number_lu) { + ERR_MSG("ERROR: dev_info(bDeviceMaxTurboWriteLUs) mismatch. So TW disabled."); + goto out; + } + +#ifdef UFS_MTK_TW_AWAYS_ON + /* MTK: Disable TW if run out lifetime */ + tw = ufsf->tw_lup[tw_lun]; + if (atomic_read(&tw->tw_mode) == TW_MODE_DISABLED) + goto out; +#endif + /* + * Initialize Device Level... 
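+	 * (device-level setup: disable the exception event, register the
+	 * reset work, zero the debug counters and mark the feature
+	 * TW_PRESENT; on MTK, manual mode then sets fTurboWriteEn and
+	 * flush-during-hibern on the always-on LU)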
+	 */
+	ufstw_disable_ee(ufsf);
+	ufstw_init_dev_jobs(ufsf);
+	atomic64_set(&ufsf->tw_debug_ee_count, 0);
+	ufsf->tw_debug = false;
+	atomic_set(&ufsf->tw_state, TW_PRESENT);
+
+#ifdef UFS_MTK_TW_AWAYS_ON
+	/* MTK: Enable TW and H8 flush in Manual mode */
+	if ((atomic_read(&tw->tw_mode) == TW_MODE_MANUAL) && (tw_lun != 0)) {
+		if (ufstw_set_lu_flag(tw, QUERY_FLAG_IDN_TW_EN, &tw->tw_enable))
+			goto out;
+		if (ufstw_set_lu_flag(tw, QUERY_FLAG_IDN_TW_FLUSH_DURING_HIBERN,
+				      &tw->tw_flush_during_hibern_enter))
+			goto out;
+	}
+#endif
+	return;
+out_free_mem:
+	seq_scan_lu(lun) {
+		kfree(ufsf->tw_lup[lun]);
+		ufsf->tw_lup[lun] = NULL;
+	}
+out:
+	/* MTK: not freed because we still need to query */
+	ufsf->tw_dev_info.tw_device = false;
+	atomic_set(&ufsf->tw_state, TW_NOT_SUPPORTED);
+}
+
+static inline int ufstw_probe_lun_done(struct ufsf_feature *ufsf)
+{
+	return (ufsf->num_lu == ufsf->slave_conf_cnt);
+}
+
+void ufstw_init_work_fn(struct work_struct *work)
+{
+	struct ufsf_feature *ufsf;
+#ifndef UFS_MTK_TW_AWAYS_ON
+	int ret;
+#endif
+
+	ufsf = container_of(work, struct ufsf_feature, tw_init_work);
+
+#ifndef UFS_MTK_TW_AWAYS_ON
+	/* MTK: TW is only enabled on LU2, skip the wait */
+	init_waitqueue_head(&ufsf->tw_wait);
+
+	ret = wait_event_timeout(ufsf->tw_wait,
+				 ufstw_probe_lun_done(ufsf),
+				 msecs_to_jiffies(10000));
+	if (ret == 0) {
+		ERR_MSG("Probing LU is not fully completed.");
+		return;
+	}
+#endif
+
+	INIT_INFO("TW_INIT_START");
+
+	ufstw_init(ufsf);
+}
+
+void ufstw_suspend(struct ufsf_feature *ufsf)
+{
+	struct ufstw_lu *tw;
+	int lun;
+#if 0
+	int ret;
+
+/*
+ * MTK: Do not flush the work here, else a deadlock may happen:
+ * flush_work -> ufstw_reset_work_fn -> ufstw_reset -> ufstw_set_lu_flag ->
+ * pm_runtime_put_sync -> ufstw_suspend -> flush_work
+ * Besides, the reset work only sets TW flags, so it can run later, after suspend.
+ */ + ret = flush_work(&ufsf->tw_reset_work); + TW_DEBUG(ufsf, "flush_work(tw_reset_work) = %d", ret); +#endif + + seq_scan_lu(lun) { + tw = ufsf->tw_lup[lun]; + if (!tw) + continue; + + ufstw_lu_get(tw); + ufstw_cancel_lu_jobs(tw); + ufstw_lu_put(tw); + } +} + +void ufstw_resume(struct ufsf_feature *ufsf) +{ + struct ufstw_lu *tw; + int lun; + + seq_scan_lu(lun) { + tw = ufsf->tw_lup[lun]; + if (!tw) + continue; + + ufstw_lu_get(tw); + TW_DEBUG(ufsf, "ufstw_lu %d resume", lun); + if (tw->next_q) { + TW_DEBUG(ufsf, + "ufstw_lu %d flush_worker reschedule...", lun); + if (schedule_delayed_work(&tw->tw_flush_work, + (tw->next_q - jiffies))) + pm_runtime_get_noresume(ufsf->hba->dev); + } + ufstw_lu_put(tw); + } +} + +void ufstw_release(struct kref *kref) +{ + struct ufsf_feature *ufsf; + struct ufstw_lu *tw; + int lun; + int ret; + + dump_stack(); + ufsf = container_of(kref, struct ufsf_feature, tw_kref); + RELEASE_INFO("start release"); + + RELEASE_INFO("tw_state : %d -> %d", atomic_read(&ufsf->tw_state), + TW_FAILED); + atomic_set(&ufsf->tw_state, TW_FAILED); + + RELEASE_INFO("kref count %d", + atomic_read(&ufsf->tw_kref.refcount.refs)); + + ret = cancel_work_sync(&ufsf->tw_reset_work); + RELEASE_INFO("cancel_work_sync(tw_reset_work) = %d", ret); + + seq_scan_lu(lun) { + tw = ufsf->tw_lup[lun]; + + RELEASE_INFO("ufstw_lu%d %p", lun, tw); + + ufsf->tw_lup[lun] = NULL; + + if (!tw) + continue; + + ufstw_cancel_lu_jobs(tw); + tw->next_q = 0; + + ret = kobject_uevent(&tw->kobj, KOBJ_REMOVE); + RELEASE_INFO("kobject error %d", ret); + + kobject_del(&tw->kobj); + + kfree(tw); + } + + RELEASE_INFO("end release"); +} + +static void ufstw_reset(struct ufsf_feature *ufsf) +{ + struct ufstw_lu *tw; + int lun; + int ret; + + if (atomic_read(&ufsf->tw_state) == TW_FAILED) { + ERR_MSG("tw_state == TW_FAILED(%d)", + atomic_read(&ufsf->tw_state)); + return; + } + + seq_scan_lu(lun) { + tw = ufsf->tw_lup[lun]; + TW_DEBUG(ufsf, "reset tw[%d]=%p", lun, tw); + if (!tw) + continue; + + INFO_MSG("ufstw_lu%d reset", lun); + + ufstw_lu_get(tw); + ufstw_cancel_lu_jobs(tw); + + if (atomic_read(&tw->tw_mode) == TW_MODE_MANUAL && + tw->tw_enable) { + ret = ufstw_set_lu_flag(tw, QUERY_FLAG_IDN_TW_EN, + &tw->tw_enable); + if (ret) + tw->tw_enable = false; + } + + if (tw->tw_flush_enable) { + ret = ufstw_set_lu_flag(tw, + QUERY_FLAG_IDN_TW_BUF_FLUSH_EN, + &tw->tw_flush_enable); + if (ret) + tw->tw_flush_enable = false; + } + + if (tw->tw_flush_during_hibern_enter) { + ret = ufstw_set_lu_flag(tw, + QUERY_FLAG_IDN_TW_FLUSH_DURING_HIBERN, + &tw->tw_flush_during_hibern_enter); + if (ret) + tw->tw_flush_during_hibern_enter = false; + } + + if (tw->next_q) { + TW_DEBUG(ufsf, + "ufstw_lu %d flush_worker reschedule...", lun); + if (schedule_delayed_work(&tw->tw_flush_work, + (tw->next_q - jiffies))) + pm_runtime_get_noresume(ufsf->hba->dev); + } + ufstw_lu_put(tw); + } + + if (ufsf->tw_ee_mode) + ufstw_auto_ee(ufsf); + + atomic_set(&ufsf->tw_state, TW_PRESENT); + INFO_MSG("reset complete.. tw_state %d", atomic_read(&ufsf->tw_state)); +} + +static inline int ufstw_wait_kref_init_value(struct ufsf_feature *ufsf) +{ + return (atomic_read(&ufsf->tw_kref.refcount.refs) == 1); +} + +void ufstw_reset_work_fn(struct work_struct *work) +{ + struct ufsf_feature *ufsf; + int ret; + + ufsf = container_of(work, struct ufsf_feature, tw_reset_work); + + /* + * If down eh_sem and runtime resume fail, it will block eh_work and + * cause deadlock. + * 1. eh_work wait eh_sem + * 2. tw_reset_work wait runtime resume + * 3. 
rumtime resume wait eh_work do link recovery + * Here make sure runtime resume success. + */ + pm_runtime_get_sync(ufsf->hba->dev); + + down(&ufsf->hba->eh_sem); + TW_DEBUG(ufsf, "reset tw_kref.refcount=%d", + atomic_read(&ufsf->tw_kref.refcount.refs)); + + init_waitqueue_head(&ufsf->tw_wait); + + ret = wait_event_timeout(ufsf->tw_wait, + ufstw_wait_kref_init_value(ufsf), + msecs_to_jiffies(15000)); + if (ret == 0) { + ERR_MSG("UFSTW kref is not init_value(=1). kref count = %d ret = %d. So, TW_RESET_FAIL", + atomic_read(&ufsf->tw_kref.refcount.refs), ret); + up(&ufsf->hba->eh_sem); + + pm_runtime_put_sync(ufsf->hba->dev); + return; + } + + INIT_INFO("TW_RESET_START"); + + ufstw_reset(ufsf); + up(&ufsf->hba->eh_sem); + + pm_runtime_put_sync(ufsf->hba->dev); +} + +/* protected by mutex mode_lock */ +static void __active_turbo_write(struct ufstw_lu *tw, int do_work) +{ + if (atomic_read(&tw->tw_mode) != TW_MODE_FS) + return; + + blk_add_trace_msg(tw->ufsf->sdev_ufs_lu[tw->lun]->request_queue, + "%s:%d do_work %d active_cnt %d", + __func__, __LINE__, do_work, + atomic_read(&tw->active_cnt)); + + if (do_work == TW_FLAG_ENABLE_SET && !tw->tw_enable) + ufstw_set_lu_flag(tw, QUERY_FLAG_IDN_TW_EN, &tw->tw_enable); + else if (do_work == TW_FLAG_ENABLE_CLEAR && tw->tw_enable) + ufstw_clear_lu_flag(tw, QUERY_FLAG_IDN_TW_EN, &tw->tw_enable); +} + +static void ufstw_active_turbo_write(struct request_queue *q, bool on) +{ + struct scsi_device *sdev = q->queuedata; + struct Scsi_Host *shost; + struct ufs_hba *hba; + struct ufstw_lu *tw; + int do_work = TW_FLAG_ENABLE_NONE; + u64 lun; + + lun = sdev->lun; + if (lun >= UFS_UPIU_MAX_GENERAL_LUN) + return; + + shost = sdev->host; + hba = shost_priv(shost); + tw = hba->ufsf.tw_lup[lun]; + if (!tw) + return; + + ufstw_lu_get(tw); + if (on) { + if (atomic_inc_return(&tw->active_cnt) == 1) + do_work = TW_FLAG_ENABLE_SET; + } else { + if (atomic_dec_return(&tw->active_cnt) == 0) + do_work = TW_FLAG_ENABLE_CLEAR; + } + + blk_add_trace_msg(q, "%s:%d on %d active cnt %d do_work %d state %d mode %d", + __func__, __LINE__, on, atomic_read(&tw->active_cnt), + do_work, atomic_read(&tw->ufsf->tw_state), + atomic_read(&tw->tw_mode)); + + if (!do_work) + goto out; + + if (atomic_read(&tw->ufsf->tw_state) != TW_PRESENT) { + WARNING_MSG("tw_state %d.. 
cannot enable turbo_write..", + atomic_read(&tw->ufsf->tw_state)); + goto out; + } + + if (atomic_read(&tw->tw_mode) != TW_MODE_FS) + goto out; + + mutex_lock(&tw->mode_lock); + __active_turbo_write(tw, do_work); + mutex_unlock(&tw->mode_lock); +out: + ufstw_lu_put(tw); +} + +void bdev_set_turbo_write(struct block_device *bdev) +{ + struct request_queue *q = bdev->bd_queue; + + blk_add_trace_msg(q, "%s:%d turbo_write_dev %d\n", + __func__, __LINE__, q->turbo_write_dev); + + if (q->turbo_write_dev) + ufstw_active_turbo_write(bdev->bd_queue, true); +} + +void bdev_clear_turbo_write(struct block_device *bdev) +{ + struct request_queue *q = bdev->bd_queue; + + blk_add_trace_msg(q, "%s:%d turbo_write_dev %d\n", + __func__, __LINE__, q->turbo_write_dev); + + if (q->turbo_write_dev) + ufstw_active_turbo_write(bdev->bd_queue, false); +} + +/* sysfs function */ +static ssize_t ufstw_sysfs_show_ee_mode(struct ufstw_lu *tw, char *buf) +{ + SYSFS_INFO("TW_ee_mode %d", tw->ufsf->tw_ee_mode); + + return snprintf(buf, PAGE_SIZE, "%d", tw->ufsf->tw_ee_mode); +} + +static ssize_t ufstw_sysfs_store_ee_mode(struct ufstw_lu *tw, + const char *buf, size_t count) +{ + unsigned long val = 0; + + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (atomic_read(&tw->ufsf->tw_state) != TW_PRESENT) { + SYSFS_INFO("ee_mode cannot change, because current state is not TW_PRESENT (%d)..", + atomic_read(&tw->ufsf->tw_state)); + return -EINVAL; + } + + if (val >= TW_EE_MODE_NUM) { + SYSFS_INFO("wrong input.. your input %lu", val); + return -EINVAL; + } + + if (val) + ufstw_auto_ee(tw->ufsf); + else + ufstw_disable_ee(tw->ufsf); + + SYSFS_INFO("TW_ee_mode %d", tw->ufsf->tw_ee_mode); + + return count; +} + +static ssize_t ufstw_sysfs_show_flush_during_hibern_enter(struct ufstw_lu *tw, + char *buf) +{ + int ret; + + mutex_lock(&tw->flush_lock); + if (ufstw_read_lu_flag(tw, QUERY_FLAG_IDN_TW_FLUSH_DURING_HIBERN, + &tw->tw_flush_during_hibern_enter)) { + mutex_unlock(&tw->flush_lock); + return -EINVAL; + } + + SYSFS_INFO("TW_flush_during_hibern_enter %d", + tw->tw_flush_during_hibern_enter); + ret = snprintf(buf, PAGE_SIZE, "%d", tw->tw_flush_during_hibern_enter); + + mutex_unlock(&tw->flush_lock); + return ret; +} + +static ssize_t ufstw_sysfs_store_flush_during_hibern_enter(struct ufstw_lu *tw, + const char *buf, + size_t count) +{ + unsigned long val = 0; + + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (atomic_read(&tw->ufsf->tw_state) != TW_PRESENT) { + SYSFS_INFO("tw_mode cannot change, because current state is not TW_PRESENT (%d)..", + atomic_read(&tw->ufsf->tw_state)); + return -EINVAL; + } + + mutex_lock(&tw->flush_lock); + if (tw->ufsf->tw_ee_mode == TW_EE_MODE_AUTO) { + SYSFS_INFO("flush_during_hibern_enable cannot change on auto ee_mode"); + mutex_unlock(&tw->flush_lock); + return -EINVAL; + } + + if (val) { + if (ufstw_set_lu_flag(tw, + QUERY_FLAG_IDN_TW_FLUSH_DURING_HIBERN, + &tw->tw_flush_during_hibern_enter)) { + mutex_unlock(&tw->flush_lock); + return -EINVAL; + } + } else { + if (ufstw_clear_lu_flag(tw, + QUERY_FLAG_IDN_TW_FLUSH_DURING_HIBERN, + &tw->tw_flush_during_hibern_enter)) { + mutex_unlock(&tw->flush_lock); + return -EINVAL; + } + } + + SYSFS_INFO("TW_flush_during_hibern_enter %d", + tw->tw_flush_during_hibern_enter); + mutex_unlock(&tw->flush_lock); + + return count; +} + +static ssize_t ufstw_sysfs_show_flush_enable(struct ufstw_lu *tw, char *buf) +{ + int ret; + + if (ufstw_read_lu_flag(tw, QUERY_FLAG_IDN_TW_BUF_FLUSH_EN, + &tw->tw_flush_enable)) + return -EINVAL; + + 
SYSFS_INFO("TW_flush_enable %d", tw->tw_flush_enable); + + ret = snprintf(buf, PAGE_SIZE, "%d", tw->tw_flush_enable); + + return ret; +} + +static ssize_t ufstw_sysfs_store_flush_enable(struct ufstw_lu *tw, + const char *buf, size_t count) +{ + unsigned long val = 0; + + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (atomic_read(&tw->ufsf->tw_state) != TW_PRESENT) { + SYSFS_INFO("tw_mode cannot change, because current tw-state is not TW_PRESENT..(state:%d)..", + atomic_read(&tw->ufsf->tw_state)); + return -EINVAL; + } + + if (tw->ufsf->tw_ee_mode == TW_EE_MODE_AUTO) { + SYSFS_INFO("flush_enable cannot change on auto ee_mode"); + return -EINVAL; + } + + if (val) { + if (ufstw_set_lu_flag(tw, QUERY_FLAG_IDN_TW_BUF_FLUSH_EN, + &tw->tw_flush_enable)) + return -EINVAL; + } else { + if (ufstw_clear_lu_flag(tw, QUERY_FLAG_IDN_TW_BUF_FLUSH_EN, + &tw->tw_flush_enable)) + return -EINVAL; + } + + SYSFS_INFO("TW_flush_enable %d", tw->tw_flush_enable); + + return count; +} + +static ssize_t ufstw_sysfs_show_debug(struct ufstw_lu *tw, char *buf) +{ + SYSFS_INFO("debug %d", tw->ufsf->tw_debug); + + return snprintf(buf, PAGE_SIZE, "%d", tw->ufsf->tw_debug); +} + +static ssize_t ufstw_sysfs_store_debug(struct ufstw_lu *tw, const char *buf, + size_t count) +{ + unsigned long val = 0; + + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val) + tw->ufsf->tw_debug = true; + else + tw->ufsf->tw_debug = false; + + SYSFS_INFO("debug %d", tw->ufsf->tw_debug); + + return count; +} + +static ssize_t ufstw_sysfs_show_flush_th_min(struct ufstw_lu *tw, char *buf) +{ + SYSFS_INFO("flush_th_min%d", tw->flush_th_min); + + return snprintf(buf, PAGE_SIZE, "%d", tw->flush_th_min); +} + +static ssize_t ufstw_sysfs_store_flush_th_min(struct ufstw_lu *tw, + const char *buf, size_t count) +{ + unsigned long val = 0; + + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val < 0 || val > 10) { + SYSFS_INFO("input value is wrong.. your input %lu", val); + return -EINVAL; + } + + if (tw->flush_th_max <= val) { + SYSFS_INFO("input value could not be greater than flush_th_max.."); + SYSFS_INFO("your input %lu, flush_th_max %u", + val, tw->flush_th_max); + return -EINVAL; + } + + tw->flush_th_min = val; + SYSFS_INFO("flush_th_min %u", tw->flush_th_min); + + return count; +} + +static ssize_t ufstw_sysfs_show_flush_th_max(struct ufstw_lu *tw, char *buf) +{ + SYSFS_INFO("flush_th_max %d", tw->flush_th_max); + + return snprintf(buf, PAGE_SIZE, "%d", tw->flush_th_max); +} + +static ssize_t ufstw_sysfs_store_flush_th_max(struct ufstw_lu *tw, + const char *buf, size_t count) +{ + unsigned long val = 0; + + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val < 0 || val > 10) { + SYSFS_INFO("input value is wrong.. 
your input %lu", val); + return -EINVAL; + } + + if (tw->flush_th_min >= val) { + SYSFS_INFO("input value could not be less than flush_th_min.."); + SYSFS_INFO("your input %lu, flush_th_min %u", + val, tw->flush_th_min); + return -EINVAL; + } + + tw->flush_th_max = val; + SYSFS_INFO("flush_th_max %u", tw->flush_th_max); + + return count; +} + +static ssize_t ufstw_sysfs_show_version(struct ufstw_lu *tw, char *buf) +{ + SYSFS_INFO("TW version %.4X D/D version %.4X", + tw->ufsf->tw_dev_info.tw_ver, UFSTW_DD_VER); + + return snprintf(buf, PAGE_SIZE, "TW version %.4X DD version %.4X", + tw->ufsf->tw_dev_info.tw_ver, UFSTW_DD_VER); +} + +static ssize_t ufstw_sysfs_show_debug_active_cnt(struct ufstw_lu *tw, char *buf) +{ + SYSFS_INFO("debug active cnt %d", + atomic_read(&tw->active_cnt)); + + return snprintf(buf, PAGE_SIZE, "active_cnt %d", + atomic_read(&tw->active_cnt)); +} + +/* SYSFS DEFINE */ +#define define_sysfs_ro(_name) __ATTR(_name, 0444,\ + ufstw_sysfs_show_##_name, NULL), +#define define_sysfs_rw(_name) __ATTR(_name, 0644,\ + ufstw_sysfs_show_##_name, \ + ufstw_sysfs_store_##_name), + +#define define_sysfs_attr_r_function(_name, _IDN) \ +static ssize_t ufstw_sysfs_show_##_name(struct ufstw_lu *tw, char *buf) \ +{ \ + if (ufstw_read_lu_attr(tw, _IDN, &tw->tw_##_name))\ + return -EINVAL;\ + SYSFS_INFO("TW_"#_name" : %u (0x%X)", tw->tw_##_name, tw->tw_##_name); \ + return snprintf(buf, PAGE_SIZE, "%u", tw->tw_##_name); \ +} + +/* SYSFS FUNCTION */ +define_sysfs_attr_r_function(flush_status, QUERY_ATTR_IDN_TW_FLUSH_STATUS) +define_sysfs_attr_r_function(available_buffer_size, QUERY_ATTR_IDN_TW_BUF_SIZE) +define_sysfs_attr_r_function(current_tw_buffer_size, QUERY_ATTR_CUR_TW_BUF_SIZE) +define_sysfs_attr_r_function(lifetime_est, QUERY_ATTR_IDN_TW_BUF_LIFETIME_EST) + +static ssize_t ufstw_sysfs_show_tw_enable(struct ufstw_lu *tw, char *buf) +{ + if (ufstw_read_lu_flag(tw, QUERY_FLAG_IDN_TW_EN, &tw->tw_enable)) + return -EINVAL; + + SYSFS_INFO("TW_enable: %u (0x%X)", tw->tw_enable, tw->tw_enable); + return snprintf(buf, PAGE_SIZE, "%u", tw->tw_enable); +} + +static ssize_t ufstw_sysfs_store_tw_enable(struct ufstw_lu *tw, const char *buf, + size_t count) +{ + unsigned long val = 0; + ssize_t ret = count; + + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val > 2) { + SYSFS_INFO("wrong mode number.. your input %lu", val); + return -EINVAL; + } + + mutex_lock(&tw->mode_lock); + if (atomic_read(&tw->tw_mode) == TW_MODE_DISABLED) { + SYSFS_INFO("all turbo write life time is exhausted.."); + SYSFS_INFO("you could not change this value.."); + goto out; + } + + if (atomic_read(&tw->tw_mode) != TW_MODE_MANUAL) { + SYSFS_INFO("cannot set tw_enable.. current %s (%d) mode..", + atomic_read(&tw->tw_mode) == TW_MODE_FS ? + "TW_MODE_FS" : "UNKNOWN", + atomic_read(&tw->tw_mode)); + ret = -EINVAL; + goto out; + } + + if (val) { + if (ufstw_set_lu_flag(tw, QUERY_FLAG_IDN_TW_EN, + &tw->tw_enable)) { + ret = -EINVAL; + goto out; + } + } else { + if (ufstw_clear_lu_flag(tw, QUERY_FLAG_IDN_TW_EN, + &tw->tw_enable)) { + ret = -EINVAL; + goto out; + } + } +out: + mutex_unlock(&tw->mode_lock); + SYSFS_INFO("TW_enable : %u (0x%X)", tw->tw_enable, tw->tw_enable); + return ret; +} + +static ssize_t ufstw_sysfs_show_tw_mode(struct ufstw_lu *tw, char *buf) +{ + int tw_mode = atomic_read(&tw->tw_mode); + + SYSFS_INFO("TW_mode %s %d", + tw_mode == TW_MODE_MANUAL ? "manual" : + tw_mode == TW_MODE_FS ? 
"fs" : "unknown", tw_mode); + return snprintf(buf, PAGE_SIZE, "%d", tw_mode); +} + +static ssize_t ufstw_sysfs_store_tw_mode(struct ufstw_lu *tw, const char *buf, + size_t count) +{ + int tw_mode = 0; + + if (kstrtouint(buf, 0, &tw_mode)) + return -EINVAL; + + if (atomic_read(&tw->ufsf->tw_state) != TW_PRESENT) { + SYSFS_INFO("tw_mode cannot change, because current state is not TW_PRESENT (%d)..", + atomic_read(&tw->ufsf->tw_state)); + return -EINVAL; + } + + if (tw_mode >= TW_MODE_NUM || + tw_mode == TW_MODE_DISABLED) { + SYSFS_INFO("wrong mode number.. your input %d", tw_mode); + return -EINVAL; + } + + mutex_lock(&tw->mode_lock); + if (atomic_read(&tw->tw_mode) == TW_MODE_DISABLED) { + SYSFS_INFO("all turbo write life time is exhausted.."); + SYSFS_INFO("you could not change this value.."); + count = -EINVAL; + goto out; + } + + if (tw_mode == atomic_read(&tw->tw_mode)) + goto out; + + count = (ssize_t) ufstw_switch_mode(tw, tw_mode); +out: + mutex_unlock(&tw->mode_lock); + SYSFS_INFO("TW_mode: %d", atomic_read(&tw->tw_mode)); + return count; +} + +static struct ufstw_sysfs_entry ufstw_sysfs_entries[] = { + /* tw mode select */ + define_sysfs_rw(tw_mode) + + /* Flag */ + define_sysfs_rw(tw_enable) + define_sysfs_rw(flush_enable) + define_sysfs_rw(flush_during_hibern_enter) + + /* Attribute */ + define_sysfs_rw(ee_mode) + define_sysfs_ro(flush_status) + define_sysfs_ro(available_buffer_size) + define_sysfs_ro(current_tw_buffer_size) + define_sysfs_ro(lifetime_est) + + /* debug */ + define_sysfs_rw(debug) + define_sysfs_ro(debug_active_cnt) + + /* support */ + define_sysfs_rw(flush_th_max) + define_sysfs_rw(flush_th_min) + + /* device level */ + define_sysfs_ro(version) + __ATTR_NULL +}; + +static ssize_t ufstw_attr_show(struct kobject *kobj, struct attribute *attr, + char *page) +{ + struct ufstw_sysfs_entry *entry; + struct ufstw_lu *tw; + ssize_t error; + + entry = container_of(attr, struct ufstw_sysfs_entry, attr); + tw = container_of(kobj, struct ufstw_lu, kobj); + if (!entry->show) + return -EIO; + + ufstw_lu_get(tw); + mutex_lock(&tw->sysfs_lock); + error = entry->show(tw, page); + mutex_unlock(&tw->sysfs_lock); + ufstw_lu_put(tw); + return error; +} + +static ssize_t ufstw_attr_store(struct kobject *kobj, struct attribute *attr, + const char *page, size_t length) +{ + struct ufstw_sysfs_entry *entry; + struct ufstw_lu *tw; + ssize_t error; + + entry = container_of(attr, struct ufstw_sysfs_entry, attr); + tw = container_of(kobj, struct ufstw_lu, kobj); + + if (!entry->store) + return -EIO; + + ufstw_lu_get(tw); + mutex_lock(&tw->sysfs_lock); + error = entry->store(tw, page, length); + mutex_unlock(&tw->sysfs_lock); + ufstw_lu_put(tw); + return error; +} + +static const struct sysfs_ops ufstw_sysfs_ops = { + .show = ufstw_attr_show, + .store = ufstw_attr_store, +}; + +static struct kobj_type ufstw_ktype = { + .sysfs_ops = &ufstw_sysfs_ops, + .release = NULL, +}; + +static int ufstw_create_sysfs(struct ufsf_feature *ufsf, struct ufstw_lu *tw) +{ + struct device *dev = ufsf->hba->dev; + struct ufstw_sysfs_entry *entry; + int err; + + ufstw_lu_get(tw); + tw->sysfs_entries = ufstw_sysfs_entries; + + kobject_init(&tw->kobj, &ufstw_ktype); + mutex_init(&tw->sysfs_lock); + + INIT_INFO("ufstw creates sysfs ufstw_lu(%d) %p dev->kobj %p", + tw->lun, &tw->kobj, &dev->kobj); + + err = kobject_add(&tw->kobj, kobject_get(&dev->kobj), + "ufstw_lu%d", tw->lun); + if (!err) { + for (entry = tw->sysfs_entries; entry->attr.name != NULL; + entry++) { + INIT_INFO("ufstw_lu%d sysfs attr creates: %s", + 
+
+static int ufstw_create_sysfs(struct ufsf_feature *ufsf, struct ufstw_lu *tw)
+{
+	struct device *dev = ufsf->hba->dev;
+	struct ufstw_sysfs_entry *entry;
+	int err;
+
+	ufstw_lu_get(tw);
+	tw->sysfs_entries = ufstw_sysfs_entries;
+
+	kobject_init(&tw->kobj, &ufstw_ktype);
+	mutex_init(&tw->sysfs_lock);
+
+	INIT_INFO("ufstw creates sysfs ufstw_lu(%d) %p dev->kobj %p",
+		  tw->lun, &tw->kobj, &dev->kobj);
+
+	err = kobject_add(&tw->kobj, kobject_get(&dev->kobj),
+			  "ufstw_lu%d", tw->lun);
+	if (!err) {
+		for (entry = tw->sysfs_entries; entry->attr.name != NULL;
+		     entry++) {
+			INIT_INFO("ufstw_lu%d sysfs attr creates: %s",
+				  tw->lun, entry->attr.name);
+			if (sysfs_create_file(&tw->kobj, &entry->attr))
+				break;
+		}
+		INIT_INFO("ufstw_lu%d sysfs adds uevent", tw->lun);
+		kobject_uevent(&tw->kobj, KOBJ_ADD);
+	}
+	ufstw_lu_put(tw);
+	return err;
+}
diff --git a/drivers/scsi/ufs/ufstw.h b/drivers/scsi/ufs/ufstw.h
new file mode 100644
index 000000000000..3a6e852c414a
--- /dev/null
+++ b/drivers/scsi/ufs/ufstw.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017-2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef _UFSTW_H_
+#define _UFSTW_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#include "../../../block/blk.h"
+
+#define UFSTW_VER				0x0101
+#define UFSTW_DD_VER				0x0103
+
+#define UFSTW_FLUSH_CHECK_PERIOD_MS		1000
+#define UFSTW_FLUSH_WORKER_TH_MIN		3
+#define UFSTW_FLUSH_WORKER_TH_MAX		8
+#define UFSTW_LIFETIME_SECT			2097152 /* 1GiB in 512B sectors */
+#define UFSTW_MAX_LIFETIME_VALUE		0x0B
+/* TW 1.0.1[31], TW 1.1.0[7] */
+#define MASK_UFSTW_LIFETIME_NOT_GUARANTEE	0x80000080
+
+/*
+ * UFSTW DEBUG
+ */
+#define TW_DEBUG(ufsf, msg, args...)					\
+	do { if (ufsf->tw_debug)					\
+		printk(KERN_ERR "%s:%d " msg "\n",			\
+		       __func__, __LINE__, ##args);			\
+	} while (0)
+
+enum {
+	FLUSH_IDLE = 0,
+	FLUSH_RUN,
+	FLUSH_COMPLETE,
+	FLUSH_FAIL,
+	FLUSH_NUM_OF_STATE,
+};
+
+enum UFSTW_STATE {
+	TW_NOT_SUPPORTED = -1,
+	TW_NEED_INIT = 0,
+	TW_PRESENT = 1,
+	TW_FAILED = -2,
+	TW_RESET = -3,
+};
+
+enum {
+	TW_MODE_DISABLED,
+	TW_MODE_MANUAL,
+	TW_MODE_FS,
+	TW_MODE_NUM
+};
+
+enum {
+	TW_EE_MODE_DISABLE,
+	TW_EE_MODE_AUTO,
+	TW_EE_MODE_NUM
+};
+
+enum {
+	TW_FLAG_ENABLE_NONE = 0,
+	TW_FLAG_ENABLE_CLEAR = 1,
+	TW_FLAG_ENABLE_SET = 2,
+};
+
+struct ufstw_dev_info {
+	bool tw_device;
+
+	/* from Device Descriptor */
+	u16 tw_ver;
+	u8 tw_buf_no_reduct;
+	u8 tw_buf_type;
+
+	/* from Geometry Descriptor */
+	u8 tw_number_lu;
+};
+
+struct ufstw_lu {
+	struct ufsf_feature *ufsf;
+
+	int lun;
+
+	/* Flags */
+	bool tw_flush_enable;
+	bool tw_flush_during_hibern_enter;
+	struct mutex flush_lock;
+
+	/* lifetime estimate */
+	unsigned int tw_lifetime_est;
+	spinlock_t lifetime_lock;
+	u32 stat_write_sec;
+	struct work_struct tw_lifetime_work;
+
+	/* Attributes */
+	unsigned int tw_flush_status;
+	unsigned int tw_available_buffer_size;
+	unsigned int tw_current_tw_buffer_size;
+
+	/* mode manual/fs */
+	atomic_t tw_mode;
+	bool tw_enable;
+	atomic_t active_cnt;
+	struct mutex mode_lock;
+
+	/* Worker */
+	struct delayed_work tw_flush_work;
+	struct delayed_work tw_flush_h8_work;
+	unsigned long next_q;
+	unsigned int flush_th_max;
+	unsigned int flush_th_min;
+
+	/* for sysfs */
+	struct kobject kobj;
+	struct mutex sysfs_lock;
+	struct ufstw_sysfs_entry *sysfs_entries;
+};
+
+struct ufstw_sysfs_entry {
+	struct attribute attr;
+	ssize_t (*show)(struct ufstw_lu *tw, char *buf);
+	ssize_t (*store)(struct ufstw_lu *tw, const char *buf, size_t count);
+};
+
+struct ufshcd_lrb;
+
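+/*
+ * Entry points below are called from the ufsfeature glue (ufsfeature.c):
+ * descriptor parsing at probe time, I/O-path command prep
+ * (ufstw_prep_fn), exception-event/error handling and reset work,
+ * and the suspend/resume hooks. Most of them operate on the shared
+ * struct ufsf_feature context rather than on a raw hba pointer.
+ */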
+void ufstw_get_dev_info(struct ufstw_dev_info *tw_dev_info, u8 *desc_buf);
+void ufstw_get_geo_info(struct ufstw_dev_info *tw_dev_info, u8 *geo_buf);
+int ufstw_get_lu_info(struct ufsf_feature *ufsf, unsigned int lun, u8 *lu_buf);
+void ufstw_init(struct ufsf_feature *ufsf);
+void ufstw_prep_fn(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp);
+void ufstw_init_work_fn(struct work_struct *work);
+void ufstw_ee_handler(struct ufsf_feature *ufsf);
+void ufstw_error_handler(struct ufsf_feature *ufsf);
+void ufstw_reset_work_fn(struct work_struct *work);
+void ufstw_suspend(struct ufsf_feature *ufsf);
+void ufstw_resume(struct ufsf_feature *ufsf);
+void ufstw_release(struct kref *kref);
+bool ufstw_need_flush(struct ufsf_feature *ufsf);
+
+#endif /* End of Header */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b587a3bf1966..7fac196c55f5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -676,6 +676,10 @@ struct request_queue {
 #define BLK_MAX_WRITE_HINTS	5
 	u64			write_hints[BLK_MAX_WRITE_HINTS];
 
+#ifdef CONFIG_SCSI_UFS_TW
+	/* set for queues backed by a TurboWrite-capable UFS LU */
+	bool			turbo_write_dev;
+#endif
 };
 
 #define QUEUE_FLAG_QUEUED	0	/* uses generic tag queueing */
diff --git a/include/scsi/ufs/ufs-mtk-ioctl.h b/include/scsi/ufs/ufs-mtk-ioctl.h
index e3f1176a256d..c0780d1ce11f 100644
--- a/include/scsi/ufs/ufs-mtk-ioctl.h
+++ b/include/scsi/ufs/ufs-mtk-ioctl.h
@@ -17,7 +17,7 @@
 #define UFS_IOCTL_GET_FW_VER	0x5390 /* Query production revision level */
 #define UFS_IOCTL_RPMB		0x5391 /* For RPMB access */
 
-#if defined(CONFIG_UFSFEATURE)
+#if defined(CONFIG_SCSI_UFS_FEATURE)
 #define UFSFEATURE_QUERY_OPCODE 0x5500
 #endif
 
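---
Usage note (illustrative, not part of the applied diff): with
CONFIG_SCSI_UFS_TW enabled, each TurboWrite-capable LU exposes the
attributes above under the ufstw_lu%d kobject created by
ufstw_create_sysfs(), parented to the UFS host device. The exact sysfs
path is platform dependent; a minimal userspace sketch, assuming a
hypothetical path for LU0:

	#include <fcntl.h>
	#include <unistd.h>

	static int set_tw_enable(int on)
	{
		/* hypothetical path; locate ufstw_lu0 under your UFS host */
		const char *p = "/sys/devices/platform/ufshci/ufstw_lu0/tw_enable";
		char c = on ? '1' : '0';
		int fd = open(p, O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		/* honoured only in TW_MODE_MANUAL while tw_state is TW_PRESENT */
		n = write(fd, &c, 1);
		close(fd);
		return n == 1 ? 0 : -1;
	}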