[ALPS06428859] scsi: ufs: Add HPB&TW support

Add HPB (Host Performance Booster) and TW (Turbo Write) feature support, along with the corresponding Kconfig entries.

MTK-Commit-Id: 2d5afd5fdf170e4314e14c6702451c7b10eebb3d

Signed-off-by: Jonathan Hsu <jonathan.hsu@mediatek.com>
Signed-off-by: Qilin Tan <qilin.tan@mediatek.com>
CR-Id: ALPS06428859
Feature: UFS(Universal Flash Storage)
Change-Id: I01de24bbeb62fd036ce24522b700aec008906043
This commit is contained in:
Jonathan Hsu
2021-12-15 13:46:22 +08:00
committed by ivan.tseng
parent 350cef631d
commit b3f7ec0995
17 changed files with 10864 additions and 18 deletions

View File

@@ -145,6 +145,30 @@ config SCSI_UFS_BSG
Select this if you need a bsg device node for your UFS controller.
If unsure, say N.
config SCSI_UFS_FEATURE
	bool "UFS feature activate"
	depends on SCSI_UFSHCD
	help
	  Enable the common UFS feature framework used by optional device
	  features such as HPB (Host Performance Booster) and TW (Turbo
	  Write).

config SCSI_UFS_HPB
	bool "UFSHPB"
	depends on SCSI_UFSHCD && SCSI_UFS_FEATURE
	help
	  Enable the UFS HPB (Host Performance Booster) feature.

config SCSI_UFS_TW
	bool "UFSTW"
	depends on SCSI_UFSHCD && SCSI_UFS_FEATURE
	help
	  Enable the UFS TW (Turbo Write) feature.

config SCSI_SKHPB
	bool "Activate HPB Host-aware Performance Booster"
	depends on SCSI_UFSHCD
	help
	  Activate or deactivate the SK hynix host-aware HPB driver.
config SCSI_UFS_CRYPTO
bool "UFS Crypto Engine Support"
depends on SCSI_UFSHCD && BLK_INLINE_ENCRYPTION

View File

@@ -29,6 +29,10 @@ obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
ufshcd-core-objs := ufshcd.o ufs-sysfs.o
ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
obj-$(CONFIG_SCSI_UFS_FEATURE) += ufsfeature.o
obj-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o
obj-$(CONFIG_SCSI_UFS_TW) += ufstw.o
obj-$(CONFIG_SCSI_SKHPB) += ufshpb_skh.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o

View File

@@ -104,9 +104,10 @@ void ufsdbg_print_info(char **buff, unsigned long *size, struct seq_file *m)
/* Device info */
SPREAD_PRINTF(buff, size, m,
"Device vendor=0x%X, model=%s\n",
"Device vendor=0x%X, model=%s, ufs version=0x%X\n",
hba->dev_info.wmanufacturerid,
hba->dev_info.model);
hba->dev_info.model,
hba->dev_info.wspecversion);
/* Error history */
ufshcd_print_all_evt_hist(hba, m, buff, size);

View File

@@ -713,7 +713,7 @@ static ssize_t _pname##_show(struct device *dev, \
struct scsi_device *sdev = to_scsi_device(dev); \
struct ufs_hba *hba = shost_priv(sdev->host); \
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \
if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) \
if (!ufs_is_valid_unit_desc_lun(lun)) \
return -EINVAL; \
return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
lun, _duname##_DESC_PARAM##_puname, buf, _size); \

View File

@@ -40,6 +40,7 @@
#include <linux/types.h>
#include <uapi/scsi/scsi_bsg_ufs.h>
#define MAX_CDB_SIZE 16
#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
#define QUERY_DESC_MAX_SIZE 255
#define QUERY_DESC_MIN_SIZE 2
@@ -63,6 +64,7 @@
#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F
#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
#define UFS_UPIU_WLUN_ID (1 << 7)
#define UFS_UPIU_MAX_GENERAL_LUN 8
/* Well known logical unit id in LUN field of UPIU */
enum {
@@ -140,6 +142,14 @@ enum flag_idn {
QUERY_FLAG_IDN_BUSY_RTC = 0x09,
QUERY_FLAG_IDN_RESERVED3 = 0x0A,
QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 0x0B,
#if defined(CONFIG_SCSI_UFS_TW)
QUERY_FLAG_IDN_TW_EN = 0x0E,
QUERY_FLAG_IDN_TW_BUF_FLUSH_EN = 0x0F,
QUERY_FLAG_IDN_TW_FLUSH_DURING_HIBERN = 0x10,
#endif
#if defined(CONFIG_SCSI_SKHPB)
QUERY_FLAG_IDN_HPB_RESET = 0x11, /* JEDEC version */
#endif
};
/* Attribute idn for Query requests */
@@ -168,6 +178,15 @@ enum attr_idn {
QUERY_ATTR_IDN_PSA_STATE = 0x15,
QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17,
#if defined(CONFIG_SCSI_UFS_TW)
QUERY_ATTR_IDN_TW_FLUSH_STATUS = 0x1C,
QUERY_ATTR_IDN_TW_BUF_SIZE = 0x1D,
QUERY_ATTR_IDN_TW_BUF_LIFETIME_EST = 0x1E,
QUERY_ATTR_CUR_TW_BUF_SIZE = 0x1F,
#endif
#if defined(CONFIG_SCSI_UFS_FEATURE)
QUERY_ATTR_IDN_SUP_VENDOR_OPTIONS = 0xFF,
#endif
};
/* Descriptor idn for Query requests */
@@ -219,6 +238,14 @@ enum unit_desc_param {
UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
#if defined(CONFIG_SCSI_UFS_HPB) || defined(CONFIG_SCSI_SKHPB)
UNIT_DESC_HPB_LU_MAX_ACTIVE_REGIONS = 0x23,
UNIT_DESC_HPB_LU_PIN_REGION_START_OFFSET = 0x25,
UNIT_DESC_HPB_LU_NUM_PIN_REGIONS = 0x27,
#endif
#if defined(CONFIG_SCSI_UFS_TW)
UNIT_DESC_TW_LU_MAX_BUF_SIZE = 0x29,
#endif
};
/* Device descriptor parameters offsets in bytes*/
@@ -258,6 +285,18 @@ enum device_desc_param {
DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25,
DEVICE_DESC_PARAM_PSA_TMT = 0x29,
DEVICE_DESC_PARAM_PRDCT_REV = 0x2A,
#if defined(CONFIG_SCSI_UFS_HPB) || defined(CONFIG_SCSI_SKHPB)
DEVICE_DESC_PARAM_HPB_VER = 0x40,
DEVICE_DESC_PARAM_HPB_CONTROL = 0x42, /* JEDEC version */
#endif
#if defined(CONFIG_SCSI_UFS_FEATURE)
DEVICE_DESC_PARAM_EX_FEAT_SUP = 0x4F,
#endif
#if defined(CONFIG_SCSI_UFS_TW)
DEVICE_DESC_PARAM_TW_RETURN_TO_USER = 0x53,
DEVICE_DESC_PARAM_TW_BUF_TYPE = 0x54,
DEVICE_DESC_PARAM_NUM_SHARED_WB_BUF_AU = 0x55, /* JEDEC version */
#endif
};
/* Interconnect descriptor parameters offsets in bytes*/
@@ -302,6 +341,20 @@ enum geometry_desc_param {
GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E,
GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42,
GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44,
#if defined(CONFIG_SCSI_UFS_HPB) || defined(CONFIG_SCSI_SKHPB)
GEOMETRY_DESC_HPB_REGION_SIZE = 0x48,
GEOMETRY_DESC_HPB_NUMBER_LU = 0x49,
GEOMETRY_DESC_HPB_SUBREGION_SIZE = 0x4A,
GEOMETRY_DESC_HPB_DEVICE_MAX_ACTIVE_REGIONS = 0x4B,
#endif
#if defined(CONFIG_SCSI_UFS_TW)
GEOMETRY_DESC_TW_MAX_SIZE = 0x4F,
GEOMETRY_DESC_TW_NUMBER_LU = 0x53,
GEOMETRY_DESC_TW_CAP_ADJ_FAC = 0x54,
GEOMETRY_DESC_TW_SUPPORT_USER_REDUCTION_TYPES = 0x55,
GEOMETRY_DESC_TW_SUPPORT_BUF_TYPE = 0x56,
GEOMETRY_DESC_TW_GROUP_NUM_CAP = 0x57,
#endif
};
/* Health descriptor parameters offsets in bytes*/
@@ -354,8 +407,20 @@ enum power_desc_param_offset {
enum {
MASK_EE_STATUS = 0xFFFF,
MASK_EE_URGENT_BKOPS = (1 << 2),
#if defined(CONFIG_SCSI_UFS_TW)
MASK_EE_TW = (1 << 5),
#endif
};
#if defined(CONFIG_SCSI_UFS_TW)
/* TW buffer type */
enum {
WB_LU_DEDICATED_BUFFER_TYPE = 0x0,
WB_SINGLE_SHARE_BUFFER_TYPE = 0x1
};
#endif
/* Background operation status */
enum bkops_status {
BKOPS_STATUS_NO_OP = 0x0,
@@ -429,6 +494,9 @@ enum {
MASK_RSP_EXCEPTION_EVENT = 0x10000,
MASK_TM_SERVICE_RESP = 0xFF,
MASK_TM_FUNC = 0xFF,
#if defined(CONFIG_SCSI_UFS_HPB) || defined(CONFIG_SCSI_SKHPB)
MASK_RSP_UPIU_HPB_UPDATE_ALERT = 0x20000, /* JEDEC version */
#endif
};
/* Task management service response */
@@ -575,15 +643,9 @@ struct ufs_dev_info {
* @lun: LU number to check
* @return: true if the lun has a matching unit descriptor, false otherwise
*/
static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info,
u8 lun)
static inline bool ufs_is_valid_unit_desc_lun(u8 lun)
{
if (!dev_info || !dev_info->max_lu_supported) {
pr_err("Max General LU supported by UFS isn't initialized\n");
return false;
}
return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
return (lun == UFS_UPIU_RPMB_WLUN || (lun < UFS_UPIU_MAX_GENERAL_LUN));
}
#endif /* End of Header */

View File

@@ -0,0 +1,700 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017-2018 Samsung Electronics Co., Ltd.
*/
#include "ufsfeature.h"
#include "ufshcd.h"
#include "ufs_quirks.h"
#if defined(CONFIG_SCSI_UFS_HPB)
#include "ufshpb.h"
#endif
#define QUERY_REQ_TIMEOUT 1500 /* msec */
/*
 * ufsf_init_query() - point the caller at the hba's device-command query
 * request/response slots, zero both, and fill in the request header.
 */
static inline void ufsf_init_query(struct ufs_hba *hba,
				   struct ufs_query_req **request,
				   struct ufs_query_res **response,
				   enum query_opcode opcode, u8 idn,
				   u8 index, u8 selector)
{
	struct ufs_query_req *req = &hba->dev_cmd.query.request;
	struct ufs_query_res *rsp = &hba->dev_cmd.query.response;

	memset(req, 0, sizeof(*req));
	memset(rsp, 0, sizeof(*rsp));

	req->upiu_req.opcode = opcode;
	req->upiu_req.idn = idn;
	req->upiu_req.index = index;
	req->upiu_req.selector = selector;

	*request = req;
	*response = rsp;
}
/*
* ufs feature common functions.
*/
/*
 * ufsf_query_flag() - issue a single query-flag UPIU (set/clear/toggle/read).
 *
 * Runs the query under hba->dev_cmd.lock while holding a clock-gating
 * reference.  Samsung/Micron devices use the vendor selector
 * (UFSFEATURE_SELECTOR); everything else uses selector 0.  On a
 * successful READ_FLAG, *flag_res receives the flag value.
 * Returns 0 on success, negative errno on failure.
 */
int ufsf_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res)
{
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
u8 selector;
int err;
BUG_ON(!hba);
/* keep controller clocks on for the duration of the device command */
ufshcd_hold(hba, false);
mutex_lock(&hba->dev_cmd.lock);
/* vendor-specific selector for Samsung and Micron parts */
if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SAMSUNG ||
hba->dev_info.wmanufacturerid == UFS_VENDOR_MICRON)
selector = UFSFEATURE_SELECTOR;
else
selector = 0;
/*
 * Init the query response and request parameters
 */
ufsf_init_query(hba, &request, &response, opcode, idn, index,
selector);
switch (opcode) {
case UPIU_QUERY_OPCODE_SET_FLAG:
case UPIU_QUERY_OPCODE_CLEAR_FLAG:
case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
break;
case UPIU_QUERY_OPCODE_READ_FLAG:
request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
if (!flag_res) {
/* No dummy reads */
dev_err(hba->dev, "%s: Invalid argument for read request\n",
__func__);
err = -EINVAL;
goto out_unlock;
}
break;
default:
dev_err(hba->dev,
"%s: Expected query flag opcode but got = %d\n",
__func__, opcode);
err = -EINVAL;
goto out_unlock;
}
/* Send query request */
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
dev_err(hba->dev,
"%s: Sending flag query for idn %d failed, err = %d\n",
__func__, idn, err);
goto out_unlock;
}
/* the flag value is carried in the low bit of the response value */
if (flag_res)
*flag_res = (be32_to_cpu(response->upiu_res.value) &
MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
out_unlock:
mutex_unlock(&hba->dev_cmd.lock);
ufshcd_release(hba);
return err;
}
/*
 * ufsf_query_flag_retry() - ufsf_query_flag() with up to
 * UFSF_QUERY_REQ_RETRIES attempts.
 * Returns 0 on the first successful attempt, otherwise the last error.
 */
int ufsf_query_flag_retry(struct ufs_hba *hba, enum query_opcode opcode,
			  enum flag_idn idn, u8 idx, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < UFSF_QUERY_REQ_RETRIES; retries++) {
		ret = ufsf_query_flag(hba, opcode, idn, idx, flag_res);
		if (!ret)
			break;
		dev_dbg(hba->dev,
			"%s: failed with error %d, retries %d\n",
			__func__, ret, retries);
	}

	if (ret)
		/* fixed typo: "retires" -> "retries" */
		dev_err(hba->dev,
			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);

	return ret;
}
/*
 * ufsf_query_attr_retry() - read/write a UFS attribute with up to
 * UFSF_QUERY_REQ_RETRIES attempts.  Samsung/Micron devices use the vendor
 * selector (UFSFEATURE_SELECTOR); everything else uses selector 0.
 * Returns 0 on the first successful attempt, otherwise the last error.
 */
int ufsf_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode,
			  enum attr_idn idn, u8 idx, u32 *attr_val)
{
	int ret;
	int retries;
	u8 selector;

	if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SAMSUNG ||
	    hba->dev_info.wmanufacturerid == UFS_VENDOR_MICRON)
		selector = UFSFEATURE_SELECTOR;
	else
		selector = 0;

	for (retries = 0; retries < UFSF_QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_attr(hba, opcode, idn, idx,
					selector, attr_val);
		if (!ret)
			break;
		dev_dbg(hba->dev,
			"%s: failed with error %d, retries %d\n",
			__func__, ret, retries);
	}

	if (ret)
		/* fixed typo: "retires" -> "retries" */
		dev_err(hba->dev,
			"%s: query attr, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);

	return ret;
}
/*
 * ufsf_read_desc() - read an arbitrary descriptor (device, geometry,
 * unit, ...) while holding a runtime-PM reference on the hba.
 * Returns 0 on success, negative errno on failure.
 */
static int ufsf_read_desc(struct ufs_hba *hba, u8 desc_id, u8 desc_index,
			  u8 selector, u8 *desc_buf, u32 size)
{
	int err = 0;

	pm_runtime_get_sync(hba->dev);

	err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index,
					    selector,
					    desc_buf, &size);
	if (err)
		/*
		 * This helper also serves geometry/unit reads, so report
		 * the descriptor idn instead of claiming "Device Desc".
		 */
		ERR_MSG("reading desc (idn %u) failed. err = %d",
			desc_id, err);

	pm_runtime_put_sync(hba->dev);

	return err;
}
/*
 * ufsf_read_dev_desc() - read the device descriptor, record the LU count
 * and hand the raw buffer to the HPB/TW device-info parsers.
 * Returns 0 on success, negative errno on failure.
 */
static int ufsf_read_dev_desc(struct ufsf_feature *ufsf, u8 selector)
{
u8 desc_buf[UFSF_QUERY_DESC_DEVICE_MAX_SIZE] = {0};
int ret;
ret = ufsf_read_desc(ufsf->hba, QUERY_DESC_IDN_DEVICE, 0, selector,
desc_buf, UFSF_QUERY_DESC_DEVICE_MAX_SIZE);
if (ret)
return ret;
ufsf->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
INIT_INFO("device lu count %d", ufsf->num_lu);
/* dump feature-support bytes, incl. extended feature support word */
INIT_INFO("sel=%u length=%u(0x%x) bSupport=0x%.2x, extend=0x%.2x_%.2x",
selector, desc_buf[DEVICE_DESC_PARAM_LEN],
desc_buf[DEVICE_DESC_PARAM_LEN],
desc_buf[DEVICE_DESC_PARAM_UFS_FEAT],
desc_buf[DEVICE_DESC_PARAM_EX_FEAT_SUP+2],
desc_buf[DEVICE_DESC_PARAM_EX_FEAT_SUP+3]);
#if defined(CONFIG_SCSI_UFS_HPB)
ufshpb_get_dev_info(&ufsf->hpb_dev_info, desc_buf);
#endif
#if defined(CONFIG_SCSI_UFS_TW)
ufstw_get_dev_info(&ufsf->tw_dev_info, desc_buf);
#endif
return 0;
}
/*
 * ufsf_read_geo_desc() - read the geometry descriptor and pass it to the
 * HPB/TW parsers for devices that advertised those features.
 * Returns 0 on success, negative errno on failure.
 */
static int ufsf_read_geo_desc(struct ufsf_feature *ufsf, u8 selector)
{
u8 geo_buf[UFSF_QUERY_DESC_GEOMETRY_MAX_SIZE];
int ret;
ret = ufsf_read_desc(ufsf->hba, QUERY_DESC_IDN_GEOMETRY, 0, selector,
geo_buf, UFSF_QUERY_DESC_GEOMETRY_MAX_SIZE);
if (ret)
return ret;
#if defined(CONFIG_SCSI_UFS_HPB)
/* only parse when the device descriptor reported HPB support */
if (ufsf->hpb_dev_info.hpb_device)
ufshpb_get_geo_info(&ufsf->hpb_dev_info, geo_buf);
#endif
#if defined(CONFIG_SCSI_UFS_TW)
/* only parse when the device descriptor reported TW support */
if (ufsf->tw_dev_info.tw_device)
ufstw_get_geo_info(&ufsf->tw_dev_info, geo_buf);
#endif
return 0;
}
/*
 * ufsf_read_unit_desc() - read @lun's unit descriptor and, for enabled
 * LUs, collect per-LU HPB/TW information.
 * Returns 0 for success or a disabled LU; negative errno on failure.
 */
static int ufsf_read_unit_desc(struct ufsf_feature *ufsf,
unsigned int lun, u8 selector)
{
u8 unit_buf[UFSF_QUERY_DESC_UNIT_MAX_SIZE];
int lu_enable, ret = 0;
ret = ufsf_read_desc(ufsf->hba, QUERY_DESC_IDN_UNIT, lun, selector,
unit_buf, UFSF_QUERY_DESC_UNIT_MAX_SIZE);
if (ret) {
ERR_MSG("read unit desc failed. ret %d", ret);
goto out;
}
lu_enable = unit_buf[UNIT_DESC_PARAM_LU_ENABLE];
/* nothing to collect for a disabled LU */
if (!lu_enable)
return 0;
#if defined(CONFIG_SCSI_UFS_HPB)
if (ufsf->hpb_dev_info.hpb_device) {
ret = ufshpb_get_lu_info(ufsf, lun, unit_buf);
/* only -ENOMEM aborts the caller's LU scan; see ufsf_device_check() */
if (ret == -ENOMEM)
goto out;
}
#endif
#if defined(CONFIG_SCSI_UFS_TW)
if (ufsf->tw_dev_info.tw_device) {
ret = ufstw_get_lu_info(ufsf, lun, unit_buf);
if (ret == -ENOMEM)
goto out;
}
#endif
out:
return ret;
}
/*
 * ufsf_device_check() - probe-time feature discovery.
 *
 * Reads the device, geometry and per-LU unit descriptors and populates
 * hba->ufsf.  On allocation failure the partially built per-LU state is
 * freed and the features are marked unsupported so init is never run.
 */
void ufsf_device_check(struct ufs_hba *hba)
{
struct ufsf_feature *ufsf = &hba->ufsf;
int ret;
unsigned int lun;
u8 selector = 0;
ufsf->slave_conf_cnt = 0;
ufsf->hba = hba;
/* Samsung/Micron devices require the vendor-specific selector */
if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SAMSUNG ||
hba->dev_info.wmanufacturerid == UFS_VENDOR_MICRON)
selector = UFSFEATURE_SELECTOR;
ret = ufsf_read_dev_desc(ufsf, selector);
if (ret)
return;
ret = ufsf_read_geo_desc(ufsf, selector);
if (ret)
return;
/* per-LU info; only -ENOMEM aborts the scan */
seq_scan_lu(lun) {
ret = ufsf_read_unit_desc(ufsf, lun, selector);
if (ret == -ENOMEM)
goto out_free_mem;
}
return;
out_free_mem:
#if defined(CONFIG_SCSI_UFS_HPB)
seq_scan_lu(lun)
kfree(ufsf->ufshpb_lup[lun]);
/* don't call init handler */
ufsf->ufshpb_state = HPB_NOT_SUPPORTED;
#endif
#if defined(CONFIG_SCSI_UFS_TW)
seq_scan_lu(lun)
kfree(ufsf->tw_lup[lun]);
ufsf->tw_dev_info.tw_device = false;
atomic_set(&ufsf->tw_state, TW_NOT_SUPPORTED);
#endif
return;
}
/*
 * ufsf_print_query_buf() - hex-dump a query payload, 16 bytes per printed
 * line with a "(0xNN):" offset prefix and a " :" separator every 4 bytes.
 */
static void ufsf_print_query_buf(unsigned char *field, int size)
{
	unsigned char buf[255];
	unsigned int count = 0;
	int i;

	count += snprintf(buf, 8, "(0x00):");

	for (i = 0; i < size; i++) {
		count += snprintf(buf + count, 4, " %.2X", field[i]);

		if ((i + 1) % 16 == 0) {
			buf[count] = '\n';
			buf[count + 1] = '\0';
			/*
			 * Never hand a data buffer to printk() as the
			 * format string (-Wformat-security).
			 */
			printk("%s", buf);
			count = 0;
			count += snprintf(buf, 8, "(0x%.2X):", i + 1);
		} else if ((i + 1) % 4 == 0) {
			count += snprintf(buf + count, 3, " :");
		}
	}

	buf[count] = '\n';
	buf[count + 1] = '\0';
	printk("%s", buf);
}
/* True when the high 16 bits of @opcode carry the UFS-feature magic. */
inline int ufsf_check_query(__u32 opcode)
{
	return ((opcode >> 16) & 0xffff) == UFSFEATURE_QUERY_OPCODE;
}
/*
 * ufsf_query_ioctl() - service a vendor query ioctl (read/write descriptor)
 * issued from user space.
 *
 * @ufsf:	feature context
 * @lun:	target logical unit (validated for unit/string reads)
 * @buffer:	user pointer; the payload follows the
 *		ufs_ioctl_query_data_hpb header
 * @ioctl_data:	already-copied ioctl header (opcode/idn/buf_size)
 * @selector:	query selector to use
 *
 * Returns 0 on success, negative errno on failure.  (The old code
 * returned the positive "bytes not copied" count from copy_from_user/
 * copy_to_user and dumped the write payload before checking the copy
 * result; both fixed here.)
 */
int ufsf_query_ioctl(struct ufsf_feature *ufsf, unsigned int lun,
		     void __user *buffer,
		     struct ufs_ioctl_query_data_hpb *ioctl_data, u8 selector)
{
	unsigned char *kernel_buf;
	int opcode;
	int err = 0;
	int index = 0;
	int length = 0;
	int buf_len = 0;

	opcode = ioctl_data->opcode & 0xffff;

	INFO_MSG("op %u idn %u sel %u size %u(0x%X)", opcode, ioctl_data->idn,
		 selector, ioctl_data->buf_size, ioctl_data->buf_size);

	/* device-context reads (STRING idn) may use the larger buffer */
	buf_len = (ioctl_data->idn == QUERY_DESC_IDN_STRING) ?
		IOCTL_DEV_CTX_MAX_SIZE : QUERY_DESC_MAX_SIZE;
	if (ioctl_data->buf_size > buf_len) {
		err = -EINVAL;
		goto out;
	}

	kernel_buf = kzalloc(buf_len, GFP_KERNEL);
	if (!kernel_buf) {
		err = -ENOMEM;
		goto out;
	}

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		/* validate the user copy BEFORE logging/dumping the payload */
		if (copy_from_user(kernel_buf, buffer +
				   sizeof(struct ufs_ioctl_query_data_hpb),
				   ioctl_data->buf_size)) {
			err = -EFAULT;
			goto out_release_mem;
		}
		INFO_MSG("buf size %d", ioctl_data->buf_size);
		ufsf_print_query_buf(kernel_buf, ioctl_data->buf_size);
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		switch (ioctl_data->idn) {
		case QUERY_DESC_IDN_UNIT:
			if (!ufs_is_valid_unit_desc_lun(lun)) {
				ERR_MSG("No unit descriptor for lun 0x%x",
					lun);
				err = -EINVAL;
				goto out_release_mem;
			}
			index = lun;
			INFO_MSG("read lu desc lun: %d", index);
			break;
		case QUERY_DESC_IDN_STRING:
#if defined(CONFIG_SCSI_UFS_HPB)
			if (!ufs_is_valid_unit_desc_lun(lun)) {
				ERR_MSG("No unit descriptor for lun 0x%x",
					lun);
				err = -EINVAL;
				goto out_release_mem;
			}
			err = ufshpb_issue_req_dev_ctx(ufsf->ufshpb_lup[lun],
						       kernel_buf,
						       ioctl_data->buf_size);
			if (err < 0)
				goto out_release_mem;
			goto copy_buffer;
#endif
			/* without HPB, STRING reads use the standard path */
			/* fallthrough */
		case QUERY_DESC_IDN_DEVICE:
		case QUERY_DESC_IDN_GEOMETRY:
		case QUERY_DESC_IDN_CONFIGURATION:
			break;
		default:
			ERR_MSG("invalid idn %d", ioctl_data->idn);
			err = -EINVAL;
			goto out_release_mem;
		}
		break;
	default:
		ERR_MSG("invalid opcode %d", opcode);
		err = -EINVAL;
		goto out_release_mem;
	}

	length = ioctl_data->buf_size;
	err = ufshcd_query_descriptor_retry(ufsf->hba, opcode, ioctl_data->idn,
					    index, selector, kernel_buf,
					    &length);
	if (err)
		goto out_release_mem;

#if defined(CONFIG_SCSI_UFS_HPB)
copy_buffer:
#endif
	if (opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		err = 0;
		/* copy_to_user() returns uncopied bytes; map to -EFAULT */
		if (copy_to_user(buffer, ioctl_data,
				 sizeof(struct ufs_ioctl_query_data_hpb))) {
			ERR_MSG("Failed copying back to user.");
			err = -EFAULT;
			goto out_release_mem;
		}
		if (copy_to_user(buffer +
				 sizeof(struct ufs_ioctl_query_data_hpb),
				 kernel_buf, ioctl_data->buf_size)) {
			ERR_MSG("Fail: copy rsp_buffer to user space.");
			err = -EFAULT;
		}
	}

out_release_mem:
	kfree(kernel_buf);
out:
	return err;
}
/*
 * ufsf_is_valid_lun() - true for the general-purpose LUs the feature code
 * tracks (0 .. UFS_UPIU_MAX_GENERAL_LUN-1).  The old open-coded compare
 * silently accepted negative values; reject them explicitly.
 */
inline bool ufsf_is_valid_lun(int lun)
{
	return lun >= 0 && lun < UFS_UPIU_MAX_GENERAL_LUN;
}
/*
 * ufsf_get_ee_status() - read the exception-event status attribute
 * (QUERY_ATTR_IDN_EE_STATUS) into *status via the retrying attr helper.
 * Returns 0 on success, negative errno on failure.
 */
inline int ufsf_get_ee_status(struct ufs_hba *hba, u32 *status)
{
return ufsf_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_EE_STATUS, 0, status);
}
/*
* Wrapper functions for ufshpb.
*/
#if defined(CONFIG_SCSI_UFS_HPB)
/* Delegate to ufshpb_prepare_pre_req() while HPB is active, else -ENODEV. */
inline int ufsf_hpb_prepare_pre_req(struct ufsf_feature *ufsf,
struct scsi_cmnd *cmd, int lun)
{
if (ufsf->ufshpb_state == HPB_PRESENT)
return ufshpb_prepare_pre_req(ufsf, cmd, lun);
return -ENODEV;
}
/* Delegate to ufshpb_prepare_add_lrbp() while HPB is active, else -ENODEV. */
inline int ufsf_hpb_prepare_add_lrbp(struct ufsf_feature *ufsf, int add_tag)
{
if (ufsf->ufshpb_state == HPB_PRESENT)
return ufshpb_prepare_add_lrbp(ufsf, add_tag);
return -ENODEV;
}
/* Unconditional pass-through to ufshpb_end_pre_req(). */
inline void ufsf_hpb_end_pre_req(struct ufsf_feature *ufsf,
struct request *req)
{
ufshpb_end_pre_req(ufsf, req);
}
/*
 * For ioctl-issued commands whose LBA (CDB bytes 2..5) equals the debug
 * LBA, retarget the LRB to the HPB debug LUN.
 */
inline void ufsf_hpb_change_lun(struct ufsf_feature *ufsf,
struct ufshcd_lrb *lrbp)
{
int ctx_lba = LI_EN_32(lrbp->cmd->cmnd + 2);
if (ufsf->ufshpb_state == HPB_PRESENT &&
ufsf->issue_ioctl == true && ctx_lba == READ10_DEBUG_LBA) {
lrbp->lun = READ10_DEBUG_LUN;
INFO_MSG("lun 0x%X lba 0x%X", lrbp->lun, ctx_lba);
}
}
/* Run the HPB prep hook for normal (non-ioctl) commands only. */
inline void ufsf_hpb_prep_fn(struct ufsf_feature *ufsf,
struct ufshcd_lrb *lrbp)
{
if (ufsf->ufshpb_state == HPB_PRESENT
&& ufsf->issue_ioctl == false)
ufshpb_prep_fn(ufsf, lrbp);
}
/* Feed a response UPIU to HPB while it is active. */
inline void ufsf_hpb_noti_rb(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp)
{
if (ufsf->ufshpb_state == HPB_PRESENT)
ufshpb_rsp_upiu(ufsf, lrbp);
}
/* LU reset: force the RESET state and queue the reset worker now. */
inline void ufsf_hpb_reset_lu(struct ufsf_feature *ufsf)
{
ufsf->ufshpb_state = HPB_RESET;
schedule_work(&ufsf->ufshpb_reset_work);
}
/* Host reset: mark for reset; the worker is queued later by ufsf_hpb_reset(). */
inline void ufsf_hpb_reset_host(struct ufsf_feature *ufsf)
{
if (ufsf->ufshpb_state == HPB_PRESENT)
ufsf->ufshpb_state = HPB_RESET;
}
/* First-time init: schedule the init worker for HPB-capable devices. */
inline void ufsf_hpb_init(struct ufsf_feature *ufsf)
{
if (ufsf->hpb_dev_info.hpb_device &&
ufsf->ufshpb_state == HPB_NEED_INIT) {
INIT_WORK(&ufsf->ufshpb_init_work, ufshpb_init_handler);
schedule_work(&ufsf->ufshpb_init_work);
}
}
/* Queue the reset worker if a reset was previously requested. */
inline void ufsf_hpb_reset(struct ufsf_feature *ufsf)
{
if (ufsf->hpb_dev_info.hpb_device &&
ufsf->ufshpb_state == HPB_RESET)
schedule_work(&ufsf->ufshpb_reset_work);
}
/* Suspend/resume pass-throughs, gated on HPB being active. */
inline void ufsf_hpb_suspend(struct ufsf_feature *ufsf)
{
if (ufsf->ufshpb_state == HPB_PRESENT)
ufshpb_suspend(ufsf);
}
inline void ufsf_hpb_resume(struct ufsf_feature *ufsf)
{
if (ufsf->ufshpb_state == HPB_PRESENT)
ufshpb_resume(ufsf);
}
/* Release HPB resources, leaving the state machine at NEED_INIT. */
inline void ufsf_hpb_release(struct ufsf_feature *ufsf)
{
ufshpb_release(ufsf, HPB_NEED_INIT);
}
inline void ufsf_hpb_set_init_state(struct ufsf_feature *ufsf)
{
ufsf->ufshpb_state = HPB_NEED_INIT;
}
#else
/* CONFIG_SCSI_UFS_HPB disabled: no-op stubs keep call sites link-clean. */
inline int ufsf_hpb_prepare_pre_req(struct ufsf_feature *ufsf,
struct scsi_cmnd *cmd, int lun)
{
return 0;
}
inline int ufsf_hpb_prepare_add_lrbp(struct ufsf_feature *ufsf, int add_tag)
{
return 0;
}
inline void ufsf_hpb_end_pre_req(struct ufsf_feature *ufsf,
struct request *req) {}
inline void ufsf_hpb_change_lun(struct ufsf_feature *ufsf,
struct ufshcd_lrb *lrbp) {}
inline void ufsf_hpb_prep_fn(struct ufsf_feature *ufsf,
struct ufshcd_lrb *lrbp) {}
inline void ufsf_hpb_noti_rb(struct ufsf_feature *ufsf,
struct ufshcd_lrb *lrbp) {}
inline void ufsf_hpb_reset_lu(struct ufsf_feature *ufsf) {}
inline void ufsf_hpb_reset_host(struct ufsf_feature *ufsf) {}
inline void ufsf_hpb_init(struct ufsf_feature *ufsf) {}
inline void ufsf_hpb_reset(struct ufsf_feature *ufsf) {}
inline void ufsf_hpb_suspend(struct ufsf_feature *ufsf) {}
inline void ufsf_hpb_resume(struct ufsf_feature *ufsf) {}
inline void ufsf_hpb_release(struct ufsf_feature *ufsf) {}
inline void ufsf_hpb_set_init_state(struct ufsf_feature *ufsf) {}
#endif
/*
* Wrapper functions for ufstw.
*/
#if defined(CONFIG_SCSI_UFS_TW)
/* Unconditional pass-through to the TW prep hook. */
inline void ufsf_tw_prep_fn(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp)
{
ufstw_prep_fn(ufsf, lrbp);
}
/* Schedule the TW init worker for TW-capable devices awaiting init. */
inline void ufsf_tw_init(struct ufsf_feature *ufsf)
{
INIT_INFO("init start.. tw_state %d\n",
atomic_read(&ufsf->tw_state));
if (ufsf->tw_dev_info.tw_device &&
atomic_read(&ufsf->tw_state) == TW_NEED_INIT) {
INIT_WORK(&ufsf->tw_init_work, ufstw_init_work_fn);
schedule_work(&ufsf->tw_init_work);
}
}
/* Queue the TW reset worker if a reset was previously requested. */
inline void ufsf_tw_reset(struct ufsf_feature *ufsf)
{
INIT_INFO("reset start.. tw_state %d\n",
atomic_read(&ufsf->tw_state));
if (ufsf->tw_dev_info.tw_device &&
atomic_read(&ufsf->tw_state) == TW_RESET)
schedule_work(&ufsf->tw_reset_work);
}
/* Suspend/resume pass-throughs, gated on TW being active. */
inline void ufsf_tw_suspend(struct ufsf_feature *ufsf)
{
if (atomic_read(&ufsf->tw_state) == TW_PRESENT)
ufstw_suspend(ufsf);
}
inline void ufsf_tw_resume(struct ufsf_feature *ufsf)
{
if (atomic_read(&ufsf->tw_state) == TW_PRESENT)
ufstw_resume(ufsf);
}
/* Drop the TW reference count. */
inline void ufsf_tw_release(struct ufsf_feature *ufsf)
{
ufstw_release(&ufsf->tw_kref);
}
inline void ufsf_tw_set_init_state(struct ufsf_feature *ufsf)
{
atomic_set(&ufsf->tw_state, TW_NEED_INIT);
}
/* LU reset: force TW_RESET and, for TW devices, queue the reset worker. */
inline void ufsf_tw_reset_lu(struct ufsf_feature *ufsf)
{
INFO_MSG("run reset_lu.. tw_state(%d) -> TW_RESET",
atomic_read(&ufsf->tw_state));
atomic_set(&ufsf->tw_state, TW_RESET);
if (ufsf->tw_dev_info.tw_device)
schedule_work(&ufsf->tw_reset_work);
}
/* Host reset: mark for reset; the worker is queued later by ufsf_tw_reset(). */
inline void ufsf_tw_reset_host(struct ufsf_feature *ufsf)
{
INFO_MSG("run reset_host.. tw_state(%d) -> TW_RESET",
atomic_read(&ufsf->tw_state));
if (atomic_read(&ufsf->tw_state) == TW_PRESENT)
atomic_set(&ufsf->tw_state, TW_RESET);
}
/*
 * In auto exception-event mode, poll the EE status attribute and invoke
 * the TW EE handler when the TW bit is set.
 */
inline void ufsf_tw_ee_handler(struct ufsf_feature *ufsf)
{
u32 status = 0;
int err;
if (ufsf->tw_debug && (atomic_read(&ufsf->tw_state) != TW_PRESENT)) {
ERR_MSG("tw_state %d", atomic_read(&ufsf->tw_state));
return;
}
if ((atomic_read(&ufsf->tw_state) == TW_PRESENT)
&& (ufsf->tw_ee_mode == TW_EE_MODE_AUTO)) {
err = ufsf_get_ee_status(ufsf->hba, &status);
if (err) {
dev_err(ufsf->hba->dev,
"%s: failed to get tw ee status %d\n",
__func__, err);
return;
}
if (status & MASK_EE_TW)
ufstw_ee_handler(ufsf);
}
}
#else
/* CONFIG_SCSI_UFS_TW disabled: no-op stubs keep call sites link-clean. */
inline void ufsf_tw_prep_fn(struct ufsf_feature *ufsf,
struct ufshcd_lrb *lrbp) {}
inline void ufsf_tw_init(struct ufsf_feature *ufsf) {}
inline void ufsf_tw_reset(struct ufsf_feature *ufsf) {}
inline void ufsf_tw_suspend(struct ufsf_feature *ufsf) {}
inline void ufsf_tw_resume(struct ufsf_feature *ufsf) {}
inline void ufsf_tw_release(struct ufsf_feature *ufsf) {}
inline void ufsf_tw_set_init_state(struct ufsf_feature *ufsf) {}
inline void ufsf_tw_reset_lu(struct ufsf_feature *ufsf) {}
inline void ufsf_tw_reset_host(struct ufsf_feature *ufsf) {}
inline void ufsf_tw_ee_handler(struct ufsf_feature *ufsf) {}
#endif

View File

@@ -0,0 +1,180 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017-2018 Samsung Electronics Co., Ltd.
*/
#ifndef _UFSFEATURE_H_
#define _UFSFEATURE_H_
#include "ufs.h"
#include <scsi/ufs/ufs-mtk-ioctl.h>
#if defined(CONFIG_SCSI_UFS_HPB)
#include "ufshpb.h"
#endif
#include <scsi/scsi_cmnd.h>
#if defined(CONFIG_SCSI_UFS_TW)
#include "ufstw.h"
#endif
/* Constant value*/
#define SECTOR 512
#define BLOCK 4096
#define SECTORS_PER_BLOCK (BLOCK / SECTOR)
#define BITS_PER_DWORD 32
#define IOCTL_DEV_CTX_MAX_SIZE OS_PAGE_SIZE
#define OS_PAGE_SIZE 4096
#define OS_PAGE_SHIFT 12
#define UFSF_QUERY_REQ_RETRIES 1
/* Description */
#define UFSF_QUERY_DESC_DEVICE_MAX_SIZE 0x57
#define UFSF_QUERY_DESC_CONFIGURAION_MAX_SIZE 0xE2
#define UFSF_QUERY_DESC_UNIT_MAX_SIZE 0x2D
#define UFSF_QUERY_DESC_GEOMETRY_MAX_SIZE 0x58
#define UFSFEATURE_SELECTOR 0x01
/* Extended UFS Feature Support */
#define UFSF_EFS_TURBO_WRITE 0x100
/* query_flag */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
/* BIG -> LI */
#define LI_EN_16(x) be16_to_cpu(*(__be16 *)(x))
#define LI_EN_32(x) be32_to_cpu(*(__be32 *)(x))
#define LI_EN_64(x) be64_to_cpu(*(__be64 *)(x))
/* LI -> BIG */
#define GET_BYTE_0(num) (((num) >> 0) & 0xff)
#define GET_BYTE_1(num) (((num) >> 8) & 0xff)
#define GET_BYTE_2(num) (((num) >> 16) & 0xff)
#define GET_BYTE_3(num) (((num) >> 24) & 0xff)
#define GET_BYTE_4(num) (((num) >> 32) & 0xff)
#define GET_BYTE_5(num) (((num) >> 40) & 0xff)
#define GET_BYTE_6(num) (((num) >> 48) & 0xff)
#define GET_BYTE_7(num) (((num) >> 56) & 0xff)
#define INFO_MSG(msg, args...) printk(KERN_INFO "%s:%d " msg "\n", \
__func__, __LINE__, ##args)
#define INIT_INFO(msg, args...) INFO_MSG(msg, ##args)
#define RELEASE_INFO(msg, args...) INFO_MSG(msg, ##args)
#define SYSFS_INFO(msg, args...) INFO_MSG(msg, ##args)
#define ERR_MSG(msg, args...) printk(KERN_ERR "%s:%d " msg "\n", \
__func__, __LINE__, ##args)
#define WARNING_MSG(msg, args...) printk(KERN_WARNING "%s:%d " msg "\n", \
__func__, __LINE__, ##args)
#define seq_scan_lu(lun) for (lun = 0; lun < UFS_UPIU_MAX_GENERAL_LUN; lun++)
/*
 * TMSG() - emit a blktrace message on @lun's request queue, provided the
 * scsi device for that LU has been attached and has a queue.
 *
 * Fixes: the original definition ended with a line-continuation
 * backslash after "} while (0)", which splices the following source
 * line into the macro body.
 */
#define TMSG(ufsf, lun, msg, args...)					\
do {									\
	if ((ufsf)->sdev_ufs_lu[lun] &&					\
	    (ufsf)->sdev_ufs_lu[lun]->request_queue)			\
		blk_add_trace_msg(					\
			(ufsf)->sdev_ufs_lu[lun]->request_queue,	\
			msg, ##args);					\
} while (0)
/*
 * Per-LU descriptor snapshot filled from the unit descriptor during
 * feature probing (common fields plus HPB/TW-specific ones).
 */
struct ufsf_lu_desc {
/* Common info */
int lu_enable; /* 03h bLUEnable */
int lu_queue_depth; /* 06h lu queue depth info*/
int lu_logblk_size; /* 0Ah bLogicalBlockSize. default 0x0C = 4KB */
u64 lu_logblk_cnt; /* 0Bh qLogicalBlockCount. */
#if defined(CONFIG_SCSI_UFS_HPB)
u16 lu_max_active_hpb_rgns; /* 23h:24h wLUMaxActiveHPBRegions */
u16 lu_hpb_pinned_rgn_startidx; /* 25h:26h wHPBPinnedRegionStartIdx */
u16 lu_num_hpb_pinned_rgns; /* 27h:28h wNumHPBPinnedRegions */
int lu_hpb_pinned_end_offset;
#endif
#if defined(CONFIG_SCSI_UFS_TW)
unsigned int tw_lu_buf_size; /* 29h per-LU turbo-write buffer size */
#endif
};
/*
 * Top-level UFS feature context (embedded in struct ufs_hba as hba->ufsf).
 * Holds feature discovery results, per-LU state, and the work items that
 * drive the HPB/TW state machines.
 */
struct ufsf_feature {
struct ufs_hba *hba;
int num_lu; /* LU count from the device descriptor (bNumberLU) */
int slave_conf_cnt; /* presumably counts configured scsi LUs -- verify */
struct scsi_device *sdev_ufs_lu[UFS_UPIU_MAX_GENERAL_LUN];
#if defined(CONFIG_SCSI_UFS_HPB)
struct ufshpb_dev_info hpb_dev_info;
struct ufshpb_lu *ufshpb_lup[UFS_UPIU_MAX_GENERAL_LUN];
struct work_struct ufshpb_init_work;
struct work_struct ufshpb_reset_work;
struct work_struct ufshpb_eh_work;
wait_queue_head_t wait_hpb;
int ufshpb_state; /* HPB_NEED_INIT / HPB_PRESENT / HPB_RESET / ... */
struct kref ufshpb_kref;
bool issue_ioctl; /* set while the debug ioctl path issues commands */
#endif
#if defined(CONFIG_SCSI_UFS_TW)
struct ufstw_dev_info tw_dev_info;
struct ufstw_lu *tw_lup[UFS_UPIU_MAX_GENERAL_LUN];
struct work_struct tw_init_work;
struct work_struct tw_reset_work;
wait_queue_head_t tw_wait;
atomic_t tw_state; /* TW_NEED_INIT / TW_PRESENT / TW_RESET / ... */
struct kref tw_kref;
/* turbo write exception event control */
bool tw_ee_mode;
/* for debug */
bool tw_debug;
int tw_debug_no;
atomic64_t tw_debug_ee_count;
#endif
};
struct ufs_hba;
struct ufshcd_lrb;
/* probe-time feature discovery and common query helpers (ufsfeature.c) */
void ufsf_device_check(struct ufs_hba *hba);
int ufsf_check_query(__u32 opcode);
int ufsf_query_ioctl(struct ufsf_feature *ufsf, unsigned int lun,
void __user *buffer,
struct ufs_ioctl_query_data_hpb *ioctl_data,
u8 selector);
int ufsf_query_flag_retry(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 idx, bool *flag_res);
int ufsf_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 idx, u32 *attr_val);
bool ufsf_is_valid_lun(int lun);
int ufsf_get_ee_status(struct ufs_hba *hba, u32 *status);
/* for hpb */
int ufsf_hpb_prepare_pre_req(struct ufsf_feature *ufsf, struct scsi_cmnd *cmd,
int lun);
int ufsf_hpb_prepare_add_lrbp(struct ufsf_feature *ufsf, int add_tag);
void ufsf_hpb_end_pre_req(struct ufsf_feature *ufsf, struct request *req);
void ufsf_hpb_change_lun(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp);
void ufsf_hpb_prep_fn(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp);
void ufsf_hpb_noti_rb(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp);
void ufsf_hpb_reset_lu(struct ufsf_feature *ufsf);
void ufsf_hpb_reset_host(struct ufsf_feature *ufsf);
void ufsf_hpb_init(struct ufsf_feature *ufsf);
void ufsf_hpb_reset(struct ufsf_feature *ufsf);
void ufsf_hpb_suspend(struct ufsf_feature *ufsf);
void ufsf_hpb_resume(struct ufsf_feature *ufsf);
void ufsf_hpb_release(struct ufsf_feature *ufsf);
void ufsf_hpb_set_init_state(struct ufsf_feature *ufsf);
/* for tw*/
void ufsf_tw_prep_fn(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp);
void ufsf_tw_init(struct ufsf_feature *ufsf);
void ufsf_tw_reset(struct ufsf_feature *ufsf);
/* NOTE(review): no definition of ufsf_tw_check_flush() is visible in
 * ufsfeature.c -- confirm it is implemented somewhere, or drop this. */
int ufsf_tw_check_flush(struct ufsf_feature *ufsf);
void ufsf_tw_suspend(struct ufsf_feature *ufsf);
void ufsf_tw_resume(struct ufsf_feature *ufsf);
void ufsf_tw_release(struct ufsf_feature *ufsf);
void ufsf_tw_set_init_state(struct ufsf_feature *ufsf);
void ufsf_tw_reset_lu(struct ufsf_feature *ufsf);
void ufsf_tw_reset_host(struct ufsf_feature *ufsf);
void ufsf_tw_ee_handler(struct ufsf_feature *ufsf);
#endif /* End of Header */

View File

@@ -2266,7 +2266,12 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
*
* Returns 0 in case of success, non-zero value in case of failure
*/
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
#if defined(CONFIG_SCSI_UFS_FEATURE)
int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
#else
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
#endif
{
struct ufshcd_sg_entry *prd;
struct scatterlist *sg;
@@ -2528,7 +2533,12 @@ static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* @hba: per adapter instance
* @lrbp: pointer to local reference block
*/
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
#if defined(CONFIG_SCSI_UFS_FEATURE)
int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
#else
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
#endif
{
u32 upiu_flags;
int ret = 0;
@@ -2540,9 +2550,21 @@ static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
if (likely(lrbp->cmd)) {
#if defined(CONFIG_SCSI_UFS_FEATURE)
ufsf_hpb_change_lun(&hba->ufsf, lrbp);
ufsf_tw_prep_fn(&hba->ufsf, lrbp);
ufsf_hpb_prep_fn(&hba->ufsf, lrbp);
#endif
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
lrbp->cmd->sc_data_direction);
ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
#if defined(CONFIG_SCSI_SKHPB)
if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SKHYNIX) {
if (hba->skhpb_state == SKHPB_PRESENT && hba->issue_ioctl == false) {
skhpb_prep_fn(hba, lrbp);
}
}
#endif
} else {
ret = -EINVAL;
}
@@ -2575,6 +2597,15 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
unsigned long flags;
int tag;
int err = 0;
#if defined(CONFIG_SCSI_UFS_FEATURE) && defined(CONFIG_SCSI_UFS_HPB)
u32 line = 0;
struct scsi_cmnd *pre_cmd;
struct ufshcd_lrb *add_lrbp;
int add_tag = -ENODEV;
int pre_req_err = -EBUSY;
int lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
bool req_sent = false;
#endif
hba = shost_priv(host);
@@ -2637,6 +2668,37 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
}
#if defined(CONFIG_SCSI_UFS_FEATURE) && defined(CONFIG_SCSI_UFS_HPB)
/* Micron version 2.0 not support write buffer id 2 */
if (hba->dev_info.wmanufacturerid != UFS_VENDOR_SAMSUNG)
goto send_orig_cmd;
if (ufshcd_vops_has_ufshci_perf_heuristic(hba))
goto send_orig_cmd;
add_tag = ufsf_hpb_prepare_pre_req(&hba->ufsf, cmd, lun);
if (add_tag == -EAGAIN) {
clear_bit_unlock(tag, &hba->lrb_in_use);
err = SCSI_MLQUEUE_HOST_BUSY;
ufshcd_release(hba);
line = __LINE__;
goto out;
}
if (add_tag < 0) {
hba->lrb[tag].hpb_ctx_id = MAX_HPB_CONTEXT_ID;
goto send_orig_cmd;
}
add_lrbp = &hba->lrb[add_tag];
pre_req_err = ufsf_hpb_prepare_add_lrbp(&hba->ufsf, add_tag);
if (pre_req_err)
hba->lrb[tag].hpb_ctx_id = MAX_HPB_CONTEXT_ID;
send_orig_cmd:
#endif
WARN_ON(hba->clk_gating.state != CLKS_ON);
lrbp = &hba->lrb[tag];
@@ -2672,6 +2734,17 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufs_mtk_biolog_queue_command(tag, lrbp->cmd);
#if defined(CONFIG_SCSI_UFS_FEATURE) && defined(CONFIG_SCSI_UFS_HPB)
if (!pre_req_err) {
ufshcd_vops_setup_xfer_req(hba, add_tag,
(add_lrbp->cmd ? true : false));
ufshcd_send_command(hba, add_tag);
req_sent = true;
pre_req_err = -EBUSY;
atomic64_inc(&hba->ufsf.ufshpb_lup[add_lrbp->lun]->pre_req_cnt);
}
#endif
/* issue command to the controller */
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
@@ -2681,6 +2754,17 @@ out_unlock:
if (!err)
ufs_mtk_biolog_send_command(tag);
out:
#if defined(CONFIG_SCSI_UFS_FEATURE) && defined(CONFIG_SCSI_UFS_HPB)
if (!pre_req_err) {
pre_cmd = add_lrbp->cmd;
scsi_dma_unmap(pre_cmd);
add_lrbp->cmd = NULL;
clear_bit_unlock(add_tag, &hba->lrb_in_use);
ufshcd_release(hba);
ufsf_hpb_end_pre_req(&hba->ufsf, pre_cmd->request);
}
#endif
up_read(&hba->clk_scaling_lock);
return err;
}
@@ -2863,8 +2947,13 @@ static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
* NOTE: Since there is only one available tag for device management commands,
* it is expected you hold the hba->dev_cmd.lock mutex.
*/
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
#if defined(CONFIG_SCSI_UFS_FEATURE)
int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
#else
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
#endif
{
struct ufshcd_lrb *lrbp;
int err;
@@ -2934,8 +3023,13 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
(*request)->upiu_req.selector = selector;
}
#if defined(CONFIG_SCSI_SKHPB)
int ufshcd_query_flag_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
#else
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
#endif
{
int ret;
int retries;
@@ -3538,7 +3632,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
* Unit descriptors are only available for general purpose LUs (LUN id
* from 0 to 7) and RPMB Well known LU.
*/
if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
if (!ufs_is_valid_unit_desc_lun(lun))
return -EOPNOTSUPP;
return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
@@ -4463,7 +4557,6 @@ void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
/**
* ufshcd_hba_enable - initialize the controller
* @hba: per adapter instance
@@ -4966,6 +5059,18 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
if (schedule_work(&hba->eeh_work))
pm_runtime_get_noresume(hba->dev);
}
#if defined(CONFIG_SCSI_UFS_FEATURE)
if (scsi_status == SAM_STAT_GOOD)
ufsf_hpb_noti_rb(&hba->ufsf, lrbp);
#endif
#if defined(CONFIG_SCSI_SKHPB)
if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SKHYNIX) {
if (hba->skhpb_state == SKHPB_PRESENT &&
scsi_status == SAM_STAT_GOOD)
skhpb_rsp_upiu(hba, lrbp);
}
#endif
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
@@ -6308,6 +6413,18 @@ out:
hba->req_abort_count = 0;
ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
if (!err) {
#if defined(CONFIG_SCSI_UFS_FEATURE)
ufsf_hpb_reset_lu(&hba->ufsf);
ufsf_tw_reset_lu(&hba->ufsf);
#endif
#if defined(CONFIG_SCSI_SKHPB)
if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SKHYNIX) {
if (hba->skhpb_state == SKHPB_PRESENT)
hba->skhpb_state = SKHPB_RESET;
schedule_delayed_work(&hba->skhpb_init_work,
msecs_to_jiffies(10));
}
#endif
err = SUCCESS;
} else {
dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
@@ -6529,6 +6646,10 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
*/
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_hba_stop(hba, false);
#if defined(CONFIG_SCSI_UFS_FEATURE)
ufsf_hpb_reset_host(&hba->ufsf);
ufsf_tw_reset_host(&hba->ufsf);
#endif
hba->silence_err_logs = true;
ufshcd_complete_requests(hba);
hba->silence_err_logs = false;
@@ -7368,6 +7489,16 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
}
ufshcd_print_info(hba, UFS_INFO_PWR);
}
#if defined(CONFIG_SCSI_UFS_FEATURE)
ufsf_device_check(hba);
ufsf_hpb_init(&hba->ufsf);
ufsf_tw_init(&hba->ufsf);
#endif
scsi_scan_host(hba->host);
#if defined(CONFIG_SCSI_SKHPB)
if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SKHYNIX)
schedule_delayed_work(&hba->skhpb_init_work, 0);
#endif
/*
* bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
@@ -8408,6 +8539,14 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
req_link_state = UIC_LINK_OFF_STATE;
}
#if defined(CONFIG_SCSI_UFS_FEATURE)
ufsf_hpb_suspend(&hba->ufsf);
ufsf_tw_suspend(&hba->ufsf);
#endif
#if defined(CONFIG_SCSI_SKHPB)
if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SKHYNIX)
skhpb_suspend(hba);
#endif
ret = ufshcd_crypto_suspend(hba, pm_op);
if (ret)
@@ -8456,6 +8595,13 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_disable_auto_bkops(hba);
}
}
#if defined(CONFIG_SCSI_UFS_FEATURE) && defined(CONFIG_SCSI_UFS_TW)
if (ufstw_need_flush(&hba->ufsf)) {
ret = -EAGAIN;
pm_runtime_mark_last_busy(hba->dev);
goto enable_gating;
}
#endif
if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
@@ -8625,6 +8771,13 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
#if defined(CONFIG_SCSI_UFS_FEATURE)
ufsf_hpb_resume(&hba->ufsf);
ufsf_tw_resume(&hba->ufsf);
#endif
#if defined(CONFIG_SCSI_SKHPB)
skhpb_resume(hba);
#endif
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
@@ -8898,6 +9051,14 @@ EXPORT_SYMBOL(ufshcd_shutdown);
*/
void ufshcd_remove(struct ufs_hba *hba)
{
#if defined(CONFIG_SCSI_UFS_FEATURE)
ufsf_hpb_release(&hba->ufsf);
ufsf_tw_release(&hba->ufsf);
#endif
#if defined(CONFIG_SCSI_SKHPB)
if (hba->dev_info.wmanufacturerid == UFS_VENDOR_SKHYNIX)
skhpb_release(hba, SKHPB_NEED_INIT);
#endif
ufs_bsg_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
scsi_remove_host(hba->host);
@@ -9155,6 +9316,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
* ufshcd_probe_hba().
*/
ufshcd_set_ufs_dev_active(hba);
#if defined(CONFIG_SCSI_UFS_FEATURE)
ufsf_hpb_set_init_state(&hba->ufsf);
ufsf_tw_set_init_state(&hba->ufsf);
#endif
#if defined(CONFIG_SCSI_SKHPB) /* initialize hpb structures */
ufshcd_init_hpb(hba);
#endif
async_schedule(ufshcd_async_scan, hba);
ufs_sysfs_add_nodes(hba->dev);

View File

@@ -71,6 +71,13 @@
#include "ufs.h"
#include "ufshci.h"
#if defined(CONFIG_SCSI_UFS_FEATURE)
#include "ufsfeature.h"
#endif
#if defined(CONFIG_SCSI_SKHPB)
#include "ufshpb_skh.h"
#endif
#define UFSHCD "ufshcd"
#define UFSHCD_DRIVER_VERSION "0.2"
@@ -239,6 +246,10 @@ struct ufshcd_lrb {
u64 data_unit_num;
bool req_abort_skip;
#if defined(CONFIG_SCSI_UFS_FEATURE) && defined(CONFIG_SCSI_UFS_HPB)
int hpb_ctx_id;
#endif
};
/**
@@ -747,6 +758,7 @@ struct ufs_hba {
u16 ee_ctrl_mask;
u16 hba_enable_delay_us;
bool is_powered;
struct semaphore eh_sem;
/* Work Queues */
struct work_struct eh_work;
@@ -830,6 +842,24 @@ struct ufs_hba {
struct device bsg_dev;
struct request_queue *bsg_queue;
#if defined(CONFIG_SCSI_UFS_FEATURE)
struct ufsf_feature ufsf;
#endif
#if defined(CONFIG_SCSI_SKHPB)
/* HPB support */
u32 skhpb_feat;
int skhpb_state;
int skhpb_max_regions;
struct delayed_work skhpb_init_work;
bool issue_ioctl;
struct skhpb_lu *skhpb_lup[UFS_UPIU_MAX_GENERAL_LUN];
struct work_struct skhpb_eh_work;
u32 skhpb_quirk;
u8 hpb_control_mode;
#define SKHPB_U8_MAX 0xFF
u8 skhpb_quicklist_lu_enable[UFS_UPIU_MAX_GENERAL_LUN];
struct scsi_device *sdev_ufs_lu[UFS_UPIU_MAX_GENERAL_LUN];
#endif
#ifdef CONFIG_SCSI_UFS_CRYPTO
/* crypto */
@@ -1073,6 +1103,17 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);
#if defined(CONFIG_SCSI_UFS_FEATURE)
int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout);
int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
#endif
#if defined(CONFIG_SCSI_SKHPB)
int ufshcd_query_flag_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum flag_idn idn, bool *flag_res);
#endif
int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
int *desc_length);

3762
drivers/scsi/ufs/ufshpb.c Normal file

File diff suppressed because it is too large Load Diff

292
drivers/scsi/ufs/ufshpb.h Normal file
View File

@@ -0,0 +1,292 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2017-2018 Samsung Electronics Co., Ltd.
 *
 * UFS Host Performance Booster (HPB) driver interface: region/subregion
 * bookkeeping structures, the device response (Response UPIU) layout, and
 * the entry points called from the ufshcd/ufsfeature glue code.
 */
#ifndef _UFSHPB_H_
#define _UFSHPB_H_
#include <linux/interrupt.h>
#include <linux/sysfs.h>
#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_driver.h>
#include "../../../block/blk.h"
#include "../scsi_priv.h"
/* Version info */
#define UFSHPB_VER 0x0200	/* supported HPB feature version */
#define UFSHPB_DD_VER 0x0208	/* device-driver implementation version */
/* Constant values */
#define MAX_ACTIVE_NUM 2	/* max active entries per Response UPIU */
#define MAX_INACTIVE_NUM 2	/* max inactive entries per Response UPIU */
#define HPB_ENTRY_SIZE 0x08	/* bytes per HPB (LPN -> PPN) map entry */
#define HPB_ENTREIS_PER_OS_PAGE (OS_PAGE_SIZE / HPB_ENTRY_SIZE)
#define RETRY_DELAY_MS 5000	/* map-request retry delay */
/* HPB supported chunk size (in blocks) */
#define HPB_MULTI_CHUNK_LOW 9
#define HPB_MULTI_CHUNK_HIGH 128
#define MAX_HPB_CONTEXT_ID 0x7f	/* context id space for pre-requests */
/* Descriptor bit: device advertises HPB support */
#define UFS_FEATURE_SUPPORT_HPB_BIT 0x80
/* Response UPIU types */
#define HPB_RSP_NONE 0x00
#define HPB_RSP_REQ_REGION_UPDATE 0x01
/* Vendor defined OPCODEs */
#define UFSHPB_READ_BUFFER 0xF9
#define UFSHPB_WRITE_BUFFER 0xFA
#define UFSHPB_GROUP_NUMBER 0x11
#define UFSHPB_READ_BUFFER_ID 0x01
#define UFSHPB_WRITE_BUFFER_ID 0x02
#define TRANSFER_LEN 0x01
#define DEV_DATA_SEG_LEN 0x14
#define DEV_SENSE_SEG_LEN 0x12
#define DEV_DES_TYPE 0x80
#define DEV_ADDITIONAL_LEN 0x10
/* For READ(10) debug path; LBA is ASCII "HPB0" */
#define READ10_DEBUG_LUN 0x7F
#define READ10_DEBUG_LBA 0x48504230
/*
 * UFSHPB DEBUG
 *
 * HPB_DEBUG(): printk gated on the per-LU "debug" flag.
 * TMSG_CMD(): emit a blktrace message for a request against an HPB LU.
 */
#define HPB_DEBUG(hpb, msg, args...)					\
	do { if (hpb->debug)						\
		printk(KERN_ERR "%s:%d " msg "\n",			\
		       __func__, __LINE__, ##args);			\
	} while (0)
#define TMSG_CMD(hpb, msg, rq, rgn, srgn)				\
	do { if (hpb->ufsf->sdev_ufs_lu[hpb->lun] &&			\
	    hpb->ufsf->sdev_ufs_lu[hpb->lun]->request_queue)		\
		blk_add_trace_msg(					\
			hpb->ufsf->sdev_ufs_lu[hpb->lun]->request_queue,\
			"%llu + %u " msg " %d - %d",			\
			(unsigned long long) blk_rq_pos(rq),		\
			(unsigned int) blk_rq_sectors(rq), rgn, srgn);	\
	} while (0)
/* Driver-wide HPB state; negative values are failure/teardown states */
enum UFSHPB_STATE {
	HPB_PRESENT = 1,
	HPB_NOT_SUPPORTED = -1,
	HPB_FAILED = -2,
	HPB_NEED_INIT = 0,
	HPB_RESET = -3,
};
/* Region activation state; PINNED regions are never evicted */
enum HPBREGION_STATE {
	HPBREGION_INACTIVE, HPBREGION_ACTIVE, HPBREGION_PINNED,
};
/* Subregion map state within an active region */
enum HPBSUBREGION_STATE {
	HPBSUBREGION_UNUSED,
	HPBSUBREGION_DIRTY,
	HPBSUBREGION_CLEAN,
	HPBSUBREGION_ISSUED,
};
/* HPB capabilities read from the device/geometry descriptors */
struct ufshpb_dev_info {
	bool hpb_device;		/* device supports HPB */
	int hpb_number_lu;		/* number of HPB-enabled LUs */
	int hpb_ver;			/* device HPB spec version */
	int hpb_rgn_size;		/* region size (descriptor encoding) */
	int hpb_srgn_size;		/* subregion size (descriptor encoding) */
	int hpb_device_max_active_rgns;	/* device-wide active region limit */
};
/* One active-region hint inside a Response UPIU (big-endian on the wire) */
struct ufshpb_active_field {
	__be16 active_rgn;
	__be16 active_srgn;
};
/*
 * Sense-data area of a Response UPIU carrying HPB region update hints.
 * Layout mirrors the on-the-wire format; do not reorder fields.
 */
struct ufshpb_rsp_field {
	__be16 sense_data_len;
	u8 desc_type;
	u8 additional_len;
	u8 hpb_type;
	u8 reserved;
	u8 active_rgn_cnt;
	u8 inactive_rgn_cnt;
	struct ufshpb_active_field hpb_active_field[2];
	__be16 hpb_inactive_field[2];
};
/* Backing pages + dirty bitmap for one subregion's map entries */
struct ufshpb_map_ctx {
	struct page **m_page;
	unsigned int *ppn_dirty;
	struct list_head list_table;
};
struct ufshpb_subregion {
	struct ufshpb_map_ctx *mctx;
	enum HPBSUBREGION_STATE srgn_state;
	int rgn_idx;
	int srgn_idx;
	/* below information is used by rsp_list */
	struct list_head list_act_srgn;
};
struct ufshpb_region {
	struct ufshpb_subregion *srgn_tbl;
	enum HPBREGION_STATE rgn_state;
	int rgn_idx;
	int srgn_cnt;
	/* below information is used by rsp_list */
	struct list_head list_inact_rgn;
	/* below information is used by lru */
	struct list_head list_lru_rgn;
};
/*
 * Internal request wrapper used for both map requests (READ BUFFER, "rb")
 * and pre-requests (WRITE BUFFER, "wb"); the union selects the role.
 */
struct ufshpb_req {
	struct request *req;
	struct bio *bio;
	struct ufshpb_lu *hpb;
	struct list_head list_req;
	void (*end_io)(struct request *rq, int err);
	void *end_io_data;
	char sense[SCSI_SENSE_BUFFERSIZE];
	union {
		struct {
			struct ufshpb_map_ctx *mctx;
			unsigned int rgn_idx;
			unsigned int srgn_idx;
			unsigned int lun;
		} rb;
		struct {
			struct page *m_page;
			unsigned int len;
			unsigned long lpn;
		} wb;
	};
};
/* Victim (eviction) selection policy; only LRU is implemented */
enum selection_type {
	LRU = 1,
};
struct victim_select_info {
	int selection_type;
	struct list_head lh_lru_rgn;
	int max_lru_active_cnt; /* supported hpb #region - pinned #region */
	atomic64_t active_cnt;
};
/* Per-LU HPB instance: region table, request pools, workers and stats */
struct ufshpb_lu {
	struct ufsf_feature *ufsf;	/* back-pointer to feature glue */
	u8 lun;
	int qd;				/* LU queue depth */
	struct ufshpb_region *rgn_tbl;
	spinlock_t hpb_lock;
	/* map-request (READ BUFFER) pool */
	struct ufshpb_req *map_req;
	int num_inflight_map_req;
	int throttle_map_req;
	struct list_head lh_map_req_free;
	struct list_head lh_map_req_retry;
	struct list_head lh_map_ctx_free;
	/* lists fed by Response UPIU region hints */
	spinlock_t rsp_list_lock;
	struct list_head lh_pinned_srgn;
	struct list_head lh_act_srgn;
	struct list_head lh_inact_rgn;
	/* sysfs */
	struct kobject kobj;
	struct mutex sysfs_lock;
	struct ufshpb_sysfs_entry *sysfs_entries;
	/* pre-request (WRITE BUFFER) pool */
	struct ufshpb_req *pre_req;
	int num_inflight_pre_req;
	int throttle_pre_req;
	struct list_head lh_pre_req_free;
	struct list_head lh_pre_req_dummy; /* dummy for blk_start_requests() */
	int ctx_id_ticket;
	int pre_req_min_tr_len;
	int pre_req_max_tr_len;
	/* workers */
	struct work_struct ufshpb_work;
	struct delayed_work ufshpb_retry_work;
	struct work_struct ufshpb_task_workq;
	/* for selecting victim */
	struct victim_select_info lru_info;
	/* geometry derived at init time */
	int hpb_ver;
	int lu_max_active_rgns;
	int lu_pinned_rgn_startidx;
	int lu_pinned_end_offset;
	int lu_num_pinned_rgns;
	int srgns_per_lu;
	int rgns_per_lu;
	int srgns_per_rgn;
	int srgn_mem_size;
	int entries_per_rgn_shift;
	int entries_per_rgn_mask;
	int entries_per_srgn;
	int entries_per_srgn_shift;
	int entries_per_srgn_mask;
	int dwords_per_srgn;
	unsigned long long srgn_unit_size;
	int mpage_bytes;
	int mpages_per_srgn;
	int lu_num_blocks;
	/* for debug */
	int alloc_mctx;
	int debug_free_table;
	bool force_disable;
	bool force_map_req_disable;
	bool debug;
	atomic64_t hit;
	atomic64_t miss;
	atomic64_t rb_noti_cnt;
	atomic64_t rb_active_cnt;
	atomic64_t rb_inactive_cnt;
	atomic64_t map_req_cnt;
	atomic64_t pre_req_cnt;
};
/* sysfs attribute with per-LU show/store callbacks */
struct ufshpb_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct ufshpb_lu *hpb, char *buf);
	ssize_t (*store)(struct ufshpb_lu *hpb, const char *, size_t);
};
struct ufs_hba;
struct ufshcd_lrb;
/* Entry points used by the ufsfeature glue layer */
int ufshpb_prepare_pre_req(struct ufsf_feature *ufsf, struct scsi_cmnd *cmd,
			   u8 lun);
int ufshpb_prepare_add_lrbp(struct ufsf_feature *ufsf, int add_tag);
void ufshpb_end_pre_req(struct ufsf_feature *ufsf, struct request *req);
void ufshpb_get_dev_info(struct ufshpb_dev_info *hpb_dev_info, u8 *desc_buf);
void ufshpb_get_geo_info(struct ufshpb_dev_info *hpb_dev_info, u8 *geo_buf);
int ufshpb_get_lu_info(struct ufsf_feature *ufsf, u8 lun, u8 *unit_buf);
void ufshpb_init_handler(struct work_struct *work);
void ufshpb_reset_handler(struct work_struct *work);
void ufshpb_prep_fn(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp);
void ufshpb_rsp_upiu(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp);
void ufshpb_release(struct ufsf_feature *ufsf, int state);
int ufshpb_issue_req_dev_ctx(struct ufshpb_lu *hpb, unsigned char *buf,
			     int buf_length);
void ufshpb_resume(struct ufsf_feature *ufsf);
void ufshpb_suspend(struct ufsf_feature *ufsf);
#endif /* End of Header */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,482 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2017-2018 Samsung Electronics Co., Ltd.
 * Modified work Copyright (C) 2018, Google, Inc.
 * Modified work Copyright (C) 2019 SK hynix
 *
 * SK hynix host-aware HPB (SKHPB) driver interface: descriptors, region
 * bookkeeping, map/response request wrappers, and debug logging macros.
 */
#ifndef _SKHPB_H_
#define _SKHPB_H_
#include <linux/spinlock.h>
#include <linux/circ_buf.h>
#include <linux/workqueue.h>
/* Version info */
#define SKHPB_DD_VER 0x010506	/* device-driver implementation version */
/* QUIRKs */
/* Use READ(16) instead of the HPB_READ command.
 * This is a workaround to counter the QCT ICE issue. */
#define SKHPB_QUIRK_USE_READ_16_FOR_ENCRYPTION (1 << 0)
/* This quirk makes the HPB driver always work in Device Control Mode,
 * to cover the old Configuration descriptor format that interprets
 * the bHPBControl field as RESERVED. */
#define SKHPB_QUIRK_ALWAYS_DEVICE_CONTROL_MODE (1 << 1)
/* Discard SubRegion activation hint information that has been processed
 * when the host enters RPM/SPM sleep.
 * Must not be set as a bit in ufs_quirks.h. */
#define SKHPB_QUIRK_PURGE_HINT_INFO_WHEN_SLEEP (1 << 20)
/* Constant values */
#define SKHPB_SECTOR 512
#define SKHPB_BLOCK 4096
#define SKHPB_SECTORS_PER_BLOCK (SKHPB_BLOCK / SKHPB_SECTOR)
#define SKHPB_BITS_PER_DWORD 32
#define SKHPB_MAX_ACTIVE_NUM 2		/* max active hints per response */
#define SKHPB_MAX_INACTIVE_NUM 2	/* max inactive hints per response */
#define SKHPB_ENTRY_SIZE 0x08		/* bytes per HPB map entry */
#define SKHPB_ENTREIS_PER_OS_PAGE (PAGE_SIZE / SKHPB_ENTRY_SIZE)
/* Descriptor sizes/bits */
#define SKHPB_UFS_FEATURE_SUPPORT_HPB_BIT 0x80
#define SKHPB_QUERY_DESC_DEVICE_MAX_SIZE 0x43
#define SKHPB_QUERY_DESC_CONFIGURAION_MAX_SIZE 0xE6
#define SKHPB_QUERY_DESC_UNIT_MAX_SIZE 0x29
#define SKHPB_QUERY_DESC_GEOMETRY_MAX_SIZE 0x4D
/* Configuration descriptor offsets for HPB */
#define SKHPB_CONF_LU_ENABLE 0x00
#define SKHPB_CONF_ACTIVE_REGIONS 0x10
#define SKHPB_CONF_PINNED_START 0x12
#define SKHPB_CONF_PINNED_NUM 0x14
/* Parameter Macros */
#define SKHPB_DEV(h) ((h)->hba->dev)
#define SKHPB_MAX_BVEC_SIZE 128
/* Used for HPB activation */
#define SKHPB_CONFIG_LEN 0xd0
#define SKHPB_READ_LARGE_CHUNK_SUPPORT
#define SKHPB_READ_LARGE_CHUNK_MAX_BLOCK_COUNT (128) /* TRANSFER LENGTH: 8bit */
/* Physical page number (map entry) type */
typedef u64 skhpb_t;
/* bLUEnable values in the unit descriptor */
enum skhpb_lu_set {
	LU_DISABLE = 0x00,
	LU_ENABLE = 0x01,
	LU_HPB_ENABLE = 0x02,
	LU_SET_MAX,
};
/* Raw configuration descriptor: device header + per-LU unit blocks */
struct skhpb_config_desc {
	unsigned char conf_dev_desc[16];
	unsigned char unit[UFS_UPIU_MAX_GENERAL_LUN][24];
};
/* Response UPIU types */
#define SKHPB_RSP_NONE 0x00
#define SKHPB_RSP_REQ_REGION_UPDATE 0x01
#define SKHPB_RSP_HPB_RESET 0x02
#define SKHPB_PER_ACTIVE_INFO_BYTES 4
#define SKHPB_PER_INACTIVE_INFO_BYTES 2
/* Vendor defined OPCODEs */
#define SKHPB_READ 0xF8
#define SKHPB_READ_BUFFER 0xF9
#define SKHPB_WRITE_BUFFER 0xFA
#define SKHPB_DEV_DATA_SEG_LEN 0x14
#define SKHPB_DEV_SENSE_SEG_LEN 0x12
#define SKHPB_DEV_DES_TYPE 0x80
#define SKHPB_DEV_ADDITIONAL_LEN 0x10
/* BYTE SHIFT helpers for (de)serializing little-endian fields */
#define SKHPB_ZERO_BYTE_SHIFT 0
#define SKHPB_ONE_BYTE_SHIFT 8
#define SKHPB_TWO_BYTE_SHIFT 16
#define SKHPB_THREE_BYTE_SHIFT 24
#define SKHPB_SHIFT_BYTE_0(num) ((num) << SKHPB_ZERO_BYTE_SHIFT)
#define SKHPB_SHIFT_BYTE_1(num) ((num) << SKHPB_ONE_BYTE_SHIFT)
#define SKHPB_GET_BYTE_0(num) (((num) >> SKHPB_ZERO_BYTE_SHIFT) & 0xff)
#define SKHPB_GET_BYTE_1(num) (((num) >> SKHPB_ONE_BYTE_SHIFT) & 0xff)
#define SKHPB_GET_BYTE_2(num) (((num) >> SKHPB_TWO_BYTE_SHIFT) & 0xff)
#define SKHPB_GET_BYTE_3(num) (((num) >> SKHPB_THREE_BYTE_SHIFT) & 0xff)
#define REGION_UNIT_SIZE(bit_offset) (0x01 << (bit_offset))
/* Driver-wide SKHPB state; negative values are failure/teardown states */
enum SKHPB_STATE {
	SKHPB_PRESENT = 1,
	SKHPB_NOT_SUPPORTED = -1,
	SKHPB_FAILED = -2,
	SKHPB_NEED_INIT = 0,
	SKHPB_RESET = -3,
};
enum SKHPB_BUFFER_MODE {
	R_BUFFER = 0,
	W_BUFFER = 1,
};
enum SKHPB_CMD {
	SKHPB_CMD_READ = 0,
	SKHPB_CMD_WRITE = 1,
	SKHPB_CMD_DISCARD = 2,
	SKHPB_CMD_OTHERS = 3,
};
enum SKHPB_REGION_STATE {
	SKHPB_REGION_INACTIVE,
	SKHPB_REGION_ACTIVE,
};
enum SKHPB_SUBREGION_STATE {
	SKHPB_SUBREGION_UNUSED,
	SKHPB_SUBREGION_DIRTY,
	SKHPB_SUBREGION_CLEAN,
	SKHPB_SUBREGION_ISSUED,
};
/* bHPBControl: who drives region activation (host vs. device) */
enum SKHPB_CONTROL_MODE {
	HOST_CTRL_MODE = 0,
	DEV_CTRL_MODE = 1,
};
enum SKHPB_RST_TIME {
	SKHPB_MAP_RSP_DISABLE = 0,
	SKHPB_MAP_RSP_ENABLE = 1,
};
/* HPB-relevant fields parsed from the device/geometry descriptors */
struct skhpb_func_desc {
	/*** Device Descriptor ***/
	/* 06h bNumberLU */
	int lu_cnt;
	/* 10h wSpecVersion */
	u16 spec_ver;
	/* 40h HPB Version */
	u16 hpb_ver;
	/* 42h HPB control mode */
	u8 hpb_control_mode;
	/*** Geometry Descriptor ***/
	/* 48h bHPBRegionSize (UNIT: 512KB) */
	u8 hpb_region_size;
	/* 49h bHPBNumberLU */
	u8 hpb_number_lu;
	/* 4Ah bHPBSubRegionSize */
	u8 hpb_subregion_size;
	/* 4B:4Ch wDeviceMaxActiveHPBRegions */
	u16 hpb_device_max_active_regions;
};
/* HPB-relevant fields parsed from a unit descriptor */
struct skhpb_lu_desc {
	/*** Unit Descriptor ****/
	/* 03h bLUEnable */
	int lu_enable;
	/* 06h lu queue depth info*/
	int lu_queue_depth;
	/* 0Ah bLogicalBlockSize. default 0x0C = 4KB */
	int lu_logblk_size;
	/* 0Bh qLogicalBlockCount. same as the read_capacity ret val. */
	u64 lu_logblk_cnt;
	/* 23h:24h wLUMaxActiveHPBRegions */
	u16 lu_max_active_hpb_regions;
	/* 25h:26h wHPBPinnedRegionStartIdx */
	u16 hpb_pinned_region_startidx;
	/* 27h:28h wNumHPBPinnedRegions */
	u16 lu_num_hpb_pinned_regions;
	/* if 03h value is 02h, hpb_enable is set. */
	bool lu_hpb_enable;
	int lu_hpb_pinned_end_offset;
};
struct skhpb_rsp_active_list {
	u16 region[SKHPB_MAX_ACTIVE_NUM];
	u16 subregion[SKHPB_MAX_ACTIVE_NUM];
};
struct skhpb_rsp_inactive_list {
	u16 region[SKHPB_MAX_INACTIVE_NUM];
};
struct skhpb_rsp_update_entry {
	unsigned int lpn;
	skhpb_t ppn;
};
/* Decoded region-update hints queued for the response worker */
struct skhpb_rsp_info {
	int type;
	int active_cnt;
	int inactive_cnt;
	struct skhpb_rsp_active_list active_list;
	struct skhpb_rsp_inactive_list inactive_list;
	__u64 RSP_start;		/* timestamp: hint received */
	__u64 RSP_tasklet_enter;	/* timestamp: worker entered */
	struct list_head list_rsp_info;
};
/*
 * Sense-data area of a Response UPIU carrying SKHPB hints.
 * Layout mirrors the on-the-wire format; do not reorder fields.
 */
struct skhpb_rsp_field {
	u8 sense_data_len[2];
	u8 desc_type;
	u8 additional_len;
	u8 hpb_type;
	u8 lun;
	u8 active_region_cnt;
	u8 inactive_region_cnt;
	u8 hpb_active_field[8];
	u8 hpb_inactive_field[4];
};
/* Backing pages + dirty bitmap for one subregion's map entries */
struct skhpb_map_ctx {
	struct page **m_page;
	unsigned int *ppn_dirty;
	struct list_head list_table;
};
struct skhpb_subregion {
	struct skhpb_map_ctx *mctx;
	enum SKHPB_SUBREGION_STATE subregion_state;
	int region;
	int subregion;
	bool last;	/* last (possibly shorter) subregion of the LU */
	struct list_head list_subregion;
};
struct skhpb_region {
	struct skhpb_subregion *subregion_tbl;
	enum SKHPB_REGION_STATE region_state;
	bool is_pinned;
	int region;
	int subregion_count;
	/* below information is used by lru */
	struct list_head list_region;
	int hit_count;
};
/* In-flight READ/WRITE BUFFER map request for one subregion */
struct skhpb_map_req {
	struct skhpb_lu *hpb;
	struct skhpb_map_ctx *mctx;
	struct bio bio;
	struct bio *pbio;
	struct bio_vec bvec[SKHPB_MAX_BVEC_SIZE];
	void (*end_io)(struct request *rq, int err);
	void *end_io_data;
	int region;
	int subregion;
	int subregion_mem_size;
	int lun;
	int retry_cnt;
	/* for debug : RSP Profiling */
	__u64 RSP_start; /* get the request from device */
	__u64 RSP_issue; /* issue scsi cmd */
	__u64 RSP_end;   /* complete the request */
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct list_head list_map_req;
	int rwbuffer_flag;
};
/* Victim (eviction) selection policy */
enum SKHPB_SELECTION_TYPE {
	TYPE_LRU = 1,
	TYPE_LFU = 2,
};
struct skhpb_victim_select_info {
	int selection_type;
	struct list_head lru;
	int max_lru_active_count; /* supported hpb #region - pinned #region */
	atomic64_t active_count;
};
/* Per-LU SKHPB instance: region table, request pools, workers and stats */
struct skhpb_lu {
	struct skhpb_region *region_tbl;
	struct skhpb_rsp_info *rsp_info;
	struct skhpb_map_req *map_req;
	struct list_head lh_map_ctx;
	struct list_head lh_subregion_req;
	struct list_head lh_rsp_info;
	struct list_head lh_rsp_info_free;
	struct list_head lh_map_req_free;
	struct list_head lh_map_req_retry;
	int debug_free_table;
	bool lu_hpb_enable;
	/* workers */
	struct delayed_work skhpb_pinned_work;
	struct delayed_work skhpb_map_req_retry_work;
	struct work_struct skhpb_rsp_work;
	struct bio_vec bvec[SKHPB_MAX_BVEC_SIZE];
	/* geometry derived at init time */
	int subregions_per_lu;
	int regions_per_lu;
	int subregion_mem_size;
	int last_subregion_mem_size;
	/* for selecting victim */
	struct skhpb_victim_select_info lru_info;
	int hpb_ver;
	int lu_max_active_regions;
	int entries_per_subregion;
	int entries_per_subregion_shift;
	int entries_per_subregion_mask;
	int entries_per_region_shift;
	int entries_per_region_mask;
	int subregions_per_region;
	int dwords_per_subregion;
	unsigned long long subregion_unit_size;
	bool identical_size;	/* all subregions have the same size */
#define BITS_PER_PPN_DIRTY (BITS_PER_BYTE * sizeof(unsigned int))
	int ppn_dirties_per_subregion;
	int mpage_bytes;
	int mpages_per_subregion;
	/* for debug constant variables */
	unsigned long long lu_num_blocks;
	u8 lun;
	struct ufs_hba *hba;
	spinlock_t hpb_lock;
	spinlock_t rsp_list_lock;
	spinlock_t map_list_lock;
	/* sysfs */
	struct kobject kobj;
	struct mutex sysfs_lock;
	struct skhpb_sysfs_entry *sysfs_entries;
	bool hpb_control_mode;
	/* for debug */
	bool force_hpb_read_disable;
	bool force_map_req_disable;
	bool read_buf_debug;
	atomic64_t hit;
	atomic64_t size_miss;
	atomic64_t region_miss;
	atomic64_t subregion_miss;
	atomic64_t entry_dirty_miss;
	atomic64_t rb_noti_cnt;
	atomic64_t rb_fail;
	atomic64_t reset_noti_cnt;
	atomic64_t w_map_req_cnt;
#if defined(SKHPB_READ_LARGE_CHUNK_SUPPORT)
	atomic64_t lc_entry_dirty_miss;
	atomic64_t lc_reg_subreg_miss;
	atomic64_t lc_hit;
#endif
	atomic64_t map_req_cnt;
	atomic64_t region_add;
	atomic64_t region_evict;
	atomic64_t canceled_resp;
	atomic64_t canceled_map_req;
	atomic64_t alloc_map_req_cnt;
};
/* sysfs attribute with per-LU show/store callbacks */
struct skhpb_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct skhpb_lu *hpb, char *buf);
	ssize_t (*store)(struct skhpb_lu *hpb, const char *, size_t);
};
struct ufshcd_lrb;
/* Entry points called from ufshcd */
void ufshcd_init_hpb(struct ufs_hba *hba);
void skhpb_init_handler(struct work_struct *work);
void skhpb_prep_fn(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
void skhpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
void skhpb_suspend(struct ufs_hba *hba);
void skhpb_resume(struct ufs_hba *hba);
void skhpb_release(struct ufs_hba *hba, int state);
int skhpb_issue_req_dev_ctx(struct skhpb_lu *hpb, unsigned char *buf,
		int buf_length);
int skhpb_control_validation(struct ufs_hba *hba,
		struct skhpb_config_desc *config);
extern u32 skhpb_debug_mask;
extern int debug_map_req;
/* Debug log verbosity levels and the derived mask bits */
enum SKHPB_LOG_LEVEL {
	SKHPB_LOG_LEVEL_OFF = 0,
	SKHPB_LOG_LEVEL_ERR = 1,
	SKHPB_LOG_LEVEL_INFO = 2,
	SKHPB_LOG_LEVEL_DEBUG = 3,
	SKHPB_LOG_LEVEL_HEX = 4,
};
enum SKHPB_LOG_MASK {
	SKHPB_LOG_OFF = SKHPB_LOG_LEVEL_OFF, /* 0 */
	SKHPB_LOG_ERR = (1U << SKHPB_LOG_LEVEL_ERR), /* 2 */
	SKHPB_LOG_INFO = (1U << SKHPB_LOG_LEVEL_INFO), /* 4 */
	SKHPB_LOG_DEBUG = (1U << SKHPB_LOG_LEVEL_DEBUG), /* 8 */
	SKHPB_LOG_HEX = (1U << SKHPB_LOG_LEVEL_HEX), /* 16 */
};
/* Logging helpers gated on skhpb_debug_mask */
#define SKHPB_DRIVER_E(fmt, args...)					\
	do {								\
		if (likely(skhpb_debug_mask & SKHPB_LOG_ERR))		\
			pr_err("[HPB E][%s:%d] " fmt, __func__, __LINE__, ##args); \
	} while (0)
#define SKHPB_DRIVER_I(fmt, args...)					\
	do {								\
		if (unlikely(skhpb_debug_mask & SKHPB_LOG_INFO))	\
			pr_err("[HPB][%s:%d] " fmt, __func__, __LINE__, ##args); \
	} while (0)
#define SKHPB_DRIVER_D(fmt, args...)					\
	do {								\
		if (unlikely(skhpb_debug_mask & SKHPB_LOG_DEBUG))	\
			printk(KERN_DEBUG "[HPB][%s:%d] " fmt, __func__, __LINE__, ##args); \
	} while (0)
#define SKHPB_DRIVER_HEXDUMP(fmt, args...)				\
	do {								\
		if (unlikely(skhpb_debug_mask & SKHPB_LOG_HEX)) {	\
			print_hex_dump(KERN_DEBUG, fmt, DUMP_PREFIX_ADDRESS, ##args); \
		}							\
	} while (0)
/* Profile a map request's start->issue->end latency when debug_map_req is set */
#define SKHPB_MAP_REQ_TIME(map_req, val, print)				\
	do {								\
		if (unlikely(debug_map_req)) {				\
			val = ktime_to_us(ktime_get());			\
			if (print) {					\
				SKHPB_DRIVER_I("SKHPB COMPL BUFFER %d - %d\n", \
					map_req->region, map_req->subregion); \
				SKHPB_DRIVER_I("start~issue = %lluus, issue~end = %lluus\n", \
					map_req->RSP_issue - map_req->RSP_start, \
					map_req->RSP_end - map_req->RSP_issue); \
			}						\
		}							\
	} while (0)
#define SKHPB_RSP_TIME(val)						\
	do {								\
		if (unlikely(debug_map_req)) {				\
			val = ktime_to_us(ktime_get());			\
		}							\
	} while (0)
#endif /* End of Header */

1714
drivers/scsi/ufs/ufstw.c Normal file

File diff suppressed because it is too large Load Diff

147
drivers/scsi/ufs/ufstw.h Normal file
View File

@@ -0,0 +1,147 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2017-2018 Samsung Electronics Co., Ltd.
 *
 * UFS Turbo Write (TW) driver interface: per-LU state, flush worker
 * tunables, and the entry points used by the ufsfeature glue code.
 */
#ifndef _UFSTW_H_
#define _UFSTW_H_
#include <linux/interrupt.h>
#include <linux/sysfs.h>
#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include "../../../block/blk.h"
#define UFSTW_VER 0x0101	/* supported TW feature version */
#define UFSTW_DD_VER 0x0103	/* device-driver implementation version */
#define UFSTW_FLUSH_CHECK_PERIOD_MS 1000
#define UFSTW_FLUSH_WORKER_TH_MIN 3	/* buffer threshold: stop flushing */
#define UFSTW_FLUSH_WORKER_TH_MAX 8	/* buffer threshold: start flushing */
#define UFSTW_LIFETIME_SECT 2097152 /* 1GB */
#define UFSTW_MAX_LIFETIME_VALUE 0x0B
/* TW 1.0.1[31], TW 1.1.0[7] */
#define MASK_UFSTW_LIFETIME_NOT_GUARANTEE 0x80000080
/*
 * UFSTW DEBUG: printk gated on the feature-level tw_debug flag.
 */
#define TW_DEBUG(ufsf, msg, args...)					\
	do { if (ufsf->tw_debug)					\
		printk(KERN_ERR "%s:%d " msg "\n",			\
		       __func__, __LINE__, ##args);			\
	} while (0)
/* Flush state machine values reported via tw_flush_status */
enum {
	FLUSH_IDLE = 0,
	FLUSH_RUN,
	FLUSH_COMPLETE,
	FLUSH_FAIL,
	FLUSH_NUM_OF_STATE,
};
/* Driver-wide TW state; negative values are failure/teardown states */
enum UFSTW_STATE {
	TW_NOT_SUPPORTED = -1,
	TW_NEED_INIT = 0,
	TW_PRESENT = 1,
	TW_FAILED = -2,
	TW_RESET = -3,
};
/* TW operating mode selected via sysfs */
enum {
	TW_MODE_DISABLED,
	TW_MODE_MANUAL,
	TW_MODE_FS,
	TW_MODE_NUM
};
/* Exception-event handling mode */
enum {
	TW_EE_MODE_DISABLE,
	TW_EE_MODE_AUTO,
	TW_EE_MODE_NUM
};
/* Pending change of the fTurboWriteEn flag */
enum {
	TW_FLAG_ENABLE_NONE = 0,
	TW_FLAG_ENABLE_CLEAR = 1,
	TW_FLAG_ENABLE_SET = 2,
};
/* TW capabilities read from the device/geometry descriptors */
struct ufstw_dev_info {
	bool tw_device;
	/* from Device Descriptor */
	u16 tw_ver;
	u8 tw_buf_no_reduct;	/* buffer without user-capacity reduction */
	u8 tw_buf_type;
	/* from Geometry Descriptor */
	u8 tw_number_lu;
};
/* Per-LU Turbo Write instance */
struct ufstw_lu {
	struct ufsf_feature *ufsf;	/* back-pointer to feature glue */
	int lun;
	/* Flags */
	bool tw_flush_enable;
	bool tw_flush_during_hibern_enter;
	struct mutex flush_lock;
	/* lifetime estimate */
	unsigned int tw_lifetime_est;
	spinlock_t lifetime_lock;
	u32 stat_write_sec;	/* sectors written since last lifetime update */
	struct work_struct tw_lifetime_work;
	/* Attributes */
	unsigned int tw_flush_status;
	unsigned int tw_available_buffer_size;
	unsigned int tw_current_tw_buffer_size;
	/* mode manual/fs */
	atomic_t tw_mode;
	bool tw_enable;
	atomic_t active_cnt;
	struct mutex mode_lock;
	/* Workers */
	struct delayed_work tw_flush_work;
	struct delayed_work tw_flush_h8_work;
	unsigned long next_q;		/* next scheduled flush check (jiffies?) -- confirm */
	unsigned int flush_th_max;
	unsigned int flush_th_min;
	/* for sysfs */
	struct kobject kobj;
	struct mutex sysfs_lock;
	struct ufstw_sysfs_entry *sysfs_entries;
};
/* sysfs attribute with per-LU show/store callbacks */
struct ufstw_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct ufstw_lu *tw, char *buf);
	ssize_t (*store)(struct ufstw_lu *tw, const char *buf, size_t count);
};
struct ufshcd_lrb;
/* Entry points used by the ufsfeature glue layer */
void ufstw_get_dev_info(struct ufstw_dev_info *tw_dev_info, u8 *desc_buf);
void ufstw_get_geo_info(struct ufstw_dev_info *tw_dev_info, u8 *geo_buf);
int ufstw_get_lu_info(struct ufsf_feature *ufsf, unsigned int lun, u8 *lu_buf);
void ufstw_init(struct ufsf_feature *ufsf);
void ufstw_prep_fn(struct ufsf_feature *ufsf, struct ufshcd_lrb *lrbp);
void ufstw_init_work_fn(struct work_struct *work);
void ufstw_ee_handler(struct ufsf_feature *ufsf);
void ufstw_error_handler(struct ufsf_feature *ufsf);
void ufstw_reset_work_fn(struct work_struct *work);
void ufstw_suspend(struct ufsf_feature *ufsf);
void ufstw_resume(struct ufsf_feature *ufsf);
void ufstw_release(struct kref *kref);
bool ufstw_need_flush(struct ufsf_feature *ufsf);
#endif /* End of Header */

View File

@@ -676,6 +676,9 @@ struct request_queue {
#define BLK_MAX_WRITE_HINTS 5
u64 write_hints[BLK_MAX_WRITE_HINTS];
#ifdef CONFIG_SCSI_UFS_TW
bool turbo_write_dev;
#endif
};
#define QUEUE_FLAG_QUEUED 0 /* uses generic tag queueing */

View File

@@ -17,7 +17,7 @@
#define UFS_IOCTL_GET_FW_VER 0x5390 /* Query production revision level */
#define UFS_IOCTL_RPMB 0x5391 /* For RPMB access */
#if defined(CONFIG_UFSFEATURE)
#if defined(CONFIG_SCSI_UFS_FEATURE)
#define UFSFEATURE_QUERY_OPCODE 0x5500
#endif