From 3e91b8bf9c98e07ef19defc55622403bed43125b Mon Sep 17 00:00:00 2001 From: Sachin Mohan Gadag Date: Fri, 28 Feb 2020 18:52:21 +0530 Subject: [PATCH 001/141] RM: dts: msm: add support for gpio based jack detection on qcs610 Add gpio based detection support for linein and lineout jack types. Change-Id: Ic854ad3065aeb176ccab0144f343f4e716738ef7 Signed-off-by: Surendar Karka Signed-off-by: Sachin Mohan Gadag --- arch/arm64/boot/dts/qcom/qcs610-ipc.dtsi | 9 +++++- arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi | 31 +++++++++++++++++++- 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/qcs610-ipc.dtsi b/arch/arm64/boot/dts/qcom/qcs610-ipc.dtsi index 0cf10c79bc4d..f3dfc308b749 100644 --- a/arch/arm64/boot/dts/qcom/qcs610-ipc.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs610-ipc.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -66,6 +66,13 @@ qcom,wsa-max-devs = <1>; qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>; qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft"; + qcom,linein-det-swh = <1>; + qcom,lineout-det-swh = <1>; + qcom,linein-det-gpio = <&tlmm 1 0>; + qcom,lineout-det-gpio = <&tlmm 60 0>; + pinctrl-names = "default"; + pinctrl-0 = <&jack_det_linein_default + &jack_det_lineout_default>; }; &pm6150_charger { diff --git a/arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi index f93318091cc8..e6706dadcafc 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1146,6 +1146,35 @@ }; }; + + gpio_jack_det_line_in { + jack_det_linein_default: jack_det_linein_default { + mux { + pins = "gpio1"; + function = "gpio"; + }; + config { + pins = "gpio1"; + bias-pull-up; /* pull up */ + input-enable; + }; + }; + }; + + gpio_jack_det_line_out { + jack_det_lineout_default: jack_det_lineout_default { + mux { + pins = "gpio60"; + function = "gpio"; + }; + config { + pins = "gpio60"; + bias-pull-up; /* pull up */ + input-enable; + }; + }; + }; + ter_i2s_sck_ws { ter_i2s_sck_sleep: ter_i2s_sck_sleep { mux { From 9d033252ff1024e93f7dabefeb06149476a83734 Mon Sep 17 00:00:00 2001 From: Prateek Sood Date: Fri, 22 Nov 2019 17:12:01 +0530 Subject: [PATCH 002/141] ARM: dts: msm: Add WLAN PD auxilary minidump ID for MSS on SM6150 Add the auxilary minidump ID for the WLAN PD, so that the dumps for the PD can be collected during modem SSR. Change-Id: Iae9aac864b57c91f4dbc3f7f864b61a347c3eb42 Signed-off-by: Prateek Sood --- arch/arm64/boot/dts/qcom/sm6150.dtsi | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/boot/dts/qcom/sm6150.dtsi b/arch/arm64/boot/dts/qcom/sm6150.dtsi index 4eed5815d029..20b02757adf1 100644 --- a/arch/arm64/boot/dts/qcom/sm6150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150.dtsi @@ -2426,6 +2426,7 @@ qcom,smem-id = <421>; qcom,signal-aop; qcom,minidump-id = <3>; + qcom,aux-minidump-ids = <4>; qcom,complete-ramdump; /* Inputs from mss */ From b34c252b62a5aabd95a326a390639fd64c90e48b Mon Sep 17 00:00:00 2001 From: Prateek Sood Date: Fri, 22 Nov 2019 17:14:35 +0530 Subject: [PATCH 003/141] ARM: dts: msm: Add WLAN PD auxilary minidump ID for sdmmagpie Add the auxilary minidump ID for the WLAN PD, so that the dumps for the PD can be collected during modem SSR. 
Change-Id: I71abc97d6528a400d0580797e358dac9c00e48d3 Signed-off-by: Prateek Sood --- arch/arm64/boot/dts/qcom/sdmmagpie.dtsi | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi index e5d0292f5518..03be2826b44c 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi @@ -2629,6 +2629,7 @@ qcom,smem-id = <421>; qcom,signal-aop; qcom,minidump-id = <3>; + qcom,aux-minidump-ids = <4>; qcom,complete-ramdump; /* Inputs from mss */ From b2171caf750e4e94ff9dbdd6b219f3b0d908d805 Mon Sep 17 00:00:00 2001 From: Pankaj Gupta Date: Tue, 23 Jun 2020 17:12:46 +0530 Subject: [PATCH 004/141] msm: kgsl: Poll GDSCR to ensure CX collapse The regulator_is_enabled() API doesn't guarantee that CX gdsc has collapsed at hardware. There could be a vote on the GDSC from another subsystem like TZ. So poll the CX GDSCR register to ensure that CX has indeed collapsed. Change-Id: Id98c5318d5358b16f4277cb5d96027add63ad801 Signed-off-by: Akhil P Oommen Signed-off-by: Pankaj Gupta --- drivers/gpu/msm/a6xx_reg.h | 1 + drivers/gpu/msm/adreno_a6xx_gmu.c | 15 ++++++++++++++- drivers/gpu/msm/kgsl_gmu.c | 13 +++++++------ drivers/gpu/msm/kgsl_gmu_core.c | 12 +++++++++++- drivers/gpu/msm/kgsl_gmu_core.h | 4 +++- 5 files changed, 36 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h index 19b97a557224..d0751f298775 100644 --- a/drivers/gpu/msm/a6xx_reg.h +++ b/drivers/gpu/msm/a6xx_reg.h @@ -1076,6 +1076,7 @@ /* GPUCC registers */ #define A6XX_GPU_CC_GX_GDSCR 0x24403 #define A6XX_GPU_CC_GX_DOMAIN_MISC 0x24542 +#define A6XX_GPU_CC_CX_GDSCR 0x2441B /* GPU RSC sequencer registers */ #define A6XX_RSCC_PDC_SEQ_START_ADDR 0x23408 diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c index a4639068403b..ed7465a02cd1 100644 --- a/drivers/gpu/msm/adreno_a6xx_gmu.c +++ b/drivers/gpu/msm/adreno_a6xx_gmu.c @@ -1,4 +1,4 @@ 
-/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -829,6 +829,18 @@ static bool a6xx_gmu_gx_is_on(struct adreno_device *adreno_dev) return is_on(val); } +/* + * a6xx_gmu_cx_is_on() - Check if CX is on using GPUCC register + * @device - Pointer to KGSL device struct + */ +static bool a6xx_gmu_cx_is_on(struct kgsl_device *device) +{ + unsigned int val; + + gmu_core_regread(device, A6XX_GPU_CC_CX_GDSCR, &val); + return (val & BIT(31)); +} + /* * a6xx_gmu_sptprac_is_on() - Check if SPTP is on using pwr status register * @adreno_dev - Pointer to adreno_device @@ -1669,6 +1681,7 @@ struct gmu_dev_ops adreno_a6xx_gmudev = { .enable_lm = a6xx_gmu_enable_lm, .rpmh_gpu_pwrctrl = a6xx_gmu_rpmh_gpu_pwrctrl, .gx_is_on = a6xx_gmu_gx_is_on, + .cx_is_on = a6xx_gmu_cx_is_on, .wait_for_lowest_idle = a6xx_gmu_wait_for_lowest_idle, .wait_for_gmu_idle = a6xx_gmu_wait_for_idle, .ifpc_store = a6xx_gmu_ifpc_store, diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c index cf993e7ebbf1..2b414435dbc5 100644 --- a/drivers/gpu/msm/kgsl_gmu.c +++ b/drivers/gpu/msm/kgsl_gmu.c @@ -1538,8 +1538,9 @@ static int gmu_enable_gdsc(struct gmu_device *gmu) } #define CX_GDSC_TIMEOUT 5000 /* ms */ -static int gmu_disable_gdsc(struct gmu_device *gmu) +static int gmu_disable_gdsc(struct kgsl_device *device) { + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); int ret; unsigned long t; @@ -1561,13 +1562,13 @@ static int gmu_disable_gdsc(struct gmu_device *gmu) */ t = jiffies + msecs_to_jiffies(CX_GDSC_TIMEOUT); do { - if (!regulator_is_enabled(gmu->cx_gdsc)) + if (!gmu_core_dev_cx_is_on(device)) return 0; usleep_range(10, 100); } while (!(time_after(jiffies, t))); - if (!regulator_is_enabled(gmu->cx_gdsc)) + if (!gmu_core_dev_cx_is_on(device)) return 0; 
dev_err(&gmu->pdev->dev, "GMU CX gdsc off timeout"); @@ -1595,7 +1596,7 @@ static int gmu_suspend(struct kgsl_device *device) if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CX_GDSC)) regulator_set_mode(gmu->cx_gdsc, REGULATOR_MODE_IDLE); - gmu_disable_gdsc(gmu); + gmu_disable_gdsc(device); if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CX_GDSC)) regulator_set_mode(gmu->cx_gdsc, REGULATOR_MODE_NORMAL); @@ -1752,7 +1753,7 @@ static void gmu_stop(struct kgsl_device *device) gmu_dev_ops->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0); gmu_disable_clks(device); - gmu_disable_gdsc(gmu); + gmu_disable_gdsc(device); msm_bus_scale_client_update_request(gmu->pcl, 0); return; @@ -1862,7 +1863,7 @@ static bool gmu_is_initialized(struct kgsl_device *device) ret = gmu_dev_ops->is_initialized(adreno_dev); gmu_disable_clks(device); - gmu_disable_gdsc(gmu); + gmu_disable_gdsc(device); return ret; } diff --git a/drivers/gpu/msm/kgsl_gmu_core.c b/drivers/gpu/msm/kgsl_gmu_core.c index 1116b6b7dada..140baea8efdc 100644 --- a/drivers/gpu/msm/kgsl_gmu_core.c +++ b/drivers/gpu/msm/kgsl_gmu_core.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -275,6 +275,16 @@ void gmu_core_regrmw(struct kgsl_device *device, gmu_core_regwrite(device, offsetwords, val | bits); } +bool gmu_core_dev_cx_is_on(struct kgsl_device *device) +{ + struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device); + + if (ops && ops->cx_is_on) + return ops->cx_is_on(device); + + return true; +} + bool gmu_core_is_initialized(struct kgsl_device *device) { struct gmu_core_ops *gmu_core_ops = GMU_CORE_OPS(device); diff --git a/drivers/gpu/msm/kgsl_gmu_core.h b/drivers/gpu/msm/kgsl_gmu_core.h index 3dff7fafbe12..e2f956cc9dc7 100644 --- a/drivers/gpu/msm/kgsl_gmu_core.h +++ b/drivers/gpu/msm/kgsl_gmu_core.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -164,6 +164,7 @@ struct gmu_dev_ops { unsigned int val); unsigned int (*ifpc_show)(struct adreno_device *adreno_dev); void (*snapshot)(struct adreno_device *, struct kgsl_snapshot *); + bool (*cx_is_on)(struct kgsl_device *device); void (*halt_execution)(struct kgsl_device *device); int (*wait_for_active_transition)(struct adreno_device *adreno_dev); bool (*is_initialized)(struct adreno_device *adreno_dev); @@ -231,6 +232,7 @@ void gmu_core_blkwrite(struct kgsl_device *device, unsigned int offsetwords, void gmu_core_regrmw(struct kgsl_device *device, unsigned int offsetwords, unsigned int mask, unsigned int bits); const char *gmu_core_oob_type_str(enum oob_request req); +bool gmu_core_dev_cx_is_on(struct kgsl_device *device); bool gmu_core_is_initialized(struct kgsl_device *device); u64 gmu_core_dev_read_ao_counter(struct kgsl_device *device); #endif /* __KGSL_GMU_CORE_H */ From 
e18ef3bc1e7a408b4ceae368c0ab7d0ec70fee0c Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Wed, 17 Jun 2020 14:26:21 -0700 Subject: [PATCH 005/141] msm: ipa3: add v2x ethernet pipes Add cv2x ethernet endpoint configuration changes. Change-Id: I6d1ac9cb5a282a871722e56f51c7c027b24b6b55 Signed-off-by: Skylar Chang --- drivers/platform/msm/ipa/ipa_api.c | 2 ++ drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 40 ++++++++++++++------- include/linux/ipa_uc_offload.h | 8 ++++- include/uapi/linux/msm_ipa.h | 5 ++- 4 files changed, 41 insertions(+), 14 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c index 755910c02582..1c65cb4806f5 100644 --- a/drivers/platform/msm/ipa/ipa_api.c +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -224,6 +224,8 @@ const char *ipa_clients_strings[IPA_CLIENT_MAX] = { __stringify(IPA_CLIENT_MHI_LOW_LAT_CONS), __stringify(IPA_CLIENT_QDSS_PROD), __stringify(IPA_CLIENT_MHI_QDSS_CONS), + __stringify(IPA_CLIENT_ETHERNET2_PROD), + __stringify(IPA_CLIENT_ETHERNET2_CONS), }; /** diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index 319e6c13f5d2..ebc65c73d5fe 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -2770,6 +2770,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, QMB_MASTER_SELECT_DDR, { 12, 0, 8, 16, IPA_EE_UC, GSI_SMART_PRE_FETCH, 3 } }, + [IPA_4_5_AUTO][IPA_CLIENT_ETHERNET2_PROD] = { + true, IPA_v4_5_GROUP_CV2X, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 10, 13, 8, 16, IPA_EE_UC, GSI_SMART_PRE_FETCH, 3 } }, [IPA_4_5_AUTO][IPA_CLIENT_Q6_WAN_PROD] = { true, IPA_v4_5_GROUP_UL_DL, true, @@ -2887,6 +2893,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR, { 28, 1, 9, 9, IPA_EE_UC, GSI_SMART_PRE_FETCH, 4 } }, + 
[IPA_4_5_AUTO][IPA_CLIENT_ETHERNET2_CONS] = { + true, IPA_v4_5_GROUP_CV2X, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 25, 16, 9, 9, IPA_EE_UC, GSI_SMART_PRE_FETCH, 4 } }, [IPA_4_5_AUTO][IPA_CLIENT_Q6_LAN_CONS] = { true, IPA_v4_5_GROUP_UL_DL, false, @@ -3549,18 +3561,19 @@ bool ipa3_should_pipe_be_suspended(enum ipa_client_type client) if (client == IPA_CLIENT_USB_CONS || client == IPA_CLIENT_USB2_CONS || - client == IPA_CLIENT_USB_DPL_CONS || - client == IPA_CLIENT_MHI_QDSS_CONS || - client == IPA_CLIENT_MHI_CONS || - client == IPA_CLIENT_MHI_DPL_CONS || - client == IPA_CLIENT_HSIC1_CONS || - client == IPA_CLIENT_WLAN1_CONS || - client == IPA_CLIENT_WLAN2_CONS || - client == IPA_CLIENT_WLAN3_CONS || - client == IPA_CLIENT_WLAN4_CONS || - client == IPA_CLIENT_ODU_EMB_CONS || - client == IPA_CLIENT_ODU_TETH_CONS || - client == IPA_CLIENT_ETHERNET_CONS) + client == IPA_CLIENT_USB_DPL_CONS || + client == IPA_CLIENT_MHI_QDSS_CONS || + client == IPA_CLIENT_MHI_CONS || + client == IPA_CLIENT_MHI_DPL_CONS || + client == IPA_CLIENT_HSIC1_CONS || + client == IPA_CLIENT_WLAN1_CONS || + client == IPA_CLIENT_WLAN2_CONS || + client == IPA_CLIENT_WLAN3_CONS || + client == IPA_CLIENT_WLAN4_CONS || + client == IPA_CLIENT_ODU_EMB_CONS || + client == IPA_CLIENT_ODU_TETH_CONS || + client == IPA_CLIENT_ETHERNET_CONS || + client == IPA_CLIENT_ETHERNET2_CONS) return true; return false; @@ -5719,6 +5732,7 @@ int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in) param_in->client == IPA_CLIENT_HSIC1_PROD || param_in->client == IPA_CLIENT_ODU_PROD || param_in->client == IPA_CLIENT_ETHERNET_PROD || + param_in->client == IPA_CLIENT_ETHERNET2_PROD || param_in->client == IPA_CLIENT_WIGIG_PROD || param_in->client == IPA_CLIENT_AQC_ETHERNET_PROD) { result = ipa3_cfg_ep_metadata(ipa_ep_idx, &meta); @@ -9006,6 +9020,8 @@ int ipa3_get_prot_id(enum ipa_client_type client) case IPA_CLIENT_USB_CONS: prot_id = IPA_HW_PROTOCOL_USB; break; + case 
IPA_CLIENT_ETHERNET2_PROD: + case IPA_CLIENT_ETHERNET2_CONS: case IPA_CLIENT_ETHERNET_PROD: case IPA_CLIENT_ETHERNET_CONS: prot_id = IPA_HW_PROTOCOL_ETH; diff --git a/include/linux/ipa_uc_offload.h b/include/linux/ipa_uc_offload.h index 2e0905b14605..e77f38b7c400 100644 --- a/include/linux/ipa_uc_offload.h +++ b/include/linux/ipa_uc_offload.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -26,6 +26,7 @@ enum ipa_uc_offload_proto { IPA_UC_INVALID = 0, IPA_UC_WDI = 1, IPA_UC_NTN = 2, + IPA_UC_NTN_V2X = 3, IPA_UC_MAX_PROT_SIZE }; @@ -95,6 +96,7 @@ struct ntn_buff_smmu_map { * @num_buffers: Rx/Tx buffer pool size (in terms of elements) * @data_buff_size: size of the each data buffer allocated in DDR * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN Ring's + * @u8 db_mode: 0 means irq mode, 1 means db mode * tail pointer */ struct ipa_ntn_setup_info { @@ -117,6 +119,8 @@ struct ipa_ntn_setup_info { u32 data_buff_size; phys_addr_t ntn_reg_base_ptr_pa; + + u8 db_mode; }; /** @@ -182,10 +186,12 @@ struct ipa_uc_offload_conn_out_params { * struct ipa_perf_profile - To set BandWidth profile * * @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS) + * @proto: uC offload protocol type * @max_supported_bw_mbps: maximum bandwidth needed (in Mbps) */ struct ipa_perf_profile { enum ipa_client_type client; + enum ipa_uc_offload_proto proto; u32 max_supported_bw_mbps; }; diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index 497526feb0ca..9df94210f549 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -417,9 +417,12 @@ enum ipa_client_type { IPA_CLIENT_QDSS_PROD = 110, IPA_CLIENT_MHI_QDSS_CONS = 111, + + IPA_CLIENT_ETHERNET2_PROD = 112, + IPA_CLIENT_ETHERNET2_CONS = 
113, }; -#define IPA_CLIENT_MAX (IPA_CLIENT_MHI_QDSS_CONS + 1) +#define IPA_CLIENT_MAX (IPA_CLIENT_ETHERNET2_CONS + 1) #define IPA_CLIENT_WLAN2_PROD IPA_CLIENT_A5_WLAN_AMPDU_PROD #define IPA_CLIENT_Q6_DL_NLO_DATA_PROD IPA_CLIENT_Q6_DL_NLO_DATA_PROD From 3595311ff9cd3d3f97a384cf5615eb19e09b4bc8 Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Fri, 26 Jun 2020 17:09:19 -0700 Subject: [PATCH 006/141] msm: ipa3: add eth ep_pair info Add ep details corresponding to RMNET_CV2X tethering over eth. Change-Id: I0285887470c950a73e61dd51bf673bbc650d40bb Signed-off-by: Skylar Chang --- drivers/platform/msm/ipa/ipa_v3/ipa.c | 72 +++++++++++++++++++++++++++ include/uapi/linux/msm_ipa.h | 6 ++- 2 files changed, 77 insertions(+), 1 deletion(-) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index a1191427c30c..c7841049da57 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -862,6 +862,74 @@ static void ipa3_get_pcie_ep_info( } } +static void ipa3_get_eth_ep_info( + struct ipa_ioc_get_ep_info *ep_info, + struct ipa_ep_pair_info *pair_info + ) +{ + int ep_index = -1, i; + + ep_info->num_ep_pairs = 0; + for (i = 0; i < ep_info->max_ep_pairs; i++) { + pair_info[i].consumer_pipe_num = -1; + pair_info[i].producer_pipe_num = -1; + pair_info[i].ep_id = -1; + } + + ep_index = ipa3_get_ep_mapping(IPA_CLIENT_ETHERNET2_PROD); + + if ((ep_index != -1) && ipa3_ctx->ep[ep_index].valid) { + pair_info[ep_info->num_ep_pairs].consumer_pipe_num = ep_index; + ep_index = ipa3_get_ep_mapping(IPA_CLIENT_ETHERNET2_CONS); + if ((ep_index != -1) && (ipa3_ctx->ep[ep_index].valid)) { + pair_info[ep_info->num_ep_pairs].producer_pipe_num = + ep_index; + pair_info[ep_info->num_ep_pairs].ep_id = + IPA_ETH1_EP_ID; + + IPADBG("ep_pair_info consumer_pipe_num %d", + pair_info[ep_info->num_ep_pairs].consumer_pipe_num); + IPADBG(" producer_pipe_num %d ep_id %d\n", + pair_info[ep_info->num_ep_pairs].producer_pipe_num, + 
pair_info[ep_info->num_ep_pairs].ep_id); + ep_info->num_ep_pairs++; + } else { + pair_info[ep_info->num_ep_pairs].consumer_pipe_num = -1; + IPADBG("ep_pair_info consumer_pipe_num %d", + pair_info[ep_info->num_ep_pairs].consumer_pipe_num); + IPADBG(" producer_pipe_num %d ep_id %d\n", + pair_info[ep_info->num_ep_pairs].producer_pipe_num, + pair_info[ep_info->num_ep_pairs].ep_id); + } + } + + ep_index = ipa3_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD); + + if ((ep_index != -1) && ipa3_ctx->ep[ep_index].valid) { + pair_info[ep_info->num_ep_pairs].consumer_pipe_num = ep_index; + ep_index = ipa3_get_ep_mapping(IPA_CLIENT_ETHERNET_CONS); + if ((ep_index != -1) && (ipa3_ctx->ep[ep_index].valid)) { + pair_info[ep_info->num_ep_pairs].producer_pipe_num = + ep_index; + pair_info[ep_info->num_ep_pairs].ep_id = + IPA_ETH0_EP_ID; + + IPADBG("ep_pair_info consumer_pipe_num %d", + pair_info[ep_info->num_ep_pairs].consumer_pipe_num); + IPADBG(" producer_pipe_num %d ep_id %d\n", + pair_info[ep_info->num_ep_pairs].producer_pipe_num, + pair_info[ep_info->num_ep_pairs].ep_id); + ep_info->num_ep_pairs++; + } else { + pair_info[ep_info->num_ep_pairs].consumer_pipe_num = -1; + IPADBG("ep_pair_info consumer_pipe_num %d", + pair_info[ep_info->num_ep_pairs].consumer_pipe_num); + IPADBG(" producer_pipe_num %d ep_id %d\n", + pair_info[ep_info->num_ep_pairs].producer_pipe_num, + pair_info[ep_info->num_ep_pairs].ep_id); + } + } +} static int ipa3_get_ep_info(struct ipa_ioc_get_ep_info *ep_info, u8 *param) @@ -878,6 +946,10 @@ static int ipa3_get_ep_info(struct ipa_ioc_get_ep_info *ep_info, ipa3_get_pcie_ep_info(ep_info, pair_info); break; + case IPA_DATA_EP_TYP_ETH: + ipa3_get_eth_ep_info(ep_info, pair_info); + break; + default: IPAERR_RL("Undefined ep_type %d\n", ep_info->ep_type); ret = -EFAULT; diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index 9df94210f549..790effd175da 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -2342,13 +2342,17 
@@ struct ipa_ioc_gsb_info { #define IPA_PCIE0_EP_ID 21 #define IPA_PCIE1_EP_ID 22 +#define IPA_ETH0_EP_ID 31 +#define IPA_ETH1_EP_ID 32 + enum ipa_peripheral_ep_type { IPA_DATA_EP_TYP_RESERVED = 0, IPA_DATA_EP_TYP_HSIC = 1, IPA_DATA_EP_TYP_HSUSB = 2, IPA_DATA_EP_TYP_PCIE = 3, IPA_DATA_EP_TYP_EMBEDDED = 4, - IPA_DATA_EP_TYP_BAM_DMUX, + IPA_DATA_EP_TYP_BAM_DMUX = 5, + IPA_DATA_EP_TYP_ETH, }; enum ipa_data_ep_prot_type { From 68b106195b3d6097c68a8c83d4e9f619e8c3b1ff Mon Sep 17 00:00:00 2001 From: Carter Cooper Date: Fri, 18 Aug 2017 10:39:57 -0600 Subject: [PATCH 007/141] msm: kgsl: Add handler for GPC interrupt on A6xx GPU Add the interrupt handler to cause a snapshot when the GPC interrupt is received for A6xx. Change-Id: I6eabde0f2bdfc3997bf380055246c2cbdada7cdf Signed-off-by: Carter Cooper Signed-off-by: Harshdeep Dhatt Signed-off-by: Rajesh Kemisetti --- drivers/gpu/msm/adreno_a5xx.c | 8 ++++---- drivers/gpu/msm/adreno_a6xx.c | 25 ++++++++++++++++++++++++- 2 files changed, 28 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index ba46bd46d450..ef32eba17581 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -3315,11 +3315,11 @@ static void a5xx_gpmu_int_callback(struct adreno_device *adreno_dev, int bit) } /* - * a5x_gpc_err_int_callback() - Isr for GPC error interrupts + * a5xx_gpc_err_int_callback() - Isr for GPC error interrupts * @adreno_dev: Pointer to device * @bit: Interrupt bit */ -void a5x_gpc_err_int_callback(struct adreno_device *adreno_dev, int bit) +static void a5xx_gpc_err_int_callback(struct adreno_device *adreno_dev, int bit) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); @@ -3329,7 +3329,7 @@ void a5x_gpc_err_int_callback(struct adreno_device *adreno_dev, int bit) * with help of register dump. 
*/ - KGSL_DRV_CRIT(device, "RBBM: GPC error\n"); + KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: GPC error\n"); adreno_irqctrl(adreno_dev, 0); /* Trigger a fault in the dispatcher - this will effect a restart */ @@ -3367,7 +3367,7 @@ static struct adreno_irq_funcs a5xx_irq_funcs[32] = { ADRENO_IRQ_CALLBACK(a5xx_err_callback), /* 6 - RBBM_ATB_ASYNC_OVERFLOW */ ADRENO_IRQ_CALLBACK(a5xx_err_callback), - ADRENO_IRQ_CALLBACK(a5x_gpc_err_int_callback), /* 7 - GPC_ERR */ + ADRENO_IRQ_CALLBACK(a5xx_gpc_err_int_callback), /* 7 - GPC_ERR */ ADRENO_IRQ_CALLBACK(a5xx_preempt_callback),/* 8 - CP_SW */ ADRENO_IRQ_CALLBACK(a5xx_cp_hw_err_callback), /* 9 - CP_HW_ERROR */ /* 10 - CP_CCU_FLUSH_DEPTH_TS */ diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index a780c1bb1600..7ca19a934dce 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -1762,6 +1762,29 @@ static void a6xx_cp_callback(struct adreno_device *adreno_dev, int bit) adreno_dispatcher_schedule(device); } +/* + * a6xx_gpc_err_int_callback() - Isr for GPC error interrupts + * @adreno_dev: Pointer to device + * @bit: Interrupt bit + */ +static void a6xx_gpc_err_int_callback(struct adreno_device *adreno_dev, int bit) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + + /* + * GPC error is typically the result of mistake SW programming. + * Force GPU fault for this interrupt so that we can debug it + * with help of register dump. 
+ */ + + KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: GPC error\n"); + adreno_irqctrl(adreno_dev, 0); + + /* Trigger a fault in the dispatcher - this will effect a restart */ + adreno_set_gpu_fault(adreno_dev, ADRENO_SOFT_FAULT); + adreno_dispatcher_schedule(device); +} + #define A6XX_INT_MASK \ ((1 << A6XX_INT_CP_AHB_ERROR) | \ (1 << A6XX_INT_ATB_ASYNCFIFO_OVERFLOW) | \ @@ -1787,7 +1810,7 @@ static struct adreno_irq_funcs a6xx_irq_funcs[32] = { ADRENO_IRQ_CALLBACK(NULL), /* 5 - UNUSED */ /* 6 - RBBM_ATB_ASYNC_OVERFLOW */ ADRENO_IRQ_CALLBACK(a6xx_err_callback), - ADRENO_IRQ_CALLBACK(NULL), /* 7 - GPC_ERR */ + ADRENO_IRQ_CALLBACK(a6xx_gpc_err_int_callback), /* 7 - GPC_ERR */ ADRENO_IRQ_CALLBACK(a6xx_preemption_callback),/* 8 - CP_SW */ ADRENO_IRQ_CALLBACK(a6xx_cp_hw_err_callback), /* 9 - CP_HW_ERROR */ ADRENO_IRQ_CALLBACK(NULL), /* 10 - CP_CCU_FLUSH_DEPTH_TS */ From 02efcc8c33bc0de3facb4ef1d9168506334a550e Mon Sep 17 00:00:00 2001 From: Jim Wang Date: Tue, 14 Jan 2020 18:11:10 -0500 Subject: [PATCH 008/141] ARM: dts: msm: add support of DP PCLK bond mode for SA8195p Add DP PCLK bond mode options. Add the shared bonding pclk parent. 
Change-Id: Ifecd71218ae35b976e75271529c883fac8c06596 Signed-off-by: Jim Wang --- arch/arm64/boot/dts/qcom/sdmshrike-sde.dtsi | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdmshrike-sde.dtsi b/arch/arm64/boot/dts/qcom/sdmshrike-sde.dtsi index 9ad7c2c33df4..1068389f5898 100644 --- a/arch/arm64/boot/dts/qcom/sdmshrike-sde.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmshrike-sde.dtsi @@ -618,6 +618,7 @@ qcom,phy-index = <0>; qcom,bond-dual-ctrl = <1 0>; + qcom,bond-tri-ctrl = <2 0 1>; reg = <0xae90000 0x0dc>, <0xae90200 0x0c0>, @@ -653,13 +654,15 @@ <&clock_dispcc DISP_CC_MDSS_DP_PIXEL1_CLK_SRC>, <&mdss_dp0_pll DP_VCO_DIVIDED_CLK_SRC_MUX>, <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>, - <&clock_dispcc DISP_CC_MDSS_DP_PIXEL1_CLK>; + <&clock_dispcc DISP_CC_MDSS_DP_PIXEL1_CLK>, + <&mdss_dp2_pll DP_VCO_DIVIDED_CLK_SRC_MUX>; clock-names = "core_aux_clk", "core_usb_ref_clk_src", "core_usb_ref_clk", "core_usb_pipe_clk", "link_clk", "link_iface_clk", "crypto_clk", "pixel_clk_rcg", "pixel_parent", "pixel1_clk_rcg", "pixel1_parent", - "strm0_pixel_clk", "strm1_pixel_clk"; + "strm0_pixel_clk", "strm1_pixel_clk", + "bond_pixel_parent"; qcom,phy-version = <0x420>; qcom,phy-mode = "dp"; @@ -766,13 +769,15 @@ <&clock_dispcc DISP_CC_MDSS_DP_PIXEL1_CLK_SRC>, <&mdss_dp1_pll DP_VCO_DIVIDED_CLK_SRC_MUX>, <&clock_dispcc DISP_CC_MDSS_DP_PIXEL2_CLK>, - <&clock_dispcc DISP_CC_MDSS_DP_PIXEL1_CLK>; + <&clock_dispcc DISP_CC_MDSS_DP_PIXEL1_CLK>, + <&mdss_dp2_pll DP_VCO_DIVIDED_CLK_SRC_MUX>; clock-names = "core_aux_clk", "core_usb_ref_clk_src", "core_usb_ref_clk", "core_usb_pipe_clk", "link_clk", "link_iface_clk", "crypto_clk", "pixel_clk_rcg", "pixel_parent", "pixel1_clk_rcg", "pixel1_parent", - "strm0_pixel_clk", "strm1_pixel_clk"; + "strm0_pixel_clk", "strm1_pixel_clk", + "bond_pixel_parent"; qcom,phy-version = <0x420>; qcom,phy-mode = "dp"; @@ -872,11 +877,13 @@ <&clock_dispcc DISP_CC_MDSS_EDP_LINK_INTF_CLK>, <&clock_dispcc 
DISP_CC_MDSS_EDP_PIXEL_CLK_SRC>, <&mdss_edp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>, - <&clock_dispcc DISP_CC_MDSS_EDP_PIXEL_CLK>; + <&clock_dispcc DISP_CC_MDSS_EDP_PIXEL_CLK>, + <&mdss_dp2_pll DP_VCO_DIVIDED_CLK_SRC_MUX>; clock-names = "core_aux_clk", "core_ref_clk", "link_clk", "link_iface_clk", "pixel_clk_rcg", "pixel_parent", - "strm0_pixel_clk"; + "strm0_pixel_clk", + "bond_pixel_parent"; qcom,phy-version = <0x500>; qcom,phy-mode = "edp"; @@ -895,6 +902,7 @@ qcom,dsc-feature-enable; qcom,fec-feature-enable; + qcom,widebus-enable; qcom,max-dp-dsc-blks = <2>; qcom,max-dp-dsc-input-width-pixs = <2048>; From 73c08c589af7e7a54f45cdf4002d56bfd2a0341f Mon Sep 17 00:00:00 2001 From: Anurag Chouhan Date: Tue, 4 Aug 2020 13:36:13 +0530 Subject: [PATCH 009/141] ARM: dts: msm: Remove unnecessary files for qcm6125 Remove unnecessary DT files to support QCM6125/QCS6125. Change-Id: Ia7830c5a4f6364286fb381a2033e94d346670ab0 Signed-off-by: Anurag Chouhan --- arch/arm64/boot/dts/qcom/Makefile | 34 +------------ .../dts/qcom/qcm6125-iot-dp-idp-overlay.dts | 51 ------------------- .../boot/dts/qcom/qcm6125-iot-dp-idp.dts | 44 ---------------- ...qcm6125-iot-external-codec-idp-overlay.dts | 31 ----------- .../qcom/qcm6125-iot-external-codec-idp.dts | 24 --------- .../boot/dts/qcom/qcm6125-iot-idp-overlay.dts | 25 --------- arch/arm64/boot/dts/qcom/qcm6125-iot-idp.dts | 23 --------- arch/arm64/boot/dts/qcom/qcm6125-iot-idp.dtsi | 13 ----- ...25-iot-usbc-external-codec-idp-overlay.dts | 29 ----------- .../qcm6125-iot-usbc-external-codec-idp.dts | 22 -------- .../dts/qcom/qcm6125-iot-usbc-idp-overlay.dts | 30 ----------- .../boot/dts/qcom/qcm6125-iot-usbc-idp.dts | 23 --------- .../boot/dts/qcom/qcm6125-iot-usbc-idp.dtsi | 19 ------- arch/arm64/boot/dts/qcom/qcm6125.dts | 22 -------- arch/arm64/boot/dts/qcom/qcm6125.dtsi | 21 -------- .../dts/qcom/qcs6125-iot-dp-idp-overlay.dts | 51 ------------------- .../boot/dts/qcom/qcs6125-iot-dp-idp.dts | 44 ---------------- 
...qcs6125-iot-external-codec-idp-overlay.dts | 31 ----------- .../qcom/qcs6125-iot-external-codec-idp.dts | 24 --------- .../boot/dts/qcom/qcs6125-iot-idp-overlay.dts | 25 --------- arch/arm64/boot/dts/qcom/qcs6125-iot-idp.dts | 23 --------- arch/arm64/boot/dts/qcom/qcs6125-iot-idp.dtsi | 13 ----- ...25-iot-usbc-external-codec-idp-overlay.dts | 29 ----------- .../qcs6125-iot-usbc-external-codec-idp.dts | 22 -------- .../dts/qcom/qcs6125-iot-usbc-idp-overlay.dts | 30 ----------- .../boot/dts/qcom/qcs6125-iot-usbc-idp.dts | 23 --------- .../boot/dts/qcom/qcs6125-iot-usbc-idp.dtsi | 19 ------- arch/arm64/boot/dts/qcom/qcs6125.dts | 22 -------- arch/arm64/boot/dts/qcom/qcs6125.dtsi | 21 -------- 29 files changed, 2 insertions(+), 786 deletions(-) delete mode 100644 arch/arm64/boot/dts/qcom/qcm6125-iot-dp-idp-overlay.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcm6125-iot-dp-idp.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcm6125-iot-external-codec-idp-overlay.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcm6125-iot-external-codec-idp.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcm6125-iot-idp-overlay.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcm6125-iot-idp.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcm6125-iot-idp.dtsi delete mode 100644 arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-external-codec-idp-overlay.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-external-codec-idp.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-idp-overlay.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-idp.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-idp.dtsi delete mode 100644 arch/arm64/boot/dts/qcom/qcm6125.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcm6125.dtsi delete mode 100644 arch/arm64/boot/dts/qcom/qcs6125-iot-dp-idp-overlay.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcs6125-iot-dp-idp.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcs6125-iot-external-codec-idp-overlay.dts 
delete mode 100644 arch/arm64/boot/dts/qcom/qcs6125-iot-external-codec-idp.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcs6125-iot-idp-overlay.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcs6125-iot-idp.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcs6125-iot-idp.dtsi delete mode 100644 arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-external-codec-idp-overlay.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-external-codec-idp.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-idp-overlay.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-idp.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-idp.dtsi delete mode 100644 arch/arm64/boot/dts/qcom/qcs6125.dts delete mode 100644 arch/arm64/boot/dts/qcom/qcs6125.dtsi diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile index 86523cc11354..00916dedf352 100644 --- a/arch/arm64/boot/dts/qcom/Makefile +++ b/arch/arm64/boot/dts/qcom/Makefile @@ -330,17 +330,7 @@ ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) trinket-external-codec-idp-overlay.dtbo \ trinket-usbc-external-codec-idp-overlay.dtbo \ trinket-usbc-idp-overlay.dtbo \ - trinket-dp-idp-overlay.dtbo \ - qcm6125-iot-idp-overlay.dtbo \ - qcs6125-iot-idp-overlay.dtbo \ - qcm6125-iot-external-codec-idp-overlay.dtbo \ - qcs6125-iot-external-codec-idp-overlay.dtbo \ - qcm6125-iot-usbc-external-codec-idp-overlay.dtbo \ - qcs6125-iot-usbc-external-codec-idp-overlay.dtbo \ - qcm6125-iot-usbc-idp-overlay.dtbo \ - qcs6125-iot-usbc-idp-overlay.dtbo \ - qcm6125-iot-dp-idp-overlay.dtbo \ - qcs6125-iot-dp-idp-overlay.dtbo + trinket-dp-idp-overlay.dtbo trinket-rumi-overlay.dtbo-base := trinket.dtb trinket-idp-overlay.dtbo-base := trinket.dtb @@ -349,16 +339,6 @@ trinket-external-codec-idp-overlay.dtbo-base := trinket.dtb trinket-usbc-external-codec-idp-overlay.dtbo-base := trinket.dtb trinket-usbc-idp-overlay.dtbo-base := trinket.dtb trinket-dp-idp-overlay.dtbo-base := trinket.dtb 
-qcm6125-iot-idp-overlay.dtbo-base := qcm6125.dtb -qcs6125-iot-idp-overlay.dtbo-base := qcs6125.dtb -qcm6125-iot-external-codec-idp-overlay.dtbo-base := qcm6125.dtb -qcs6125-iot-external-codec-idp-overlay.dtbo-base := qcs6125.dtb -qcm6125-iot-usbc-external-codec-idp-overlay.dtbo-base := qcm6125.dtb -qcs6125-iot-usbc-external-codec-idp-overlay.dtbo-base := qcs6125.dtb -qcm6125-iot-usbc-idp-overlay.dtbo-base := qcm6125.dtb -qcs6125-iot-usbc-idp-overlay.dtbo-base := qcs6125.dtb -qcm6125-iot-dp-idp-overlay.dtbo-base := qcm6125.dtb -qcs6125-iot-dp-idp-overlay.dtbo-base := qcs6125.dtb else dtb-$(CONFIG_ARCH_TRINKET) += trinket-rumi.dtb \ trinket-idp.dtb \ @@ -366,17 +346,7 @@ dtb-$(CONFIG_ARCH_TRINKET) += trinket-rumi.dtb \ trinket-external-codec-idp.dtb \ trinket-usbc-external-codec-idp.dtb \ trinket-usbc-idp.dtb \ - trinket-dp-idp.dtb \ - qcm6125-iot-idp.dtb \ - qcs6125-iot-idp.dtb \ - qcm6125-iot-external-codec-idp.dtb \ - qcs6125-iot-external-codec-idp.dtb \ - qcm6125-iot-usbc-external-codec-idp.dtb \ - qcs6125-iot-usbc-external-codec-idp.dtb \ - qcm6125-iot-usbc-idp.dtb \ - qcs6125-iot-usbc-idp.dtb \ - qcm6125-iot-dp-idp.dtb \ - qcs6125-iot-dp-idp.dtb + trinket-dp-idp.dtb endif ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) diff --git a/arch/arm64/boot/dts/qcom/qcm6125-iot-dp-idp-overlay.dts b/arch/arm64/boot/dts/qcom/qcm6125-iot-dp-idp-overlay.dts deleted file mode 100644 index 96696b0399be..000000000000 --- a/arch/arm64/boot/dts/qcom/qcm6125-iot-dp-idp-overlay.dts +++ /dev/null @@ -1,51 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - */ - -/dts-v1/; -/plugin/; - -#include -#include -#include "qcm6125-iot-idp.dtsi" -#include "trinket-audio-overlay.dtsi" - -/ { - model = "Display Port Enable IDP"; - compatible = "qcom,qcm6125-idp", "qcom,qcm6125", "qcom,idp"; - qcom,msm-id = <467 0x10000>; - qcom,board-id = <34 4>; -}; - -&dsi_td4330_truly_cmd_display { - qcom,dsi-display-active; -}; - -&sde_dp { - status = "ok"; - qcom,dp-hpd-gpio = <&tlmm 100 0>; - qcom,dp-low-power-hw-hpd; -}; - -&mdss_dp_pll { - status = "ok"; -}; - -&usb0 { - dwc3@4e00000 { - usb-phy = <&qusb_phy0>, <&usb_nop_phy>; - maximum-speed = "high-speed"; - }; -}; - -&mdss_mdp { - connectors = <&sde_wb &sde_dsi &sde_dp>; -}; diff --git a/arch/arm64/boot/dts/qcom/qcm6125-iot-dp-idp.dts b/arch/arm64/boot/dts/qcom/qcm6125-iot-dp-idp.dts deleted file mode 100644 index 2a0232f76173..000000000000 --- a/arch/arm64/boot/dts/qcom/qcm6125-iot-dp-idp.dts +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; - -#include "qcm6125.dtsi" -#include "qcm6125-iot-idp.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. QCM6125 IOT Disp. 
Port Enable IDP"; - compatible = "qcom,qcm6125-idp", "qcom,qcm6125", "qcom,idp"; - qcom,board-id = <34 4>; -}; - -&sde_dp { - status = "ok"; - qcom,dp-hpd-gpio = <&tlmm 100 0>; - qcom,dp-low-power-hw-hpd; -}; - -&mdss_dp_pll { - status = "ok"; -}; - -&usb0 { - dwc3@4e00000 { - usb-phy = <&qusb_phy0>, <&usb_nop_phy>; - maximum-speed = "high-speed"; - }; -}; - -&mdss_mdp { - connectors = <&sde_wb &sde_dsi &sde_dp>; -}; diff --git a/arch/arm64/boot/dts/qcom/qcm6125-iot-external-codec-idp-overlay.dts b/arch/arm64/boot/dts/qcom/qcm6125-iot-external-codec-idp-overlay.dts deleted file mode 100644 index c8a93d2e1cc4..000000000000 --- a/arch/arm64/boot/dts/qcom/qcm6125-iot-external-codec-idp-overlay.dts +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; -/plugin/; - -#include - -#include "qcm6125-iot-idp.dtsi" -#include "trinket-tasha-codec-audio-overlay.dtsi" -#include "trinket-tasha-codec.dtsi" - -/ { - model = "Ext Audio Codec IDP"; - compatible = "qcom,qcm6125-idp", "qcom,qcm6125", "qcom,idp"; - qcom,msm-id = <467 0x10000>; - qcom,board-id = <34 1>; -}; - -&dsi_td4330_truly_cmd_display { - qcom,dsi-display-active; -}; diff --git a/arch/arm64/boot/dts/qcom/qcm6125-iot-external-codec-idp.dts b/arch/arm64/boot/dts/qcom/qcm6125-iot-external-codec-idp.dts deleted file mode 100644 index 586d54193c07..000000000000 --- a/arch/arm64/boot/dts/qcom/qcm6125-iot-external-codec-idp.dts +++ /dev/null @@ -1,24 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. 
All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; - -#include "qcm6125.dtsi" -#include "qcm6125-iot-idp.dtsi" -#include "trinket-tasha-codec-audio-overlay.dtsi" -#include "trinket-tasha-codec.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. QCM6125 IOT Ext Audio Codec IDP"; - compatible = "qcom,qcm6125-idp", "qcom,qcm6125", "qcom,idp"; - qcom,board-id = <34 1>; -}; diff --git a/arch/arm64/boot/dts/qcom/qcm6125-iot-idp-overlay.dts b/arch/arm64/boot/dts/qcom/qcm6125-iot-idp-overlay.dts deleted file mode 100644 index 936473fcec4b..000000000000 --- a/arch/arm64/boot/dts/qcom/qcm6125-iot-idp-overlay.dts +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; -/plugin/; - -#include "qcm6125-iot-idp.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. 
QCM6125 IOT IDP Overlay"; - compatible = "qcom,qcm6125"; - qcom,msm-id = <467 0x10000>; - qcom,board-id = <34 0>; -}; - diff --git a/arch/arm64/boot/dts/qcom/qcm6125-iot-idp.dts b/arch/arm64/boot/dts/qcom/qcm6125-iot-idp.dts deleted file mode 100644 index 37f06e2c3f46..000000000000 --- a/arch/arm64/boot/dts/qcom/qcm6125-iot-idp.dts +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; - -#include "qcm6125.dtsi" -#include "qcm6125-iot-idp.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. QCM6125 IOT IDP SoC"; - compatible = "qcom,qcm6125"; - qcom,board-id = <34 0>; -}; diff --git a/arch/arm64/boot/dts/qcom/qcm6125-iot-idp.dtsi b/arch/arm64/boot/dts/qcom/qcm6125-iot-idp.dtsi deleted file mode 100644 index 6d198b235fbc..000000000000 --- a/arch/arm64/boot/dts/qcom/qcm6125-iot-idp.dtsi +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ -#include "trinket-idp.dtsi" diff --git a/arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-external-codec-idp-overlay.dts b/arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-external-codec-idp-overlay.dts deleted file mode 100644 index 2c9d5820eccb..000000000000 --- a/arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-external-codec-idp-overlay.dts +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; -/plugin/; - -#include - -#include "qcm6125-iot-idp.dtsi" - -/ { - model = "USB-C Ext Audio Codec IDP"; - compatible = "qcom,qcm6125-idp", "qcom,qcm6125", "qcom,idp"; - qcom,msm-id = <467 0x10000>; - qcom,board-id = <34 3>; -}; - -&dsi_td4330_truly_cmd_display { - qcom,dsi-display-active; -}; diff --git a/arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-external-codec-idp.dts b/arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-external-codec-idp.dts deleted file mode 100644 index a3c69c31f734..000000000000 --- a/arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-external-codec-idp.dts +++ /dev/null @@ -1,22 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - */ - -/dts-v1/; - -#include "qcm6125.dtsi" -#include "qcm6125-iot-idp.dtsi" - -/ { - model = "Qualcomm Technologies,Inc. QCM6125 IOT USBC Ext Aud Codec IDP"; - compatible = "qcom,qcm6125-idp", "qcom,qcm6125", "qcom,idp"; - qcom,board-id = <34 3>; -}; diff --git a/arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-idp-overlay.dts b/arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-idp-overlay.dts deleted file mode 100644 index ef7a24aeb2bf..000000000000 --- a/arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-idp-overlay.dts +++ /dev/null @@ -1,30 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; -/plugin/; - -#include - -#include "qcm6125-iot-idp.dtsi" -#include "qcm6125-iot-usbc-idp.dtsi" - -/ { - model = "USBC Audio IDP"; - compatible = "qcom,qcm6125-idp", "qcom,qcm6125", "qcom,idp"; - qcom,msm-id = <467 0x10000>; - qcom,board-id = <34 2>; -}; - -&dsi_td4330_truly_cmd_display { - qcom,dsi-display-active; -}; diff --git a/arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-idp.dts b/arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-idp.dts deleted file mode 100644 index 73f65a9fc2d6..000000000000 --- a/arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-idp.dts +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; - -#include "qcm6125.dtsi" -#include "qcm6125-iot-idp.dtsi" -#include "qcm6125-iot-usbc-idp.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. QCM6125 IOT USBC Audio IDP"; - compatible = "qcom,qcm6125-idp", "qcom,qcm6125", "qcom,idp"; - qcom,board-id = <34 2>; -}; diff --git a/arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-idp.dtsi b/arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-idp.dtsi deleted file mode 100644 index faafcf82f5e3..000000000000 --- a/arch/arm64/boot/dts/qcom/qcm6125-iot-usbc-idp.dtsi +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include "trinket-audio-overlay.dtsi" - -&sm6150_snd { - qcom,msm-mbhc-usbc-audio-supported = <1>; - qcom,msm-mbhc-hphl-swh = <1>; - qcom,msm-mbhc-gnd-swh = <1>; -}; diff --git a/arch/arm64/boot/dts/qcom/qcm6125.dts b/arch/arm64/boot/dts/qcom/qcm6125.dts deleted file mode 100644 index 4e31d2849dc3..000000000000 --- a/arch/arm64/boot/dts/qcom/qcm6125.dts +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (c) 2020, The Linux Foundation. 
All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; - -#include "qcm6125.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. QCM6125 IOT IDP SoC"; - compatible = "qcom,qcm6125"; - qcom,board-id = <0 0>; -}; diff --git a/arch/arm64/boot/dts/qcom/qcm6125.dtsi b/arch/arm64/boot/dts/qcom/qcm6125.dtsi deleted file mode 100644 index dca459b90404..000000000000 --- a/arch/arm64/boot/dts/qcom/qcm6125.dtsi +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include "trinket.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. QCM6125"; - compatible = "qcom,qcm6125"; - qcom,msm-id = <467 0x0>; - qcom,msm-name = "QCM6125"; -}; diff --git a/arch/arm64/boot/dts/qcom/qcs6125-iot-dp-idp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs6125-iot-dp-idp-overlay.dts deleted file mode 100644 index 2830ca6cfde8..000000000000 --- a/arch/arm64/boot/dts/qcom/qcs6125-iot-dp-idp-overlay.dts +++ /dev/null @@ -1,51 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; -/plugin/; - -#include -#include -#include "qcs6125-iot-idp.dtsi" -#include "trinket-audio-overlay.dtsi" - -/ { - model = "Display Port Enable IDP"; - compatible = "qcom,qcs6125-idp", "qcom,qcs6125", "qcom,idp"; - qcom,msm-id = <468 0x10000>; - qcom,board-id = <34 4>; -}; - -&dsi_td4330_truly_cmd_display { - qcom,dsi-display-active; -}; - -&sde_dp { - status = "ok"; - qcom,dp-hpd-gpio = <&tlmm 100 0>; - qcom,dp-low-power-hw-hpd; -}; - -&mdss_dp_pll { - status = "ok"; -}; - -&usb0 { - dwc3@4e00000 { - usb-phy = <&qusb_phy0>, <&usb_nop_phy>; - maximum-speed = "high-speed"; - }; -}; - -&mdss_mdp { - connectors = <&sde_wb &sde_dsi &sde_dp>; -}; diff --git a/arch/arm64/boot/dts/qcom/qcs6125-iot-dp-idp.dts b/arch/arm64/boot/dts/qcom/qcs6125-iot-dp-idp.dts deleted file mode 100644 index 69bf2c2be9d8..000000000000 --- a/arch/arm64/boot/dts/qcom/qcs6125-iot-dp-idp.dts +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -/dts-v1/; - -#include "qcs6125.dtsi" -#include "qcs6125-iot-idp.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. QCM6125 IOT Disp. Port Enable IDP"; - compatible = "qcom,qcs6125-idp", "qcom,qcs6125", "qcom,idp"; - qcom,board-id = <34 4>; -}; - -&sde_dp { - status = "ok"; - qcom,dp-hpd-gpio = <&tlmm 100 0>; - qcom,dp-low-power-hw-hpd; -}; - -&mdss_dp_pll { - status = "ok"; -}; - -&usb0 { - dwc3@4e00000 { - usb-phy = <&qusb_phy0>, <&usb_nop_phy>; - maximum-speed = "high-speed"; - }; -}; - -&mdss_mdp { - connectors = <&sde_wb &sde_dsi &sde_dp>; -}; diff --git a/arch/arm64/boot/dts/qcom/qcs6125-iot-external-codec-idp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs6125-iot-external-codec-idp-overlay.dts deleted file mode 100644 index d5c75339ba9d..000000000000 --- a/arch/arm64/boot/dts/qcom/qcs6125-iot-external-codec-idp-overlay.dts +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -/dts-v1/; -/plugin/; - -#include - -#include "qcs6125-iot-idp.dtsi" -#include "trinket-tasha-codec-audio-overlay.dtsi" -#include "trinket-tasha-codec.dtsi" - -/ { - model = "Ext Audio Codec IDP"; - compatible = "qcom,qcs6125-idp", "qcom,qcs6125", "qcom,idp"; - qcom,msm-id = <468 0x10000>; - qcom,board-id = <34 1>; -}; - -&dsi_td4330_truly_cmd_display { - qcom,dsi-display-active; -}; diff --git a/arch/arm64/boot/dts/qcom/qcs6125-iot-external-codec-idp.dts b/arch/arm64/boot/dts/qcom/qcs6125-iot-external-codec-idp.dts deleted file mode 100644 index 7d75678fd29a..000000000000 --- a/arch/arm64/boot/dts/qcom/qcs6125-iot-external-codec-idp.dts +++ /dev/null @@ -1,24 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; - -#include "qcs6125.dtsi" -#include "qcs6125-iot-idp.dtsi" -#include "trinket-tasha-codec-audio-overlay.dtsi" -#include "trinket-tasha-codec.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. QCS6125 IOT Ext Audio Codec IDP"; - compatible = "qcom,qcs6125-idp", "qcom,qcs6125", "qcom,idp"; - qcom,board-id = <34 1>; -}; diff --git a/arch/arm64/boot/dts/qcom/qcs6125-iot-idp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs6125-iot-idp-overlay.dts deleted file mode 100644 index 8b80605046e5..000000000000 --- a/arch/arm64/boot/dts/qcom/qcs6125-iot-idp-overlay.dts +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; -/plugin/; - -#include "qcs6125-iot-idp.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. QCS6125 IOT IDP Overlay"; - compatible = "qcom,qcs6125"; - qcom,msm-id = <468 0x10000>; - qcom,board-id = <34 0>; -}; - diff --git a/arch/arm64/boot/dts/qcom/qcs6125-iot-idp.dts b/arch/arm64/boot/dts/qcom/qcs6125-iot-idp.dts deleted file mode 100644 index 4eaa9b6ccccb..000000000000 --- a/arch/arm64/boot/dts/qcom/qcs6125-iot-idp.dts +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; - -#include "qcs6125.dtsi" -#include "qcs6125-iot-idp.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. QCS6125 IOT IDP SoC"; - compatible = "qcom,qcs6125"; - qcom,board-id = <34 0>; -}; diff --git a/arch/arm64/boot/dts/qcom/qcs6125-iot-idp.dtsi b/arch/arm64/boot/dts/qcom/qcs6125-iot-idp.dtsi deleted file mode 100644 index 6d198b235fbc..000000000000 --- a/arch/arm64/boot/dts/qcom/qcs6125-iot-idp.dtsi +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright (c) 2020, The Linux Foundation. 
All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -#include "trinket-idp.dtsi" diff --git a/arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-external-codec-idp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-external-codec-idp-overlay.dts deleted file mode 100644 index 8fb605f7dac8..000000000000 --- a/arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-external-codec-idp-overlay.dts +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -/dts-v1/; -/plugin/; - -#include - -#include "qcs6125-iot-idp.dtsi" - -/ { - model = "USB-C Ext Audio Codec IDP"; - compatible = "qcom,qcs6125-idp", "qcom,qcs6125", "qcom,idp"; - qcom,msm-id = <468 0x10000>; - qcom,board-id = <34 3>; -}; - -&dsi_td4330_truly_cmd_display { - qcom,dsi-display-active; -}; diff --git a/arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-external-codec-idp.dts b/arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-external-codec-idp.dts deleted file mode 100644 index 8a47d58bc4d1..000000000000 --- a/arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-external-codec-idp.dts +++ /dev/null @@ -1,22 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; - -#include "qcs6125.dtsi" -#include "qcs6125-iot-idp.dtsi" - -/ { - model = "Qualcomm Technologies,Inc. QCS6125IOT USBC Ext AudioCodec IDP"; - compatible = "qcom,qcs6125-idp", "qcom,qcs6125", "qcom,idp"; - qcom,board-id = <34 3>; -}; diff --git a/arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-idp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-idp-overlay.dts deleted file mode 100644 index 3428c7b93bc4..000000000000 --- a/arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-idp-overlay.dts +++ /dev/null @@ -1,30 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; -/plugin/; - -#include - -#include "qcs6125-iot-idp.dtsi" -#include "qcs6125-iot-usbc-idp.dtsi" - -/ { - model = "USBC Audio IDP"; - compatible = "qcom,qcs6125-idp", "qcom,qcs6125", "qcom,idp"; - qcom,msm-id = <468 0x10000>; - qcom,board-id = <34 2>; -}; - -&dsi_td4330_truly_cmd_display { - qcom,dsi-display-active; -}; diff --git a/arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-idp.dts b/arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-idp.dts deleted file mode 100644 index a33a34046fdc..000000000000 --- a/arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-idp.dts +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; - -#include "qcs6125.dtsi" -#include "qcs6125-iot-idp.dtsi" -#include "qcs6125-iot-usbc-idp.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. QCS6125 IOT USBC Audio IDP"; - compatible = "qcom,qcs6125-idp", "qcom,qcs6125", "qcom,idp"; - qcom,board-id = <34 2>; -}; diff --git a/arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-idp.dtsi b/arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-idp.dtsi deleted file mode 100644 index faafcf82f5e3..000000000000 --- a/arch/arm64/boot/dts/qcom/qcs6125-iot-usbc-idp.dtsi +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include "trinket-audio-overlay.dtsi" - -&sm6150_snd { - qcom,msm-mbhc-usbc-audio-supported = <1>; - qcom,msm-mbhc-hphl-swh = <1>; - qcom,msm-mbhc-gnd-swh = <1>; -}; diff --git a/arch/arm64/boot/dts/qcom/qcs6125.dts b/arch/arm64/boot/dts/qcom/qcs6125.dts deleted file mode 100644 index c2ac4473ee4b..000000000000 --- a/arch/arm64/boot/dts/qcom/qcs6125.dts +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; - -#include "qcs6125.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. QCS6125 SoC"; - compatible = "qcom,qcs6125"; - qcom,board-id = <0 0>; -}; diff --git a/arch/arm64/boot/dts/qcom/qcs6125.dtsi b/arch/arm64/boot/dts/qcom/qcs6125.dtsi deleted file mode 100644 index aed91a655019..000000000000 --- a/arch/arm64/boot/dts/qcom/qcs6125.dtsi +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include "trinket.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. QCS6125"; - compatible = "qcom,qcs6125"; - qcom,msm-id = <468 0x0>; - qcom,msm-name = "QCS6125"; -}; From f287f9587cadaf66d7ff1024f5f03c56b83b5dd2 Mon Sep 17 00:00:00 2001 From: Anurag Chouhan Date: Tue, 4 Aug 2020 15:33:01 +0530 Subject: [PATCH 010/141] soc: qcom: socinfo: Remove Unnecessary soc-id Remove Unnecessary socinfo support and update the bindings for the same. Change-Id: I724b37fd7e466307db0c209cef834c4a308fa940 Signed-off-by: Anurag Chouhan --- Documentation/devicetree/bindings/arm/msm/msm.txt | 10 ---------- drivers/soc/qcom/socinfo.c | 12 ------------ include/soc/qcom/socinfo.h | 8 -------- 3 files changed, 30 deletions(-) diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt index b58a1eb312ee..b5d9f2f83b67 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm.txt +++ b/Documentation/devicetree/bindings/arm/msm/msm.txt @@ -106,12 +106,6 @@ SoCs: - SDA429W compatible = "qcom,sda429w" -- QCM6125 - compatible = "qcom,qcm6125" - -- QCS6125 - compatible = "qcom,qcs6125" - - SA2145P compatible = "qcom,sa2145p" @@ -311,7 +305,3 @@ compatible = "qcom,sda429w-wdp" compatible = "qcom,sda429-wdp" compatible = "qcom,sdm429w-wdp" compatible = "qcom,sdm429-wdp" -compatible = "qcom,qcm6125" -compatible = "qcom,qcs6125" -compatible = "qcom,qcm6125-idp" -compatible = "qcom,qcs6125-idp" diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index 
03a4c2214fe6..c5a629686bf8 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -451,10 +451,6 @@ static struct msm_soc_info cpu_of_id[] = { [416] = {MSM_CPU_SDM429W, "SDM429W"}, [437] = {MSM_CPU_SDA429W, "SDA429W"}, - /* QCM6125 IDs*/ - [467] = {MSM_CPU_QCM6125, "QCM6125"}, - [468] = {MSM_CPU_QCS6125, "QCS6125"}, - /* Uninitialized IDs are not known to run Linux. * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are * considered as unknown CPU. @@ -1467,14 +1463,6 @@ static void * __init setup_dummy_socinfo(void) dummy_socinfo.id = 437; strlcpy(dummy_socinfo.build_id, "sda429w - ", sizeof(dummy_socinfo.build_id)); - } else if (early_machine_is_qcm6125()) { - dummy_socinfo.id = 467; - strlcpy(dummy_socinfo.build_id, "qcm6125 - ", - sizeof(dummy_socinfo.build_id)); - } else if (early_machine_is_qcs6125()) { - dummy_socinfo.id = 468; - strlcpy(dummy_socinfo.build_id, "qcm6125 - ", - sizeof(dummy_socinfo.build_id)); } else strlcat(dummy_socinfo.build_id, "Dummy socinfo", sizeof(dummy_socinfo.build_id)); diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h index 55dc5e9732c9..2e316c5bb623 100644 --- a/include/soc/qcom/socinfo.h +++ b/include/soc/qcom/socinfo.h @@ -117,10 +117,6 @@ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm429w") #define early_machine_is_sda429w() \ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sda429w") -#define early_machine_is_qcm6125() \ - of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,qcm6125") -#define early_machine_is_qcs6125() \ - of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,qcs6125") #else #define of_board_is_sim() 0 #define of_board_is_rumi() 0 @@ -168,8 +164,6 @@ #define early_machine_is_sda660() 0 #define early_machine_is_sdm429w() 0 #define early_machine_is_sda429w() 0 -#define early_machine_is_qcm6125() 0 -#define early_machine_is_qcs6125() 0 #endif #define PLATFORM_SUBTYPE_MDM 1 @@ -220,8 +214,6 @@ enum msm_cpu { MSM_CPU_SDA660, MSM_CPU_SDM429W, MSM_CPU_SDA429W, 
- MSM_CPU_QCM6125, - MSM_CPU_QCS6125, }; struct msm_soc_info { From c29cf27601e9322dba902fc34c25839805ed41c0 Mon Sep 17 00:00:00 2001 From: Tony Truong Date: Mon, 3 Aug 2020 18:44:27 -0700 Subject: [PATCH 011/141] msm: pcie: correct cached PCIe link BW max gen speed PCIe link BW (bandwidth) max GEN speed is incorrectly calculated. Update bw_gen_max to have to the correct value. Change-Id: I5a9c77e326966681bdc0efde84815dcea083d470 Signed-off-by: Tony Truong --- drivers/pci/host/pci-msm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c index 8b5471eeb9da..9b8eaf5545a9 100644 --- a/drivers/pci/host/pci-msm.c +++ b/drivers/pci/host/pci-msm.c @@ -3724,7 +3724,7 @@ static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev, of_property_read_u32_array(pdev->dev.of_node, "qcom,bw-scale", (u32 *)dev->bw_scale, size / sizeof(u32)); - dev->bw_gen_max = size / sizeof(u32); + dev->bw_gen_max = size / sizeof(*dev->bw_scale); } else { PCIE_DBG(dev, "RC%d: bandwidth scaling is not supported\n", dev->rc_idx); From bf480f17d961e5d62b1481544bf3e015669f0441 Mon Sep 17 00:00:00 2001 From: Tony Truong Date: Mon, 3 Aug 2020 18:34:22 -0700 Subject: [PATCH 012/141] msm: pcie: validate speed switch request Validate the target link speed request to ensure it does not exceed what PCIe root complex allow. 
Change-Id: I62f218e227ff446feae292ce4eeb78b02904ea3e Signed-off-by: Tony Truong --- drivers/pci/host/pci-msm.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c index 9b8eaf5545a9..ad55f37d0002 100644 --- a/drivers/pci/host/pci-msm.c +++ b/drivers/pci/host/pci-msm.c @@ -6450,6 +6450,15 @@ int msm_pcie_set_link_bandwidth(struct pci_dev *pci_dev, u16 target_link_speed, pcie_dev = PCIE_BUS_PRIV_DATA(root_pci_dev->bus); + if (target_link_speed > pcie_dev->bw_gen_max || + (pcie_dev->target_link_speed && + target_link_speed > pcie_dev->target_link_speed)) { + PCIE_DBG(pcie_dev, + "PCIe: RC%d: invalid target link speed: %d\n", + pcie_dev->rc_idx, target_link_speed); + return -EINVAL; + } + pcie_capability_read_word(root_pci_dev, PCI_EXP_LNKSTA, &link_status); current_link_speed = link_status & PCI_EXP_LNKSTA_CLS; From 03777e3eb47135b2b61c1460392b9af9f8d4a5db Mon Sep 17 00:00:00 2001 From: Jayadev K Date: Thu, 19 Dec 2019 15:11:30 +0530 Subject: [PATCH 013/141] ARM: dts: qcom: Enable SE2 I2C for SA8195 SE2 I2C bus is required to support SDR card. 
Change-Id: I67801b589613696328a3efae6125f3e1ec885c7c Signed-off-by: Jayadev K --- arch/arm64/boot/dts/qcom/sa8195p.dtsi | 5 +++++ arch/arm64/boot/dts/qcom/sdmshrike.dtsi | 1 + 2 files changed, 6 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sa8195p.dtsi b/arch/arm64/boot/dts/qcom/sa8195p.dtsi index 3e31e5d48b2a..62728a000748 100644 --- a/arch/arm64/boot/dts/qcom/sa8195p.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8195p.dtsi @@ -774,3 +774,8 @@ }; }; }; + +&qupv3_se2_i2c { + qcom,clk-freq-out = <400000>; + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmshrike.dtsi b/arch/arm64/boot/dts/qcom/sdmshrike.dtsi index d417ff891224..edcc32d586c7 100644 --- a/arch/arm64/boot/dts/qcom/sdmshrike.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmshrike.dtsi @@ -46,6 +46,7 @@ hsuart0 = &qupv3_se13_4uart; spi0 = &qupv3_se3_spi; i2c0 = &qupv3_se4_i2c; + i2c2 = &qupv3_se2_i2c; }; cpus { From ce37a0a09f7b87631e8490c5831a63e9e2993a87 Mon Sep 17 00:00:00 2001 From: Pankaj Gupta Date: Thu, 30 Jul 2020 18:39:40 +0530 Subject: [PATCH 014/141] msm: kgsl: Always boot GMU with default CM3 config Ensure that CM3 configuration is always set to default value before taking CM3 out of reset. Also make sure that we read/modify this register when we send NMI to GMU. Change-Id: Ic9c4506c04c5e7dd1cabf12901fa53636e2ed9c7 Signed-off-by: Oleg Perelet Signed-off-by: Pankaj Gupta --- drivers/gpu/msm/adreno_a6xx_gmu.c | 11 ++++++++++- drivers/gpu/msm/kgsl_gmu.c | 9 ++++++--- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c index a4639068403b..279f57e32846 100644 --- a/drivers/gpu/msm/adreno_a6xx_gmu.c +++ b/drivers/gpu/msm/adreno_a6xx_gmu.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -351,6 +351,8 @@ static int a6xx_gmu_start(struct kgsl_device *device) /* Bring GMU out of reset */ gmu_core_regwrite(device, A6XX_GMU_CM3_SYSRESET, 0); + /* Make sure the request completes before continuing */ + wmb(); if (timed_poll_check(device, A6XX_GMU_CM3_FW_INIT_RESULT, 0xBABEFACE, @@ -1042,6 +1044,13 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device, gmu_core_regwrite(device, A6XX_GMU_AHB_FENCE_RANGE_0, GMU_FENCE_RANGE_MASK); + /* + * Make sure that CM3 state is at reset value. Snapshot is changing + * NMI bit and if we boot up GMU with NMI bit set.GMU will boot straight + * in to NMI handler without executing __main code + */ + gmu_core_regwrite(device, A6XX_GMU_CM3_CFG, 0x4052); + /* Pass chipid to GMU FW, must happen before starting GMU */ /* Keep Core and Major bitfields unchanged */ diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c index cf993e7ebbf1..cb1ca467ea9d 100644 --- a/drivers/gpu/msm/kgsl_gmu.c +++ b/drivers/gpu/msm/kgsl_gmu.c @@ -964,6 +964,8 @@ static int gmu_rpmh_init(struct kgsl_device *device, static void send_nmi_to_gmu(struct adreno_device *adreno_dev) { + u32 val; + /* Mask so there's no interrupt caused by NMI */ adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF); @@ -972,9 +974,10 @@ static void send_nmi_to_gmu(struct adreno_device *adreno_dev) wmb(); adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0); - adreno_write_gmureg(adreno_dev, - ADRENO_REG_GMU_CM3_CFG, - (1 << GMU_CM3_CFG_NONMASKINTR_SHIFT)); + + adreno_read_gmureg(adreno_dev, ADRENO_REG_GMU_CM3_CFG, &val); + val |= 1 << GMU_CM3_CFG_NONMASKINTR_SHIFT; + adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_CM3_CFG, val); /* Make sure the NMI is invoked before we proceed*/ wmb(); From bab9b42617f8c1b096ae3a7c0cbf1cbdf0f4eab0 Mon Sep 17 00:00:00 2001 From: Anurag 
Chouhan Date: Tue, 4 Aug 2020 15:54:49 +0530 Subject: [PATCH 015/141] soc: qcom: socinfo: Add support for trinket-iot soc-id Add socinfo support for trinket-iot Soc and update the bindings for the same. Change-Id: I377526de9e7e7d95d93f6f954f6ff94e8f886b71 Signed-off-by: Anurag Chouhan --- Documentation/devicetree/bindings/arm/msm/msm.txt | 9 +++++++++ drivers/soc/qcom/socinfo.c | 14 ++++++++++++++ include/soc/qcom/socinfo.h | 8 ++++++++ 3 files changed, 31 insertions(+) diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt index b5d9f2f83b67..288832098310 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm.txt +++ b/Documentation/devicetree/bindings/arm/msm/msm.txt @@ -162,6 +162,11 @@ Generic board variants: - TTP device: compatible = "qcom,ttp" +- TRINKET-IOT + compatible = "qcom,trinket-iot" + +- TRINKETP-IOT + compatible = "qcom,trinketp-iot" Boards (SoC type + board variant): @@ -305,3 +310,7 @@ compatible = "qcom,sda429w-wdp" compatible = "qcom,sda429-wdp" compatible = "qcom,sdm429w-wdp" compatible = "qcom,sdm429-wdp" +compatible = "qcom,trinket-iot" +compatible = "qcom,trinketp-iot" +compatible = "qcom,trinket-iot-idp" +compatible = "qcom,trinketp-iot-idp" diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index c5a629686bf8..9e749e559937 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -451,6 +451,12 @@ static struct msm_soc_info cpu_of_id[] = { [416] = {MSM_CPU_SDM429W, "SDM429W"}, [437] = {MSM_CPU_SDA429W, "SDA429W"}, + /* TRINKET-IOT IDs*/ + [467] = {MSM_CPU_TRINKET_IOT, "TRINKET-IOT"}, + + /* TRINKETP-IOT IDs*/ + [468] = {MSM_CPU_TRINKETP_IOT, "TRINKETP-IOT"}, + /* Uninitialized IDs are not known to run Linux. * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are * considered as unknown CPU. 
@@ -1463,6 +1469,14 @@ static void * __init setup_dummy_socinfo(void) dummy_socinfo.id = 437; strlcpy(dummy_socinfo.build_id, "sda429w - ", sizeof(dummy_socinfo.build_id)); + } else if (early_machine_is_trinket_iot()) { + dummy_socinfo.id = 467; + strlcpy(dummy_socinfo.build_id, "trinket-iot - ", + sizeof(dummy_socinfo.build_id)); + } else if (early_machine_is_trinketp_iot()) { + dummy_socinfo.id = 468; + strlcpy(dummy_socinfo.build_id, "trinketp-iot - ", + sizeof(dummy_socinfo.build_id)); } else strlcat(dummy_socinfo.build_id, "Dummy socinfo", sizeof(dummy_socinfo.build_id)); diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h index 2e316c5bb623..b9e3927081d3 100644 --- a/include/soc/qcom/socinfo.h +++ b/include/soc/qcom/socinfo.h @@ -117,6 +117,10 @@ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm429w") #define early_machine_is_sda429w() \ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sda429w") +#define early_machine_is_trinket_iot() \ + of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,trinket-iot") +#define early_machine_is_trinketp_iot() \ + of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,trinketp-iot") #else #define of_board_is_sim() 0 #define of_board_is_rumi() 0 @@ -164,6 +168,8 @@ #define early_machine_is_sda660() 0 #define early_machine_is_sdm429w() 0 #define early_machine_is_sda429w() 0 +#define early_machine_is_trinket_iot() 0 +#define early_machine_is_trinketp_iot() 0 #endif #define PLATFORM_SUBTYPE_MDM 1 @@ -214,6 +220,8 @@ enum msm_cpu { MSM_CPU_SDA660, MSM_CPU_SDM429W, MSM_CPU_SDA429W, + MSM_CPU_TRINKET_IOT, + MSM_CPU_TRINKETP_IOT, }; struct msm_soc_info { From 58456964ed278ce03f647a5ea2aa44a5bae4bc07 Mon Sep 17 00:00:00 2001 From: Pankaj Gupta Date: Wed, 5 Aug 2020 19:43:48 +0530 Subject: [PATCH 016/141] msm: kgsl: Reset CM3 during GMU suspend Reset CM3 during GMU suspend to make sure that CM3 stays in proper state before turning it on. 
Change-Id: I6b40a498de261842ca9cedf5eadfea5b689c5073 Signed-off-by: Pankaj Gupta --- drivers/gpu/msm/adreno_a6xx_gmu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c index a4639068403b..796a9c8306a7 100644 --- a/drivers/gpu/msm/adreno_a6xx_gmu.c +++ b/drivers/gpu/msm/adreno_a6xx_gmu.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1183,6 +1183,8 @@ static int a6xx_gmu_suspend(struct kgsl_device *device) /* Check no outstanding RPMh voting */ a6xx_complete_rpmh_votes(device); + gmu_core_regwrite(device, A6XX_GMU_CM3_SYSRESET, 1); + /* * This is based on the assumption that GMU is the only one controlling * the GX HS. This code path is the only client voting for GX through From 38cad2c8ac6c528324bd94eae3b8272a11a52e9c Mon Sep 17 00:00:00 2001 From: Pankaj Gupta Date: Wed, 24 Jun 2020 17:52:30 +0530 Subject: [PATCH 017/141] msm: kgsl: Use regulator_is_enabled api when gpu-quirk-cx-gdsc is defined When gpu-quirk-cx-gdsc is defined, cx_gdsc will not be disabled from HLOS, so use regulator_is_enabled api to get the dummy status of cx_gdsc. 
Change-Id: Ied2a54687d9438610116bf4a96a4843fdbc05c56 Signed-off-by: Pankaj Gupta --- drivers/gpu/msm/adreno_a6xx_gmu.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c index ed7465a02cd1..eb414b1689a0 100644 --- a/drivers/gpu/msm/adreno_a6xx_gmu.c +++ b/drivers/gpu/msm/adreno_a6xx_gmu.c @@ -837,6 +837,9 @@ static bool a6xx_gmu_cx_is_on(struct kgsl_device *device) { unsigned int val; + if (ADRENO_QUIRK(ADRENO_DEVICE(device), ADRENO_QUIRK_CX_GDSC)) + return regulator_is_enabled(KGSL_GMU_DEVICE(device)->cx_gdsc); + gmu_core_regread(device, A6XX_GPU_CC_CX_GDSCR, &val); return (val & BIT(31)); } From 88cf26f50676b27c22ac3925b372529edf1fbb42 Mon Sep 17 00:00:00 2001 From: Saurabh Sahu Date: Wed, 5 Aug 2020 11:05:24 +0530 Subject: [PATCH 018/141] clk: qcom: gcc: Add freq support for emac clk in qcs405 Add support for 2.5MHz and 25MHz for emac_clk_src in qcs405. Change-Id: I7b2df3d83dbe8f4b5576a10040076c345cde38e4 Signed-off-by: Saurabh Sahu --- drivers/clk/qcom/gcc-qcs405.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/clk/qcom/gcc-qcs405.c b/drivers/clk/qcom/gcc-qcs405.c index 081df4a32e5b..da0fc7d409d0 100644 --- a/drivers/clk/qcom/gcc-qcs405.c +++ b/drivers/clk/qcom/gcc-qcs405.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -853,7 +853,9 @@ static struct clk_rcg2 byte0_clk_src = { }; static const struct freq_tbl ftbl_emac_clk_src[] = { + F(2500000, P_GPLL1_OUT_MAIN, 4, 1, 50), F(5000000, P_GPLL1_OUT_MAIN, 2, 1, 50), + F(25000000, P_GPLL1_OUT_MAIN, 1, 1, 20), F(50000000, P_GPLL1_OUT_MAIN, 10, 0, 0), F(125000000, P_GPLL1_OUT_MAIN, 4, 0, 0), F(250000000, P_GPLL1_OUT_MAIN, 2, 0, 0), From b27321e79fe3dfe2d00ffc3bc6a1b08010923a6d Mon Sep 17 00:00:00 2001 From: Chandrasekhar Mattaparthy Date: Fri, 7 Aug 2020 10:02:32 +0530 Subject: [PATCH 019/141] ARM: defconfig: Enable intermediate functional block support for sdm429w Adding intermediate functional block driver to support data over BT only use case. Change-Id: I1f4e243aba48d7dfbc659ce0e5f44d734e7419f2 Signed-off-by: Chandrasekhar Mattaparthy --- arch/arm/configs/vendor/sdm429-bg-perf_defconfig | 1 + arch/arm/configs/vendor/sdm429-bg_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm/configs/vendor/sdm429-bg-perf_defconfig b/arch/arm/configs/vendor/sdm429-bg-perf_defconfig index 30c473f75796..6b1b06ac5b3d 100644 --- a/arch/arm/configs/vendor/sdm429-bg-perf_defconfig +++ b/arch/arm/configs/vendor/sdm429-bg-perf_defconfig @@ -278,6 +278,7 @@ CONFIG_DM_VERITY_FEC=y CONFIG_NETDEVICES=y CONFIG_BONDING=y CONFIG_DUMMY=y +CONFIG_IFB=y CONFIG_TUN=y CONFIG_MSM_RMNET_BAM=y CONFIG_PPP=y diff --git a/arch/arm/configs/vendor/sdm429-bg_defconfig b/arch/arm/configs/vendor/sdm429-bg_defconfig index 761f83e7c8de..405b1788b5c0 100644 --- a/arch/arm/configs/vendor/sdm429-bg_defconfig +++ b/arch/arm/configs/vendor/sdm429-bg_defconfig @@ -285,6 +285,7 @@ CONFIG_DM_VERITY_FEC=y CONFIG_NETDEVICES=y CONFIG_BONDING=y CONFIG_DUMMY=y +CONFIG_IFB=y CONFIG_TUN=y CONFIG_MSM_RMNET_BAM=y CONFIG_PPP=y From 032498ece62c7acb6ffaa63910c417ff54371f2f Mon Sep 17 00:00:00 2001 From: Chandana Kishori Chiluveru Date: 
Mon, 10 Aug 2020 11:28:08 +0530 Subject: [PATCH 020/141] serial: msm_geni_serial: Fix the issue with PM usage During system suspend PM_runtime is disabled by PM core for a device. Currently driver using pm_runtime_enable checks in PM suspend/resume callbacks and doing manual resume. With this PM usage count is getting miss-matched in driver and causing the device to not to enter system suspend state. Fix this issue by removing pm_runtime_enable checks inside PM callbacks of driver. Change-Id: I7032bf28c22eda5b989ee66c452a43ed5b7af75e Signed-off-by: Chandana Kishori Chiluveru --- drivers/tty/serial/msm_geni_serial.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c index bcdcdc194866..d5858c08919b 100644 --- a/drivers/tty/serial/msm_geni_serial.c +++ b/drivers/tty/serial/msm_geni_serial.c @@ -3541,7 +3541,7 @@ static int msm_geni_serial_sys_suspend_noirq(struct device *dev) struct msm_geni_serial_port *port = platform_get_drvdata(pdev); struct uart_port *uport = &port->uport; - if (uart_console(uport) || !pm_runtime_enabled(uport->dev)) { + if (uart_console(uport) || port->pm_auto_suspend_disable) { uart_suspend_port((struct uart_driver *)uport->private_data, uport); } else { @@ -3572,7 +3572,7 @@ static int msm_geni_serial_sys_resume_noirq(struct device *dev) if ((uart_console(uport) && console_suspend_enabled && uport->suspended) || - !pm_runtime_enabled(uport->dev)) { + port->pm_auto_suspend_disable) { uart_resume_port((struct uart_driver *)uport->private_data, uport); } From e7c61a5e20f4587bdeb3a61f7054b6743f301f98 Mon Sep 17 00:00:00 2001 From: VijayaKumar T M Date: Mon, 10 Aug 2020 17:50:44 +0530 Subject: [PATCH 021/141] msm: camera: Fix uninitialized and Null pointer dereference - Initialize uninitialized variabled. - Check for Null pointer dereference. 
Change-Id: I27714561a1e53db6bfba431ba155d2044e87b8da Signed-off-by: VijayaKumar T M --- .../msm/camera/cam_isp/cam_isp_context.c | 17 ++++++++++------- .../lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c | 16 +++++++++------- 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c index 2ac164251e01..025004404bf5 100644 --- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c +++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c @@ -876,11 +876,13 @@ static int __cam_isp_ctx_reg_upd_in_epoch_state( else if (ctx_isp->fps && ((rup_event_data->irq_mono_boot_time - ctx_isp->irq_timestamps) > ((1000*1000)/ctx_isp->fps))) { ctx_isp->irq_delay_detect = true; - trace_cam_isp_irq_delay_detect("IRQ delay at reg_upd", - ctx, req->request_id, - ctx_isp->substate_activated, - (rup_event_data->irq_mono_boot_time - - ctx_isp->irq_timestamps)); + + if (req) + trace_cam_isp_irq_delay_detect("IRQ delay at reg_upd", + ctx, req->request_id, + ctx_isp->substate_activated, + (rup_event_data->irq_mono_boot_time - + ctx_isp->irq_timestamps)); } ctx_isp->irq_timestamps = rup_event_data->irq_mono_boot_time; @@ -1196,7 +1198,7 @@ end: static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp, void *evt_data) { - struct cam_ctx_request *req; + struct cam_ctx_request *req = NULL; struct cam_isp_ctx_req *req_isp = NULL; struct cam_context *ctx = ctx_isp->base; uint64_t request_id = 0; @@ -2308,7 +2310,7 @@ static int __cam_isp_ctx_dump_in_top_state(struct cam_context *ctx, struct timeval cur_time; int rc = 0; uintptr_t cpu_addr; - size_t buf_len; + size_t buf_len = 0; struct cam_isp_context_dump_header *hdr; uint64_t *addr, *start; uint8_t *dst; @@ -2486,6 +2488,7 @@ static int __cam_isp_ctx_flush_req_in_top_state( struct cam_hw_stop_args stop_args; struct cam_isp_start_args start_isp; struct cam_hw_reset_args reset_args; + if 
(flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) { CAM_INFO(CAM_ISP, "ctx id:%d Last request id to flush is %lld", ctx->ctx_id, flush_req->req_id); diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c index ec4297822fb7..4de0f639a670 100644 --- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c +++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, 2020 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -273,13 +273,15 @@ static int cam_lrme_hw_dev_remove(struct platform_device *pdev) kfree(lrme_core); deinit_platform_res: - rc = cam_lrme_soc_deinit_resources(&lrme_hw->soc_info); - if (rc) - CAM_ERR(CAM_LRME, "Error in LRME soc deinit, rc=%d", rc); - - mutex_destroy(&lrme_hw->hw_mutex); - kfree(lrme_hw); + if (lrme_hw) { + rc = cam_lrme_soc_deinit_resources(&lrme_hw->soc_info); + if (rc) + CAM_ERR(CAM_LRME, + "Error in LRME soc deinit, rc=%d", rc); + mutex_destroy(&lrme_hw->hw_mutex); + kfree(lrme_hw); + } return rc; } From b9ae67e4be350eb9a94e932271b11bbfdd7775ee Mon Sep 17 00:00:00 2001 From: Aditya Mathur Date: Mon, 3 Aug 2020 17:36:24 -0700 Subject: [PATCH 022/141] net: stmmac: Enable RX parameter configuration from device tree Enabling RX configurations like rx_dll_bypass and rx_prog_swap valies to be configured from device tree to support multiple platforms. 
Change-Id: I851b07563fbdf5eab9a4c15b773cacdb6b93c952 Signed-off-by: Aditya Mathur --- .../stmicro/stmmac/dwmac-qcom-ethqos.c | 118 ++++++++++++------ .../stmicro/stmmac/dwmac-qcom-ethqos.h | 8 ++ 2 files changed, 90 insertions(+), 36 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index b6c8996c99b5..66703be2cd28 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -630,9 +630,10 @@ static int ethqos_dll_configure(struct qcom_ethqos *ethqos) rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN, 0, SDCC_HC_REG_DLL_CONFIG); - /* Set DLL_EN */ - rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN, - SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG); + if (!ethqos->io_macro.rx_dll_bypass) + /* Set DLL_EN */ + rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN, + SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG); if (ethqos->emac_ver != EMAC_HW_v2_3_2 && ethqos->emac_ver != EMAC_HW_v2_1_2) { @@ -677,8 +678,9 @@ static int ethqos_dll_configure(struct qcom_ethqos *ethqos) if (ethqos->emac_ver != EMAC_HW_v2_3_2 && ethqos->emac_ver != EMAC_HW_v2_1_2) { - rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS, - 0, SDCC_HC_REG_DLL_CONFIG2); + if (!ethqos->io_macro.rx_dll_bypass) + rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS, + 0, SDCC_HC_REG_DLL_CONFIG2); rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_MCLK_FREQ_CALC, 0x1A << 10, SDCC_HC_REG_DLL_CONFIG2); @@ -728,15 +730,19 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos) RGMII_IO_MACRO_CONFIG2); /* Set PRG_RCLK_DLY to 57 for 1.8 ns delay */ - if (ethqos->emac_ver == EMAC_HW_v2_3_2) + if (ethqos->emac_ver == EMAC_HW_v2_3_2) { rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY, 69, SDCC_HC_REG_DDR_CONFIG); - else if (ethqos->emac_ver == EMAC_HW_v2_1_2) + } else if (ethqos->emac_ver == EMAC_HW_v2_1_2) { rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY, 52, 
SDCC_HC_REG_DDR_CONFIG); - else - rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY, - 57, SDCC_HC_REG_DDR_CONFIG); + } else { + if (!ethqos->io_macro.rx_dll_bypass) + rgmii_updatel(ethqos, + SDCC_DDR_CONFIG_PRG_RCLK_DLY, + 57, SDCC_HC_REG_DDR_CONFIG); + } + rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN, SDCC_DDR_CONFIG_PRG_DLY_EN, SDCC_HC_REG_DDR_CONFIG); @@ -770,8 +776,7 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos) BIT(6), RGMII_IO_MACRO_CONFIG); rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15, 0, RGMII_IO_MACRO_CONFIG2); - if (ethqos->emac_ver == EMAC_HW_v2_3_2 || - ethqos->emac_ver == EMAC_HW_v2_1_2) + if (ethqos->io_macro.rx_prog_swap) rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP, RGMII_CONFIG2_RX_PROG_SWAP, RGMII_IO_MACRO_CONFIG2); @@ -825,8 +830,7 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos) RGMII_IO_MACRO_CONFIG); rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15, 0, RGMII_IO_MACRO_CONFIG2); - if (ethqos->emac_ver == EMAC_HW_v2_3_2 || - ethqos->emac_ver == EMAC_HW_v2_1_2) + if (ethqos->io_macro.rx_prog_swap) rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP, RGMII_CONFIG2_RX_PROG_SWAP, RGMII_IO_MACRO_CONFIG2); @@ -890,33 +894,56 @@ static int ethqos_configure(struct qcom_ethqos *ethqos) SDCC_HC_REG_DLL_CONFIG); if (ethqos->speed != SPEED_100 && ethqos->speed != SPEED_10) { - /* Set DLL_EN */ - rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN, - SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG); + if (ethqos->io_macro.rx_dll_bypass) { + /* Set DLL_CLOCK_DISABLE */ + rgmii_updatel(ethqos, + SDCC_DLL_CONFIG2_DLL_CLOCK_DIS, + SDCC_DLL_CONFIG2_DLL_CLOCK_DIS, + SDCC_HC_REG_DLL_CONFIG2); - /* Set CK_OUT_EN */ - rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN, - SDCC_DLL_CONFIG_CK_OUT_EN, - SDCC_HC_REG_DLL_CONFIG); + /* Clear DLL_EN */ + rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN, + 0, SDCC_HC_REG_DLL_CONFIG); - /* Set USR_CTL bit 26 with mask of 3 bits */ - rgmii_updatel(ethqos, GENMASK(26, 24), BIT(26), 
SDCC_USR_CTL); + /* Set PDN */ + rgmii_updatel(ethqos, + SDCC_DLL_CONFIG_PDN, + SDCC_DLL_CONFIG_PDN, + SDCC_HC_REG_DLL_CONFIG); - /* wait for DLL LOCK */ - do { - mdelay(1); - dll_lock = rgmii_readl(ethqos, SDC4_STATUS); - if (dll_lock & SDC4_STATUS_DLL_LOCK) - break; - retry--; - } while (retry > 0); - if (!retry) - dev_err(ðqos->pdev->dev, - "Timeout while waiting for DLL lock\n"); - } + /* Set USR_CTL bit 30 */ + rgmii_updatel(ethqos, BIT(30), BIT(30), SDCC_USR_CTL); + } else { + /* Set DLL_EN */ + rgmii_updatel(ethqos, + SDCC_DLL_CONFIG_DLL_EN, + SDCC_DLL_CONFIG_DLL_EN, + SDCC_HC_REG_DLL_CONFIG); + + /* Set CK_OUT_EN */ + rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN, + SDCC_DLL_CONFIG_CK_OUT_EN, + SDCC_HC_REG_DLL_CONFIG); + + /* Set USR_CTL bit 26 with mask of 3 bits */ + rgmii_updatel(ethqos, GENMASK(26, 24), BIT(26), + SDCC_USR_CTL); + + /* wait for DLL LOCK */ + do { + mdelay(1); + dll_lock = rgmii_readl(ethqos, SDC4_STATUS); + if (dll_lock & SDC4_STATUS_DLL_LOCK) + break; + retry--; + } while (retry > 0); + if (!retry) + dev_err(ðqos->pdev->dev, + "Timeout while waiting for DLL lock\n"); + } - if (ethqos->speed == SPEED_1000) ethqos_dll_configure(ethqos); + } ethqos_rgmii_macro_init(ethqos); @@ -2330,6 +2357,7 @@ bool qcom_ethqos_ipa_enabled(void) static int qcom_ethqos_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; + struct device_node *io_macro_node = NULL; struct plat_stmmacenet_data *plat_dat; struct stmmac_resources stmmac_res; struct qcom_ethqos *ethqos; @@ -2475,6 +2503,24 @@ static int qcom_ethqos_probe(struct platform_device *pdev) ETHQOSINFO("emac-phy-off-suspend = %d\n", ethqos->current_phy_mode); + io_macro_node = of_find_node_by_name(pdev->dev.of_node, + "io-macro-info"); + + if (ethqos->emac_ver == EMAC_HW_v2_3_2 || + ethqos->emac_ver == EMAC_HW_v2_1_2) { + ethqos->io_macro.rx_prog_swap = 1; + } else if (!io_macro_node) { + ethqos->io_macro.rx_prog_swap = 0; + } else { + if 
(of_property_read_bool(io_macro_node, "rx-prog-swap")) + ethqos->io_macro.rx_prog_swap = 1; + } + + if (io_macro_node) { + if (of_property_read_bool(io_macro_node, "rx-dll-bypass")) + ethqos->io_macro.rx_dll_bypass = 1; + } + ethqos->ioaddr = (&stmmac_res)->addr; ethqos_update_rgmii_tx_drv_strength(ethqos); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.h index e275873aa250..4c6c8f5143f3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.h @@ -408,6 +408,11 @@ struct ethqos_emac_driver_data { unsigned int num_por; }; +struct ethqos_io_macro { + bool rx_prog_swap; + bool rx_dll_bypass; +}; + struct qcom_ethqos { struct platform_device *pdev; void __iomem *rgmii_base; @@ -495,6 +500,9 @@ struct qcom_ethqos { int backup_suspend_speed; u32 backup_bmcr; unsigned backup_autoneg:1; + + /* IO Macro parameters */ + struct ethqos_io_macro io_macro; }; struct pps_cfg { From 8aee7c9bba213a34e23e41eddd4aadfb2da0f411 Mon Sep 17 00:00:00 2001 From: Lijuan Gao Date: Fri, 3 Jul 2020 15:13:33 +0800 Subject: [PATCH 023/141] lkdtm: Correct the size value for WRITE_KERN Correct the value of size, WRITE_KERN will not work as expected if the do_overwritten is less than do_nothing. 
Change-Id: I2c69c62829cd55f280efd63763316b91691c0620 Signed-off-by: Lijuan Gao --- drivers/misc/lkdtm_perms.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/misc/lkdtm_perms.c b/drivers/misc/lkdtm_perms.c index 62f76d506f04..e859d94fd903 100644 --- a/drivers/misc/lkdtm_perms.c +++ b/drivers/misc/lkdtm_perms.c @@ -109,7 +109,12 @@ void lkdtm_WRITE_KERN(void) size_t size; unsigned char *ptr; - size = (unsigned long)do_overwritten - (unsigned long)do_nothing; + if ((unsigned long)do_overwritten < (unsigned long)do_nothing) + size = (unsigned long)do_nothing - + (unsigned long)do_overwritten; + else + size = (unsigned long)do_overwritten - + (unsigned long)do_nothing; ptr = (unsigned char *)do_overwritten; pr_info("attempting bad %zu byte write at %px\n", size, ptr); From b19a9f80f99273d9e0ba83e356a90989c87594a9 Mon Sep 17 00:00:00 2001 From: Suraj Jaiswal Date: Wed, 12 Aug 2020 12:14:51 +0530 Subject: [PATCH 024/141] net: stmmac: FR60005 unused data cleanup Remove unused header files & API. 
Change-Id: I7105753ec899d7c5ce51eae9df0bb421694f17cb Signed-off-by: Suraj Jaiswal --- .../stmicro/stmmac/dwmac-qcom-ethqos.c | 91 ++++++++----------- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1 - 2 files changed, 39 insertions(+), 53 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index b6c8996c99b5..e6263cea2acb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -23,10 +23,6 @@ #include #include #include -#include -#include -#include -#include #include "stmmac.h" #include "stmmac_platform.h" #include "dwmac-qcom-ethqos.h" @@ -44,7 +40,6 @@ static int phy_digital_loopback_config( struct qcom_ethqos *ethqos, int speed, int config); bool phy_intr_en; -static char buf[2000]; static struct ethqos_emac_por emac_por[] = { { .offset = RGMII_IO_MACRO_CONFIG, .value = 0x0 }, @@ -970,44 +965,6 @@ static int ethqos_mdio_read(struct stmmac_priv *priv, int phyaddr, int phyreg) return data; } -static int ethqos_mdio_write(struct stmmac_priv *priv, int phyaddr, int phyreg, - u16 phydata) -{ - unsigned int mii_address = priv->hw->mii.addr; - unsigned int mii_data = priv->hw->mii.data; - u32 v; - u32 value = MII_BUSY; - struct qcom_ethqos *ethqos = priv->plat->bsp_priv; - - if (ethqos->phy_state == PHY_IS_OFF) { - ETHQOSINFO("Phy is in off state writing is not possible\n"); - return -EOPNOTSUPP; - } - value |= (phyaddr << priv->hw->mii.addr_shift) - & priv->hw->mii.addr_mask; - value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; - - value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) - & priv->hw->mii.clk_csr_mask; - if (priv->plat->has_gmac4) - value |= MII_GMAC4_WRITE; - else - value |= MII_WRITE; - - /* Wait until any existing MII operation is complete */ - if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), - 100, 10000)) - return -EBUSY; - - /* Set the MII 
address register to write */ - writel_relaxed(phydata, priv->ioaddr + mii_data); - writel_relaxed(value, priv->ioaddr + mii_address); - - /* Wait until any existing MII operation is complete */ - return readl_poll_timeout(priv->ioaddr + mii_address, v, - !(v & MII_BUSY), 100, 10000); -} - static int ethqos_phy_intr_config(struct qcom_ethqos *ethqos) { int ret = 0; @@ -1269,8 +1226,18 @@ static ssize_t read_phy_off(struct file *file, size_t count, loff_t *ppos) { unsigned int len = 0, buf_len = 2000; + char *buf; + ssize_t ret_cnt; struct qcom_ethqos *ethqos = file->private_data; + if (!ethqos) { + ETHQOSERR("NULL Pointer\n"); + return -EINVAL; + } + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + if (ethqos->current_phy_mode == DISABLE_PHY_IMMEDIATELY) len += scnprintf(buf + len, buf_len - len, "Disable phy immediately enabled\n"); @@ -1295,10 +1262,14 @@ static ssize_t read_phy_off(struct file *file, len += scnprintf(buf + len, buf_len - len, "Invalid Phy State\n"); - if (len > buf_len) + if (len > buf_len) { + ETHQOSERR("(len > buf_len) buffer not sufficient\n"); len = buf_len; + } - return simple_read_from_buffer(user_buf, count, ppos, buf, len); + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + return ret_cnt; } static ssize_t phy_off_config( @@ -1436,9 +1407,12 @@ static int phy_digital_loopback_config( return -EINVAL; } if (phydata != 0) { - ethqos_mdio_write( - priv, priv->plat->phy_addr, MII_BMCR, phydata); - ETHQOSINFO("write done for phy loopback\n"); + if (priv->phydev) { + phy_write(priv->phydev, MII_BMCR, phydata); + ETHQOSINFO("write done for phy loopback\n"); + } else { + ETHQOSINFO("Phy dev is NULL\n"); + } } return 0; } @@ -1694,6 +1668,16 @@ static ssize_t read_loopback_config(struct file *file, { unsigned int len = 0, buf_len = 2000; struct qcom_ethqos *ethqos = file->private_data; + char *buf; + ssize_t ret_cnt; + + if (!ethqos) { + ETHQOSERR("NULL Pointer\n"); + return -EINVAL; + } + buf = 
kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; if (ethqos->current_loopback == DISABLE_LOOPBACK) len += scnprintf(buf + len, buf_len - len, @@ -1713,7 +1697,9 @@ static ssize_t read_loopback_config(struct file *file, if (len > buf_len) len = buf_len; - return simple_read_from_buffer(user_buf, count, ppos, buf, len); + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + return ret_cnt; } static const struct file_operations fops_phy_reg_dump = { @@ -2662,9 +2648,10 @@ static int qcom_ethqos_resume(struct device *dev) ethqos_reset_phy_enable_interrupt(ethqos); if (ethqos->backup_autoneg == AUTONEG_DISABLE) { priv->phydev->autoneg = ethqos->backup_autoneg; - ethqos_mdio_write( - priv, priv->plat->phy_addr, - MII_BMCR, ethqos->backup_bmcr); + if (priv->phydev) + phy_write(priv->phydev, MII_BMCR, ethqos->backup_bmcr); + } else { + ETHQOSINFO("Phy dev is NULL\n"); } } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 52cdaa9c0c02..a3a66fcbc1a2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -35,7 +35,6 @@ #include "dwmac-qcom-ipa-offload.h" #include #include -#include #include struct stmmac_resources { From 9c90ca43065b226ebda5a4dc1490fa4d381ec5a4 Mon Sep 17 00:00:00 2001 From: Ajay Agarwal Date: Thu, 6 Aug 2020 17:34:56 +0530 Subject: [PATCH 025/141] usb: gadget: u_ether: Add skb check in eth_start_xmit 'Commit 145c3d9be206 ("usb: u_ether: Prevent dropping multicast packet in rmnet ip mode")' removed skb check from eth_start_xmit function. Whike ndo_start_xmit are typically expected never to be called with NULL SKBs, for NCM function this xmit function is called with NULL SKB initially which then uses the 'wrap' routine to create valid SKBs. But this null SKB is getting dereferenced before 'wrap' routine could run on it when in ethernet mode. Fix this by bringing in the skb check. 
Change-Id: I9d40f70190eb91ecc848b71c4ce31a656c60a28d Signed-off-by: Ajay Agarwal --- drivers/usb/gadget/function/u_ether.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 214fad87ce05..fc5f05a4b5ab 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -798,13 +798,13 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, } spin_unlock_irqrestore(&dev->lock, flags); - if (!in) { + if (skb && !in) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } /* apply outgoing CDC or RNDIS filters */ - if (!test_bit(RMNET_MODE_LLP_IP, &dev->flags) && + if (skb && !test_bit(RMNET_MODE_LLP_IP, &dev->flags) && !is_promisc(cdc_filter)) { u8 *dest = skb->data; @@ -872,16 +872,17 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, if (dev->wrap) { if (dev->port_usb) skb = dev->wrap(dev->port_usb, skb); - if (!skb) { - spin_unlock_irqrestore(&dev->lock, flags); - /* Multi frame CDC protocols may store the frame for - * later which is not a dropped frame. - */ - if (dev->port_usb && - dev->port_usb->supports_multi_frame) - goto multiframe; - goto drop; - } + } + + if (!skb) { + spin_unlock_irqrestore(&dev->lock, flags); + /* Multi frame CDC protocols may store the frame for + * later which is not a dropped frame. + */ + if (dev->port_usb && + dev->port_usb->supports_multi_frame) + goto multiframe; + goto drop; } dev->tx_skb_hold_count++; From 8e39e84eb7ee5f5dfe6f818de3801c8b93ad853c Mon Sep 17 00:00:00 2001 From: Sunil Paidimarri Date: Tue, 4 Aug 2020 18:12:46 -0700 Subject: [PATCH 026/141] net: stmmac: Fix the ioctl case for timestamping Add a break for timestamping case. 
Change-Id: I7899b44729b1681c58fa128523790cde6caaeb7c Acked-by: Rahul Kawadgave Signed-off-by: Sunil Paidimarri --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8a1a05b60e97..b4c12a7f4959 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -4003,6 +4003,7 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) break; case SIOCSHWTSTAMP: ret = stmmac_hwtstamp_ioctl(dev, rq); + break; case SIOCDEVPRIVATE: ret = ethqos_handle_prv_ioctl(dev, rq, cmd); break; From 5bf9dfa8348ef954bcec5fff4d2dd4013cb63b8d Mon Sep 17 00:00:00 2001 From: Chunhuan Zhan Date: Tue, 11 Aug 2020 10:11:05 +0800 Subject: [PATCH 027/141] msm: ais: change the buffer SOF timestamp match When IFE RDI WM HW list becomes empty, the below situaltion will happen: 1. queue buffer 0 when last frame done, queue SOF0; 2. queue SOF1; 3. buffer 0 frame done. step3 will use the SOF0 as its SOF timestamp, it is incorrect. Change the SOF timestamp match method: 1. when queue buffer 0 to WM HW, get the SOF HW timestamp 2. 
when framedone, traverse sof_q, if the sof <= queue_sof_ts, mismatch, discard; else match Change-Id: I0b2bfbd5b9148aec97476bf87bdfb5fffee5bbdd Signed-off-by: Chunhuan Zhan --- .../msm/ais/ais_isp/vfe_hw/ais_vfe_core.c | 57 ++++++++++++++++--- .../msm/ais/ais_isp/vfe_hw/ais_vfe_core.h | 1 + 2 files changed, 50 insertions(+), 8 deletions(-) diff --git a/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_core.c b/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_core.c index b8a1909520ec..b0897259abf9 100644 --- a/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_core.c +++ b/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_core.c @@ -684,6 +684,7 @@ static void ais_vfe_q_bufs_to_hw(struct ais_vfe_hw_core_info *core_info, struct ais_vfe_bus_ver2_reg_offset_bus_client *client_regs = NULL; uint32_t fifo_status = 0; bool is_full = false; + struct ais_ife_rdi_get_timestamp_args get_ts; rdi_path = &core_info->rdi_out[path]; bus_hw_info = core_info->vfe_hw_info->bus_hw_info; @@ -702,10 +703,19 @@ static void ais_vfe_q_bufs_to_hw(struct ais_vfe_hw_core_info *core_info, struct ais_vfe_buffer_t, list); list_del_init(&vfe_buf->list); - CAM_DBG(CAM_ISP, "IFE%d|RDI%d: Q %d(0x%x) FIFO:%d", + get_ts.path = path; + get_ts.ts = &vfe_buf->ts_hw; + core_info->csid_hw->hw_ops.process_cmd( + core_info->csid_hw->hw_priv, + AIS_IFE_CSID_CMD_GET_TIME_STAMP, + &get_ts, + sizeof(get_ts)); + + + CAM_DBG(CAM_ISP, "IFE%d|RDI%d: Q %d(0x%x) FIFO:%d ts %llu", core_info->vfe_idx, path, vfe_buf->bufIdx, vfe_buf->iova_addr, - rdi_path->num_buffer_hw_q); + rdi_path->num_buffer_hw_q, vfe_buf->ts_hw.cur_sof_ts); cam_io_w_mb(vfe_buf->iova_addr, core_info->mem_base + client_regs->image_addr); @@ -713,6 +723,7 @@ static void ais_vfe_q_bufs_to_hw(struct ais_vfe_hw_core_info *core_info, list_add_tail(&vfe_buf->list, &rdi_path->buffer_hw_q); ++rdi_path->num_buffer_hw_q; + fifo_status = cam_io_r_mb(core_info->mem_base + bus_hw_info->common_reg.addr_fifo_status); is_full = fifo_status & (1 << 
path); @@ -901,9 +912,10 @@ static int ais_vfe_q_sof(struct ais_vfe_hw_core_info *core_info, } else { rc = -1; - CAM_ERR(CAM_ISP, "I%d|R%d|F%llu: free timestamp empty (%d)", + CAM_ERR(CAM_ISP, + "I%d|R%d|F%llu: free timestamp empty (%d) sof %llu", core_info->vfe_idx, path, p_sof->frame_cnt, - p_rdi->num_buffer_hw_q); + p_rdi->num_buffer_hw_q, p_sof->cur_sof_hw_ts); } return rc; @@ -943,6 +955,13 @@ static void ais_vfe_handle_sof_rdi(struct ais_vfe_hw_core_info *core_info, prev_sof_hw_ts, p_rdi->last_sof_info.cur_sof_hw_ts, ts_delta); + + CAM_DBG(CAM_ISP, + "I%d R%d miss_sof %u prev %llu last %llu cur %llu", + core_info->vfe_idx, path, + miss_sof, prev_sof_hw_ts, + p_rdi->last_sof_info.cur_sof_hw_ts, + cur_sof_hw_ts); } } @@ -1140,6 +1159,7 @@ static void ais_vfe_bus_handle_client_frame_done( uint64_t cur_sof_hw_ts; bool last_addr_match = false; + CAM_DBG(CAM_ISP, "I%d|R%d last_addr 0x%x", core_info->vfe_idx, path, last_addr); @@ -1157,6 +1177,7 @@ static void ais_vfe_bus_handle_client_frame_done( while (rdi_path->num_buffer_hw_q && !last_addr_match) { struct ais_sof_info_t *p_sof_info = NULL; + bool is_sof_match = false; if (list_empty(&rdi_path->buffer_hw_q)) { CAM_DBG(CAM_ISP, "I%d|R%d: FD while HW Q empty", @@ -1181,10 +1202,29 @@ static void ais_vfe_bus_handle_client_frame_done( rdi_path->num_buffer_hw_q, last_addr); if (!list_empty(&rdi_path->sof_info_q)) { - p_sof_info = list_first_entry(&rdi_path->sof_info_q, - struct ais_sof_info_t, list); - list_del_init(&p_sof_info->list); - rdi_path->num_sof_info_q--; + while (!is_sof_match && + !list_empty(&rdi_path->sof_info_q)) { + p_sof_info = + list_first_entry(&rdi_path->sof_info_q, + struct ais_sof_info_t, list); + list_del_init(&p_sof_info->list); + rdi_path->num_sof_info_q--; + if (p_sof_info->cur_sof_hw_ts > + vfe_buf->ts_hw.cur_sof_ts) { + is_sof_match = true; + break; + } + list_add_tail(&p_sof_info->list, + &rdi_path->free_sof_info_list); + } + + if (!is_sof_match) { + p_sof_info = NULL; + CAM_ERR(CAM_ISP, + 
"I%d|R%d: can't find the match sof", + core_info->vfe_idx, path); + } + } else CAM_ERR(CAM_ISP, "I%d|R%d: SOF info Q is empty", core_info->vfe_idx, path); @@ -1543,6 +1583,7 @@ irqreturn_t ais_vfe_irq(int irq_num, void *data) CAM_DBG(CAM_ISP, "IFE%d BUS_WR", core_info->vfe_idx); work_data.evt_type = AIS_VFE_HW_IRQ_EVENT_BUS_WR; ais_vfe_irq_fill_bus_wr_status(core_info, &work_data); + ais_vfe_dispatch_irq(vfe_hw, &work_data); } if (ife_status[1]) { diff --git a/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_core.h b/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_core.h index 3b5ee36182bc..ccd21f8d3794 100644 --- a/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_core.h +++ b/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_core.h @@ -82,6 +82,7 @@ struct ais_vfe_buffer_t { int32_t mem_handle; uint64_t iova_addr; uint32_t bufIdx; + struct ais_ife_rdi_timestamps ts_hw; }; struct ais_sof_info_t { From 49cf6b26af0bfd96219802d1cce55b0710943e0d Mon Sep 17 00:00:00 2001 From: Sreelakshmi Gownipalli Date: Wed, 7 Aug 2019 22:54:25 -0700 Subject: [PATCH 028/141] diag: Add usb events to a queue Add usb connect and disconnect events to a queue and process each event in work function so that all of the usb connect and disconnect events are processed without any miss. 
Change-Id: I2b5debef28d683f55a727e53e41c811419d2bf3f Signed-off-by: Sreelakshmi Gownipalli --- drivers/char/diag/diag_debugfs.c | 6 +- drivers/char/diag/diag_usb.c | 105 ++++++++++++++++++------------- drivers/char/diag/diag_usb.h | 10 ++- 3 files changed, 71 insertions(+), 50 deletions(-) diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c index 6910d5c81001..1d95dcb3fa12 100644 --- a/drivers/char/diag/diag_debugfs.c +++ b/drivers/char/diag/diag_debugfs.c @@ -445,8 +445,7 @@ static ssize_t diag_dbgfs_read_usbinfo(struct file *file, char __user *ubuf, "write count: %lu\n" "read work pending: %d\n" "read done work pending: %d\n" - "connect work pending: %d\n" - "disconnect work pending: %d\n" + "event work pending: %d\n" "max size supported: %d\n\n", usb_info->id, usb_info->name, @@ -460,8 +459,7 @@ static ssize_t diag_dbgfs_read_usbinfo(struct file *file, char __user *ubuf, usb_info->write_cnt, work_pending(&usb_info->read_work), work_pending(&usb_info->read_done_work), - work_pending(&usb_info->connect_work), - work_pending(&usb_info->disconnect_work), + work_pending(&usb_info->event_work), usb_info->max_size); bytes_in_buffer += bytes_written; diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c index 43cf043eb7d5..0b7a92e99d64 100644 --- a/drivers/char/diag/diag_usb.c +++ b/drivers/char/diag/diag_usb.c @@ -94,7 +94,29 @@ struct diag_usb_info diag_usb[NUM_DIAG_USB_DEV] = { } #endif }; +static int diag_usb_event_add(struct diag_usb_info *usb_info, int data) +{ + struct diag_usb_event_q *entry = NULL; + entry = kzalloc(sizeof(struct diag_usb_event_q), GFP_ATOMIC); + if (!entry) + return -ENOMEM; + + entry->data = data; + INIT_LIST_HEAD(&entry->link); + list_add_tail(&entry->link, &usb_info->event_q); + + return 0; +} +static void diag_usb_event_remove(struct diag_usb_event_q *entry) +{ + if (!entry) + return; + + list_del(&entry->link); + kfree(entry); + entry = NULL; +} static int diag_usb_buf_tbl_add(struct 
diag_usb_info *usb_info, unsigned char *buf, uint32_t len, int ctxt) { @@ -202,25 +224,6 @@ static void usb_connect(struct diag_usb_info *ch) queue_work(ch->usb_wq, &(ch->read_work)); } -static void usb_connect_work_fn(struct work_struct *work) -{ - struct diag_usb_info *ch = container_of(work, struct diag_usb_info, - connect_work); - - wait_event_interruptible(ch->wait_q, ch->enabled > 0); - ch->max_size = usb_diag_request_size(ch->hdl); - atomic_set(&ch->connected, 1); - - DIAG_LOG(DIAG_DEBUG_PERIPHERALS, - "diag: USB channel %s: disconnected_status: %d, connected_status: %d\n", - ch->name, atomic_read(&ch->disconnected), atomic_read(&ch->connected)); - - usb_connect(ch); - - if (atomic_read(&ch->disconnected)) - wake_up_interruptible(&ch->wait_q); -} - /* * This function is called asynchronously when USB is disconnected * and synchronously when Diag wants to disconnect from USB @@ -232,32 +235,48 @@ static void usb_disconnect(struct diag_usb_info *ch) ch->ops->close(ch->ctxt, DIAG_USB_MODE); } -static void usb_disconnect_work_fn(struct work_struct *work) +static void usb_event_work_fn(struct work_struct *work) { struct diag_usb_info *ch = container_of(work, struct diag_usb_info, - disconnect_work); + event_work); + struct diag_usb_event_q *entry = NULL; if (!ch) return; + entry = list_first_entry(&(ch->event_q), struct diag_usb_event_q, link); + if (!entry) + return; - atomic_set(&ch->disconnected, 1); - DIAG_LOG(DIAG_DEBUG_PERIPHERALS, - "diag: USB channel %s: disconnected_status: %d, connected_status: %d\n", - ch->name, atomic_read(&ch->disconnected), atomic_read(&ch->connected)); + switch (entry->data) { + case USB_DIAG_CONNECT: + wait_event_interruptible(ch->wait_q, ch->enabled > 0); + ch->max_size = usb_diag_request_size(ch->hdl); + atomic_set(&ch->connected, 1); - wait_event_interruptible(ch->wait_q, atomic_read(&ch->connected) > 0); - atomic_set(&ch->connected, 0); - atomic_set(&ch->disconnected, 0); - DIAG_LOG(DIAG_DEBUG_PERIPHERALS, - "diag: USB channel 
%s: Cleared disconnected(%d) and connected(%d) status\n", - ch->name, atomic_read(&ch->disconnected), atomic_read(&ch->connected)); + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + "diag: USB channel %s: connected_status: %d\n", + ch->name, atomic_read(&ch->connected)); - if (!atomic_read(&ch->connected) && - driver->usb_connected && diag_mask_param() && - ch->id == DIAG_USB_LOCAL) - diag_clear_masks(0); + usb_connect(ch); + break; + case USB_DIAG_DISCONNECT: + atomic_set(&ch->connected, 0); + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + "diag: USB channel %s: Cleared connected(%d) status\n", + ch->name, atomic_read(&ch->connected)); + + if (!atomic_read(&ch->connected) && + driver->usb_connected && + (ch->id == DIAG_USB_LOCAL) && diag_mask_param()) + diag_clear_masks(0); + + usb_disconnect(ch); + break; + } + diag_usb_event_remove(entry); + if (!list_empty(&ch->event_q)) + queue_work(ch->usb_wq, &(ch->event_work)); - usb_disconnect(ch); } static void usb_read_work_fn(struct work_struct *work) @@ -386,15 +405,16 @@ static void diag_usb_notifier(void *priv, unsigned int event, case USB_DIAG_CONNECT: pr_info("diag: USB channel %s: Received Connect event\n", usb_info->name); - if (!atomic_read(&usb_info->connected)) - queue_work(usb_info->usb_wq, - &usb_info->connect_work); + diag_usb_event_add(usb_info, USB_DIAG_CONNECT); + queue_work(usb_info->usb_wq, + &usb_info->event_work); break; case USB_DIAG_DISCONNECT: pr_info("diag: USB channel %s: Received Disconnect event\n", usb_info->name); + diag_usb_event_add(usb_info, USB_DIAG_DISCONNECT); queue_work(usb_info->usb_wq, - &usb_info->disconnect_work); + &usb_info->event_work); break; case USB_DIAG_READ_DONE: spin_lock_irqsave(&usb_info->lock, flags); @@ -672,7 +692,6 @@ int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops) if (!ch->read_ptr) goto err; atomic_set(&ch->connected, 0); - atomic_set(&ch->disconnected, 0); atomic_set(&ch->read_pending, 0); /* * This function is called when the mux registers with Diag-USB. 
@@ -681,11 +700,11 @@ int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops) */ atomic_set(&ch->diag_state, 1); INIT_LIST_HEAD(&ch->buf_tbl); + INIT_LIST_HEAD(&ch->event_q); diagmem_init(driver, ch->mempool); INIT_WORK(&(ch->read_work), usb_read_work_fn); INIT_WORK(&(ch->read_done_work), usb_read_done_work_fn); - INIT_WORK(&(ch->connect_work), usb_connect_work_fn); - INIT_WORK(&(ch->disconnect_work), usb_disconnect_work_fn); + INIT_WORK(&(ch->event_work), usb_event_work_fn); init_waitqueue_head(&ch->wait_q); strlcpy(wq_name, "DIAG_USB_", sizeof(wq_name)); strlcat(wq_name, ch->name, sizeof(wq_name)); diff --git a/drivers/char/diag/diag_usb.h b/drivers/char/diag/diag_usb.h index 95ef5ed58774..16417a932aee 100644 --- a/drivers/char/diag/diag_usb.h +++ b/drivers/char/diag/diag_usb.h @@ -46,6 +46,11 @@ struct diag_usb_buf_tbl_t { int ctxt; }; +struct diag_usb_event_q { + struct list_head link; + int data; +}; + struct diag_usb_info { int id; int ctxt; @@ -53,7 +58,6 @@ struct diag_usb_info { atomic_t connected; atomic_t diag_state; atomic_t read_pending; - atomic_t disconnected; int enabled; int mempool; int max_size; @@ -68,10 +72,10 @@ struct diag_usb_info { struct diag_request *read_ptr; struct work_struct read_work; struct work_struct read_done_work; - struct work_struct connect_work; - struct work_struct disconnect_work; + struct work_struct event_work; struct workqueue_struct *usb_wq; wait_queue_head_t wait_q; + struct list_head event_q; }; #ifdef CONFIG_DIAG_OVER_USB From 68c3e55787cf4ad0baaddafbdf515af2b52ac345 Mon Sep 17 00:00:00 2001 From: Manoj Prabhu B Date: Fri, 27 Dec 2019 16:18:45 +0530 Subject: [PATCH 029/141] diag: Synchronize USB notifications handling event queues Possible race conditions while handling USB notifications in event queues is prevented using spinlock to synchronize the list access. 
Change-Id: I91bff76b6134e600b0a7091d9576089226b1629c Signed-off-by: Manoj Prabhu B --- drivers/char/diag/diag_usb.c | 33 ++++++++++++++++++++++++++------- drivers/char/diag/diag_usb.h | 3 ++- 2 files changed, 28 insertions(+), 8 deletions(-) diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c index 0b7a92e99d64..46e4c5692c16 100644 --- a/drivers/char/diag/diag_usb.c +++ b/drivers/char/diag/diag_usb.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -240,15 +240,23 @@ static void usb_event_work_fn(struct work_struct *work) struct diag_usb_info *ch = container_of(work, struct diag_usb_info, event_work); struct diag_usb_event_q *entry = NULL; + unsigned long flags; if (!ch) return; + spin_lock_irqsave(&ch->event_lock, flags); entry = list_first_entry(&(ch->event_q), struct diag_usb_event_q, link); - if (!entry) + if (!entry) { + spin_unlock_irqrestore(&ch->event_lock, flags); return; + } switch (entry->data) { case USB_DIAG_CONNECT: + + diag_usb_event_remove(entry); + spin_unlock_irqrestore(&ch->event_lock, flags); + wait_event_interruptible(ch->wait_q, ch->enabled > 0); ch->max_size = usb_diag_request_size(ch->hdl); atomic_set(&ch->connected, 1); @@ -260,10 +268,14 @@ static void usb_event_work_fn(struct work_struct *work) usb_connect(ch); break; case USB_DIAG_DISCONNECT: + + diag_usb_event_remove(entry); + spin_unlock_irqrestore(&ch->event_lock, flags); + atomic_set(&ch->connected, 0); DIAG_LOG(DIAG_DEBUG_PERIPHERALS, - "diag: USB channel %s: Cleared connected(%d) status\n", - ch->name, atomic_read(&ch->connected)); + "diag: USB channel %s: Cleared connected(%d) status\n", + ch->name, atomic_read(&ch->connected)); if (!atomic_read(&ch->connected) && driver->usb_connected && @@ -272,8 +284,11 @@ 
static void usb_event_work_fn(struct work_struct *work) usb_disconnect(ch); break; + default: + spin_unlock_irqrestore(&ch->event_lock, flags); + break; } - diag_usb_event_remove(entry); + if (!list_empty(&ch->event_q)) queue_work(ch->usb_wq, &(ch->event_work)); @@ -371,8 +386,7 @@ static void diag_usb_write_done(struct diag_usb_info *ch, spin_unlock_irqrestore(&ch->write_lock, flags); return; } - DIAG_LOG(DIAG_DEBUG_MUX, "full write_done, ctxt: %d\n", - ctxt); + DIAG_LOG(DIAG_DEBUG_MUX, "full write_done\n"); list_del(&entry->track); ctxt = entry->ctxt; buf = entry->buf; @@ -405,14 +419,18 @@ static void diag_usb_notifier(void *priv, unsigned int event, case USB_DIAG_CONNECT: pr_info("diag: USB channel %s: Received Connect event\n", usb_info->name); + spin_lock_irqsave(&usb_info->event_lock, flags); diag_usb_event_add(usb_info, USB_DIAG_CONNECT); + spin_unlock_irqrestore(&usb_info->event_lock, flags); queue_work(usb_info->usb_wq, &usb_info->event_work); break; case USB_DIAG_DISCONNECT: pr_info("diag: USB channel %s: Received Disconnect event\n", usb_info->name); + spin_lock_irqsave(&usb_info->event_lock, flags); diag_usb_event_add(usb_info, USB_DIAG_DISCONNECT); + spin_unlock_irqrestore(&usb_info->event_lock, flags); queue_work(usb_info->usb_wq, &usb_info->event_work); break; @@ -685,6 +703,7 @@ int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops) ch->ctxt = ctxt; spin_lock_init(&ch->lock); spin_lock_init(&ch->write_lock); + spin_lock_init(&ch->event_lock); ch->read_buf = kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL); if (!ch->read_buf) goto err; diff --git a/drivers/char/diag/diag_usb.h b/drivers/char/diag/diag_usb.h index 16417a932aee..abebb0c309aa 100644 --- a/drivers/char/diag/diag_usb.h +++ b/drivers/char/diag/diag_usb.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -66,6 +66,7 @@ struct diag_usb_info { unsigned long write_cnt; spinlock_t lock; spinlock_t write_lock; + spinlock_t event_lock; struct usb_diag_ch *hdl; struct diag_mux_ops *ops; unsigned char *read_buf; From de4b3858f6d95938a306645d184f07220fe2dad5 Mon Sep 17 00:00:00 2001 From: Rohith Kollalsi Date: Fri, 7 Aug 2020 15:25:46 +0530 Subject: [PATCH 030/141] usb: f_gsi: Implement remote wakeup feature for gsi for bus suspend Earlier remote wakeup feature for gsi was only available for function suspend. This change adds remote wakeup feature even for bus suspend. The entire implementation of remote wakeup for gsi is now moved to gsi driver from func_ep_queue present in composite. Change-Id: I773ea038f1ee7c081c50388db3f3d397fa7e28ba Signed-off-by: Rohith Kollalsi --- drivers/usb/gadget/function/f_gsi.c | 40 +++++++++++++++++++++++++++-- drivers/usb/gadget/function/f_gsi.h | 1 + 2 files changed, 39 insertions(+), 2 deletions(-) diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index 709052bf9e53..ad88ebd61285 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -1858,9 +1858,41 @@ static int queue_notification_request(struct f_gsi *gsi) { int ret; unsigned long flags; + struct usb_function *func = &gsi->function; + struct usb_request *req = gsi->c_port.notify_req; + struct usb_ep *ep = gsi->c_port.notify; + struct usb_gadget *gadget = func->config->cdev->gadget; - ret = usb_func_ep_queue(&gsi->function, gsi->c_port.notify, - gsi->c_port.notify_req, GFP_ATOMIC); + if (gsi->c_port.is_suspended) { + /*For remote wakeup, queue the req from gsi_resume*/ + spin_lock_irqsave(&gsi->c_port.lock, flags); + gsi->c_port.notify_req_queued = false; + spin_unlock_irqrestore(&gsi->c_port.lock, flags); + + if (gsi->rwake_inprogress) { + log_event_dbg("%s remote-wakeup 
in progress\n", + __func__); + return -EBUSY; + } + + if (!usb_gsi_remote_wakeup_allowed(func)) { + log_event_dbg("%s remote-wakeup not capable\n", + __func__); + return -EOPNOTSUPP; + } + + log_event_dbg("%s wakeup host\n", __func__); + if (gadget->speed >= USB_SPEED_SUPER + && func->func_is_suspended) + ret = usb_func_wakeup(func); + else + ret = usb_gadget_wakeup(gadget); + + gsi->rwake_inprogress = true; + return ret; + } + + ret = usb_ep_queue(ep, req, GFP_ATOMIC); if (ret < 0) { spin_lock_irqsave(&gsi->c_port.lock, flags); gsi->c_port.notify_req_queued = false; @@ -2493,6 +2525,7 @@ static int gsi_set_alt(struct usb_function *f, unsigned int intf, gsi->data_id, gsi->data_interface_up); } + gsi->c_port.is_suspended = false; atomic_set(&gsi->connected, 1); /* send 0 len pkt to qti to notify state change */ @@ -2593,6 +2626,7 @@ static void gsi_suspend(struct usb_function *f) return; } + gsi->c_port.is_suspended = true; block_db = true; usb_gsi_ep_op(gsi->d_port.in_ep, (void *)&block_db, GSI_EP_OP_SET_CLR_BLOCK_DBL); @@ -2624,6 +2658,8 @@ static void gsi_resume(struct usb_function *f) if (gsi->c_port.notify && !gsi->c_port.notify->desc) config_ep_by_speed(cdev->gadget, f, gsi->c_port.notify); + gsi->c_port.is_suspended = false; + /* Check any pending cpkt, and queue immediately on resume */ gsi_ctrl_send_notification(gsi); diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h index 0db1f0ac381d..7e9da0a189fa 100644 --- a/drivers/usb/gadget/function/f_gsi.h +++ b/drivers/usb/gadget/function/f_gsi.h @@ -209,6 +209,7 @@ struct gsi_ctrl_port { atomic_t ctrl_online; bool is_open; + bool is_suspended; wait_queue_head_t read_wq; From 158bac1447b3342587ce59fdcc15b26a6ad93e25 Mon Sep 17 00:00:00 2001 From: Trinath Thammishetty Date: Thu, 13 Aug 2020 20:53:59 +0530 Subject: [PATCH 031/141] ARM: dts: add audio device tree for sda429 Add audio device tree for sda429. 
Change-Id: Id3fd3a0ddfb2abdab22eb3934e9cfd3fc5b52092 Signed-off-by: Trinath Thammishetty --- arch/arm64/boot/dts/qcom/sda429-wdp-overlay.dts | 1 + arch/arm64/boot/dts/qcom/sda429-wtp-overlay.dts | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sda429-wdp-overlay.dts b/arch/arm64/boot/dts/qcom/sda429-wdp-overlay.dts index 5e1ba25527ff..62d2a33ec20e 100644 --- a/arch/arm64/boot/dts/qcom/sda429-wdp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sda429-wdp-overlay.dts @@ -15,6 +15,7 @@ /plugin/; #include "sda429-wdp.dtsi" +#include "sdm429-spyro-qrd-evt-audio.dtsi" / { model = "Qualcomm Technologies, Inc. SDA429 QRD BG WDP Overlay"; diff --git a/arch/arm64/boot/dts/qcom/sda429-wtp-overlay.dts b/arch/arm64/boot/dts/qcom/sda429-wtp-overlay.dts index aa6d7612dd96..8b141c392fdf 100644 --- a/arch/arm64/boot/dts/qcom/sda429-wtp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sda429-wtp-overlay.dts @@ -17,6 +17,7 @@ #include #include "sda429-wtp.dtsi" #include "sdm429-mdss-panels.dtsi" +#include "sdm429-spyro-qrd-evt-audio.dtsi" / { model = "Qualcomm Technologies, Inc. SDA429 QRD BG WTP Overlay"; From a14968b83328756fb4bd84f10cdbbd3a8569cf8a Mon Sep 17 00:00:00 2001 From: Yadu MG Date: Thu, 13 Aug 2020 12:00:02 +0530 Subject: [PATCH 032/141] ARM: dts: msm: Disable cti apps node for sa8155 commit 0a34474c492f ("coresight: cti: Add sys interface to show max trigger number") introduces a boot regression for sa8155 while reading devid cti register in the cti_probe. Disable the cti apps node temporarily until the issue is properly fixed. 
Change-Id: I7a43a4b529963407997b12cd80c25b124c42e2e4 Signed-off-by: Yadu MG --- arch/arm64/boot/dts/qcom/sa8155.dtsi | 32 ++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sa8155.dtsi b/arch/arm64/boot/dts/qcom/sa8155.dtsi index 18f6d27a9d83..08d6d085f653 100644 --- a/arch/arm64/boot/dts/qcom/sa8155.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155.dtsi @@ -761,6 +761,38 @@ status="disabled"; }; +&cti_cpu0 { + status = "disabled"; +}; + +&cti_cpu1 { + status = "disabled"; +}; + +&cti_cpu2 { + status = "disabled"; +}; + +&cti_cpu3 { + status = "disabled"; +}; + +&cti_cpu4 { + status = "disabled"; +}; + +&cti_cpu5 { + status = "disabled"; +}; + +&cti_cpu6 { + status = "disabled"; +}; + +&cti_cpu7 { + status = "disabled"; +}; + #include "sa8155-audio.dtsi" #include "sa8155-camera.dtsi" #include "sa8155-camera-sensor.dtsi" From 7a42f09a94c614152495010bf18bc2d83ea3365a Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Fri, 7 Aug 2020 20:11:12 +0530 Subject: [PATCH 033/141] Remove Per File Key based hardware crypto framework Remove the Per File Key logic based inline crypto support for file encryption framework. 
Change-Id: I90071562ba5c41b9db470363edac35c9fe5e4efa Signed-off-by: Neeraj Soni --- .../configs/vendor/sdm429-bg-perf_defconfig | 5 - arch/arm/configs/vendor/sdm429-bg_defconfig | 5 - .../arm/configs/vendor/trinket-perf_defconfig | 5 - arch/arm/configs/vendor/trinket_defconfig | 5 - .../arm64/configs/vendor/atoll-perf_defconfig | 7 - arch/arm64/configs/vendor/atoll_defconfig | 7 - .../configs/vendor/gen3auto-capture_defconfig | 5 - .../configs/vendor/gen3auto-perf_defconfig | 5 - arch/arm64/configs/vendor/gen3auto_defconfig | 5 - .../configs/vendor/qcs403-perf_defconfig | 2 - arch/arm64/configs/vendor/qcs403_defconfig | 2 - .../configs/vendor/qcs405-perf_defconfig | 2 - arch/arm64/configs/vendor/qcs405_defconfig | 2 - .../vendor/qcs610-minimal-perf_defconfig | 6 - .../vendor/qti-quin-gvm-perf_defconfig | 5 - .../configs/vendor/qti-quin-gvm_defconfig | 5 - .../vendor/sa2150p-nand-perf_defconfig | 2 - .../configs/vendor/sa2150p-nand_defconfig | 2 - .../configs/vendor/sa2150p-perf_defconfig | 2 - arch/arm64/configs/vendor/sa2150p_defconfig | 2 - .../configs/vendor/sa8155-perf_defconfig | 5 - arch/arm64/configs/vendor/sa8155_defconfig | 5 - .../configs/vendor/sdm660-perf_defconfig | 7 - arch/arm64/configs/vendor/sdm660_defconfig | 7 - arch/arm64/configs/vendor/sdmshrike_defconfig | 5 - .../vendor/sdmsteppe-auto-perf_defconfig | 6 - .../configs/vendor/sdmsteppe-auto_defconfig | 6 - .../configs/vendor/sdmsteppe-perf_defconfig | 7 - arch/arm64/configs/vendor/sdmsteppe_defconfig | 7 - .../configs/vendor/sm8150-perf_defconfig | 7 - arch/arm64/configs/vendor/sm8150_defconfig | 7 - .../configs/vendor/trinket-perf_defconfig | 5 - arch/arm64/configs/vendor/trinket_defconfig | 5 - block/bio.c | 15 +- block/blk-core.c | 19 +- block/blk-merge.c | 27 +- block/elevator.c | 8 +- drivers/block/virtio_blk.c | 2 + drivers/crypto/Kconfig | 4 - drivers/crypto/msm/ice.c | 8 +- drivers/md/Kconfig | 18 - drivers/md/Makefile | 1 - drivers/md/dm-crypt.c | 17 +- drivers/md/dm-default-key.c | 
306 ------ drivers/md/dm-table.c | 15 - drivers/misc/qseecom.c | 14 - drivers/mmc/core/queue.c | 4 - drivers/mmc/host/Kconfig | 11 - drivers/mmc/host/Makefile | 1 - drivers/mmc/host/cmdq_hci.c | 87 +- drivers/mmc/host/cmdq_hci.h | 12 +- drivers/mmc/host/sdhci-msm-ice.c | 587 ----------- drivers/mmc/host/sdhci-msm-ice.h | 173 ---- drivers/mmc/host/sdhci-msm.c | 157 +-- drivers/mmc/host/sdhci-msm.h | 11 +- drivers/mmc/host/sdhci.c | 130 --- drivers/mmc/host/sdhci.h | 9 - drivers/scsi/scsi_lib.c | 2 - drivers/scsi/ufs/Kconfig | 13 - drivers/scsi/ufs/Makefile | 1 - drivers/scsi/ufs/ufs-qcom-ice.c | 777 -------------- drivers/scsi/ufs/ufs-qcom-ice.h | 137 --- drivers/scsi/ufs/ufs-qcom.c | 181 +--- drivers/scsi/ufs/ufs-qcom.h | 25 +- drivers/scsi/ufs/ufshcd.c | 81 +- drivers/scsi/ufs/ufshcd.h | 83 -- fs/crypto/Makefile | 4 - fs/crypto/bio.c | 14 +- fs/crypto/fscrypt_ice.c | 190 ---- fs/crypto/fscrypt_ice.h | 99 -- fs/crypto/fscrypt_private.h | 9 +- fs/crypto/keysetup.c | 14 +- fs/crypto/keysetup_v1.c | 19 +- fs/direct-io.c | 41 - fs/ext4/Kconfig | 8 +- fs/ext4/ext4.h | 3 - fs/ext4/inode.c | 33 +- fs/ext4/move_extent.c | 11 +- fs/ext4/page-io.c | 37 +- fs/ext4/readpage.c | 3 +- fs/f2fs/data.c | 65 +- fs/f2fs/f2fs.h | 13 +- fs/namei.c | 10 - include/linux/bio.h | 8 - include/linux/blk_types.h | 19 +- include/linux/blkdev.h | 9 - include/linux/bvec.h | 3 - include/linux/fs.h | 2 - include/linux/fscrypt.h | 31 - include/linux/lsm_hooks.h | 3 - include/linux/mmc/core.h | 3 +- include/linux/pfk.h | 79 -- include/linux/security.h | 10 - include/scsi/scsi_host.h | 3 - security/Kconfig | 4 - security/Makefile | 2 - security/pfe/Kconfig | 50 - security/pfe/Makefile | 15 - security/pfe/pfk.c | 570 ----------- security/pfe/pfk_ext4.c | 212 ---- security/pfe/pfk_ext4.h | 37 - security/pfe/pfk_f2fs.c | 200 ---- security/pfe/pfk_f2fs.h | 37 - security/pfe/pfk_ice.c | 216 ---- security/pfe/pfk_ice.h | 34 - security/pfe/pfk_internal.h | 34 - security/pfe/pfk_kc.c | 951 ------------------ 
security/pfe/pfk_kc.h | 34 - security/security.c | 8 - security/selinux/include/objsec.h | 7 +- 110 files changed, 108 insertions(+), 6142 deletions(-) delete mode 100644 drivers/md/dm-default-key.c delete mode 100644 drivers/mmc/host/sdhci-msm-ice.c delete mode 100644 drivers/mmc/host/sdhci-msm-ice.h delete mode 100644 drivers/scsi/ufs/ufs-qcom-ice.c delete mode 100644 drivers/scsi/ufs/ufs-qcom-ice.h delete mode 100644 fs/crypto/fscrypt_ice.c delete mode 100644 fs/crypto/fscrypt_ice.h delete mode 100644 include/linux/pfk.h delete mode 100644 security/pfe/Kconfig delete mode 100644 security/pfe/Makefile delete mode 100644 security/pfe/pfk.c delete mode 100644 security/pfe/pfk_ext4.c delete mode 100644 security/pfe/pfk_ext4.h delete mode 100644 security/pfe/pfk_f2fs.c delete mode 100644 security/pfe/pfk_f2fs.h delete mode 100644 security/pfe/pfk_ice.c delete mode 100644 security/pfe/pfk_ice.h delete mode 100644 security/pfe/pfk_internal.h delete mode 100644 security/pfe/pfk_kc.c delete mode 100644 security/pfe/pfk_kc.h diff --git a/arch/arm/configs/vendor/sdm429-bg-perf_defconfig b/arch/arm/configs/vendor/sdm429-bg-perf_defconfig index d1fdcb58461c..7bb95bea9784 100644 --- a/arch/arm/configs/vendor/sdm429-bg-perf_defconfig +++ b/arch/arm/configs/vendor/sdm429-bg-perf_defconfig @@ -266,11 +266,9 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -479,7 +477,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_RTC_CLASS=y @@ -638,7 +635,6 @@ CONFIG_CORESIGHT_DUMMY=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y 
CONFIG_HARDENED_USERCOPY=y @@ -652,4 +648,3 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y diff --git a/arch/arm/configs/vendor/sdm429-bg_defconfig b/arch/arm/configs/vendor/sdm429-bg_defconfig index 23b7a19357f8..ff60e088faef 100644 --- a/arch/arm/configs/vendor/sdm429-bg_defconfig +++ b/arch/arm/configs/vendor/sdm429-bg_defconfig @@ -272,12 +272,10 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -493,7 +491,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_EDAC=y @@ -713,7 +710,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -728,6 +724,5 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_CRC8=y CONFIG_XZ_DEC=y diff --git a/arch/arm/configs/vendor/trinket-perf_defconfig b/arch/arm/configs/vendor/trinket-perf_defconfig index bb27b3b2c7f9..d50f0e7098f1 100644 --- a/arch/arm/configs/vendor/trinket-perf_defconfig +++ b/arch/arm/configs/vendor/trinket-perf_defconfig @@ -265,11 +265,9 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -491,7 +489,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y 
CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QPNP_HAPTICS=y @@ -652,7 +649,6 @@ CONFIG_CORESIGHT_DUMMY=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -666,4 +662,3 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y diff --git a/arch/arm/configs/vendor/trinket_defconfig b/arch/arm/configs/vendor/trinket_defconfig index 815167c2471b..c941cc9f033c 100644 --- a/arch/arm/configs/vendor/trinket_defconfig +++ b/arch/arm/configs/vendor/trinket_defconfig @@ -273,12 +273,10 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -505,7 +503,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QPNP_HAPTICS=y @@ -732,7 +729,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -747,5 +743,4 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_XZ_DEC=y diff --git a/arch/arm64/configs/vendor/atoll-perf_defconfig b/arch/arm64/configs/vendor/atoll-perf_defconfig index 2b5babeda540..8703fffe9ba7 100644 --- a/arch/arm64/configs/vendor/atoll-perf_defconfig +++ b/arch/arm64/configs/vendor/atoll-perf_defconfig 
@@ -280,11 +280,9 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -509,7 +507,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -677,8 +674,6 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -720,7 +715,6 @@ CONFIG_CORESIGHT_DUMMY=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -735,7 +729,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/atoll_defconfig b/arch/arm64/configs/vendor/atoll_defconfig index 38e428ac723f..35b0bb68edfe 100644 --- a/arch/arm64/configs/vendor/atoll_defconfig +++ b/arch/arm64/configs/vendor/atoll_defconfig @@ -290,12 +290,10 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -524,7 +522,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -707,8 +704,6 @@ CONFIG_EXT4_FS=y 
CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -805,7 +800,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -821,7 +815,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/gen3auto-capture_defconfig b/arch/arm64/configs/vendor/gen3auto-capture_defconfig index e16a32cd060b..db99f7087d9e 100644 --- a/arch/arm64/configs/vendor/gen3auto-capture_defconfig +++ b/arch/arm64/configs/vendor/gen3auto-capture_defconfig @@ -289,7 +289,6 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y @@ -647,8 +646,6 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -722,7 +719,6 @@ CONFIG_TEST_USER_COPY=m CONFIG_MEMTEST=y CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -738,7 +734,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/gen3auto-perf_defconfig 
b/arch/arm64/configs/vendor/gen3auto-perf_defconfig index 21275ae593c8..186548b8bb69 100644 --- a/arch/arm64/configs/vendor/gen3auto-perf_defconfig +++ b/arch/arm64/configs/vendor/gen3auto-perf_defconfig @@ -280,7 +280,6 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_UEVENT=y @@ -628,8 +627,6 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -652,7 +649,6 @@ CONFIG_SCHEDSTATS=y # CONFIG_DEBUG_PREEMPT is not set CONFIG_IPC_LOGGING=y CONFIG_DEBUG_ALIGN_RODATA=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -667,7 +663,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/gen3auto_defconfig b/arch/arm64/configs/vendor/gen3auto_defconfig index 7222359693cb..80c473bb6fdd 100644 --- a/arch/arm64/configs/vendor/gen3auto_defconfig +++ b/arch/arm64/configs/vendor/gen3auto_defconfig @@ -291,7 +291,6 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y @@ -660,8 +659,6 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -752,7 +749,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y 
CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -768,7 +764,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/qcs403-perf_defconfig b/arch/arm64/configs/vendor/qcs403-perf_defconfig index bf405ee3c748..c30b99e11678 100644 --- a/arch/arm64/configs/vendor/qcs403-perf_defconfig +++ b/arch/arm64/configs/vendor/qcs403-perf_defconfig @@ -408,7 +408,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -550,5 +549,4 @@ CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_STACK_HASH_ORDER_SHIFT=12 diff --git a/arch/arm64/configs/vendor/qcs403_defconfig b/arch/arm64/configs/vendor/qcs403_defconfig index 5cc12640652c..4d1ebdb00b84 100644 --- a/arch/arm64/configs/vendor/qcs403_defconfig +++ b/arch/arm64/configs/vendor/qcs403_defconfig @@ -422,7 +422,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -600,4 +599,3 @@ CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y diff --git a/arch/arm64/configs/vendor/qcs405-perf_defconfig b/arch/arm64/configs/vendor/qcs405-perf_defconfig index 923e9c719145..433dcaf105f1 100644 --- a/arch/arm64/configs/vendor/qcs405-perf_defconfig +++ b/arch/arm64/configs/vendor/qcs405-perf_defconfig @@ -408,7 +408,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -551,5 
+550,4 @@ CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_STACK_HASH_ORDER_SHIFT=12 diff --git a/arch/arm64/configs/vendor/qcs405_defconfig b/arch/arm64/configs/vendor/qcs405_defconfig index 3814ea6bbd5f..0dac911cb584 100644 --- a/arch/arm64/configs/vendor/qcs405_defconfig +++ b/arch/arm64/configs/vendor/qcs405_defconfig @@ -421,7 +421,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -600,4 +599,3 @@ CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y diff --git a/arch/arm64/configs/vendor/qcs610-minimal-perf_defconfig b/arch/arm64/configs/vendor/qcs610-minimal-perf_defconfig index 159b155d5142..ae508a225e02 100644 --- a/arch/arm64/configs/vendor/qcs610-minimal-perf_defconfig +++ b/arch/arm64/configs/vendor/qcs610-minimal-perf_defconfig @@ -265,7 +265,6 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y @@ -426,7 +425,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -563,8 +561,6 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y @@ -599,7 +595,6 @@ CONFIG_CORESIGHT_DUMMY=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y @@ -615,7 +610,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y 
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/qti-quin-gvm-perf_defconfig b/arch/arm64/configs/vendor/qti-quin-gvm-perf_defconfig index 61f2563f8f77..4368321f9748 100644 --- a/arch/arm64/configs/vendor/qti-quin-gvm-perf_defconfig +++ b/arch/arm64/configs/vendor/qti-quin-gvm-perf_defconfig @@ -473,8 +473,6 @@ CONFIG_ANDROID_BINDER_IPC=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -497,9 +495,6 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_PANIC_TIMEOUT=-1 CONFIG_SCHEDSTATS=y # CONFIG_DEBUG_PREEMPT is not set -CONFIG_PFK=y -CONFIG_PFK_WRAPPED_KEY_SUPPORTED=y -CONFIG_PFK_VIRTUALIZED=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_SELINUX=y diff --git a/arch/arm64/configs/vendor/qti-quin-gvm_defconfig b/arch/arm64/configs/vendor/qti-quin-gvm_defconfig index 19a24df6f119..fe39b2f6d43c 100644 --- a/arch/arm64/configs/vendor/qti-quin-gvm_defconfig +++ b/arch/arm64/configs/vendor/qti-quin-gvm_defconfig @@ -484,8 +484,6 @@ CONFIG_ANDROID_BINDER_IPC=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -541,9 +539,6 @@ CONFIG_ATOMIC64_SELFTEST=m CONFIG_TEST_USER_COPY=m CONFIG_MEMTEST=y CONFIG_PID_IN_CONTEXTIDR=y -CONFIG_PFK=y -CONFIG_PFK_WRAPPED_KEY_SUPPORTED=y -CONFIG_PFK_VIRTUALIZED=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_SELINUX=y diff --git a/arch/arm64/configs/vendor/sa2150p-nand-perf_defconfig b/arch/arm64/configs/vendor/sa2150p-nand-perf_defconfig index cd9423f0a52a..d78c6d646281 100644 --- a/arch/arm64/configs/vendor/sa2150p-nand-perf_defconfig +++ 
b/arch/arm64/configs/vendor/sa2150p-nand-perf_defconfig @@ -353,7 +353,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -495,5 +494,4 @@ CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_STACK_HASH_ORDER_SHIFT=12 diff --git a/arch/arm64/configs/vendor/sa2150p-nand_defconfig b/arch/arm64/configs/vendor/sa2150p-nand_defconfig index 77c30567355a..6194d3f50c3f 100644 --- a/arch/arm64/configs/vendor/sa2150p-nand_defconfig +++ b/arch/arm64/configs/vendor/sa2150p-nand_defconfig @@ -354,7 +354,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -496,5 +495,4 @@ CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_STACK_HASH_ORDER_SHIFT=12 diff --git a/arch/arm64/configs/vendor/sa2150p-perf_defconfig b/arch/arm64/configs/vendor/sa2150p-perf_defconfig index e1e518583dcf..3a0a2ed3cb33 100644 --- a/arch/arm64/configs/vendor/sa2150p-perf_defconfig +++ b/arch/arm64/configs/vendor/sa2150p-perf_defconfig @@ -363,7 +363,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -504,5 +503,4 @@ CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_STACK_HASH_ORDER_SHIFT=12 diff --git a/arch/arm64/configs/vendor/sa2150p_defconfig b/arch/arm64/configs/vendor/sa2150p_defconfig index 5a7550fc9fdf..4a65bc770fc5 100644 --- a/arch/arm64/configs/vendor/sa2150p_defconfig +++ b/arch/arm64/configs/vendor/sa2150p_defconfig @@ 
-364,7 +364,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -505,5 +504,4 @@ CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_STACK_HASH_ORDER_SHIFT=12 diff --git a/arch/arm64/configs/vendor/sa8155-perf_defconfig b/arch/arm64/configs/vendor/sa8155-perf_defconfig index 5032fae84d52..b3dc60f70e86 100644 --- a/arch/arm64/configs/vendor/sa8155-perf_defconfig +++ b/arch/arm64/configs/vendor/sa8155-perf_defconfig @@ -278,7 +278,6 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_UEVENT=y @@ -609,8 +608,6 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -633,7 +630,6 @@ CONFIG_SCHEDSTATS=y # CONFIG_DEBUG_PREEMPT is not set CONFIG_IPC_LOGGING=y CONFIG_DEBUG_ALIGN_RODATA=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -648,7 +644,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sa8155_defconfig b/arch/arm64/configs/vendor/sa8155_defconfig index 359ae8a3e879..13d5861e2017 100644 --- a/arch/arm64/configs/vendor/sa8155_defconfig +++ b/arch/arm64/configs/vendor/sa8155_defconfig @@ -290,7 +290,6 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y @@ 
-643,8 +642,6 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -731,7 +728,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -747,7 +743,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sdm660-perf_defconfig b/arch/arm64/configs/vendor/sdm660-perf_defconfig index 072cd837bfb1..699621a225bb 100644 --- a/arch/arm64/configs/vendor/sdm660-perf_defconfig +++ b/arch/arm64/configs/vendor/sdm660-perf_defconfig @@ -279,13 +279,11 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_MD=y CONFIG_MD_LINEAR=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -508,7 +506,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QTI_TRI_LED=y @@ -637,8 +634,6 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -675,7 +670,6 @@ CONFIG_CORESIGHT_TPDM=y CONFIG_CORESIGHT_QPDI=y CONFIG_CORESIGHT_HWEVENT=y CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -691,7 +685,6 @@ 
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y CONFIG_CRYPTO_DEV_OTA_CRYPTO=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem" CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sdm660_defconfig b/arch/arm64/configs/vendor/sdm660_defconfig index 695a5619c0e3..901b1c3b70a7 100644 --- a/arch/arm64/configs/vendor/sdm660_defconfig +++ b/arch/arm64/configs/vendor/sdm660_defconfig @@ -287,14 +287,12 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_MD=y CONFIG_MD_LINEAR=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -522,7 +520,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QTI_TRI_LED=y @@ -668,8 +665,6 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -764,7 +759,6 @@ CONFIG_CORESIGHT_HWEVENT=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -781,7 +775,6 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y CONFIG_CRYPTO_DEV_OTA_CRYPTO=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem" CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sdmshrike_defconfig b/arch/arm64/configs/vendor/sdmshrike_defconfig index 84bd6089822f..5325e4aeb38e 100644 --- 
a/arch/arm64/configs/vendor/sdmshrike_defconfig +++ b/arch/arm64/configs/vendor/sdmshrike_defconfig @@ -285,7 +285,6 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y @@ -636,8 +635,6 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -716,7 +713,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -730,7 +726,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sdmsteppe-auto-perf_defconfig b/arch/arm64/configs/vendor/sdmsteppe-auto-perf_defconfig index 151f629ca328..541777f5b2ad 100644 --- a/arch/arm64/configs/vendor/sdmsteppe-auto-perf_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe-auto-perf_defconfig @@ -280,7 +280,6 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y @@ -491,7 +490,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -641,8 +639,6 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -669,7 +665,6 @@ CONFIG_SCHEDSTATS=y # 
CONFIG_DEBUG_PREEMPT is not set CONFIG_IPC_LOGGING=y CONFIG_DEBUG_ALIGN_RODATA=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -684,7 +679,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sdmsteppe-auto_defconfig b/arch/arm64/configs/vendor/sdmsteppe-auto_defconfig index 49733043c7b4..f806daee1e59 100644 --- a/arch/arm64/configs/vendor/sdmsteppe-auto_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe-auto_defconfig @@ -292,7 +292,6 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y @@ -513,7 +512,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -679,8 +677,6 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -779,7 +775,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -795,7 +790,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig b/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig index b36774b71729..f06dced29a56 100644 --- 
a/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig @@ -274,11 +274,9 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -495,7 +493,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -655,8 +652,6 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -698,7 +693,6 @@ CONFIG_CORESIGHT_DUMMY=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -713,7 +707,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sdmsteppe_defconfig b/arch/arm64/configs/vendor/sdmsteppe_defconfig index cdc6969fe1d4..e23a67e5fa74 100644 --- a/arch/arm64/configs/vendor/sdmsteppe_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe_defconfig @@ -284,12 +284,10 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -519,7 +517,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y 
-CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -692,8 +689,6 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -790,7 +785,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -806,7 +800,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sm8150-perf_defconfig b/arch/arm64/configs/vendor/sm8150-perf_defconfig index 6b8a37fc3af9..c5444dbd97af 100644 --- a/arch/arm64/configs/vendor/sm8150-perf_defconfig +++ b/arch/arm64/configs/vendor/sm8150-perf_defconfig @@ -285,11 +285,9 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -663,8 +661,6 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -707,8 +703,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y -CONFIG_PFK_WRAPPED_KEY_SUPPORTED=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -723,7 +717,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y 
CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sm8150_defconfig b/arch/arm64/configs/vendor/sm8150_defconfig index 1f1866b3ebec..53fd6b98411a 100644 --- a/arch/arm64/configs/vendor/sm8150_defconfig +++ b/arch/arm64/configs/vendor/sm8150_defconfig @@ -297,12 +297,10 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -693,8 +691,6 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -791,8 +787,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y -CONFIG_PFK_WRAPPED_KEY_SUPPORTED=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -808,7 +802,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/trinket-perf_defconfig b/arch/arm64/configs/vendor/trinket-perf_defconfig index 9248468b02eb..67aea57b7d55 100644 --- a/arch/arm64/configs/vendor/trinket-perf_defconfig +++ b/arch/arm64/configs/vendor/trinket-perf_defconfig @@ -279,11 +279,9 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y 
CONFIG_DM_VERITY=y @@ -510,7 +508,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QPNP_HAPTICS=y @@ -683,7 +680,6 @@ CONFIG_CORESIGHT_DUMMY=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -698,7 +694,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/trinket_defconfig b/arch/arm64/configs/vendor/trinket_defconfig index cf1f806d7c9b..65fa34db016b 100644 --- a/arch/arm64/configs/vendor/trinket_defconfig +++ b/arch/arm64/configs/vendor/trinket_defconfig @@ -289,12 +289,10 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -525,7 +523,6 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QPNP_HAPTICS=y @@ -768,7 +765,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y -CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -784,7 +780,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git 
a/block/bio.c b/block/bio.c index ce70677b9b5e..a3c4fd9ec478 100644 --- a/block/bio.c +++ b/block/bio.c @@ -577,18 +577,6 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio) } EXPORT_SYMBOL(bio_phys_segments); -static inline void bio_clone_crypt_key(struct bio *dst, const struct bio *src) -{ -#ifdef CONFIG_PFK - dst->bi_iter.bi_dun = src->bi_iter.bi_dun; -#ifdef CONFIG_DM_DEFAULT_KEY - dst->bi_crypt_key = src->bi_crypt_key; - dst->bi_crypt_skip = src->bi_crypt_skip; -#endif - dst->bi_dio_inode = src->bi_dio_inode; -#endif -} - /** * __bio_clone_fast - clone a bio that shares the original bio's biovec * @bio: destination bio @@ -617,7 +605,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_iter = bio_src->bi_iter; bio->bi_io_vec = bio_src->bi_io_vec; - bio_clone_crypt_key(bio, bio_src); + bio_clone_blkcg_association(bio, bio_src); } EXPORT_SYMBOL(__bio_clone_fast); @@ -726,7 +714,6 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, } } - bio_clone_crypt_key(bio, bio_src); bio_clone_blkcg_association(bio, bio_src); return bio; diff --git a/block/blk-core.c b/block/blk-core.c index 7ce048c9861c..52490014818f 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1462,9 +1462,6 @@ static struct request *blk_old_get_request(struct request_queue *q, /* q->queue_lock is unlocked at this point */ rq->__data_len = 0; rq->__sector = (sector_t) -1; -#ifdef CONFIG_PFK - rq->__dun = 0; -#endif rq->bio = rq->biotail = NULL; return rq; } @@ -1688,9 +1685,6 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req, bio->bi_next = req->bio; req->bio = bio; -#ifdef CONFIG_PFK - req->__dun = bio->bi_iter.bi_dun; -#endif req->__sector = bio->bi_iter.bi_sector; req->__data_len += bio->bi_iter.bi_size; req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); @@ -1840,9 +1834,6 @@ void blk_init_request_from_bio(struct request *req, struct bio *bio) else 
req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); req->write_hint = bio->bi_write_hint; -#ifdef CONFIG_PFK - req->__dun = bio->bi_iter.bi_dun; -#endif blk_rq_bio_prep(req->q, req, bio); } EXPORT_SYMBOL_GPL(blk_init_request_from_bio); @@ -2876,13 +2867,8 @@ bool blk_update_request(struct request *req, blk_status_t error, req->__data_len -= total_bytes; /* update sector only for requests with clear definition of sector */ - if (!blk_rq_is_passthrough(req)) { + if (!blk_rq_is_passthrough(req)) req->__sector += total_bytes >> 9; -#ifdef CONFIG_PFK - if (req->__dun) - req->__dun += total_bytes >> 12; -#endif - } /* mixed attributes always follow the first bio */ if (req->rq_flags & RQF_MIXED_MERGE) { @@ -3245,9 +3231,6 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src) { dst->cpu = src->cpu; dst->__sector = blk_rq_pos(src); -#ifdef CONFIG_PFK - dst->__dun = blk_rq_dun(src); -#endif dst->__data_len = blk_rq_bytes(src); if (src->rq_flags & RQF_SPECIAL_PAYLOAD) { dst->rq_flags |= RQF_SPECIAL_PAYLOAD; diff --git a/block/blk-merge.c b/block/blk-merge.c index 82dc0c1df283..de29a4054666 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -9,7 +9,7 @@ #include #include -#include + #include "blk.h" static struct bio *blk_bio_discard_split(struct request_queue *q, @@ -509,8 +509,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, if (blk_integrity_rq(req) && integrity_req_gap_back_merge(req, bio)) return 0; - if (blk_try_merge(req, bio) != ELEVATOR_BACK_MERGE) - return 0; if (blk_rq_sectors(req) + bio_sectors(bio) > blk_rq_get_max_sectors(req, blk_rq_pos(req))) { req_set_nomerge(q, req); @@ -533,8 +531,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req, if (blk_integrity_rq(req) && integrity_req_gap_front_merge(req, bio)) return 0; - if (blk_try_merge(req, bio) != ELEVATOR_FRONT_MERGE) - return 0; if (blk_rq_sectors(req) + bio_sectors(bio) > blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) { 
req_set_nomerge(q, req); @@ -668,11 +664,6 @@ static void blk_account_io_merge(struct request *req) } } -static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt) -{ - return (!pfk_allow_merge_bio(bio, nxt)); -} - /* * For non-mq, this has to be called with the request spinlock acquired. * For mq with scheduling, the appropriate queue wide lock should be held. @@ -711,9 +702,6 @@ static struct request *attempt_merge(struct request_queue *q, if (req->write_hint != next->write_hint) return NULL; - if (crypto_not_mergeable(req->bio, next->bio)) - return 0; - /* * If we are allowed to merge, then append bio list * from next to rq and release next. merge_requests_fn @@ -851,18 +839,11 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) enum elv_merge blk_try_merge(struct request *rq, struct bio *bio) { if (req_op(rq) == REQ_OP_DISCARD && - queue_max_discard_segments(rq->q) > 1) { + queue_max_discard_segments(rq->q) > 1) return ELEVATOR_DISCARD_MERGE; - } else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == - bio->bi_iter.bi_sector) { - if (crypto_not_mergeable(rq->bio, bio)) - return ELEVATOR_NO_MERGE; + else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) return ELEVATOR_BACK_MERGE; - } else if (blk_rq_pos(rq) - bio_sectors(bio) == - bio->bi_iter.bi_sector) { - if (crypto_not_mergeable(bio, rq->bio)) - return ELEVATOR_NO_MERGE; + else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) return ELEVATOR_FRONT_MERGE; - } return ELEVATOR_NO_MERGE; } diff --git a/block/elevator.c b/block/elevator.c index 2346c5b53b93..8320d97240be 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -443,7 +443,7 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req, { struct elevator_queue *e = q->elevator; struct request *__rq; - enum elv_merge ret; + /* * Levels of merges: * nomerges: No merges at all attempted @@ -456,11 +456,9 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req, /* * 
First try one-hit cache. */ - if (q->last_merge) { - if (!elv_bio_merge_ok(q->last_merge, bio)) - return ELEVATOR_NO_MERGE; + if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) { + enum elv_merge ret = blk_try_merge(q->last_merge, bio); - ret = blk_try_merge(q->last_merge, bio); if (ret != ELEVATOR_NO_MERGE) { *req = q->last_merge; return ret; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 2531bfb57fdc..763308990dd8 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -15,7 +15,9 @@ #include #include #include +#ifdef CONFIG_PFK #include +#endif #include #define PART_BITS 4 diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 113cd787ec7c..26e1103e49a6 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -771,8 +771,4 @@ config CRYPTO_DEV_ARTPEC6 To compile this driver as a module, choose M here. -if ARCH_QCOM -source drivers/crypto/msm/Kconfig -endif - endif # CRYPTO_HW diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c index fd34c0bc39f5..79301bbed969 100644 --- a/drivers/crypto/msm/ice.c +++ b/drivers/crypto/msm/ice.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1443,9 +1443,9 @@ static void qcom_ice_debug(struct platform_device *pdev) qcom_ice_dump_test_bus(ice_dev); pr_err("%s: ICE reset start time: %llu ICE reset done time: %llu\n", - ice_dev->ice_instance_type, - (unsigned long long)ice_dev->ice_reset_start_time, - (unsigned long long)ice_dev->ice_reset_complete_time); + ice_dev->ice_instance_type, + (unsigned long long)ice_dev->ice_reset_start_time.tv64, + (unsigned long long)ice_dev->ice_reset_complete_time.tv64); if (ktime_to_us(ktime_sub(ice_dev->ice_reset_complete_time, ice_dev->ice_reset_start_time)) > 0) diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index c805d628d04d..747edadb39ae 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -286,24 +286,6 @@ config DM_CRYPT If unsure, say N. -config DM_DEFAULT_KEY - tristate "Default-key crypt target support" - depends on BLK_DEV_DM - depends on PFK - ---help--- - This (currently Android-specific) device-mapper target allows you to - create a device that assigns a default encryption key to bios that - don't already have one. This can sit between inline cryptographic - acceleration hardware and filesystems that use it. This ensures that - where the filesystem doesn't explicitly specify a key, such as for - filesystem metadata, a default key will be used instead, leaving no - sectors unencrypted. - - To compile this code as a module, choose M here: the module will be - called dm-default-key. - - If unsure, say N. 
- config DM_SNAPSHOT tristate "Snapshot target" depends on BLK_DEV_DM diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 1a03ebd1cee7..27962abad668 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -43,7 +43,6 @@ obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o obj-$(CONFIG_DM_BUFIO) += dm-bufio.o obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o obj-$(CONFIG_DM_CRYPT) += dm-crypt.o -obj-$(CONFIG_DM_DEFAULT_KEY) += dm-default-key.o obj-$(CONFIG_DM_DELAY) += dm-delay.o obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 669e18f0453b..cb959a0e711d 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -126,7 +126,7 @@ struct iv_tcw_private { */ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD, - DM_CRYPT_ENCRYPT_OVERRIDE }; +}; enum cipher_flags { CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cihper */ @@ -2678,8 +2678,6 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT; } else if (!strcasecmp(opt_string, "iv_large_sectors")) set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); - else if (!strcasecmp(opt_string, "allow_encrypt_override")) - set_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags); else { ti->error = "Invalid feature arguments"; return -EINVAL; @@ -2889,15 +2887,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) struct crypt_config *cc = ti->private; /* - * If bio is REQ_PREFLUSH, REQ_NOENCRYPT, or REQ_OP_DISCARD, - * just bypass crypt queues. + * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues. 
* - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight * - for REQ_OP_DISCARD caller must use flush if IO ordering matters */ - if (unlikely(bio->bi_opf & REQ_PREFLUSH) || - (unlikely(bio->bi_opf & REQ_NOENCRYPT) && - test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags)) || - bio_op(bio) == REQ_OP_DISCARD) { + if (unlikely(bio->bi_opf & REQ_PREFLUSH || + bio_op(bio) == REQ_OP_DISCARD)) { bio_set_dev(bio, cc->dev->bdev); if (bio_sectors(bio)) bio->bi_iter.bi_sector = cc->start + @@ -2984,8 +2979,6 @@ static void crypt_status(struct dm_target *ti, status_type_t type, num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT); num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); - num_feature_args += test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, - &cc->flags); if (cc->on_disk_tag_size) num_feature_args++; if (num_feature_args) { @@ -3002,8 +2995,6 @@ static void crypt_status(struct dm_target *ti, status_type_t type, DMEMIT(" sector_size:%d", cc->sector_size); if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) DMEMIT(" iv_large_sectors"); - if (test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags)) - DMEMIT(" allow_encrypt_override"); } break; diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c deleted file mode 100644 index 0926bd65bd59..000000000000 --- a/drivers/md/dm-default-key.c +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (C) 2017 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include -#include -#include - -#define DM_MSG_PREFIX "default-key" -#define DEFAULT_DUN_OFFSET 1 - -struct default_key_c { - struct dm_dev *dev; - sector_t start; - struct blk_encryption_key key; - bool set_dun; - u64 dun_offset; -}; - -static void default_key_dtr(struct dm_target *ti) -{ - struct default_key_c *dkc = ti->private; - - if (dkc->dev) - dm_put_device(ti, dkc->dev); - kzfree(dkc); -} - -static int default_key_ctr_optional(struct dm_target *ti, - unsigned int argc, char **argv) -{ - struct default_key_c *dkc = ti->private; - struct dm_arg_set as = {0}; - static const struct dm_arg _args[] = { - {0, 2, "Invalid number of feature args"}, - }; - unsigned int opt_params; - const char *opt_string; - char dummy; - int ret; - - as.argc = argc; - as.argv = argv; - - ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error); - if (ret) - return ret; - - while (opt_params--) { - opt_string = dm_shift_arg(&as); - if (!opt_string) { - ti->error = "Not enough feature arguments"; - return -EINVAL; - } - - if (!strcasecmp(opt_string, "set_dun")) { - dkc->set_dun = true; - } else if (sscanf(opt_string, "dun_offset:%llu%c", - &dkc->dun_offset, &dummy) == 1) { - if (dkc->dun_offset == 0) { - ti->error = "dun_offset cannot be 0"; - return -EINVAL; - } - } else { - ti->error = "Invalid feature arguments"; - return -EINVAL; - } - } - - if (dkc->dun_offset && !dkc->set_dun) { - ti->error = "Invalid: dun_offset without set_dun"; - return -EINVAL; - } - - if (dkc->set_dun && !dkc->dun_offset) - dkc->dun_offset = DEFAULT_DUN_OFFSET; - - return 0; -} - -/* - * Construct a default-key mapping: - */ -static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) -{ - struct default_key_c *dkc; - size_t key_size; - unsigned long long tmp; - char dummy; - int err; - - if (argc < 4) { - ti->error = "Too few arguments"; - return -EINVAL; - } - - dkc = kzalloc(sizeof(*dkc), GFP_KERNEL); - if (!dkc) { - ti->error = "Out of memory"; - return -ENOMEM; - } - 
ti->private = dkc; - - if (strcmp(argv[0], "AES-256-XTS") != 0) { - ti->error = "Unsupported encryption mode"; - err = -EINVAL; - goto bad; - } - - key_size = strlen(argv[1]); - if (key_size != 2 * BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS) { - ti->error = "Unsupported key size"; - err = -EINVAL; - goto bad; - } - key_size /= 2; - - if (hex2bin(dkc->key.raw, argv[1], key_size) != 0) { - ti->error = "Malformed key string"; - err = -EINVAL; - goto bad; - } - - err = dm_get_device(ti, argv[2], dm_table_get_mode(ti->table), - &dkc->dev); - if (err) { - ti->error = "Device lookup failed"; - goto bad; - } - - if (sscanf(argv[3], "%llu%c", &tmp, &dummy) != 1) { - ti->error = "Invalid start sector"; - err = -EINVAL; - goto bad; - } - dkc->start = tmp; - - if (argc > 4) { - err = default_key_ctr_optional(ti, argc - 4, &argv[4]); - if (err) - goto bad; - } - - if (!blk_queue_inlinecrypt(bdev_get_queue(dkc->dev->bdev))) { - ti->error = "Device does not support inline encryption"; - err = -EINVAL; - goto bad; - } - - /* Pass flush requests through to the underlying device. */ - ti->num_flush_bios = 1; - - /* - * We pass discard requests through to the underlying device, although - * the discarded blocks will be zeroed, which leaks information about - * unused blocks. It's also impossible for dm-default-key to know not - * to decrypt discarded blocks, so they will not be read back as zeroes - * and we must set discard_zeroes_data_unsupported. - */ - ti->num_discard_bios = 1; - - /* - * It's unclear whether WRITE_SAME would work with inline encryption; it - * would depend on whether the hardware duplicates the data before or - * after encryption. But since the internal storage in some devices - * (MSM8998-based) doesn't claim to support WRITE_SAME anyway, we don't - * currently have a way to test it. Leave it disabled it for now. 
- */ - /*ti->num_write_same_bios = 1;*/ - - return 0; - -bad: - default_key_dtr(ti); - return err; -} - -static int default_key_map(struct dm_target *ti, struct bio *bio) -{ - const struct default_key_c *dkc = ti->private; - - bio_set_dev(bio, dkc->dev->bdev); - if (bio_sectors(bio)) { - bio->bi_iter.bi_sector = dkc->start + - dm_target_offset(ti, bio->bi_iter.bi_sector); - } - - if (!bio->bi_crypt_key && !bio->bi_crypt_skip) { - bio->bi_crypt_key = &dkc->key; - - if (dkc->set_dun) - bio_dun(bio) = (dm_target_offset(ti, - bio->bi_iter.bi_sector) - >> 3) + dkc->dun_offset; - } - - return DM_MAPIO_REMAPPED; -} - -static void default_key_status(struct dm_target *ti, status_type_t type, - unsigned int status_flags, char *result, - unsigned int maxlen) -{ - const struct default_key_c *dkc = ti->private; - unsigned int sz = 0; - int num_feature_args = 0; - - switch (type) { - case STATUSTYPE_INFO: - result[0] = '\0'; - break; - - case STATUSTYPE_TABLE: - - /* encryption mode */ - DMEMIT("AES-256-XTS"); - - /* reserved for key; dm-crypt shows it, but we don't for now */ - DMEMIT(" -"); - - /* name of underlying device, and the start sector in it */ - DMEMIT(" %s %llu", dkc->dev->name, - (unsigned long long)dkc->start); - - num_feature_args += dkc->set_dun; - num_feature_args += dkc->set_dun - && dkc->dun_offset != DEFAULT_DUN_OFFSET; - - if (num_feature_args) { - DMEMIT(" %d", num_feature_args); - if (dkc->set_dun) - DMEMIT(" set_dun"); - if (dkc->set_dun - && dkc->dun_offset != DEFAULT_DUN_OFFSET) - DMEMIT(" dun_offset:%llu", dkc->dun_offset); - } - - break; - } -} - -static int default_key_prepare_ioctl(struct dm_target *ti, - struct block_device **bdev, fmode_t *mode) -{ - struct default_key_c *dkc = ti->private; - struct dm_dev *dev = dkc->dev; - - *bdev = dev->bdev; - - /* - * Only pass ioctls through if the device sizes match exactly. 
- */ - if (dkc->start || - ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) - return 1; - return 0; -} - -static int default_key_iterate_devices(struct dm_target *ti, - iterate_devices_callout_fn fn, - void *data) -{ - struct default_key_c *dkc = ti->private; - - return fn(ti, dkc->dev, dkc->start, ti->len, data); -} - -static struct target_type default_key_target = { - .name = "default-key", - .version = {1, 1, 0}, - .module = THIS_MODULE, - .ctr = default_key_ctr, - .dtr = default_key_dtr, - .map = default_key_map, - .status = default_key_status, - .prepare_ioctl = default_key_prepare_ioctl, - .iterate_devices = default_key_iterate_devices, -}; - -static int __init dm_default_key_init(void) -{ - return dm_register_target(&default_key_target); -} - -static void __exit dm_default_key_exit(void) -{ - dm_unregister_target(&default_key_target); -} - -module_init(dm_default_key_init); -module_exit(dm_default_key_exit); - -MODULE_AUTHOR("Paul Lawrence "); -MODULE_AUTHOR("Paul Crowley "); -MODULE_AUTHOR("Eric Biggers "); -MODULE_DESCRIPTION(DM_NAME " target for encrypting filesystem metadata"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 0f9a8087e1a0..852350e3cfe7 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1689,16 +1689,6 @@ static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev, return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); } -static int queue_supports_inline_encryption(struct dm_target *ti, - struct dm_dev *dev, - sector_t start, sector_t len, - void *data) -{ - struct request_queue *q = bdev_get_queue(dev->bdev); - - return q && blk_queue_inlinecrypt(q); -} - static bool dm_table_all_devices_attribute(struct dm_table *t, iterate_devices_callout_fn func) { @@ -1879,11 +1869,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, else queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); - if (dm_table_all_devices_attribute(t, 
queue_supports_inline_encryption)) - queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q); - else - queue_flag_clear_unlocked(QUEUE_FLAG_INLINECRYPT, q); - dm_table_verify_integrity(t); /* diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index f3c12b118f3a..66ed5193add4 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -52,7 +52,6 @@ #include #include #include "compat_qseecom.h" -#include #include #define QSEECOM_DEV "qseecom" @@ -8115,19 +8114,6 @@ static long qseecom_ioctl(struct file *file, qcom_ice_set_fde_flag(ice_data.flag); break; } - case QSEECOM_IOCTL_FBE_CLEAR_KEY: { - struct qseecom_ice_key_data_t key_data; - - ret = copy_from_user(&key_data, argp, sizeof(key_data)); - if (ret) { - pr_err("copy from user failed\n"); - return -EFAULT; - } - pfk_fbe_clear_key((const unsigned char *) key_data.key, - key_data.key_len, (const unsigned char *) - key_data.salt, key_data.salt_len); - break; - } default: pr_err("Invalid IOCTL: 0x%x\n", cmd); return -EINVAL; diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 06febb56fa5f..ecc794323729 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -216,8 +216,6 @@ void mmc_cmdq_setup_queue(struct mmc_queue *mq, struct mmc_card *card) host->max_req_size / 512)); blk_queue_max_segment_size(mq->queue, host->max_seg_size); blk_queue_max_segments(mq->queue, host->max_segs); - if (host->inlinecrypt_support) - queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, mq->queue); } static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp) @@ -481,8 +479,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, min(host->max_blk_count, host->max_req_size / 512)); blk_queue_max_segments(mq->queue, host->max_segs); blk_queue_max_segment_size(mq->queue, host->max_seg_size); - if (host->inlinecrypt_support) - queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, mq->queue); sema_init(&mq->thread_sem, 1); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig 
index 73e43441f4dd..979b909704df 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -439,17 +439,6 @@ config MMC_SDHCI_MSM If unsure, say N. -config MMC_SDHCI_MSM_ICE - bool "Qualcomm Technologies, Inc Inline Crypto Engine for SDHCI core" - depends on MMC_SDHCI_MSM && CRYPTO_DEV_QCOM_ICE - help - This selects the QTI specific additions to support Inline Crypto - Engine (ICE). ICE accelerates the crypto operations and maintains - the high SDHCI performance. - - Select this if you have ICE supported for SDHCI on QTI chipset. - If unsure, say N. - config MMC_MXC tristate "Freescale i.MX21/27/31 or MPC512x Multimedia Card support" depends on ARCH_MXC || PPC_MPC512x diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index f079ab6fb055..6389e8125299 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -86,7 +86,6 @@ obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o -obj-$(CONFIG_MMC_SDHCI_MSM_ICE) += sdhci-msm-ice.o obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c index 8571e3171c0c..87c1cb7abf39 100644 --- a/drivers/mmc/host/cmdq_hci.c +++ b/drivers/mmc/host/cmdq_hci.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, 2020 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -376,7 +376,6 @@ static int cmdq_enable(struct mmc_host *mmc) { int err = 0; u32 cqcfg; - u32 cqcap = 0; bool dcmd_enable; struct cmdq_host *cq_host = mmc_cmdq_private(mmc); @@ -405,24 +404,6 @@ static int cmdq_enable(struct mmc_host *mmc) cqcfg = ((cq_host->caps & CMDQ_TASK_DESC_SZ_128 ? CQ_TASK_DESC_SZ : 0) | (dcmd_enable ? CQ_DCMD : 0)); - cqcap = cmdq_readl(cq_host, CQCAP); - if (cqcap & CQCAP_CS) { - /* - * In case host controller supports cryptographic operations - * then, it uses 128bit task descriptor. Upper 64 bits of task - * descriptor would be used to pass crypto specific informaton. - */ - cq_host->caps |= CMDQ_CAP_CRYPTO_SUPPORT | - CMDQ_TASK_DESC_SZ_128; - cqcfg |= CQ_ICE_ENABLE; - /* - * For SDHC v5.0 onwards, ICE 3.0 specific registers are added - * in CQ register space, due to which few CQ registers are - * shifted. Set offset_changed boolean to use updated address. - */ - cq_host->offset_changed = true; - } - cmdq_writel(cq_host, cqcfg, CQCFG); /* enable CQ_HOST */ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCFG) | CQ_ENABLE, @@ -738,30 +719,6 @@ static void cmdq_prep_dcmd_desc(struct mmc_host *mmc, upper_32_bits(*task_desc)); } -static inline -void cmdq_prep_crypto_desc(struct cmdq_host *cq_host, u64 *task_desc, - u64 ice_ctx) -{ - u64 *ice_desc = NULL; - - if (cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) { - /* - * Get the address of ice context for the given task descriptor. 
- * ice context is present in the upper 64bits of task descriptor - * ice_conext_base_address = task_desc + 8-bytes - */ - ice_desc = (__le64 *)((u8 *)task_desc + - CQ_TASK_DESC_TASK_PARAMS_SIZE); - memset(ice_desc, 0, CQ_TASK_DESC_ICE_PARAMS_SIZE); - - /* - * Assign upper 64bits data of task descritor with ice context - */ - if (ice_ctx) - *ice_desc = cpu_to_le64(ice_ctx); - } -} - static void cmdq_pm_qos_vote(struct sdhci_host *host, struct mmc_request *mrq) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -785,7 +742,6 @@ static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq) u32 tag = mrq->cmdq_req->tag; struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc); struct sdhci_host *host = mmc_priv(mmc); - u64 ice_ctx = 0; if (!cq_host->enabled) { pr_err("%s: CMDQ host not enabled yet !!!\n", @@ -804,31 +760,19 @@ static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq) goto ring_doorbell; } - if (cq_host->ops->crypto_cfg) { - err = cq_host->ops->crypto_cfg(mmc, mrq, tag, &ice_ctx); - if (err) { - mmc->err_stats[MMC_ERR_ICE_CFG]++; - pr_err("%s: failed to configure crypto: err %d tag %d\n", - mmc_hostname(mmc), err, tag); - goto ice_err; - } - } - task_desc = (__le64 __force *)get_desc(cq_host, tag); cmdq_prep_task_desc(mrq, &data, 1, (mrq->cmdq_req->cmdq_req_flags & QBR)); *task_desc = cpu_to_le64(data); - cmdq_prep_crypto_desc(cq_host, task_desc, ice_ctx); - cmdq_log_task_desc_history(cq_host, *task_desc, false); err = cmdq_prep_tran_desc(mrq, cq_host, tag); if (err) { pr_err("%s: %s: failed to setup tx desc: %d\n", mmc_hostname(mmc), __func__, err); - goto desc_err; + goto out; } cq_host->mrq_slot[tag] = mrq; @@ -848,20 +792,6 @@ ring_doorbell: /* Commit the doorbell write immediately */ wmb(); - return err; - -desc_err: - if (cq_host->ops->crypto_cfg_end) { - err = cq_host->ops->crypto_cfg_end(mmc, mrq); - if (err) { - pr_err("%s: failed to end ice config: err %d tag %d\n", - mmc_hostname(mmc), err, tag); 
- } - } - if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) && - cq_host->ops->crypto_cfg_reset) - cq_host->ops->crypto_cfg_reset(mmc, tag); -ice_err: if (err) cmdq_runtime_pm_put(cq_host); out: @@ -873,7 +803,6 @@ static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag) struct mmc_request *mrq; struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc); int offset = 0; - int err = 0; if (cq_host->offset_changed) offset = CQ_V5_VENDOR_CFG; @@ -888,18 +817,6 @@ static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag) cmdq_runtime_pm_put(cq_host); - if (!(mrq->cmdq_req->cmdq_req_flags & DCMD)) { - if (cq_host->ops->crypto_cfg_end) { - err = cq_host->ops->crypto_cfg_end(mmc, mrq); - if (err) { - pr_err("%s: failed to end ice config: err %d tag %d\n", - mmc_hostname(mmc), err, tag); - } - } - } - if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) && - cq_host->ops->crypto_cfg_reset) - cq_host->ops->crypto_cfg_reset(mmc, tag); mrq->done(mrq); } diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h index 03c78d7a891c..0b7c38710c34 100644 --- a/drivers/mmc/host/cmdq_hci.h +++ b/drivers/mmc/host/cmdq_hci.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -18,13 +18,11 @@ #define CQVER 0x00 /* capabilities */ #define CQCAP 0x04 -#define CQCAP_CS (1 << 28) /* configuration */ #define CQCFG 0x08 #define CQ_DCMD 0x00001000 #define CQ_TASK_DESC_SZ 0x00000100 #define CQ_ENABLE 0x00000001 -#define CQ_ICE_ENABLE 0x00000002 /* control */ #define CQCTL 0x0C @@ -153,9 +151,6 @@ #define CQ_VENDOR_CFG 0x100 #define CMDQ_SEND_STATUS_TRIGGER (1 << 31) -#define CQ_TASK_DESC_TASK_PARAMS_SIZE 8 -#define CQ_TASK_DESC_ICE_PARAMS_SIZE 8 - struct task_history { u64 task; bool is_dcmd; @@ -173,7 +168,6 @@ struct cmdq_host { u32 dcmd_slot; u32 caps; #define CMDQ_TASK_DESC_SZ_128 0x1 -#define CMDQ_CAP_CRYPTO_SUPPORT 0x2 u32 quirks; #define CMDQ_QUIRK_SHORT_TXFR_DESC_SZ 0x1 @@ -222,10 +216,6 @@ struct cmdq_host_ops { void (*enhanced_strobe_mask)(struct mmc_host *mmc, bool set); int (*reset)(struct mmc_host *mmc); void (*post_cqe_halt)(struct mmc_host *mmc); - int (*crypto_cfg)(struct mmc_host *mmc, struct mmc_request *mrq, - u32 slot, u64 *ice_ctx); - int (*crypto_cfg_end)(struct mmc_host *mmc, struct mmc_request *mrq); - void (*crypto_cfg_reset)(struct mmc_host *mmc, unsigned int slot); }; static inline void cmdq_writel(struct cmdq_host *host, u32 val, int reg) diff --git a/drivers/mmc/host/sdhci-msm-ice.c b/drivers/mmc/host/sdhci-msm-ice.c deleted file mode 100644 index 317d8c3bfb0e..000000000000 --- a/drivers/mmc/host/sdhci-msm-ice.c +++ /dev/null @@ -1,587 +0,0 @@ -/* - * Copyright (c) 2015, 2017-2018 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include "sdhci-msm-ice.h" - -static void sdhci_msm_ice_error_cb(void *host_ctrl, u32 error) -{ - struct sdhci_msm_host *msm_host = (struct sdhci_msm_host *)host_ctrl; - - dev_err(&msm_host->pdev->dev, "%s: Error in ice operation 0x%x", - __func__, error); - - if (msm_host->ice.state == SDHCI_MSM_ICE_STATE_ACTIVE) - msm_host->ice.state = SDHCI_MSM_ICE_STATE_DISABLED; -} - -static struct platform_device *sdhci_msm_ice_get_pdevice(struct device *dev) -{ - struct device_node *node; - struct platform_device *ice_pdev = NULL; - - node = of_parse_phandle(dev->of_node, SDHC_MSM_CRYPTO_LABEL, 0); - if (!node) { - dev_dbg(dev, "%s: sdhc-msm-crypto property not specified\n", - __func__); - goto out; - } - ice_pdev = qcom_ice_get_pdevice(node); -out: - return ice_pdev; -} - -static -struct qcom_ice_variant_ops *sdhci_msm_ice_get_vops(struct device *dev) -{ - struct qcom_ice_variant_ops *ice_vops = NULL; - struct device_node *node; - - node = of_parse_phandle(dev->of_node, SDHC_MSM_CRYPTO_LABEL, 0); - if (!node) { - dev_dbg(dev, "%s: sdhc-msm-crypto property not specified\n", - __func__); - goto out; - } - ice_vops = qcom_ice_get_variant_ops(node); - of_node_put(node); -out: - return ice_vops; -} - -static -void sdhci_msm_enable_ice_hci(struct sdhci_host *host, bool enable) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_msm_host *msm_host = pltfm_host->priv; - u32 config = 0; - u32 ice_cap = 0; - - /* - * Enable the cryptographic support inside SDHC. - * This is a global config which needs to be enabled - * all the time. - * Only when it it is enabled, the ICE_HCI capability - * will get reflected in CQCAP register. 
- */ - config = readl_relaxed(host->ioaddr + HC_VENDOR_SPECIFIC_FUNC4); - - if (enable) - config &= ~DISABLE_CRYPTO; - else - config |= DISABLE_CRYPTO; - writel_relaxed(config, host->ioaddr + HC_VENDOR_SPECIFIC_FUNC4); - - /* - * CQCAP register is in different register space from above - * ice global enable register. So a mb() is required to ensure - * above write gets completed before reading the CQCAP register. - */ - mb(); - - /* - * Check if ICE HCI capability support is present - * If present, enable it. - */ - ice_cap = readl_relaxed(msm_host->cryptoio + ICE_CQ_CAPABILITIES); - if (ice_cap & ICE_HCI_SUPPORT) { - config = readl_relaxed(msm_host->cryptoio + ICE_CQ_CONFIG); - - if (enable) - config |= CRYPTO_GENERAL_ENABLE; - else - config &= ~CRYPTO_GENERAL_ENABLE; - writel_relaxed(config, msm_host->cryptoio + ICE_CQ_CONFIG); - } -} - -int sdhci_msm_ice_get_dev(struct sdhci_host *host) -{ - struct device *sdhc_dev; - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_msm_host *msm_host = pltfm_host->priv; - - if (!msm_host || !msm_host->pdev) { - pr_err("%s: invalid msm_host %p or msm_host->pdev\n", - __func__, msm_host); - return -EINVAL; - } - - sdhc_dev = &msm_host->pdev->dev; - msm_host->ice.vops = sdhci_msm_ice_get_vops(sdhc_dev); - msm_host->ice.pdev = sdhci_msm_ice_get_pdevice(sdhc_dev); - - if (msm_host->ice.pdev == ERR_PTR(-EPROBE_DEFER)) { - dev_err(sdhc_dev, "%s: ICE device not probed yet\n", - __func__); - msm_host->ice.pdev = NULL; - msm_host->ice.vops = NULL; - return -EPROBE_DEFER; - } - - if (!msm_host->ice.pdev) { - dev_dbg(sdhc_dev, "%s: invalid platform device\n", __func__); - msm_host->ice.vops = NULL; - return -ENODEV; - } - if (!msm_host->ice.vops) { - dev_dbg(sdhc_dev, "%s: invalid ice vops\n", __func__); - msm_host->ice.pdev = NULL; - return -ENODEV; - } - msm_host->ice.state = SDHCI_MSM_ICE_STATE_DISABLED; - return 0; -} - -static -int sdhci_msm_ice_pltfm_init(struct sdhci_msm_host *msm_host) -{ - struct resource 
*ice_memres = NULL; - struct platform_device *pdev = msm_host->pdev; - int err = 0; - - if (!msm_host->ice_hci_support) - goto out; - /* - * ICE HCI registers are present in cmdq register space. - * So map the cmdq mem for accessing ICE HCI registers. - */ - ice_memres = platform_get_resource_byname(pdev, - IORESOURCE_MEM, "cmdq_mem"); - if (!ice_memres) { - dev_err(&pdev->dev, "Failed to get iomem resource for ice\n"); - err = -EINVAL; - goto out; - } - msm_host->cryptoio = devm_ioremap(&pdev->dev, - ice_memres->start, - resource_size(ice_memres)); - if (!msm_host->cryptoio) { - dev_err(&pdev->dev, "Failed to remap registers\n"); - err = -ENOMEM; - } -out: - return err; -} - -int sdhci_msm_ice_init(struct sdhci_host *host) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_msm_host *msm_host = pltfm_host->priv; - int err = 0; - - if (msm_host->ice.vops->init) { - err = sdhci_msm_ice_pltfm_init(msm_host); - if (err) - goto out; - - if (msm_host->ice_hci_support) - sdhci_msm_enable_ice_hci(host, true); - - err = msm_host->ice.vops->init(msm_host->ice.pdev, - msm_host, - sdhci_msm_ice_error_cb); - if (err) { - pr_err("%s: ice init err %d\n", - mmc_hostname(host->mmc), err); - sdhci_msm_ice_print_regs(host); - if (msm_host->ice_hci_support) - sdhci_msm_enable_ice_hci(host, false); - goto out; - } - msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE; - } - -out: - return err; -} - -void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot) -{ - writel_relaxed(SDHCI_MSM_ICE_ENABLE_BYPASS, - host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot); -} - -static -int sdhci_msm_ice_get_cfg(struct sdhci_msm_host *msm_host, struct request *req, - unsigned int *bypass, short *key_index) -{ - int err = 0; - struct ice_data_setting ice_set; - - memset(&ice_set, 0, sizeof(struct ice_data_setting)); - if (msm_host->ice.vops->config_start) { - err = msm_host->ice.vops->config_start( - msm_host->ice.pdev, - req, &ice_set, false); - if (err) { - 
pr_err("%s: ice config failed %d\n", - mmc_hostname(msm_host->mmc), err); - return err; - } - } - /* if writing data command */ - if (rq_data_dir(req) == WRITE) - *bypass = ice_set.encr_bypass ? - SDHCI_MSM_ICE_ENABLE_BYPASS : - SDHCI_MSM_ICE_DISABLE_BYPASS; - /* if reading data command */ - else if (rq_data_dir(req) == READ) - *bypass = ice_set.decr_bypass ? - SDHCI_MSM_ICE_ENABLE_BYPASS : - SDHCI_MSM_ICE_DISABLE_BYPASS; - *key_index = ice_set.crypto_data.key_index; - return err; -} - -static -void sdhci_msm_ice_update_cfg(struct sdhci_host *host, u64 lba, u32 slot, - unsigned int bypass, short key_index, u32 cdu_sz) -{ - unsigned int ctrl_info_val = 0; - - /* Configure ICE index */ - ctrl_info_val = - (key_index & - MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX) - << OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX; - - /* Configure data unit size of transfer request */ - ctrl_info_val |= - (cdu_sz & - MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU) - << OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU; - - /* Configure ICE bypass mode */ - ctrl_info_val |= - (bypass & MASK_SDHCI_MSM_ICE_CTRL_INFO_BYPASS) - << OFFSET_SDHCI_MSM_ICE_CTRL_INFO_BYPASS; - - writel_relaxed((lba & 0xFFFFFFFF), - host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_1_n + 16 * slot); - writel_relaxed(((lba >> 32) & 0xFFFFFFFF), - host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n + 16 * slot); - writel_relaxed(ctrl_info_val, - host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot); - /* Ensure ICE registers are configured before issuing SDHCI request */ - mb(); -} - -static inline -void sdhci_msm_ice_hci_update_cmdq_cfg(u64 dun, unsigned int bypass, - short key_index, u64 *ice_ctx) -{ - /* - * The naming convention got changed between ICE2.0 and ICE3.0 - * registers fields. 
Below is the equivalent names for - * ICE3.0 Vs ICE2.0: - * Data Unit Number(DUN) == Logical Base address(LBA) - * Crypto Configuration index (CCI) == Key Index - * Crypto Enable (CE) == !BYPASS - */ - if (ice_ctx) - *ice_ctx = DATA_UNIT_NUM(dun) | - CRYPTO_CONFIG_INDEX(key_index) | - CRYPTO_ENABLE(!bypass); -} - -static -void sdhci_msm_ice_hci_update_noncq_cfg(struct sdhci_host *host, - u64 dun, unsigned int bypass, short key_index) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_msm_host *msm_host = pltfm_host->priv; - unsigned int crypto_params = 0; - /* - * The naming convention got changed between ICE2.0 and ICE3.0 - * registers fields. Below is the equivalent names for - * ICE3.0 Vs ICE2.0: - * Data Unit Number(DUN) == Logical Base address(LBA) - * Crypto Configuration index (CCI) == Key Index - * Crypto Enable (CE) == !BYPASS - */ - /* Configure ICE bypass mode */ - crypto_params |= - ((!bypass) & MASK_SDHCI_MSM_ICE_HCI_PARAM_CE) - << OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CE; - /* Configure Crypto Configure Index (CCI) */ - crypto_params |= (key_index & - MASK_SDHCI_MSM_ICE_HCI_PARAM_CCI) - << OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CCI; - - writel_relaxed((crypto_params & 0xFFFFFFFF), - msm_host->cryptoio + ICE_NONCQ_CRYPTO_PARAMS); - - /* Update DUN */ - writel_relaxed((dun & 0xFFFFFFFF), - msm_host->cryptoio + ICE_NONCQ_CRYPTO_DUN); - /* Ensure ICE registers are configured before issuing SDHCI request */ - mb(); -} - -int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq, - u32 slot) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_msm_host *msm_host = pltfm_host->priv; - int err = 0; - short key_index = 0; - u64 dun = 0; - unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS; - u32 cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_512_B; - struct request *req; - - if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) { - pr_err("%s: ice is in invalid state %d\n", - mmc_hostname(host->mmc), msm_host->ice.state); - 
return -EINVAL; - } - - WARN_ON(!mrq); - if (!mrq) - return -EINVAL; - req = mrq->req; - if (req && req->bio) { -#ifdef CONFIG_PFK - if (bio_dun(req->bio)) { - dun = bio_dun(req->bio); - cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB; - } else { - dun = req->__sector; - } -#else - dun = req->__sector; -#endif - err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index); - if (err) - return err; - pr_debug("%s: %s: slot %d bypass %d key_index %d\n", - mmc_hostname(host->mmc), - (rq_data_dir(req) == WRITE) ? "WRITE" : "READ", - slot, bypass, key_index); - } - - if (msm_host->ice_hci_support) { - /* For ICE HCI / ICE3.0 */ - sdhci_msm_ice_hci_update_noncq_cfg(host, dun, bypass, - key_index); - } else { - /* For ICE versions earlier to ICE3.0 */ - sdhci_msm_ice_update_cfg(host, dun, slot, bypass, key_index, - cdu_sz); - } - return 0; -} - -int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host, - struct mmc_request *mrq, u32 slot, u64 *ice_ctx) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_msm_host *msm_host = pltfm_host->priv; - int err = 0; - short key_index = 0; - u64 dun = 0; - unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS; - struct request *req; - u32 cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_512_B; - - if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) { - pr_err("%s: ice is in invalid state %d\n", - mmc_hostname(host->mmc), msm_host->ice.state); - return -EINVAL; - } - - WARN_ON(!mrq); - if (!mrq) - return -EINVAL; - req = mrq->req; - if (req && req->bio) { -#ifdef CONFIG_PFK - if (bio_dun(req->bio)) { - dun = bio_dun(req->bio); - cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB; - } else { - dun = req->__sector; - } -#else - dun = req->__sector; -#endif - err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index); - if (err) - return err; - pr_debug("%s: %s: slot %d bypass %d key_index %d\n", - mmc_hostname(host->mmc), - (rq_data_dir(req) == WRITE) ? 
"WRITE" : "READ", - slot, bypass, key_index); - } - - if (msm_host->ice_hci_support) { - /* For ICE HCI / ICE3.0 */ - sdhci_msm_ice_hci_update_cmdq_cfg(dun, bypass, key_index, - ice_ctx); - } else { - /* For ICE versions earlier to ICE3.0 */ - sdhci_msm_ice_update_cfg(host, dun, slot, bypass, key_index, - cdu_sz); - } - return 0; -} - -int sdhci_msm_ice_cfg_end(struct sdhci_host *host, struct mmc_request *mrq) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_msm_host *msm_host = pltfm_host->priv; - int err = 0; - struct request *req; - - if (!host->is_crypto_en) - return 0; - - if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) { - pr_err("%s: ice is in invalid state %d\n", - mmc_hostname(host->mmc), msm_host->ice.state); - return -EINVAL; - } - - req = mrq->req; - if (req) { - if (msm_host->ice.vops->config_end) { - err = msm_host->ice.vops->config_end(req); - if (err) { - pr_err("%s: ice config end failed %d\n", - mmc_hostname(host->mmc), err); - return err; - } - } - } - - return 0; -} - -int sdhci_msm_ice_reset(struct sdhci_host *host) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_msm_host *msm_host = pltfm_host->priv; - int err = 0; - - if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) { - pr_err("%s: ice is in invalid state before reset %d\n", - mmc_hostname(host->mmc), msm_host->ice.state); - return -EINVAL; - } - - if (msm_host->ice.vops->reset) { - err = msm_host->ice.vops->reset(msm_host->ice.pdev); - if (err) { - pr_err("%s: ice reset failed %d\n", - mmc_hostname(host->mmc), err); - sdhci_msm_ice_print_regs(host); - return err; - } - } - - /* If ICE HCI support is present then re-enable it */ - if (msm_host->ice_hci_support) - sdhci_msm_enable_ice_hci(host, true); - - if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) { - pr_err("%s: ice is in invalid state after reset %d\n", - mmc_hostname(host->mmc), msm_host->ice.state); - return -EINVAL; - } - return 0; -} - -int 
sdhci_msm_ice_resume(struct sdhci_host *host) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_msm_host *msm_host = pltfm_host->priv; - int err = 0; - - if (msm_host->ice.state != - SDHCI_MSM_ICE_STATE_SUSPENDED) { - pr_err("%s: ice is in invalid state before resume %d\n", - mmc_hostname(host->mmc), msm_host->ice.state); - return -EINVAL; - } - - if (msm_host->ice.vops->resume) { - err = msm_host->ice.vops->resume(msm_host->ice.pdev); - if (err) { - pr_err("%s: ice resume failed %d\n", - mmc_hostname(host->mmc), err); - return err; - } - } - - msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE; - return 0; -} - -int sdhci_msm_ice_suspend(struct sdhci_host *host) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_msm_host *msm_host = pltfm_host->priv; - int err = 0; - - if (msm_host->ice.state != - SDHCI_MSM_ICE_STATE_ACTIVE) { - pr_err("%s: ice is in invalid state before resume %d\n", - mmc_hostname(host->mmc), msm_host->ice.state); - return -EINVAL; - } - - if (msm_host->ice.vops->suspend) { - err = msm_host->ice.vops->suspend(msm_host->ice.pdev); - if (err) { - pr_err("%s: ice suspend failed %d\n", - mmc_hostname(host->mmc), err); - return -EINVAL; - } - } - msm_host->ice.state = SDHCI_MSM_ICE_STATE_SUSPENDED; - return 0; -} - -int sdhci_msm_ice_get_status(struct sdhci_host *host, int *ice_status) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_msm_host *msm_host = pltfm_host->priv; - int stat = -EINVAL; - - if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) { - pr_err("%s: ice is in invalid state %d\n", - mmc_hostname(host->mmc), msm_host->ice.state); - return -EINVAL; - } - - if (msm_host->ice.vops->status) { - *ice_status = 0; - stat = msm_host->ice.vops->status(msm_host->ice.pdev); - if (stat < 0) { - pr_err("%s: ice get sts failed %d\n", - mmc_hostname(host->mmc), stat); - return -EINVAL; - } - *ice_status = stat; - } - return 0; -} - -void sdhci_msm_ice_print_regs(struct 
sdhci_host *host) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_msm_host *msm_host = pltfm_host->priv; - - if (msm_host->ice.vops->debug) - msm_host->ice.vops->debug(msm_host->ice.pdev); -} diff --git a/drivers/mmc/host/sdhci-msm-ice.h b/drivers/mmc/host/sdhci-msm-ice.h deleted file mode 100644 index b256e285250d..000000000000 --- a/drivers/mmc/host/sdhci-msm-ice.h +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#ifndef __SDHCI_MSM_ICE_H__ -#define __SDHCI_MSM_ICE_H__ - -#include -#include -#include -#include - -#include "sdhci-msm.h" - -#define SDHC_MSM_CRYPTO_LABEL "sdhc-msm-crypto" -/* Timeout waiting for ICE initialization, that requires TZ access */ -#define SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS 500 - -/* - * SDHCI host controller ICE registers. 
There are n [0..31] - * of each of these registers - */ -#define NUM_SDHCI_MSM_ICE_CTRL_INFO_n_REGS 32 - -#define CORE_VENDOR_SPEC_ICE_CTRL 0x300 -#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_1_n 0x304 -#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n 0x308 -#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n 0x30C - -/* ICE3.0 register which got added cmdq reg space */ -#define ICE_CQ_CAPABILITIES 0x04 -#define ICE_HCI_SUPPORT (1 << 28) -#define ICE_CQ_CONFIG 0x08 -#define CRYPTO_GENERAL_ENABLE (1 << 1) -#define ICE_NONCQ_CRYPTO_PARAMS 0x70 -#define ICE_NONCQ_CRYPTO_DUN 0x74 - -/* ICE3.0 register which got added hc reg space */ -#define HC_VENDOR_SPECIFIC_FUNC4 0x260 -#define DISABLE_CRYPTO (1 << 15) -#define HC_VENDOR_SPECIFIC_ICE_CTRL 0x800 -#define ICE_SW_RST_EN (1 << 0) - -/* SDHCI MSM ICE CTRL Info register offset */ -enum { - OFFSET_SDHCI_MSM_ICE_CTRL_INFO_BYPASS = 0, - OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX = 1, - OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU = 6, - OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CCI = 0, - OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CE = 8, -}; - -/* SDHCI MSM ICE CTRL Info register masks */ -enum { - MASK_SDHCI_MSM_ICE_CTRL_INFO_BYPASS = 0x1, - MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX = 0x1F, - MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU = 0x7, - MASK_SDHCI_MSM_ICE_HCI_PARAM_CE = 0x1, - MASK_SDHCI_MSM_ICE_HCI_PARAM_CCI = 0xff -}; - -/* SDHCI MSM ICE encryption/decryption bypass state */ -enum { - SDHCI_MSM_ICE_DISABLE_BYPASS = 0, - SDHCI_MSM_ICE_ENABLE_BYPASS = 1, -}; - -/* SDHCI MSM ICE Crypto Data Unit of target DUN of Transfer Request */ -enum { - SDHCI_MSM_ICE_TR_DATA_UNIT_512_B = 0, - SDHCI_MSM_ICE_TR_DATA_UNIT_1_KB = 1, - SDHCI_MSM_ICE_TR_DATA_UNIT_2_KB = 2, - SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB = 3, - SDHCI_MSM_ICE_TR_DATA_UNIT_8_KB = 4, - SDHCI_MSM_ICE_TR_DATA_UNIT_16_KB = 5, - SDHCI_MSM_ICE_TR_DATA_UNIT_32_KB = 6, - SDHCI_MSM_ICE_TR_DATA_UNIT_64_KB = 7, -}; - -/* SDHCI MSM ICE internal state */ -enum { - SDHCI_MSM_ICE_STATE_DISABLED = 0, - SDHCI_MSM_ICE_STATE_ACTIVE = 1, - 
SDHCI_MSM_ICE_STATE_SUSPENDED = 2, -}; - -/* crypto context fields in cmdq data command task descriptor */ -#define DATA_UNIT_NUM(x) (((u64)(x) & 0xFFFFFFFF) << 0) -#define CRYPTO_CONFIG_INDEX(x) (((u64)(x) & 0xFF) << 32) -#define CRYPTO_ENABLE(x) (((u64)(x) & 0x1) << 47) - -#ifdef CONFIG_MMC_SDHCI_MSM_ICE -int sdhci_msm_ice_get_dev(struct sdhci_host *host); -int sdhci_msm_ice_init(struct sdhci_host *host); -void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot); -int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq, - u32 slot); -int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host, - struct mmc_request *mrq, u32 slot, u64 *ice_ctx); -int sdhci_msm_ice_cfg_end(struct sdhci_host *host, struct mmc_request *mrq); -int sdhci_msm_ice_reset(struct sdhci_host *host); -int sdhci_msm_ice_resume(struct sdhci_host *host); -int sdhci_msm_ice_suspend(struct sdhci_host *host); -int sdhci_msm_ice_get_status(struct sdhci_host *host, int *ice_status); -void sdhci_msm_ice_print_regs(struct sdhci_host *host); -#else -inline int sdhci_msm_ice_get_dev(struct sdhci_host *host) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_msm_host *msm_host = pltfm_host->priv; - - if (msm_host) { - msm_host->ice.pdev = NULL; - msm_host->ice.vops = NULL; - } - return -ENODEV; -} -inline int sdhci_msm_ice_init(struct sdhci_host *host) -{ - return 0; -} - -inline void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot) -{ -} - -inline int sdhci_msm_ice_cfg(struct sdhci_host *host, - struct mmc_request *mrq, u32 slot) -{ - return 0; -} -static inline int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host, - struct mmc_request *mrq, u32 slot, u64 *ice_ctx) -{ - return 0; -} -static inline int sdhci_msm_ice_cfg_end(struct sdhci_host *host, - struct mmc_request *mrq) -{ - return 0; -} -inline int sdhci_msm_ice_reset(struct sdhci_host *host) -{ - return 0; -} -inline int sdhci_msm_ice_resume(struct sdhci_host *host) -{ - return 0; -} -inline int 
sdhci_msm_ice_suspend(struct sdhci_host *host) -{ - return 0; -} -inline int sdhci_msm_ice_get_status(struct sdhci_host *host, - int *ice_status) -{ - return 0; -} -inline void sdhci_msm_ice_print_regs(struct sdhci_host *host) -{ -} -#endif /* CONFIG_MMC_SDHCI_MSM_ICE */ -#endif /* __SDHCI_MSM_ICE_H__ */ diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index a6bd35e37a31..ed15b5bc8018 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -42,7 +42,6 @@ #include #include "sdhci-msm.h" -#include "sdhci-msm-ice.h" #include "cmdq_hci.h" #define QOS_REMOVE_DELAY_MS 10 @@ -2055,26 +2054,20 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev, } } - if (msm_host->ice.pdev) { - if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates", - &ice_clk_table, &ice_clk_table_len, 0)) { - dev_err(dev, "failed parsing supported ice clock rates\n"); - goto out; - } - if (!ice_clk_table || !ice_clk_table_len) { - dev_err(dev, "Invalid clock table\n"); - goto out; - } - if (ice_clk_table_len != 2) { - dev_err(dev, "Need max and min frequencies in the table\n"); - goto out; - } - pdata->sup_ice_clk_table = ice_clk_table; - pdata->sup_ice_clk_cnt = ice_clk_table_len; - pdata->ice_clk_max = pdata->sup_ice_clk_table[0]; - pdata->ice_clk_min = pdata->sup_ice_clk_table[1]; - dev_dbg(dev, "supported ICE clock rates (Hz): max: %u min: %u\n", + if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates", + &ice_clk_table, &ice_clk_table_len, 0)) { + if (ice_clk_table && ice_clk_table_len) { + if (ice_clk_table_len != 2) { + dev_err(dev, "Need max and min frequencies\n"); + goto out; + } + pdata->sup_ice_clk_table = ice_clk_table; + pdata->sup_ice_clk_cnt = ice_clk_table_len; + pdata->ice_clk_max = pdata->sup_ice_clk_table[0]; + pdata->ice_clk_min = pdata->sup_ice_clk_table[1]; + dev_dbg(dev, "ICE clock rates (Hz): max: %u min: %u\n", pdata->ice_clk_max, pdata->ice_clk_min); + } } pdata->vreg_data = devm_kzalloc(dev, sizeof(struct @@ 
-3782,7 +3775,6 @@ void sdhci_msm_dump_vendor_regs(struct sdhci_host *host) int i, index = 0; u32 test_bus_val = 0; u32 debug_reg[MAX_TEST_BUS] = {0}; - u32 sts = 0; sdhci_msm_cache_debug_data(host); pr_info("----------- VENDOR REGISTER DUMP -----------\n"); @@ -3855,28 +3847,10 @@ void sdhci_msm_dump_vendor_regs(struct sdhci_host *host) pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, i + 3, debug_reg[i], debug_reg[i+1], debug_reg[i+2], debug_reg[i+3]); - if (host->is_crypto_en) { - sdhci_msm_ice_get_status(host, &sts); - pr_info("%s: ICE status %x\n", mmc_hostname(host->mmc), sts); - sdhci_msm_ice_print_regs(host); - } } static void sdhci_msm_reset(struct sdhci_host *host, u8 mask) { - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_msm_host *msm_host = pltfm_host->priv; - - /* Set ICE core to be reset in sync with SDHC core */ - if (msm_host->ice.pdev) { - if (msm_host->ice_hci_support) - writel_relaxed(1, host->ioaddr + - HC_VENDOR_SPECIFIC_ICE_CTRL); - else - writel_relaxed(1, - host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL); - } - sdhci_reset(host, mask); } @@ -4516,11 +4490,6 @@ static int sdhci_msm_notify_load(struct sdhci_host *host, enum mmc_load state) } static struct sdhci_ops sdhci_msm_ops = { - .crypto_engine_cfg = sdhci_msm_ice_cfg, - .crypto_engine_cmdq_cfg = sdhci_msm_ice_cmdq_cfg, - .crypto_engine_cfg_end = sdhci_msm_ice_cfg_end, - .crypto_cfg_reset = sdhci_msm_ice_cfg_reset, - .crypto_engine_reset = sdhci_msm_ice_reset, .set_uhs_signaling = sdhci_msm_set_uhs_signaling, .check_power_status = sdhci_msm_check_power_status, .platform_execute_tuning = sdhci_msm_execute_tuning, @@ -4646,7 +4615,6 @@ static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host, msm_host->caps_0 = caps; if ((major == 1) && (minor >= 0x6b)) { - msm_host->ice_hci_support = true; host->cdr_support = true; } @@ -4750,31 +4718,6 @@ static int sdhci_msm_probe(struct platform_device *pdev) msm_host->mmc = host->mmc; msm_host->pdev = 
pdev; - /* get the ice device vops if present */ - ret = sdhci_msm_ice_get_dev(host); - if (ret == -EPROBE_DEFER) { - /* - * SDHCI driver might be probed before ICE driver does. - * In that case we would like to return EPROBE_DEFER code - * in order to delay its probing. - */ - dev_err(&pdev->dev, "%s: required ICE device not probed yet err = %d\n", - __func__, ret); - goto pltfm_free; - - } else if (ret == -ENODEV) { - /* - * ICE device is not enabled in DTS file. No need for further - * initialization of ICE driver. - */ - dev_warn(&pdev->dev, "%s: ICE device is not enabled", - __func__); - } else if (ret) { - dev_err(&pdev->dev, "%s: sdhci_msm_ice_get_dev failed %d\n", - __func__, ret); - goto pltfm_free; - } - /* Extract platform data */ if (pdev->dev.of_node) { ret = of_alias_get_id(pdev->dev.of_node, "sdhc"); @@ -4849,26 +4792,24 @@ static int sdhci_msm_probe(struct platform_device *pdev) } } - if (msm_host->ice.pdev) { - /* Setup SDC ICE clock */ - msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk"); - if (!IS_ERR(msm_host->ice_clk)) { - /* ICE core has only one clock frequency for now */ - ret = clk_set_rate(msm_host->ice_clk, - msm_host->pdata->ice_clk_max); - if (ret) { - dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n", - ret, - msm_host->pdata->ice_clk_max); - goto bus_aggr_clk_disable; - } - ret = clk_prepare_enable(msm_host->ice_clk); - if (ret) - goto bus_aggr_clk_disable; - - msm_host->ice_clk_rate = - msm_host->pdata->ice_clk_max; + /* Setup SDC ICE clock */ + msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk"); + if (!IS_ERR(msm_host->ice_clk)) { + /* ICE core has only one clock frequency for now */ + ret = clk_set_rate(msm_host->ice_clk, + msm_host->pdata->ice_clk_max); + if (ret) { + dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n", + ret, + msm_host->pdata->ice_clk_max); + goto bus_aggr_clk_disable; } + ret = clk_prepare_enable(msm_host->ice_clk); + if (ret) + goto bus_aggr_clk_disable; + + 
msm_host->ice_clk_rate = + msm_host->pdata->ice_clk_max; } /* Setup SDC MMC clock */ @@ -5117,22 +5058,6 @@ static int sdhci_msm_probe(struct platform_device *pdev) msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa; - /* Initialize ICE if present */ - if (msm_host->ice.pdev) { - ret = sdhci_msm_ice_init(host); - if (ret) { - dev_err(&pdev->dev, "%s: SDHCi ICE init failed (%d)\n", - mmc_hostname(host->mmc), ret); - ret = -EINVAL; - goto vreg_deinit; - } - host->is_crypto_en = true; - msm_host->mmc->inlinecrypt_support = true; - /* Packed commands cannot be encrypted/decrypted using ICE */ - msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR | - MMC_CAP2_PACKED_WR_CONTROL); - } - init_completion(&msm_host->pwr_irq_completion); if (gpio_is_valid(msm_host->pdata->status_gpio)) { @@ -5413,7 +5338,6 @@ static int sdhci_msm_runtime_suspend(struct device *dev) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = pltfm_host->priv; ktime_t start = ktime_get(); - int ret; if (host->mmc->card && mmc_card_sdio(host->mmc->card)) goto defer_disable_host_irq; @@ -5433,12 +5357,6 @@ defer_disable_host_irq: sdhci_msm_bus_cancel_work_and_set_vote(host, 0); } - if (host->is_crypto_en) { - ret = sdhci_msm_ice_suspend(host); - if (ret < 0) - pr_err("%s: failed to suspend crypto engine %d\n", - mmc_hostname(host->mmc), ret); - } trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0, ktime_to_us(ktime_sub(ktime_get(), start))); return 0; @@ -5450,21 +5368,6 @@ static int sdhci_msm_runtime_resume(struct device *dev) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = pltfm_host->priv; ktime_t start = ktime_get(); - int ret; - - if (host->is_crypto_en) { - ret = sdhci_msm_enable_controller_clock(host); - if (ret) { - pr_err("%s: Failed to enable reqd clocks\n", - mmc_hostname(host->mmc)); - goto skip_ice_resume; - } - ret = sdhci_msm_ice_resume(host); - if (ret) - pr_err("%s: failed to resume crypto engine %d\n", 
- mmc_hostname(host->mmc), ret); - } -skip_ice_resume: if (host->mmc->card && mmc_card_sdio(host->mmc->card)) goto defer_enable_host_irq; diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h index 66e48622498b..8a52f8e9e201 100644 --- a/drivers/mmc/host/sdhci-msm.h +++ b/drivers/mmc/host/sdhci-msm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -171,12 +171,6 @@ struct sdhci_msm_bus_vote { struct device_attribute max_bus_bw; }; -struct sdhci_msm_ice_data { - struct qcom_ice_variant_ops *vops; - struct platform_device *pdev; - int state; -}; - struct sdhci_msm_regs_restore { bool is_supported; bool is_valid; @@ -221,8 +215,6 @@ struct sdhci_msm_debug_data { struct sdhci_msm_host { struct platform_device *pdev; void __iomem *core_mem; /* MSM SDCC mapped address */ - void __iomem *cryptoio; /* ICE HCI mapped address */ - bool ice_hci_support; int pwr_irq; /* power irq */ struct clk *clk; /* main SD/MMC bus clock */ struct clk *pclk; /* SDHC peripheral bus clock */ @@ -256,7 +248,6 @@ struct sdhci_msm_host { bool enhanced_strobe; bool rclk_delay_fix; u32 caps_0; - struct sdhci_msm_ice_data ice; u32 ice_clk_rate; struct sdhci_msm_pm_qos_group *pm_qos; int pm_qos_prev_cpu; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 90ff537636b1..efd37a9d94f7 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1834,50 +1834,6 @@ static int sdhci_get_tuning_cmd(struct sdhci_host *host) return MMC_SEND_TUNING_BLOCK; } -static int sdhci_crypto_cfg(struct sdhci_host *host, struct mmc_request *mrq, - u32 slot) -{ - int err = 0; - - if (host->mmc->inlinecrypt_reset_needed && - host->ops->crypto_engine_reset) { - err = host->ops->crypto_engine_reset(host); - if (err) { - 
pr_err("%s: crypto reset failed\n", - mmc_hostname(host->mmc)); - goto out; - } - host->mmc->inlinecrypt_reset_needed = false; - } - - if (host->ops->crypto_engine_cfg) { - err = host->ops->crypto_engine_cfg(host, mrq, slot); - if (err) { - pr_err("%s: failed to configure crypto\n", - mmc_hostname(host->mmc)); - goto out; - } - } -out: - return err; -} - -static int sdhci_crypto_cfg_end(struct sdhci_host *host, - struct mmc_request *mrq) -{ - int err = 0; - - if (host->ops->crypto_engine_cfg_end) { - err = host->ops->crypto_engine_cfg_end(host, mrq); - if (err) { - pr_err("%s: failed to configure crypto\n", - mmc_hostname(host->mmc)); - return err; - } - } - return 0; -} - static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct sdhci_host *host; @@ -1944,13 +1900,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) sdhci_get_tuning_cmd(host)); } - if (host->is_crypto_en) { - spin_unlock_irqrestore(&host->lock, flags); - if (sdhci_crypto_cfg(host, mrq, 0)) - goto end_req; - spin_lock_irqsave(&host->lock, flags); - } - if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) sdhci_send_command(host, mrq->sbc); else @@ -1960,11 +1909,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) mmiowb(); spin_unlock_irqrestore(&host->lock, flags); return; -end_req: - mrq->cmd->error = -EIO; - if (mrq->data) - mrq->data->error = -EIO; - mmc_request_done(host->mmc, mrq); } void sdhci_set_bus_width(struct sdhci_host *host, int width) @@ -3009,7 +2953,6 @@ static bool sdhci_request_done(struct sdhci_host *host) mmiowb(); spin_unlock_irqrestore(&host->lock, flags); - sdhci_crypto_cfg_end(host, mrq); mmc_request_done(host->mmc, mrq); return false; @@ -4087,59 +4030,6 @@ static void sdhci_cmdq_post_cqe_halt(struct mmc_host *mmc) SDHCI_INT_RESPONSE, SDHCI_INT_ENABLE); sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS); } -static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc, - struct mmc_request *mrq, u32 
slot, u64 *ice_ctx) -{ - struct sdhci_host *host = mmc_priv(mmc); - int err = 0; - - if (!host->is_crypto_en) - return 0; - - if (mmc->inlinecrypt_reset_needed && host->ops->crypto_engine_reset) { - err = host->ops->crypto_engine_reset(host); - if (err) { - pr_err("%s: crypto reset failed\n", - mmc_hostname(host->mmc)); - goto out; - } - mmc->inlinecrypt_reset_needed = false; - } - - if (host->ops->crypto_engine_cmdq_cfg) { - err = host->ops->crypto_engine_cmdq_cfg(host, mrq, - slot, ice_ctx); - if (err) { - pr_err("%s: failed to configure crypto\n", - mmc_hostname(host->mmc)); - goto out; - } - } -out: - return err; -} - -static int sdhci_cmdq_crypto_cfg_end(struct mmc_host *mmc, - struct mmc_request *mrq) -{ - struct sdhci_host *host = mmc_priv(mmc); - - if (!host->is_crypto_en) - return 0; - - return sdhci_crypto_cfg_end(host, mrq); -} - -static void sdhci_cmdq_crypto_cfg_reset(struct mmc_host *mmc, unsigned int slot) -{ - struct sdhci_host *host = mmc_priv(mmc); - - if (!host->is_crypto_en) - return; - - if (host->ops->crypto_cfg_reset) - host->ops->crypto_cfg_reset(host, slot); -} #else static void sdhci_cmdq_set_transfer_params(struct mmc_host *mmc) { @@ -4184,23 +4074,6 @@ static void sdhci_cmdq_clear_set_dumpregs(struct mmc_host *mmc, bool set) static void sdhci_cmdq_post_cqe_halt(struct mmc_host *mmc) { -} - -static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc, - struct mmc_request *mrq, u32 slot, u64 *ice_ctx) -{ - return 0; -} - -static int sdhci_cmdq_crypto_cfg_end(struct mmc_host *mmc, - struct mmc_request *mrq) -{ - return 0; -} - -static void sdhci_cmdq_crypto_cfg_reset(struct mmc_host *mmc, unsigned int slot) -{ - } #endif @@ -4213,9 +4086,6 @@ static const struct cmdq_host_ops sdhci_cmdq_ops = { .enhanced_strobe_mask = sdhci_enhanced_strobe_mask, .post_cqe_halt = sdhci_cmdq_post_cqe_halt, .set_transfer_params = sdhci_cmdq_set_transfer_params, - .crypto_cfg = sdhci_cmdq_crypto_cfg, - .crypto_cfg_end = sdhci_cmdq_crypto_cfg_end, - .crypto_cfg_reset 
= sdhci_cmdq_crypto_cfg_reset, }; #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index f2cf328764d9..47f05e16d685 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -653,7 +653,6 @@ struct sdhci_host { enum sdhci_power_policy power_policy; bool sdio_irq_async_status; - bool is_crypto_en; u32 auto_cmd_err_sts; struct ratelimit_state dbg_dump_rs; @@ -695,14 +694,6 @@ struct sdhci_ops { unsigned int (*get_ro)(struct sdhci_host *host); void (*reset)(struct sdhci_host *host, u8 mask); int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode); - int (*crypto_engine_cfg)(struct sdhci_host *host, - struct mmc_request *mrq, u32 slot); - int (*crypto_engine_cmdq_cfg)(struct sdhci_host *host, - struct mmc_request *mrq, u32 slot, u64 *ice_ctx); - int (*crypto_engine_cfg_end)(struct sdhci_host *host, - struct mmc_request *mrq); - int (*crypto_engine_reset)(struct sdhci_host *host); - void (*crypto_cfg_reset)(struct sdhci_host *host, unsigned int slot); void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); void (*hw_reset)(struct sdhci_host *host); void (*adma_workaround)(struct sdhci_host *host, u32 intmask); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index e29a46abfd4f..2adab19052d0 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2172,8 +2172,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) if (!shost->use_clustering) q->limits.cluster = 0; - if (shost->inlinecrypt_support) - queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q); /* * Set a reasonable default alignment: The larger of 32-byte (dword), * which is a common minimum for HBAs, and the minimum DMA alignment, diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index a3c906c78a69..8d4ef369aa15 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -101,19 +101,6 @@ config SCSI_UFS_QCOM Select this if you have UFS 
controller on QCOM chipset. If unsure, say N. -config SCSI_UFS_QCOM_ICE - bool "QCOM specific hooks to Inline Crypto Engine for UFS driver" - depends on SCSI_UFS_QCOM && CRYPTO_DEV_QCOM_ICE - help - This selects the QCOM specific additions to support Inline Crypto - Engine (ICE). - ICE accelerates the crypto operations and maintains the high UFS - performance. - - Select this if you have ICE supported for UFS on QCOM chipset. - If unsure, say N. - - config SCSI_UFS_TEST tristate "Universal Flash Storage host controller driver unit-tests" depends on SCSI_UFSHCD && IOSCHED_TEST diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 935b34a2fa0b..bf374ee1f6e2 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -3,7 +3,6 @@ obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o -obj-$(CONFIG_SCSI_UFS_QCOM_ICE) += ufs-qcom-ice.o obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c deleted file mode 100644 index 8bb1f54455d1..000000000000 --- a/drivers/scsi/ufs/ufs-qcom-ice.c +++ /dev/null @@ -1,777 +0,0 @@ -/* - * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include -#include -#include -#include -#include - -#include "ufshcd.h" -#include "ufs-qcom-ice.h" -#include "ufs-qcom-debugfs.h" - -#define UFS_QCOM_CRYPTO_LABEL "ufs-qcom-crypto" -/* Timeout waiting for ICE initialization, that requires TZ access */ -#define UFS_QCOM_ICE_COMPLETION_TIMEOUT_MS 500 - -#define UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN 0 - -static struct workqueue_struct *ice_workqueue; - -static void ufs_qcom_ice_dump_regs(struct ufs_qcom_host *qcom_host, int offset, - int len, char *prefix) -{ - print_hex_dump(KERN_ERR, prefix, - len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE, - 16, 4, qcom_host->hba->mmio_base + offset, len * 4, - false); -} - -void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host) -{ - int i; - - if (!(qcom_host->dbg_print_en & UFS_QCOM_DBG_PRINT_ICE_REGS_EN)) - return; - - ufs_qcom_ice_dump_regs(qcom_host, REG_UFS_QCOM_ICE_CFG, 1, - "REG_UFS_QCOM_ICE_CFG "); - for (i = 0; i < NUM_QCOM_ICE_CTRL_INFO_n_REGS; i++) { - pr_err("REG_UFS_QCOM_ICE_CTRL_INFO_1_%d = 0x%08X\n", i, - ufshcd_readl(qcom_host->hba, - (REG_UFS_QCOM_ICE_CTRL_INFO_1_n + 8 * i))); - - pr_err("REG_UFS_QCOM_ICE_CTRL_INFO_2_%d = 0x%08X\n", i, - ufshcd_readl(qcom_host->hba, - (REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 8 * i))); - } - - if (qcom_host->ice.pdev && qcom_host->ice.vops && - qcom_host->ice.vops->debug) - qcom_host->ice.vops->debug(qcom_host->ice.pdev); -} - -static void ufs_qcom_ice_error_cb(void *host_ctrl, u32 error) -{ - struct ufs_qcom_host *qcom_host = (struct ufs_qcom_host *)host_ctrl; - - dev_err(qcom_host->hba->dev, "%s: Error in ice operation 0x%x", - __func__, error); - - if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_ACTIVE) - qcom_host->ice.state = UFS_QCOM_ICE_STATE_DISABLED; -} - -static struct platform_device *ufs_qcom_ice_get_pdevice(struct device *ufs_dev) -{ - struct device_node *node; - struct platform_device *ice_pdev = NULL; - - node = of_parse_phandle(ufs_dev->of_node, UFS_QCOM_CRYPTO_LABEL, 0); - - if (!node) { - dev_err(ufs_dev, "%s: 
ufs-qcom-crypto property not specified\n", - __func__); - goto out; - } - - ice_pdev = qcom_ice_get_pdevice(node); -out: - return ice_pdev; -} - -static -struct qcom_ice_variant_ops *ufs_qcom_ice_get_vops(struct device *ufs_dev) -{ - struct qcom_ice_variant_ops *ice_vops = NULL; - struct device_node *node; - - node = of_parse_phandle(ufs_dev->of_node, UFS_QCOM_CRYPTO_LABEL, 0); - - if (!node) { - dev_err(ufs_dev, "%s: ufs-qcom-crypto property not specified\n", - __func__); - goto out; - } - - ice_vops = qcom_ice_get_variant_ops(node); - - if (!ice_vops) - dev_err(ufs_dev, "%s: invalid ice_vops\n", __func__); - - of_node_put(node); -out: - return ice_vops; -} - -/** - * ufs_qcom_ice_get_dev() - sets pointers to ICE data structs in UFS QCom host - * @qcom_host: Pointer to a UFS QCom internal host structure. - * - * Sets ICE platform device pointer and ICE vops structure - * corresponding to the current UFS device. - * - * Return: -EINVAL in-case of invalid input parameters: - * qcom_host, qcom_host->hba or qcom_host->hba->dev - * -ENODEV in-case ICE device is not required - * -EPROBE_DEFER in-case ICE is required and hasn't been probed yet - * 0 otherwise - */ -int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host) -{ - struct device *ufs_dev; - int err = 0; - - if (!qcom_host || !qcom_host->hba || !qcom_host->hba->dev) { - pr_err("%s: invalid qcom_host %p or qcom_host->hba or qcom_host->hba->dev\n", - __func__, qcom_host); - err = -EINVAL; - goto out; - } - - ufs_dev = qcom_host->hba->dev; - - qcom_host->ice.vops = ufs_qcom_ice_get_vops(ufs_dev); - qcom_host->ice.pdev = ufs_qcom_ice_get_pdevice(ufs_dev); - - if (qcom_host->ice.pdev == ERR_PTR(-EPROBE_DEFER)) { - dev_err(ufs_dev, "%s: ICE device not probed yet\n", - __func__); - qcom_host->ice.pdev = NULL; - qcom_host->ice.vops = NULL; - err = -EPROBE_DEFER; - goto out; - } - - if (!qcom_host->ice.pdev || !qcom_host->ice.vops) { - dev_err(ufs_dev, "%s: invalid platform device %p or vops %p\n", - __func__, 
qcom_host->ice.pdev, qcom_host->ice.vops); - qcom_host->ice.pdev = NULL; - qcom_host->ice.vops = NULL; - err = -ENODEV; - goto out; - } - - qcom_host->ice.state = UFS_QCOM_ICE_STATE_DISABLED; - -out: - return err; -} - -static void ufs_qcom_ice_cfg_work(struct work_struct *work) -{ - unsigned long flags; - struct ufs_qcom_host *qcom_host = - container_of(work, struct ufs_qcom_host, ice_cfg_work); - - if (!qcom_host->ice.vops->config_start) - return; - - spin_lock_irqsave(&qcom_host->ice_work_lock, flags); - if (!qcom_host->req_pending || - ufshcd_is_shutdown_ongoing(qcom_host->hba)) { - qcom_host->work_pending = false; - spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags); - return; - } - spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags); - - /* - * config_start is called again as previous attempt returned -EAGAIN, - * this call shall now take care of the necessary key setup. - */ - qcom_host->ice.vops->config_start(qcom_host->ice.pdev, - qcom_host->req_pending, NULL, false); - - spin_lock_irqsave(&qcom_host->ice_work_lock, flags); - qcom_host->req_pending = NULL; - qcom_host->work_pending = false; - spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags); -} - -/** - * ufs_qcom_ice_init() - initializes the ICE-UFS interface and ICE device - * @qcom_host: Pointer to a UFS QCom internal host structure. - * qcom_host, qcom_host->hba and qcom_host->hba->dev should all - * be valid pointers. - * - * Return: -EINVAL in-case of an error - * 0 otherwise - */ -int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host) -{ - struct device *ufs_dev = qcom_host->hba->dev; - int err; - - err = qcom_host->ice.vops->init(qcom_host->ice.pdev, - qcom_host, - ufs_qcom_ice_error_cb); - if (err) { - dev_err(ufs_dev, "%s: ice init failed. 
err = %d\n", - __func__, err); - goto out; - } else { - qcom_host->ice.state = UFS_QCOM_ICE_STATE_ACTIVE; - } - - qcom_host->dbg_print_en |= UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN; - if (!ice_workqueue) { - ice_workqueue = alloc_workqueue("ice-set-key", - WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0); - if (!ice_workqueue) { - dev_err(ufs_dev, "%s: workqueue allocation failed.\n", - __func__); - err = -ENOMEM; - goto out; - } - INIT_WORK(&qcom_host->ice_cfg_work, ufs_qcom_ice_cfg_work); - } - -out: - return err; -} - -static inline bool ufs_qcom_is_data_cmd(char cmd_op, bool is_write) -{ - if (is_write) { - if (cmd_op == WRITE_6 || cmd_op == WRITE_10 || - cmd_op == WRITE_16) - return true; - } else { - if (cmd_op == READ_6 || cmd_op == READ_10 || - cmd_op == READ_16) - return true; - } - - return false; -} - -int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host, - struct scsi_cmnd *cmd, u8 *cc_index, bool *enable) -{ - struct ice_data_setting ice_set; - char cmd_op = cmd->cmnd[0]; - int err; - unsigned long flags; - - if (!qcom_host->ice.pdev || !qcom_host->ice.vops) { - dev_dbg(qcom_host->hba->dev, "%s: ice device is not enabled\n", - __func__); - return 0; - } - - if (qcom_host->ice.vops->config_start) { - memset(&ice_set, 0, sizeof(ice_set)); - - spin_lock_irqsave( - &qcom_host->ice_work_lock, flags); - - err = qcom_host->ice.vops->config_start(qcom_host->ice.pdev, - cmd->request, &ice_set, true); - if (err) { - /* - * config_start() returns -EAGAIN when a key slot is - * available but still not configured. As configuration - * requires a non-atomic context, this means we should - * call the function again from the worker thread to do - * the configuration. For this request the error will - * propagate so it will be re-queued. 
- */ - if (err == -EAGAIN) { - if (!ice_workqueue) { - spin_unlock_irqrestore( - &qcom_host->ice_work_lock, - flags); - - dev_err(qcom_host->hba->dev, - "%s: error %d workqueue NULL\n", - __func__, err); - return -EINVAL; - } - - dev_dbg(qcom_host->hba->dev, - "%s: scheduling task for ice setup\n", - __func__); - - if (!qcom_host->work_pending) { - qcom_host->req_pending = cmd->request; - - if (!queue_work(ice_workqueue, - &qcom_host->ice_cfg_work)) { - qcom_host->req_pending = NULL; - - spin_unlock_irqrestore( - &qcom_host->ice_work_lock, - flags); - - return err; - } - qcom_host->work_pending = true; - } - - } else { - if (err != -EBUSY) - dev_err(qcom_host->hba->dev, - "%s: error in ice_vops->config %d\n", - __func__, err); - } - - spin_unlock_irqrestore(&qcom_host->ice_work_lock, - flags); - - return err; - } - - spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags); - - if (ufs_qcom_is_data_cmd(cmd_op, true)) - *enable = !ice_set.encr_bypass; - else if (ufs_qcom_is_data_cmd(cmd_op, false)) - *enable = !ice_set.decr_bypass; - - if (ice_set.crypto_data.key_index >= 0) - *cc_index = (u8)ice_set.crypto_data.key_index; - } - return 0; -} - -/** - * ufs_qcom_ice_cfg_start() - starts configuring UFS's ICE registers - * for an ICE transaction - * @qcom_host: Pointer to a UFS QCom internal host structure. - * qcom_host, qcom_host->hba and qcom_host->hba->dev should all - * be valid pointers. - * @cmd: Pointer to a valid scsi command. cmd->request should also be - * a valid pointer. 
- * - * Return: -EINVAL in-case of an error - * 0 otherwise - */ -int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host, - struct scsi_cmnd *cmd) -{ - struct device *dev = qcom_host->hba->dev; - int err = 0; - struct ice_data_setting ice_set; - unsigned int slot = 0; - sector_t lba = 0; - unsigned int ctrl_info_val = 0; - unsigned int bypass = 0; - struct request *req; - char cmd_op; - unsigned long flags; - - if (!qcom_host->ice.pdev || !qcom_host->ice.vops) { - dev_dbg(dev, "%s: ice device is not enabled\n", __func__); - goto out; - } - - if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) { - dev_err(dev, "%s: ice state (%d) is not active\n", - __func__, qcom_host->ice.state); - return -EINVAL; - } - - if (qcom_host->hw_ver.major >= 0x3) { - /* - * ICE 3.0 crypto sequences were changed, - * CTRL_INFO register no longer exists - * and doesn't need to be configured. - * The configuration is done via utrd. - */ - return 0; - } - - req = cmd->request; - if (req->bio) - lba = (req->bio->bi_iter.bi_sector) >> - UFS_QCOM_ICE_TR_DATA_UNIT_4_KB; - - slot = req->tag; - if (slot < 0 || slot > qcom_host->hba->nutrs) { - dev_err(dev, "%s: slot (%d) is out of boundaries (0...%d)\n", - __func__, slot, qcom_host->hba->nutrs); - return -EINVAL; - } - - - memset(&ice_set, 0, sizeof(ice_set)); - if (qcom_host->ice.vops->config_start) { - - spin_lock_irqsave( - &qcom_host->ice_work_lock, flags); - - err = qcom_host->ice.vops->config_start(qcom_host->ice.pdev, - req, &ice_set, true); - if (err) { - /* - * config_start() returns -EAGAIN when a key slot is - * available but still not configured. As configuration - * requires a non-atomic context, this means we should - * call the function again from the worker thread to do - * the configuration. For this request the error will - * propagate so it will be re-queued. 
- */ - if (err == -EAGAIN) { - if (!ice_workqueue) { - spin_unlock_irqrestore( - &qcom_host->ice_work_lock, - flags); - - dev_err(qcom_host->hba->dev, - "%s: error %d workqueue NULL\n", - __func__, err); - return -EINVAL; - } - - dev_dbg(qcom_host->hba->dev, - "%s: scheduling task for ice setup\n", - __func__); - - if (!qcom_host->work_pending) { - - qcom_host->req_pending = cmd->request; - if (!queue_work(ice_workqueue, - &qcom_host->ice_cfg_work)) { - qcom_host->req_pending = NULL; - - spin_unlock_irqrestore( - &qcom_host->ice_work_lock, - flags); - - return err; - } - qcom_host->work_pending = true; - } - - } else { - if (err != -EBUSY) - dev_err(qcom_host->hba->dev, - "%s: error in ice_vops->config %d\n", - __func__, err); - } - - spin_unlock_irqrestore( - &qcom_host->ice_work_lock, flags); - - return err; - } - - spin_unlock_irqrestore( - &qcom_host->ice_work_lock, flags); - } - - cmd_op = cmd->cmnd[0]; - -#define UFS_QCOM_DIR_WRITE true -#define UFS_QCOM_DIR_READ false - /* if non data command, bypass shall be enabled */ - if (!ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_WRITE) && - !ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_READ)) - bypass = UFS_QCOM_ICE_ENABLE_BYPASS; - /* if writing data command */ - else if (ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_WRITE)) - bypass = ice_set.encr_bypass ? UFS_QCOM_ICE_ENABLE_BYPASS : - UFS_QCOM_ICE_DISABLE_BYPASS; - /* if reading data command */ - else if (ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_READ)) - bypass = ice_set.decr_bypass ? 
UFS_QCOM_ICE_ENABLE_BYPASS : - UFS_QCOM_ICE_DISABLE_BYPASS; - - - /* Configure ICE index */ - ctrl_info_val = - (ice_set.crypto_data.key_index & - MASK_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX) - << OFFSET_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX; - - /* Configure data unit size of transfer request */ - ctrl_info_val |= - UFS_QCOM_ICE_TR_DATA_UNIT_4_KB - << OFFSET_UFS_QCOM_ICE_CTRL_INFO_CDU; - - /* Configure ICE bypass mode */ - ctrl_info_val |= - (bypass & MASK_UFS_QCOM_ICE_CTRL_INFO_BYPASS) - << OFFSET_UFS_QCOM_ICE_CTRL_INFO_BYPASS; - - if (qcom_host->hw_ver.major == 0x1) { - ufshcd_writel(qcom_host->hba, lba, - (REG_UFS_QCOM_ICE_CTRL_INFO_1_n + 8 * slot)); - - ufshcd_writel(qcom_host->hba, ctrl_info_val, - (REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 8 * slot)); - } - if (qcom_host->hw_ver.major == 0x2) { - ufshcd_writel(qcom_host->hba, (lba & 0xFFFFFFFF), - (REG_UFS_QCOM_ICE_CTRL_INFO_1_n + 16 * slot)); - - ufshcd_writel(qcom_host->hba, ((lba >> 32) & 0xFFFFFFFF), - (REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 16 * slot)); - - ufshcd_writel(qcom_host->hba, ctrl_info_val, - (REG_UFS_QCOM_ICE_CTRL_INFO_3_n + 16 * slot)); - } - - /* - * Ensure UFS-ICE registers are being configured - * before next operation, otherwise UFS Host Controller might - * set get errors - */ - mb(); -out: - return err; -} - -/** - * ufs_qcom_ice_cfg_end() - finishes configuring UFS's ICE registers - * for an ICE transaction - * @qcom_host: Pointer to a UFS QCom internal host structure. - * qcom_host, qcom_host->hba and - * qcom_host->hba->dev should all - * be valid pointers. - * @cmd: Pointer to a valid scsi command. cmd->request should also be - * a valid pointer. 
- * - * Return: -EINVAL in-case of an error - * 0 otherwise - */ -int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host, struct request *req) -{ - int err = 0; - struct device *dev = qcom_host->hba->dev; - - if (qcom_host->ice.vops->config_end) { - err = qcom_host->ice.vops->config_end(req); - if (err) { - dev_err(dev, "%s: error in ice_vops->config_end %d\n", - __func__, err); - return err; - } - } - - return 0; -} - -/** - * ufs_qcom_ice_reset() - resets UFS-ICE interface and ICE device - * @qcom_host: Pointer to a UFS QCom internal host structure. - * qcom_host, qcom_host->hba and qcom_host->hba->dev should all - * be valid pointers. - * - * Return: -EINVAL in-case of an error - * 0 otherwise - */ -int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host) -{ - struct device *dev = qcom_host->hba->dev; - int err = 0; - - if (!qcom_host->ice.pdev) { - dev_dbg(dev, "%s: ice device is not enabled\n", __func__); - goto out; - } - - if (!qcom_host->ice.vops) { - dev_err(dev, "%s: invalid ice_vops\n", __func__); - return -EINVAL; - } - - if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) - goto out; - - if (qcom_host->ice.vops->reset) { - err = qcom_host->ice.vops->reset(qcom_host->ice.pdev); - if (err) { - dev_err(dev, "%s: ice_vops->reset failed. err %d\n", - __func__, err); - goto out; - } - } - - if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) { - dev_err(qcom_host->hba->dev, - "%s: error. ice.state (%d) is not in active state\n", - __func__, qcom_host->ice.state); - err = -EINVAL; - } - -out: - return err; -} - - -/** - * ufs_qcom_ice_resume() - resumes UFS-ICE interface and ICE device from power - * collapse - * @qcom_host: Pointer to a UFS QCom internal host structure. - * qcom_host, qcom_host->hba and qcom_host->hba->dev should all - * be valid pointers. 
- * - * Return: -EINVAL in-case of an error - * 0 otherwise - */ -int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host) -{ - struct device *dev = qcom_host->hba->dev; - int err = 0; - - if (!qcom_host->ice.pdev) { - dev_dbg(dev, "%s: ice device is not enabled\n", __func__); - goto out; - } - - if (qcom_host->ice.state != - UFS_QCOM_ICE_STATE_SUSPENDED) { - goto out; - } - - if (!qcom_host->ice.vops) { - dev_err(dev, "%s: invalid ice_vops\n", __func__); - return -EINVAL; - } - - if (qcom_host->ice.vops->resume) { - err = qcom_host->ice.vops->resume(qcom_host->ice.pdev); - if (err) { - dev_err(dev, "%s: ice_vops->resume failed. err %d\n", - __func__, err); - return err; - } - } - qcom_host->ice.state = UFS_QCOM_ICE_STATE_ACTIVE; -out: - return err; -} - -/** - * ufs_qcom_is_ice_busy() - lets the caller of the function know if - * there is any ongoing operation in ICE in workqueue context. - * @qcom_host: Pointer to a UFS QCom internal host structure. - * qcom_host should be a valid pointer. - * - * Return: 1 if ICE is busy, 0 if it is free. - * -EINVAL in case of error. - */ -int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host) -{ - if (!qcom_host) { - pr_err("%s: invalid qcom_host %pK", __func__, qcom_host); - return -EINVAL; - } - - if (qcom_host->req_pending) - return 1; - else - return 0; -} - -/** - * ufs_qcom_ice_suspend() - suspends UFS-ICE interface and ICE device - * @qcom_host: Pointer to a UFS QCom internal host structure. - * qcom_host, qcom_host->hba and qcom_host->hba->dev should all - * be valid pointers. 
- * - * Return: -EINVAL in-case of an error - * 0 otherwise - */ -int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host) -{ - struct device *dev = qcom_host->hba->dev; - int err = 0; - - if (!qcom_host->ice.pdev) { - dev_dbg(dev, "%s: ice device is not enabled\n", __func__); - goto out; - } - - if (qcom_host->ice.vops->suspend) { - err = qcom_host->ice.vops->suspend(qcom_host->ice.pdev); - if (err) { - dev_err(qcom_host->hba->dev, - "%s: ice_vops->suspend failed. err %d\n", - __func__, err); - return -EINVAL; - } - } - - if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_ACTIVE) { - qcom_host->ice.state = UFS_QCOM_ICE_STATE_SUSPENDED; - } else if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_DISABLED) { - dev_err(qcom_host->hba->dev, - "%s: ice state is invalid: disabled\n", - __func__); - err = -EINVAL; - } - -out: - return err; -} - -/** - * ufs_qcom_ice_get_status() - returns the status of an ICE transaction - * @qcom_host: Pointer to a UFS QCom internal host structure. - * qcom_host, qcom_host->hba and qcom_host->hba->dev should all - * be valid pointers. - * @ice_status: Pointer to a valid output parameter. - * < 0 in case of ICE transaction failure. - * 0 otherwise. - * - * Return: -EINVAL in-case of an error - * 0 otherwise - */ -int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, int *ice_status) -{ - struct device *dev = NULL; - int err = 0; - int stat = -EINVAL; - - *ice_status = 0; - - dev = qcom_host->hba->dev; - if (!dev) { - err = -EINVAL; - goto out; - } - - if (!qcom_host->ice.pdev) { - dev_dbg(dev, "%s: ice device is not enabled\n", __func__); - goto out; - } - - if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) { - err = -EINVAL; - goto out; - } - - if (!qcom_host->ice.vops) { - dev_err(dev, "%s: invalid ice_vops\n", __func__); - return -EINVAL; - } - - if (qcom_host->ice.vops->status) { - stat = qcom_host->ice.vops->status(qcom_host->ice.pdev); - if (stat < 0) { - dev_err(dev, "%s: ice_vops->status failed. 
stat %d\n", - __func__, stat); - err = -EINVAL; - goto out; - } - - *ice_status = stat; - } - -out: - return err; -} diff --git a/drivers/scsi/ufs/ufs-qcom-ice.h b/drivers/scsi/ufs/ufs-qcom-ice.h deleted file mode 100644 index 88ffeb35f9f3..000000000000 --- a/drivers/scsi/ufs/ufs-qcom-ice.h +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#ifndef _UFS_QCOM_ICE_H_ -#define _UFS_QCOM_ICE_H_ - -#include - -#include "ufs-qcom.h" - -/* - * UFS host controller ICE registers. 
There are n [0..31] - * of each of these registers - */ -enum { - REG_UFS_QCOM_ICE_CFG = 0x2200, - REG_UFS_QCOM_ICE_CTRL_INFO_1_n = 0x2204, - REG_UFS_QCOM_ICE_CTRL_INFO_2_n = 0x2208, - REG_UFS_QCOM_ICE_CTRL_INFO_3_n = 0x220C, -}; -#define NUM_QCOM_ICE_CTRL_INFO_n_REGS 32 - -/* UFS QCOM ICE CTRL Info register offset */ -enum { - OFFSET_UFS_QCOM_ICE_CTRL_INFO_BYPASS = 0, - OFFSET_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX = 0x1, - OFFSET_UFS_QCOM_ICE_CTRL_INFO_CDU = 0x6, -}; - -/* UFS QCOM ICE CTRL Info register masks */ -enum { - MASK_UFS_QCOM_ICE_CTRL_INFO_BYPASS = 0x1, - MASK_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX = 0x1F, - MASK_UFS_QCOM_ICE_CTRL_INFO_CDU = 0x8, -}; - -/* UFS QCOM ICE encryption/decryption bypass state */ -enum { - UFS_QCOM_ICE_DISABLE_BYPASS = 0, - UFS_QCOM_ICE_ENABLE_BYPASS = 1, -}; - -/* UFS QCOM ICE Crypto Data Unit of target DUN of Transfer Request */ -enum { - UFS_QCOM_ICE_TR_DATA_UNIT_512_B = 0, - UFS_QCOM_ICE_TR_DATA_UNIT_1_KB = 1, - UFS_QCOM_ICE_TR_DATA_UNIT_2_KB = 2, - UFS_QCOM_ICE_TR_DATA_UNIT_4_KB = 3, - UFS_QCOM_ICE_TR_DATA_UNIT_8_KB = 4, - UFS_QCOM_ICE_TR_DATA_UNIT_16_KB = 5, - UFS_QCOM_ICE_TR_DATA_UNIT_32_KB = 6, -}; - -/* UFS QCOM ICE internal state */ -enum { - UFS_QCOM_ICE_STATE_DISABLED = 0, - UFS_QCOM_ICE_STATE_ACTIVE = 1, - UFS_QCOM_ICE_STATE_SUSPENDED = 2, -}; - -#ifdef CONFIG_SCSI_UFS_QCOM_ICE -int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host); -int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host); -int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host, - struct scsi_cmnd *cmd, u8 *cc_index, bool *enable); -int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host, - struct scsi_cmnd *cmd); -int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host, - struct request *req); -int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host); -int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host); -int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host); -int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, int *ice_status); 
-void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host); -int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host); -#else -inline int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host) -{ - if (qcom_host) { - qcom_host->ice.pdev = NULL; - qcom_host->ice.vops = NULL; - } - return -ENODEV; -} -inline int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host) -{ - return 0; -} -inline int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host, - struct scsi_cmnd *cmd) -{ - return 0; -} -inline int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host, - struct request *req) -{ - return 0; -} -inline int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host) -{ - return 0; -} -inline int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host) -{ - return 0; -} -inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host) -{ - return 0; -} -inline int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, - int *ice_status) -{ - return 0; -} -inline void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host) -{ - return; -} -inline int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host) -{ - return 0; -} -#endif /* CONFIG_SCSI_UFS_QCOM_ICE */ - -#endif /* UFS_QCOM_ICE_H_ */ diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 195e0428cb54..ff66f7c5893a 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019, Linux Foundation. All rights reserved. + * Copyright (c) 2013-2020, Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -28,7 +28,6 @@ #include "unipro.h" #include "ufs-qcom.h" #include "ufshci.h" -#include "ufs-qcom-ice.h" #include "ufs-qcom-debugfs.h" #include "ufs_quirks.h" @@ -406,14 +405,6 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, * is initialized. 
*/ err = ufs_qcom_enable_lane_clks(host); - if (!err && host->ice.pdev) { - err = ufs_qcom_ice_init(host); - if (err) { - dev_err(hba->dev, "%s: ICE init failed (%d)\n", - __func__, err); - err = -EINVAL; - } - } break; case POST_CHANGE: @@ -849,7 +840,6 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) ufs_qcom_config_vreg(hba->dev, host->vccq_parent, false); - ufs_qcom_ice_suspend(host); if (ufs_qcom_is_link_off(hba)) { /* Assert PHY soft reset */ ufs_qcom_assert_reset(hba); @@ -889,13 +879,6 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) if (err) goto out; - err = ufs_qcom_ice_resume(host); - if (err) { - dev_err(hba->dev, "%s: ufs_qcom_ice_resume failed, err = %d\n", - __func__, err); - goto out; - } - hba->is_sys_suspended = false; out: @@ -935,119 +918,6 @@ out: return ret; } -#ifdef CONFIG_SCSI_UFS_QCOM_ICE -static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba, - struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun) -{ - struct ufs_qcom_host *host = ufshcd_get_variant(hba); - struct request *req; - int ret; - - if (lrbp->cmd && lrbp->cmd->request) - req = lrbp->cmd->request; - else - return 0; - - /* Use request LBA or given dun as the DUN value */ - if (req->bio) { -#ifdef CONFIG_PFK - if (bio_dun(req->bio)) { - /* dun @bio can be split, so we have to adjust offset */ - *dun = bio_dun(req->bio); - } else { - *dun = req->bio->bi_iter.bi_sector; - *dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB; - } -#else - *dun = req->bio->bi_iter.bi_sector; - *dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB; -#endif - } - ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable); - - return ret; -} - -static -int ufs_qcom_crytpo_engine_cfg_start(struct ufs_hba *hba, unsigned int task_tag) -{ - struct ufs_qcom_host *host = ufshcd_get_variant(hba); - struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; - int err = 0; - - if (!host->ice.pdev || - !lrbp->cmd || - (lrbp->command_type != UTP_CMD_TYPE_SCSI && - lrbp->command_type != 
UTP_CMD_TYPE_UFS_STORAGE)) - goto out; - - err = ufs_qcom_ice_cfg_start(host, lrbp->cmd); -out: - return err; -} - -static -int ufs_qcom_crytpo_engine_cfg_end(struct ufs_hba *hba, - struct ufshcd_lrb *lrbp, struct request *req) -{ - struct ufs_qcom_host *host = ufshcd_get_variant(hba); - int err = 0; - - if (!host->ice.pdev || (lrbp->command_type != UTP_CMD_TYPE_SCSI && - lrbp->command_type != UTP_CMD_TYPE_UFS_STORAGE)) - goto out; - - err = ufs_qcom_ice_cfg_end(host, req); -out: - return err; -} - -static -int ufs_qcom_crytpo_engine_reset(struct ufs_hba *hba) -{ - struct ufs_qcom_host *host = ufshcd_get_variant(hba); - int err = 0; - - if (!host->ice.pdev) - goto out; - - err = ufs_qcom_ice_reset(host); -out: - return err; -} - -static int ufs_qcom_crypto_engine_get_status(struct ufs_hba *hba, u32 *status) -{ - struct ufs_qcom_host *host = ufshcd_get_variant(hba); - - if (!status) - return -EINVAL; - - return ufs_qcom_ice_get_status(host, status); -} - -static int ufs_qcom_crypto_get_pending_req_status(struct ufs_hba *hba) -{ - struct ufs_qcom_host *host = ufshcd_get_variant(hba); - int err = 0; - - if (!host->ice.pdev) - goto out; - - err = ufs_qcom_is_ice_busy(host); -out: - return err; -} - -#else /* !CONFIG_SCSI_UFS_QCOM_ICE */ -#define ufs_qcom_crypto_req_setup NULL -#define ufs_qcom_crytpo_engine_cfg_start NULL -#define ufs_qcom_crytpo_engine_cfg_end NULL -#define ufs_qcom_crytpo_engine_reset NULL -#define ufs_qcom_crypto_engine_get_status NULL -#define ufs_qcom_crypto_get_pending_req_status NULL -#endif /* CONFIG_SCSI_UFS_QCOM_ICE */ - struct ufs_qcom_dev_params { u32 pwm_rx_gear; /* pwm rx gear to work in */ u32 pwm_tx_gear; /* pwm tx gear to work in */ @@ -1629,7 +1499,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, enum ufs_notify_change_status status) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); - int err = 0; /* * In case ufs_qcom_init() is not yet done, simply ignore. 
@@ -1648,14 +1517,7 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, if (ufshcd_is_hs_mode(&hba->pwr_info)) ufs_qcom_dev_ref_clk_ctrl(host, true); - err = ufs_qcom_ice_resume(host); - if (err) - goto out; } else if (!on && (status == PRE_CHANGE)) { - err = ufs_qcom_ice_suspend(host); - if (err) - goto out; - /* * If auto hibern8 is supported then the link will already * be in hibern8 state and the ref clock can be gated. @@ -1674,8 +1536,7 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, } } -out: - return err; + return 0; } #ifdef CONFIG_SMP /* CONFIG_SMP */ @@ -2209,36 +2070,9 @@ static int ufs_qcom_init(struct ufs_hba *hba) /* Make a two way bind between the qcom host and the hba */ host->hba = hba; - spin_lock_init(&host->ice_work_lock); ufshcd_set_variant(hba, host); - err = ufs_qcom_ice_get_dev(host); - if (err == -EPROBE_DEFER) { - /* - * UFS driver might be probed before ICE driver does. - * In that case we would like to return EPROBE_DEFER code - * in order to delay its probing. - */ - dev_err(dev, "%s: required ICE device not probed yet err = %d\n", - __func__, err); - goto out_variant_clear; - - } else if (err == -ENODEV) { - /* - * ICE device is not enabled in DTS file. No need for further - * initialization of ICE driver. 
- */ - dev_warn(dev, "%s: ICE device is not enabled", - __func__); - } else if (err) { - dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n", - __func__, err); - goto out_variant_clear; - } else { - hba->host->inlinecrypt_support = 1; - } - host->generic_phy = devm_phy_get(dev, "ufsphy"); if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) { @@ -2812,7 +2646,6 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep) usleep_range(1000, 1100); ufs_qcom_phy_dbg_register_dump(phy); usleep_range(1000, 1100); - ufs_qcom_ice_print_regs(host); } /** @@ -2843,15 +2676,6 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = { #endif }; -static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = { - .crypto_req_setup = ufs_qcom_crypto_req_setup, - .crypto_engine_cfg_start = ufs_qcom_crytpo_engine_cfg_start, - .crypto_engine_cfg_end = ufs_qcom_crytpo_engine_cfg_end, - .crypto_engine_reset = ufs_qcom_crytpo_engine_reset, - .crypto_engine_get_status = ufs_qcom_crypto_engine_get_status, - .crypto_get_req_status = ufs_qcom_crypto_get_pending_req_status, -}; - static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = { .req_start = ufs_qcom_pm_qos_req_start, .req_end = ufs_qcom_pm_qos_req_end, @@ -2860,7 +2684,6 @@ static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = { static struct ufs_hba_variant ufs_hba_qcom_variant = { .name = "qcom", .vops = &ufs_hba_qcom_vops, - .crypto_vops = &ufs_hba_crypto_variant_ops, .pm_qos_vops = &ufs_hba_pm_qos_variant_ops, }; diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h index 95304a581453..34140f91437d 100644 --- a/drivers/scsi/ufs/ufs-qcom.h +++ b/drivers/scsi/ufs/ufs-qcom.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -236,26 +236,6 @@ struct ufs_qcom_testbus { u8 select_minor; }; -/** - * struct ufs_qcom_ice_data - ICE related information - * @vops: pointer to variant operations of ICE - * @async_done: completion for supporting ICE's driver asynchronous nature - * @pdev: pointer to the proper ICE platform device - * @state: UFS-ICE interface's internal state (see - * ufs-qcom-ice.h for possible internal states) - * @quirks: UFS-ICE interface related quirks - * @crypto_engine_err: crypto engine errors - */ -struct ufs_qcom_ice_data { - struct qcom_ice_variant_ops *vops; - struct platform_device *pdev; - int state; - - u16 quirks; - - bool crypto_engine_err; -}; - #ifdef CONFIG_DEBUG_FS struct qcom_debugfs_files { struct dentry *debugfs_root; @@ -363,7 +343,6 @@ struct ufs_qcom_host { bool disable_lpm; bool is_lane_clks_enabled; bool sec_cfg_updated; - struct ufs_qcom_ice_data ice; void __iomem *dev_ref_clk_ctrl_mmio; bool is_dev_ref_clk_enabled; @@ -378,8 +357,6 @@ struct ufs_qcom_host { u32 dbg_print_en; struct ufs_qcom_testbus testbus; - spinlock_t ice_work_lock; - struct work_struct ice_cfg_work; struct request *req_pending; struct ufs_vreg *vddp_ref_clk; struct ufs_vreg *vccq_parent; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index f1896b29b3ac..d125c70bfe72 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -1409,8 +1409,6 @@ static inline void ufshcd_hba_start(struct ufs_hba *hba) { u32 val = CONTROLLER_ENABLE; - if (ufshcd_is_crypto_supported(hba)) - val |= CRYPTO_GENERAL_ENABLE; ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE); } @@ -3360,41 +3358,6 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); } -static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba, - struct ufshcd_lrb *lrbp) -{ - struct utp_transfer_req_desc 
*req_desc = lrbp->utr_descriptor_ptr; - u8 cc_index = 0; - bool enable = false; - u64 dun = 0; - int ret; - - /* - * Call vendor specific code to get crypto info for this request: - * enable, crypto config. index, DUN. - * If bypass is set, don't bother setting the other fields. - */ - ret = ufshcd_vops_crypto_req_setup(hba, lrbp, &cc_index, &enable, &dun); - if (ret) { - if (ret != -EAGAIN) { - dev_err(hba->dev, - "%s: failed to setup crypto request (%d)\n", - __func__, ret); - } - - return ret; - } - - if (!enable) - goto out; - - req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE; - req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF); - req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF); -out: - return 0; -} - /** * ufshcd_prepare_req_desc_hdr() - Fills the requests header * descriptor according to request @@ -3443,9 +3406,6 @@ static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, req_desc->prd_table_length = 0; - if (ufshcd_is_crypto_supported(hba)) - return ufshcd_prepare_crypto_utrd(hba, lrbp); - return 0; } @@ -3709,13 +3669,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) err = ufshcd_get_read_lock(hba, cmd->device->lun); if (unlikely(err < 0)) { if (err == -EPERM) { - if (!ufshcd_vops_crypto_engine_get_req_status(hba)) { - set_host_byte(cmd, DID_ERROR); - cmd->scsi_done(cmd); - return 0; - } else { - return SCSI_MLQUEUE_HOST_BUSY; - } + return SCSI_MLQUEUE_HOST_BUSY; } if (err == -EAGAIN) return SCSI_MLQUEUE_HOST_BUSY; @@ -3851,22 +3805,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) goto out; } - err = ufshcd_vops_crypto_engine_cfg_start(hba, tag); - if (err) { - if (err != -EAGAIN) - dev_err(hba->dev, - "%s: failed to configure crypto engine %d\n", - __func__, err); - - scsi_dma_unmap(lrbp->cmd); - lrbp->cmd = NULL; - clear_bit_unlock(tag, &hba->lrb_in_use); - ufshcd_release_all(hba); - ufshcd_vops_pm_qos_req_end(hba, cmd->request, true); - - goto out; - } - /* Make 
sure descriptors are ready before ringing the doorbell */ wmb(); @@ -3882,7 +3820,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) clear_bit_unlock(tag, &hba->lrb_in_use); ufshcd_release_all(hba); ufshcd_vops_pm_qos_req_end(hba, cmd->request, true); - ufshcd_vops_crypto_engine_cfg_end(hba, lrbp, cmd->request); dev_err(hba->dev, "%s: failed sending command, %d\n", __func__, err); err = DID_ERROR; @@ -6452,8 +6389,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, */ ufshcd_vops_pm_qos_req_end(hba, cmd->request, false); - ufshcd_vops_crypto_engine_cfg_end(hba, - lrbp, cmd->request); } req = cmd->request; @@ -6536,8 +6471,6 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result) */ ufshcd_vops_pm_qos_req_end(hba, cmd->request, true); - ufshcd_vops_crypto_engine_cfg_end(hba, - lrbp, cmd->request); } /* Do not touch lrbp after scsi done */ cmd->scsi_done(cmd); @@ -7474,8 +7407,6 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) ufsdbg_error_inject_dispatcher(hba, ERR_INJECT_INTR, intr_status, &intr_status); - ufshcd_vops_crypto_engine_get_status(hba, &hba->ce_error); - hba->errors = UFSHCD_ERROR_MASK & intr_status; if (hba->errors || hba->ce_error) retval |= ufshcd_check_errors(hba); @@ -7952,16 +7883,6 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) goto out; } - if (!err) { - err = ufshcd_vops_crypto_engine_reset(hba); - if (err) { - dev_err(hba->dev, - "%s: failed to reset crypto engine %d\n", - __func__, err); - goto out; - } - } - out: if (err) dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index e94b04baa3ef..a51cc94ad603 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -369,30 +369,6 @@ struct ufs_hba_variant_ops { #endif }; -/** - * struct ufs_hba_crypto_variant_ops - variant specific crypto callbacks - * @crypto_req_setup: retreieve the 
necessary cryptographic arguments to setup - a requests's transfer descriptor. - * @crypto_engine_cfg_start: start configuring cryptographic engine - * according to tag - * parameter - * @crypto_engine_cfg_end: end configuring cryptographic engine - * according to tag parameter - * @crypto_engine_reset: perform reset to the cryptographic engine - * @crypto_engine_get_status: get errors status of the cryptographic engine - * @crypto_get_req_status: Check if crypto driver still holds request or not - */ -struct ufs_hba_crypto_variant_ops { - int (*crypto_req_setup)(struct ufs_hba *, struct ufshcd_lrb *lrbp, - u8 *cc_index, bool *enable, u64 *dun); - int (*crypto_engine_cfg_start)(struct ufs_hba *, unsigned int); - int (*crypto_engine_cfg_end)(struct ufs_hba *, struct ufshcd_lrb *, - struct request *); - int (*crypto_engine_reset)(struct ufs_hba *); - int (*crypto_engine_get_status)(struct ufs_hba *, u32 *); - int (*crypto_get_req_status)(struct ufs_hba *); -}; - /** * struct ufs_hba_pm_qos_variant_ops - variant specific PM QoS callbacks */ @@ -409,7 +385,6 @@ struct ufs_hba_variant { struct device *dev; const char *name; struct ufs_hba_variant_ops *vops; - struct ufs_hba_crypto_variant_ops *crypto_vops; struct ufs_hba_pm_qos_variant_ops *pm_qos_vops; }; @@ -1501,55 +1476,6 @@ static inline void ufshcd_vops_remove_debugfs(struct ufs_hba *hba) } #endif -static inline int ufshcd_vops_crypto_req_setup(struct ufs_hba *hba, - struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun) -{ - if (hba->var && hba->var->crypto_vops && - hba->var->crypto_vops->crypto_req_setup) - return hba->var->crypto_vops->crypto_req_setup(hba, lrbp, - cc_index, enable, dun); - return 0; -} - -static inline int ufshcd_vops_crypto_engine_cfg_start(struct ufs_hba *hba, - unsigned int task_tag) -{ - if (hba->var && hba->var->crypto_vops && - hba->var->crypto_vops->crypto_engine_cfg_start) - return hba->var->crypto_vops->crypto_engine_cfg_start - (hba, task_tag); - return 0; -} - -static 
inline int ufshcd_vops_crypto_engine_cfg_end(struct ufs_hba *hba, - struct ufshcd_lrb *lrbp, - struct request *req) -{ - if (hba->var && hba->var->crypto_vops && - hba->var->crypto_vops->crypto_engine_cfg_end) - return hba->var->crypto_vops->crypto_engine_cfg_end - (hba, lrbp, req); - return 0; -} - -static inline int ufshcd_vops_crypto_engine_reset(struct ufs_hba *hba) -{ - if (hba->var && hba->var->crypto_vops && - hba->var->crypto_vops->crypto_engine_reset) - return hba->var->crypto_vops->crypto_engine_reset(hba); - return 0; -} - -static inline int ufshcd_vops_crypto_engine_get_status(struct ufs_hba *hba, - u32 *status) -{ - if (hba->var && hba->var->crypto_vops && - hba->var->crypto_vops->crypto_engine_get_status) - return hba->var->crypto_vops->crypto_engine_get_status(hba, - status); - return 0; -} - static inline void ufshcd_vops_pm_qos_req_start(struct ufs_hba *hba, struct request *req) { @@ -1565,13 +1491,4 @@ static inline void ufshcd_vops_pm_qos_req_end(struct ufs_hba *hba, hba->var->pm_qos_vops->req_end(hba, req, lock); } -static inline int ufshcd_vops_crypto_engine_get_req_status(struct ufs_hba *hba) - -{ - if (hba->var && hba->var->crypto_vops && - hba->var->crypto_vops->crypto_get_req_status) - return hba->var->crypto_vops->crypto_get_req_status(hba); - return 0; -} - #endif /* End of Header */ diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile index f36e47a10455..0a78543f6cec 100644 --- a/fs/crypto/Makefile +++ b/fs/crypto/Makefile @@ -1,15 +1,11 @@ obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o -ccflags-y += -Ifs/ext4 -ccflags-y += -Ifs/f2fs - fscrypto-y := crypto.o \ fname.o \ hkdf.o \ hooks.o \ keyring.o \ keysetup.o \ - fscrypt_ice.o \ keysetup_v1.o \ policy.o diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index b0033880d8c9..699bb4d426f2 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c @@ -33,14 +33,10 @@ void fscrypt_decrypt_bio(struct bio *bio) bio_for_each_segment_all(bv, bio, i) { struct page *page = bv->bv_page; - if 
(fscrypt_using_hardware_encryption(page->mapping->host)) { - SetPageUptodate(page); - } else { - int ret = fscrypt_decrypt_pagecache_blocks(page, - bv->bv_len, bv->bv_offset); - if (ret) - SetPageError(page); - } + int ret = fscrypt_decrypt_pagecache_blocks(page, + bv->bv_len, bv->bv_offset); + if (ret) + SetPageError(page); } } EXPORT_SYMBOL(fscrypt_decrypt_bio); @@ -72,7 +68,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, } bio_set_dev(bio, inode->i_sb->s_bdev); bio->bi_iter.bi_sector = pblk << (blockbits - 9); - bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_NOENCRYPT); + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); ret = bio_add_page(bio, ciphertext_page, blocksize, 0); if (WARN_ON(ret != blocksize)) { /* should never happen! */ diff --git a/fs/crypto/fscrypt_ice.c b/fs/crypto/fscrypt_ice.c deleted file mode 100644 index c5b6bdf3b3eb..000000000000 --- a/fs/crypto/fscrypt_ice.c +++ /dev/null @@ -1,190 +0,0 @@ -/* Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include "fscrypt_ice.h" - -extern int fscrypt_get_mode_key_size(int mode); - -int fscrypt_using_hardware_encryption(const struct inode *inode) -{ - struct fscrypt_info *ci = inode->i_crypt_info; - - return S_ISREG(inode->i_mode) && ci && - (fscrypt_policy_contents_mode(&(ci->ci_policy)) == FSCRYPT_MODE_PRIVATE); -} -EXPORT_SYMBOL(fscrypt_using_hardware_encryption); - -size_t fscrypt_get_ice_encryption_key_size(const struct inode *inode) -{ - struct fscrypt_info *ci = NULL; - - if (inode) - ci = inode->i_crypt_info; - if (!ci) - return 0; - - return fscrypt_get_mode_key_size(fscrypt_policy_contents_mode(&(ci->ci_policy))) / 2; -} - -size_t fscrypt_get_ice_encryption_salt_size(const struct inode *inode) -{ - struct fscrypt_info *ci = NULL; - - if (inode) - ci = inode->i_crypt_info; - if (!ci) - return 0; - - return fscrypt_get_mode_key_size(fscrypt_policy_contents_mode(&(ci->ci_policy))) / 2; -} - -/* - * Retrieves encryption key from the inode - */ -char *fscrypt_get_ice_encryption_key(const struct inode *inode) -{ - struct fscrypt_info *ci = NULL; - - if (!inode) - return NULL; - - ci = inode->i_crypt_info; - if (!ci) - return NULL; - - return &(ci->ci_raw_key[0]); -} - -/* - * Retrieves encryption salt from the inode - */ -char *fscrypt_get_ice_encryption_salt(const struct inode *inode) -{ - struct fscrypt_info *ci = NULL; - int size = 0; - - if (!inode) - return NULL; - - ci = inode->i_crypt_info; - if (!ci) - return NULL; - - size = fscrypt_get_ice_encryption_key_size(inode); - if (!size) - return NULL; - - return &(ci->ci_raw_key[size]); -} - -/* - * returns true if the cipher mode in inode is AES XTS - */ -int fscrypt_is_aes_xts_cipher(const struct inode *inode) -{ - struct fscrypt_info *ci = inode->i_crypt_info; - - if (!ci) - return 0; - - return (fscrypt_policy_contents_mode(&(ci->ci_policy)) == FSCRYPT_MODE_PRIVATE); -} - -/* - * returns true if encryption info in both inodes is equal - */ -bool fscrypt_is_ice_encryption_info_equal(const struct 
inode *inode1, - const struct inode *inode2) -{ - char *key1 = NULL; - char *key2 = NULL; - char *salt1 = NULL; - char *salt2 = NULL; - - if (!inode1 || !inode2) - return false; - - if (inode1 == inode2) - return true; - - /* both do not belong to ice, so we don't care, they are equal - *for us - */ - if (!fscrypt_should_be_processed_by_ice(inode1) && - !fscrypt_should_be_processed_by_ice(inode2)) - return true; - - /* one belongs to ice, the other does not -> not equal */ - if (fscrypt_should_be_processed_by_ice(inode1) ^ - fscrypt_should_be_processed_by_ice(inode2)) - return false; - - key1 = fscrypt_get_ice_encryption_key(inode1); - key2 = fscrypt_get_ice_encryption_key(inode2); - salt1 = fscrypt_get_ice_encryption_salt(inode1); - salt2 = fscrypt_get_ice_encryption_salt(inode2); - - /* key and salt should not be null by this point */ - if (!key1 || !key2 || !salt1 || !salt2 || - (fscrypt_get_ice_encryption_key_size(inode1) != - fscrypt_get_ice_encryption_key_size(inode2)) || - (fscrypt_get_ice_encryption_salt_size(inode1) != - fscrypt_get_ice_encryption_salt_size(inode2))) - return false; - - if ((memcmp(key1, key2, - fscrypt_get_ice_encryption_key_size(inode1)) == 0) && - (memcmp(salt1, salt2, - fscrypt_get_ice_encryption_salt_size(inode1)) == 0)) - return true; - - return false; -} - -void fscrypt_set_ice_dun(const struct inode *inode, struct bio *bio, u64 dun) -{ - if (fscrypt_should_be_processed_by_ice(inode)) - bio->bi_iter.bi_dun = dun; -} -EXPORT_SYMBOL(fscrypt_set_ice_dun); - -void fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip) -{ -#ifdef CONFIG_DM_DEFAULT_KEY - bio->bi_crypt_skip = bi_crypt_skip; -#endif -} -EXPORT_SYMBOL(fscrypt_set_ice_skip); - -/* - * This function will be used for filesystem when deciding to merge bios. - * Basic assumption is, if inline_encryption is set, single bio has to - * guarantee consecutive LBAs as well as ino|pg->index. 
- */ -bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted, - int bi_crypt_skip) -{ - if (!bio) - return true; - -#ifdef CONFIG_DM_DEFAULT_KEY - if (bi_crypt_skip != bio->bi_crypt_skip) - return false; -#endif - /* if both of them are not encrypted, no further check is needed */ - if (!bio_dun(bio) && !bio_encrypted) - return true; - - /* ICE allows only consecutive iv_key stream. */ - return bio_end_dun(bio) == dun; -} -EXPORT_SYMBOL(fscrypt_mergeable_bio); diff --git a/fs/crypto/fscrypt_ice.h b/fs/crypto/fscrypt_ice.h deleted file mode 100644 index d448b4289317..000000000000 --- a/fs/crypto/fscrypt_ice.h +++ /dev/null @@ -1,99 +0,0 @@ -/* Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef _FSCRYPT_ICE_H -#define _FSCRYPT_ICE_H - -#include -#include "fscrypt_private.h" - -#if IS_ENABLED(CONFIG_FS_ENCRYPTION) -static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode) -{ - if (!inode->i_sb->s_cop) - return 0; - if (!IS_ENCRYPTED((struct inode *)inode)) - return 0; - - return fscrypt_using_hardware_encryption(inode); -} - -static inline int fscrypt_is_ice_capable(const struct super_block *sb) -{ - return blk_queue_inlinecrypt(bdev_get_queue(sb->s_bdev)); -} - -int fscrypt_is_aes_xts_cipher(const struct inode *inode); - -char *fscrypt_get_ice_encryption_key(const struct inode *inode); -char *fscrypt_get_ice_encryption_salt(const struct inode *inode); - -bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1, - const struct inode *inode2); - -size_t fscrypt_get_ice_encryption_key_size(const struct inode *inode); - -size_t fscrypt_get_ice_encryption_salt_size(const struct inode *inode); - -#else -static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode) -{ - return 0; -} - -static inline int fscrypt_is_ice_capable(const struct super_block *sb) -{ - return 0; -} - -static inline char *fscrypt_get_ice_encryption_key(const struct inode *inode) -{ - return NULL; -} - -static inline char *fscrypt_get_ice_encryption_salt(const struct inode *inode) -{ - return NULL; -} - -static inline size_t fscrypt_get_ice_encryption_key_size( - const struct inode *inode) -{ - return 0; -} - -static inline size_t fscrypt_get_ice_encryption_salt_size( - const struct inode *inode) -{ - return 0; -} - -static inline int fscrypt_is_xts_cipher(const struct inode *inode) -{ - return 0; -} - -static inline bool fscrypt_is_ice_encryption_info_equal( - const struct inode *inode1, - const struct inode *inode2) -{ - return 0; -} - -static inline int fscrypt_is_aes_xts_cipher(const struct inode *inode) -{ - return 0; -} - -#endif - -#endif /* _FSCRYPT_ICE_H */ diff --git a/fs/crypto/fscrypt_private.h 
b/fs/crypto/fscrypt_private.h index d6134d07ccdb..94da6bad5f19 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -13,7 +13,6 @@ #include #include -#include #define CONST_STRLEN(str) (sizeof(str) - 1) @@ -160,8 +159,10 @@ struct fscrypt_symlink_data { * inode is evicted. */ struct fscrypt_info { - /* The actual crypto transform used for encryption and decryption */ + u8 ci_data_mode; + u8 ci_filename_mode; + u8 ci_flags; struct crypto_skcipher *ci_ctfm; /* True if the key should be freed when this fscrypt_info is freed */ @@ -219,10 +220,6 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode, filenames_mode == FSCRYPT_MODE_AES_256_CTS) return true; - if (contents_mode == FSCRYPT_MODE_PRIVATE && - filenames_mode == FSCRYPT_MODE_AES_256_CTS) - return true; - if (contents_mode == FSCRYPT_MODE_ADIANTUM && filenames_mode == FSCRYPT_MODE_ADIANTUM) return true; diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index 16413b728b2b..0380ae882441 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -12,7 +12,6 @@ #include #include "fscrypt_private.h" -#include "fscrypt_ice.h" static struct fscrypt_mode available_modes[] = { [FSCRYPT_MODE_AES_256_XTS] = { @@ -52,12 +51,6 @@ static struct fscrypt_mode available_modes[] = { }, }; -static int fscrypt_data_encryption_mode(struct inode *inode) -{ - return fscrypt_should_be_processed_by_ice(inode) ? 
- FSCRYPT_MODE_PRIVATE : FSCRYPT_MODE_AES_256_XTS; -} - static struct fscrypt_mode * select_encryption_mode(const union fscrypt_policy *policy, const struct inode *inode) @@ -393,7 +386,7 @@ int fscrypt_get_encryption_info(struct inode *inode) /* Fake up a context for an unencrypted directory */ memset(&ctx, 0, sizeof(ctx)); ctx.version = FSCRYPT_CONTEXT_V1; - ctx.v1.contents_encryption_mode = fscrypt_data_encryption_mode(inode); + ctx.v1.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS; ctx.v1.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS; memset(ctx.v1.master_key_descriptor, 0x42, FSCRYPT_KEY_DESCRIPTOR_SIZE); @@ -487,11 +480,6 @@ void fscrypt_put_encryption_info(struct inode *inode) } EXPORT_SYMBOL(fscrypt_put_encryption_info); -int fscrypt_get_mode_key_size(int mode) -{ - return available_modes[mode].keysize; -} - /** * fscrypt_free_inode - free an inode's fscrypt data requiring RCU delay * diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index b217970ef392..454fb03fc30e 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -306,25 +306,10 @@ out: int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, const u8 *raw_master_key) { - int err; - if (ci->ci_policy.v1.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { + if (ci->ci_policy.v1.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) return setup_v1_file_key_direct(ci, raw_master_key); - } else if(S_ISREG(ci->ci_inode->i_mode) && - (fscrypt_policy_contents_mode(&(ci->ci_policy)) == FSCRYPT_MODE_PRIVATE)) { - /* Inline encryption: no key derivation required because IVs are - * assigned based on iv_sector. 
- */ - if (ci->ci_mode->keysize != FSCRYPT_MAX_KEY_SIZE) { - err = -EINVAL; - } else { - memcpy(ci->ci_raw_key, raw_master_key, ci->ci_mode->keysize); - err = 0; - } - } - else { + else return setup_v1_file_key_derived(ci, raw_master_key); - } - return err; } int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci) diff --git a/fs/direct-io.c b/fs/direct-io.c index b88a0a9a66dd..30bf22c989de 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -37,8 +37,6 @@ #include #include #include -#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION) -#include /* * How many user pages to map in one call to get_user_pages(). This determines @@ -454,23 +452,6 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, sdio->logical_offset_in_bio = sdio->cur_page_fs_offset; } -#ifdef CONFIG_PFK -static bool is_inode_filesystem_type(const struct inode *inode, - const char *fs_type) -{ - if (!inode || !fs_type) - return false; - - if (!inode->i_sb) - return false; - - if (!inode->i_sb->s_type) - return false; - - return (strcmp(inode->i_sb->s_type->name, fs_type) == 0); -} -#endif - /* * In the AIO read case we speculatively dirty the pages before starting IO. 
* During IO completion, any of these pages which happen to have been written @@ -493,17 +474,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) bio_set_pages_dirty(bio); dio->bio_disk = bio->bi_disk; -#ifdef CONFIG_PFK - bio->bi_dio_inode = dio->inode; -/* iv sector for security/pfe/pfk_fscrypt.c and f2fs in fs/f2fs/f2fs.h */ -#define PG_DUN_NEW(i,p) \ - (((((u64)(i)->i_ino) & 0xffffffff) << 32) | ((p) & 0xffffffff)) - - if (is_inode_filesystem_type(dio->inode, "f2fs")) - fscrypt_set_ice_dun(dio->inode, bio, PG_DUN_NEW(dio->inode, - (sdio->logical_offset_in_bio >> PAGE_SHIFT))); -#endif if (sdio->submit_io) { sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio); dio->bio_cookie = BLK_QC_T_NONE; @@ -515,18 +486,6 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) sdio->logical_offset_in_bio = 0; } -struct inode *dio_bio_get_inode(struct bio *bio) -{ - struct inode *inode = NULL; - - if (bio == NULL) - return NULL; -#ifdef CONFIG_PFK - inode = bio->bi_dio_inode; -#endif - return inode; -} - /* * Release any resources in case of a failure */ diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 6eea530054d2..ac2a73c00bfa 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -108,16 +108,10 @@ config EXT4_ENCRYPTION files config EXT4_FS_ENCRYPTION - bool "Ext4 FS Encryption" + bool default n depends on EXT4_ENCRYPTION -config EXT4_FS_ICE_ENCRYPTION - bool "Ext4 Encryption with ICE support" - default n - depends on EXT4_FS_ENCRYPTION - depends on PFK - config EXT4_DEBUG bool "EXT4 debugging support" depends on EXT4_FS diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 1578a86784a6..6c129067c07e 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -205,10 +205,7 @@ typedef struct ext4_io_end { ssize_t size; /* size of the extent */ } ext4_io_end_t; -#define EXT4_IO_ENCRYPTED 1 - struct ext4_io_submit { - unsigned int io_flags; struct writeback_control *io_wbc; struct bio *io_bio; ext4_io_end_t *io_end; diff 
--git a/fs/ext4/inode.c b/fs/ext4/inode.c index 2dd1114d5f6c..c134c701a034 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1234,12 +1234,10 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh) && (block_start < from || block_end > to)) { - decrypt = IS_ENCRYPTED(inode) && - S_ISREG(inode->i_mode) && - !fscrypt_using_hardware_encryption(inode); - ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0), - 1, &bh); + ll_rw_block(REQ_OP_READ, 0, 1, &bh); *wait_bh++ = bh; + decrypt = IS_ENCRYPTED(inode) && + S_ISREG(inode->i_mode); } } /* @@ -3744,14 +3742,9 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter) get_block_func = ext4_dio_get_block_unwritten_async; dio_flags = DIO_LOCKING; } -#if defined(CONFIG_EXT4_FS_ENCRYPTION) - WARN_ON(IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) - && !fscrypt_using_hardware_encryption(inode)); -#endif - ret = __blockdev_direct_IO(iocb, inode, - inode->i_sb->s_bdev, iter, - get_block_func, - ext4_end_io_dio, NULL, dio_flags); + ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, + get_block_func, ext4_end_io_dio, NULL, + dio_flags); if (ret > 0 && !overwrite && ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN)) { @@ -3863,9 +3856,8 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter) ssize_t ret; int rw = iov_iter_rw(iter); -#if defined(CONFIG_FS_ENCRYPTION) - if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) - && !fscrypt_using_hardware_encryption(inode)) +#ifdef CONFIG_FS_ENCRYPTION + if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) return 0; #endif if (fsverity_active(inode)) @@ -4028,7 +4020,6 @@ static int __ext4_block_zero_page_range(handle_t *handle, struct inode *inode = mapping->host; struct buffer_head *bh; struct page *page; - bool decrypt; int err = 0; page = find_or_create_page(mapping, from >> PAGE_SHIFT, @@ -4071,15 +4062,13 @@ static int 
__ext4_block_zero_page_range(handle_t *handle, if (!buffer_uptodate(bh)) { err = -EIO; - decrypt = S_ISREG(inode->i_mode) && - IS_ENCRYPTED(inode) && - !fscrypt_using_hardware_encryption(inode); - ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0), 1, &bh); + ll_rw_block(REQ_OP_READ, 0, 1, &bh); wait_on_buffer(bh); /* Uhhuh. Read error. Complain and punt. */ if (!buffer_uptodate(bh)) goto unlock; - if (decrypt) { + if (S_ISREG(inode->i_mode) && + IS_ENCRYPTED(inode)) { /* We expect the key to be set. */ BUG_ON(!fscrypt_has_encryption_key(inode)); BUG_ON(blocksize != PAGE_SIZE); diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 4935c146bbc7..ced39d449fd0 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -603,13 +603,10 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk, return -EOPNOTSUPP; } - if (!fscrypt_using_hardware_encryption(orig_inode) || - !fscrypt_using_hardware_encryption(donor_inode)) { - if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) { - ext4_msg(orig_inode->i_sb, KERN_ERR, - "Online defrag not supported for encrypted files"); - return -EOPNOTSUPP; - } + if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) { + ext4_msg(orig_inode->i_sb, KERN_ERR, + "Online defrag not supported for encrypted files"); + return -EOPNOTSUPP; } /* Protect orig and donor inodes against a truncate */ diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 5c32a6d30c60..a6ec98d494b8 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -344,8 +344,6 @@ void ext4_io_submit(struct ext4_io_submit *io) int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ? 
REQ_SYNC : 0; io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint; - if (io->io_flags & EXT4_IO_ENCRYPTED) - io_op_flags |= REQ_NOENCRYPT; bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags); submit_bio(io->io_bio); } @@ -355,7 +353,6 @@ void ext4_io_submit(struct ext4_io_submit *io) void ext4_io_submit_init(struct ext4_io_submit *io, struct writeback_control *wbc) { - io->io_flags = 0; io->io_wbc = wbc; io->io_bio = NULL; io->io_end = NULL; @@ -483,24 +480,22 @@ int ext4_bio_write_page(struct ext4_io_submit *io, if (io->io_bio) gfp_flags = GFP_NOWAIT | __GFP_NOWARN; retry_encrypt: - if (!fscrypt_using_hardware_encryption(inode)) { - bounce_page = fscrypt_encrypt_pagecache_blocks(page, - PAGE_SIZE, 0, gfp_flags); - if (IS_ERR(bounce_page)) { - ret = PTR_ERR(bounce_page); - if (ret == -ENOMEM && (io->io_bio || - wbc->sync_mode == WB_SYNC_ALL)) { - gfp_flags = GFP_NOFS; - if (io->io_bio) - ext4_io_submit(io); - else - gfp_flags |= __GFP_NOFAIL; - congestion_wait(BLK_RW_ASYNC, HZ/50); - goto retry_encrypt; - } - bounce_page = NULL; - goto out; + bounce_page = fscrypt_encrypt_pagecache_blocks(page, + PAGE_SIZE, 0, gfp_flags); + if (IS_ERR(bounce_page)) { + ret = PTR_ERR(bounce_page); + if (ret == -ENOMEM && (io->io_bio || + wbc->sync_mode == WB_SYNC_ALL)) { + gfp_flags = GFP_NOFS; + if (io->io_bio) + ext4_io_submit(io); + else + gfp_flags |= __GFP_NOFAIL; + congestion_wait(BLK_RW_ASYNC, HZ/50); + goto retry_encrypt; } + bounce_page = NULL; + goto out; } } @@ -508,8 +503,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io, do { if (!buffer_async_write(bh)) continue; - if (bounce_page) - io->io_flags |= EXT4_IO_ENCRYPTED; ret = io_submit_add_bh(io, inode, bounce_page ?: page, bh); if (ret) { /* diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index 49806a7bcecf..aefcd712df85 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -412,8 +412,7 @@ int ext4_mpage_readpages(struct address_space *mapping, bio->bi_iter.bi_sector = blocks[0] << (blkbits 
- 9); bio->bi_end_io = mpage_end_io; bio->bi_private = ctx; - bio_set_op_attrs(bio, REQ_OP_READ, - ctx ? REQ_NOENCRYPT : 0); + bio_set_op_attrs(bio, REQ_OP_READ, 0); } length = first_hole << blkbits; diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index ca538c1f5a26..2d4c259624b3 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -700,7 +700,6 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) struct bio *bio; struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page; - struct inode *inode = fio->page->mapping->host; if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, fio->is_por ? META_POR : (__is_meta_io(fio) ? @@ -713,15 +712,10 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) /* Allocate a new bio */ bio = __bio_alloc(fio, 1); - if (f2fs_may_encrypt_bio(inode, fio)) - fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, fio->page)); - fscrypt_set_ice_skip(bio, fio->encrypted_page ? 1 : 0); - if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { bio_put(bio); return -EFAULT; } - fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0; if (fio->io_wbc && !is_read_io(fio->op)) wbc_account_io(fio->io_wbc, page, PAGE_SIZE); @@ -902,9 +896,6 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio) struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page; struct inode *inode; - bool bio_encrypted; - int bi_crypt_skip; - u64 dun; if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC)) @@ -914,26 +905,14 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio) f2fs_trace_ios(fio, 0); inode = fio->page->mapping->host; - dun = PG_DUN(inode, fio->page); - bi_crypt_skip = fio->encrypted_page ? 1 : 0; - bio_encrypted = f2fs_may_encrypt_bio(inode, fio); - fio->op_flags |= fio->encrypted_page ? 
REQ_NOENCRYPT : 0; if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block, fio->new_blkaddr)) f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL); - /* ICE support */ - if (bio && !fscrypt_mergeable_bio(bio, dun, - bio_encrypted, bi_crypt_skip)) { - f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL); - } alloc_new: if (!bio) { bio = __bio_alloc(fio, BIO_MAX_PAGES); bio_set_op_attrs(bio, fio->op, fio->op_flags); - if (bio_encrypted) - fscrypt_set_ice_dun(inode, bio, dun); - fscrypt_set_ice_skip(bio, bi_crypt_skip); add_bio_entry(fio->sbi, bio, page, fio->temp); } else { if (add_ipu_page(fio->sbi, &bio, page)) @@ -957,10 +936,6 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio) enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp; struct page *bio_page; - struct inode *inode; - bool bio_encrypted; - int bi_crypt_skip; - u64 dun; f2fs_bug_on(sbi, is_read_io(fio->op)); @@ -987,12 +962,6 @@ next: else bio_page = fio->page; - inode = fio->page->mapping->host; - dun = PG_DUN(inode, fio->page); - bi_crypt_skip = fio->encrypted_page ? 1 : 0; - bio_encrypted = f2fs_may_encrypt_bio(inode, fio); - fio->op_flags |= fio->encrypted_page ? 
REQ_NOENCRYPT : 0; - /* set submitted = true as a return value */ fio->submitted = true; @@ -1001,11 +970,6 @@ next: if (io->bio && !io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio, fio->new_blkaddr)) __submit_merged_bio(io); - - /* ICE support */ - if (!fscrypt_mergeable_bio(io->bio, dun, bio_encrypted, bi_crypt_skip)) - __submit_merged_bio(io); - alloc_new: if (io->bio == NULL) { if (F2FS_IO_ALIGNED(sbi) && @@ -1016,9 +980,6 @@ alloc_new: goto skip; } io->bio = __bio_alloc(fio, BIO_MAX_PAGES); - if (bio_encrypted) - fscrypt_set_ice_dun(inode, io->bio, dun); - fscrypt_set_ice_skip(io->bio, bi_crypt_skip); io->fio = *fio; } @@ -1065,13 +1026,9 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, return ERR_PTR(-ENOMEM); f2fs_target_device(sbi, blkaddr, bio); bio->bi_end_io = f2fs_read_end_io; - bio_set_op_attrs(bio, REQ_OP_READ, - (IS_ENCRYPTED(inode) ? - REQ_NOENCRYPT : - op_flag)); + bio_set_op_attrs(bio, REQ_OP_READ, op_flag); - if (f2fs_encrypted_file(inode) && - !fscrypt_using_hardware_encryption(inode)) + if (f2fs_encrypted_file(inode)) post_read_steps |= 1 << STEP_DECRYPT; if (f2fs_compressed_file(inode)) post_read_steps |= 1 << STEP_DECOMPRESS; @@ -1108,9 +1065,6 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page, if (IS_ERR(bio)) return PTR_ERR(bio); - if (f2fs_may_encrypt_bio(inode, NULL)) - fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, page)); - /* wait for GCed page writeback via META_MAPPING */ f2fs_wait_on_block_writeback(inode, blkaddr); @@ -2037,8 +1991,6 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page, sector_t last_block_in_file; sector_t block_nr; int ret = 0; - bool bio_encrypted; - u64 dun; block_in_file = (sector_t)page_index(page); last_block = block_in_file + nr_pages; @@ -2109,13 +2061,6 @@ submit_and_realloc: bio = NULL; } - dun = PG_DUN(inode, page); - bio_encrypted = f2fs_may_encrypt_bio(inode, NULL); - if (!fscrypt_mergeable_bio(bio, dun, 
bio_encrypted, 0)) { - __submit_bio(F2FS_I_SB(inode), bio, DATA); - bio = NULL; - } - if (bio == NULL) { bio = f2fs_grab_read_bio(inode, block_nr, nr_pages, is_readahead ? REQ_RAHEAD : 0, page->index, @@ -2125,10 +2070,7 @@ submit_and_realloc: bio = NULL; goto out; } - if (bio_encrypted) - fscrypt_set_ice_dun(inode, bio, dun); } - /* * If the page is under writeback, we need to wait for * its completion to see the correct decrypted data. @@ -2465,9 +2407,6 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio) f2fs_wait_on_block_writeback(inode, fio->old_blkaddr); retry_encrypt: - if (fscrypt_using_hardware_encryption(inode)) - return 0; - fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0, gfp_flags); if (IS_ERR(fio->encrypted_page)) { diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index a12a09565dc6..1e2c9a59393a 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -4035,8 +4035,7 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int rw = iov_iter_rw(iter); - if (f2fs_encrypted_file(inode) && - !fscrypt_using_hardware_encryption(inode)) + if (f2fs_encrypted_file(inode)) return true; if (f2fs_is_multi_device(sbi)) return true; @@ -4061,16 +4060,6 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, return false; } -static inline bool f2fs_may_encrypt_bio(struct inode *inode, - struct f2fs_io_info *fio) -{ - if (fio && (fio->type != DATA || fio->encrypted_page)) - return false; - - return (f2fs_encrypted_file(inode) && - fscrypt_using_hardware_encryption(inode)); -} - #ifdef CONFIG_F2FS_FAULT_INJECTION extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate, unsigned int type); diff --git a/fs/namei.c b/fs/namei.c index 6c933d1cc941..1c626f56d21d 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3039,11 +3039,6 @@ int vfs_create2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, if (error) return error; error = dir->i_op->create(dir, dentry, 
mode, want_excl); - if (error) - return error; - error = security_inode_post_create(dir, dentry, mode); - if (error) - return error; if (!error) fsnotify_create(dir, dentry); return error; @@ -3876,11 +3871,6 @@ int vfs_mknod2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, u return error; error = dir->i_op->mknod(dir, dentry, mode, dev); - if (error) - return error; - error = security_inode_post_create(dir, dentry, mode); - if (error) - return error; if (!error) fsnotify_create(dir, dentry); return error; diff --git a/include/linux/bio.h b/include/linux/bio.h index bcdbd29052e0..e260f000b9ac 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -69,9 +69,6 @@ ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len) #define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9) #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio))) -#define bio_dun(bio) ((bio)->bi_iter.bi_dun) -#define bio_duns(bio) (bio_sectors(bio) >> 3) /* 4KB unit */ -#define bio_end_dun(bio) (bio_dun(bio) + bio_duns(bio)) /* * Return the data direction, READ or WRITE. 
@@ -181,11 +178,6 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, { iter->bi_sector += bytes >> 9; -#ifdef CONFIG_PFK - if (iter->bi_dun) - iter->bi_dun += bytes >> 12; -#endif - if (bio_no_advance_iter(bio)) { iter->bi_size -= bytes; iter->bi_done += bytes; diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index d24227285a44..415811f0b24a 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -100,13 +100,6 @@ struct bio { struct bio_integrity_payload *bi_integrity; /* data integrity */ #endif }; -#ifdef CONFIG_PFK - /* Encryption key to use (NULL if none) */ - const struct blk_encryption_key *bi_crypt_key; -#endif -#ifdef CONFIG_DM_DEFAULT_KEY - int bi_crypt_skip; -#endif unsigned short bi_vcnt; /* how many bio_vec's */ @@ -121,9 +114,7 @@ struct bio { struct bio_vec *bi_io_vec; /* the actual vec list */ struct bio_set *bi_pool; -#ifdef CONFIG_PFK - struct inode *bi_dio_inode; -#endif + /* * We can inline a number of vecs at the end of the bio, to avoid * double allocations for a small number of bio_vecs. 
This member @@ -248,13 +239,6 @@ enum req_flag_bits { __REQ_URGENT, /* urgent request */ __REQ_NOWAIT, /* Don't wait if request will block */ - - /* Android specific flags */ - __REQ_NOENCRYPT, /* - * ok to not encrypt (already encrypted at fs - * level) - */ - __REQ_NR_BITS, /* stops here */ }; @@ -272,7 +256,6 @@ enum req_flag_bits { #define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) #define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) -#define REQ_NOENCRYPT (1ULL << __REQ_NOENCRYPT) #define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) #define REQ_NOWAIT (1ULL << __REQ_NOWAIT) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1673d238b60f..835a3cf3b47b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -154,7 +154,6 @@ struct request { unsigned int __data_len; /* total data len */ int tag; sector_t __sector; /* sector cursor */ - u64 __dun; /* dun for UFS */ struct bio *bio; struct bio *biotail; @@ -653,7 +652,6 @@ struct request_queue { #define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */ #define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */ #define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */ -#define QUEUE_FLAG_INLINECRYPT 29 /* inline encryption support */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ @@ -753,8 +751,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) #define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) #define blk_queue_scsi_passthrough(q) \ test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags) -#define blk_queue_inlinecrypt(q) \ - test_bit(QUEUE_FLAG_INLINECRYPT, &(q)->queue_flags) #define blk_noretry_request(rq) \ ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ @@ -1035,11 +1031,6 @@ static inline sector_t blk_rq_pos(const struct request *rq) return rq->__sector; } -static inline sector_t blk_rq_dun(const struct request *rq) 
-{ - return rq->__dun; -} - static inline unsigned int blk_rq_bytes(const struct request *rq) { return rq->__data_len; diff --git a/include/linux/bvec.h b/include/linux/bvec.h index 711236dba71d..ec8a4d7af6bd 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -44,9 +44,6 @@ struct bvec_iter { unsigned int bi_bvec_done; /* number of bytes completed in current bvec */ -#ifdef CONFIG_PFK - u64 bi_dun; /* DUN setting for bio */ -#endif }; /* diff --git a/include/linux/fs.h b/include/linux/fs.h index 236c4d59b9ae..1d8a53a6211a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3071,8 +3071,6 @@ static inline void inode_dio_end(struct inode *inode) wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); } -struct inode *dio_bio_get_inode(struct bio *bio); - extern void inode_set_flags(struct inode *inode, unsigned int flags, unsigned int mask); diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index e65ce4237f52..5977a6ced502 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -20,10 +20,6 @@ #define FS_CRYPTO_BLOCK_SIZE 16 -/* iv sector for security/pfe/pfk_fscrypt.c and f2fs */ -#define PG_DUN(i, p) \ - (((((u64)(i)->i_ino) & 0xffffffff) << 32) | ((p)->index & 0xffffffff)) - struct fscrypt_info; struct fscrypt_str { @@ -745,33 +741,6 @@ static inline int fscrypt_encrypt_symlink(struct inode *inode, return 0; } -/* fscrypt_ice.c */ -#ifdef CONFIG_PFK -extern int fscrypt_using_hardware_encryption(const struct inode *inode); -extern void fscrypt_set_ice_dun(const struct inode *inode, - struct bio *bio, u64 dun); -extern void fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip); -extern bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted, - int bi_crypt_skip); -#else -static inline int fscrypt_using_hardware_encryption(const struct inode *inode) -{ - return 0; -} - -static inline void fscrypt_set_ice_dun(const struct inode *inode, - struct bio *bio, u64 dun){} - -static inline void 
fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip) -{} - -static inline bool fscrypt_mergeable_bio(struct bio *bio, - u64 dun, bool bio_encrypted, int bi_crypt_skip) -{ - return true; -} -#endif - /* If *pagep is a bounce page, free it and set *pagep to the pagecache page */ static inline void fscrypt_finalize_bounce_page(struct page **pagep) { diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index f40789bd5c15..7e9f59aeadb6 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1475,8 +1475,6 @@ union security_list_options { size_t *len); int (*inode_create)(struct inode *dir, struct dentry *dentry, umode_t mode); - int (*inode_post_create)(struct inode *dir, struct dentry *dentry, - umode_t mode); int (*inode_link)(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry); int (*inode_unlink)(struct inode *dir, struct dentry *dentry); @@ -1790,7 +1788,6 @@ struct security_hook_heads { struct list_head inode_free_security; struct list_head inode_init_security; struct list_head inode_create; - struct list_head inode_post_create; struct list_head inode_link; struct list_head inode_unlink; struct list_head inode_symlink; diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index ee92081416f3..aded5a8a733d 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -12,6 +12,7 @@ #include #include #include +#include struct mmc_data; struct mmc_request; @@ -169,8 +170,8 @@ struct mmc_request { void (*recovery_notifier)(struct mmc_request *); struct mmc_host *host; struct mmc_cmdq_req *cmdq_req; - struct request *req; + struct request *req; /* Allow other commands during this ongoing data transfer or busy wait */ bool cap_cmd_during_tfr; ktime_t io_start; diff --git a/include/linux/pfk.h b/include/linux/pfk.h deleted file mode 100644 index bba8fc2681b8..000000000000 --- a/include/linux/pfk.h +++ /dev/null @@ -1,79 +0,0 @@ -/* Copyright (c) 2015-2018, The Linux Foundation. 
All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef PFK_H_ -#define PFK_H_ - -#include - -struct ice_crypto_setting; - -#ifdef CONFIG_PFK - -/* - * Default key for inline encryption. - * - * For now only AES-256-XTS is supported, so this is a fixed length. But if - * ever needed, this should be made variable-length with a 'mode' and 'size'. - * (Remember to update pfk_allow_merge_bio() when doing so!) - */ -#define BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS 64 - -struct blk_encryption_key { - u8 raw[BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS]; -}; - -int pfk_load_key_start(const struct bio *bio, - struct ice_crypto_setting *ice_setting, - bool *is_pfe, bool async); -int pfk_load_key_end(const struct bio *bio, bool *is_pfe); -int pfk_remove_key(const unsigned char *key, size_t key_size); -int pfk_fbe_clear_key(const unsigned char *key, size_t key_size, - const unsigned char *salt, size_t salt_size); -bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2); -void pfk_clear_on_reset(void); - -#else -static inline int pfk_load_key_start(const struct bio *bio, - struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async) -{ - return -ENODEV; -} - -static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe) -{ - return -ENODEV; -} - -static inline int pfk_remove_key(const unsigned char *key, size_t key_size) -{ - return -ENODEV; -} - -static inline bool pfk_allow_merge_bio(const struct bio *bio1, - const struct bio *bio2) -{ - return true; -} - -static inline int pfk_fbe_clear_key(const unsigned char *key, 
size_t key_size, - const unsigned char *salt, size_t salt_size) -{ - return -ENODEV; -} - -static inline void pfk_clear_on_reset(void) -{} - -#endif /* CONFIG_PFK */ - -#endif /* PFK_H */ diff --git a/include/linux/security.h b/include/linux/security.h index ecd3b0dd9c12..666c75c2269c 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -31,7 +31,6 @@ #include #include #include -#include struct linux_binprm; struct cred; @@ -271,8 +270,6 @@ int security_old_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, const char **name, void **value, size_t *len); int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode); -int security_inode_post_create(struct inode *dir, struct dentry *dentry, - umode_t mode); int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry); int security_inode_unlink(struct inode *dir, struct dentry *dentry); @@ -667,13 +664,6 @@ static inline int security_inode_create(struct inode *dir, return 0; } -static inline int security_inode_post_create(struct inode *dir, - struct dentry *dentry, - umode_t mode) -{ - return 0; -} - static inline int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 0472647a9cf7..203fec0bd88c 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -666,9 +666,6 @@ struct Scsi_Host { /* The controller does not support WRITE SAME */ unsigned no_write_same:1; - /* Inline encryption support? 
*/ - unsigned inlinecrypt_support:1; - unsigned use_blk_mq:1; unsigned use_cmd_list:1; diff --git a/security/Kconfig b/security/Kconfig index daaf13e06d83..8b6c5e9528e0 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -6,10 +6,6 @@ menu "Security options" source security/keys/Kconfig -if ARCH_QCOM -source security/pfe/Kconfig -endif - config SECURITY_DMESG_RESTRICT bool "Restrict unprivileged access to the kernel syslog" default n diff --git a/security/Makefile b/security/Makefile index 47bffaa3f5f8..4d2d3782ddef 100644 --- a/security/Makefile +++ b/security/Makefile @@ -10,7 +10,6 @@ subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor subdir-$(CONFIG_SECURITY_YAMA) += yama subdir-$(CONFIG_SECURITY_LOADPIN) += loadpin -subdir-$(CONFIG_ARCH_QCOM) += pfe # always enable default capabilities obj-y += commoncap.o @@ -27,7 +26,6 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/ obj-$(CONFIG_SECURITY_YAMA) += yama/ obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/ obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o -obj-$(CONFIG_ARCH_QCOM) += pfe/ # Object integrity file lists subdir-$(CONFIG_INTEGRITY) += integrity diff --git a/security/pfe/Kconfig b/security/pfe/Kconfig deleted file mode 100644 index 923fe1cd9987..000000000000 --- a/security/pfe/Kconfig +++ /dev/null @@ -1,50 +0,0 @@ -menu "Qualcomm Technologies, Inc Per File Encryption security device drivers" - depends on ARCH_QCOM - -config PFT - bool "Per-File-Tagger driver" - depends on SECURITY - default n - help - This driver is used for tagging enterprise files. - It is part of the Per-File-Encryption (PFE) feature. - The driver is tagging files when created by - registered application. - Tagged files are encrypted using the dm-req-crypt driver. - -config PFK - bool "Per-File-Key driver" - depends on SECURITY - depends on SECURITY_SELINUX - default n - help - This driver is used for storing eCryptfs information - in file node. 
- This is part of eCryptfs hardware enhanced solution - provided by Qualcomm Technologies, Inc. - Information is used when file is encrypted later using - ICE or dm crypto engine - -config PFK_WRAPPED_KEY_SUPPORTED - bool "Per-File-Key driver with wrapped key support" - depends on SECURITY - depends on SECURITY_SELINUX - depends on QSEECOM - depends on PFK - default n - help - Adds wrapped key support in PFK driver. Instead of setting - the key directly in ICE, it unwraps the key and sets the key - in ICE. - -config PFK_VIRTUALIZED - bool "Per-File-Key driver virtualized version" - depends on SECURITY - depends on SECURITY_SELINUX - depends on QSEECOM - depends on PFK - depends on MSM_HAB - help - Makes the driver to use the hypervisor back end for ICE HW - operation virtualization instead of calling directly to TZ. -endmenu diff --git a/security/pfe/Makefile b/security/pfe/Makefile deleted file mode 100644 index c95f02a46bba..000000000000 --- a/security/pfe/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -# -# Makefile for the MSM specific security device drivers. -# - -ccflags-y += -Isecurity/selinux -Isecurity/selinux/include -ccflags-y += -Ifs/crypto -ccflags-y += -Idrivers/misc - -obj-$(CONFIG_PFT) += pft.o -obj-$(CONFIG_PFK) += pfk.o pfk_kc.o pfk_ext4.o pfk_f2fs.o -ifdef CONFIG_PFK_VIRTUALIZED -obj-$(CONFIG_PFK_VIRTUALIZED) += pfk_ice_virt.o -else -obj-$(CONFIG_PFK) += pfk_ice.o -endif diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c deleted file mode 100644 index ae681487248c..000000000000 --- a/security/pfe/pfk.c +++ /dev/null @@ -1,570 +0,0 @@ -/* - * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/* - * Per-File-Key (PFK). - * - * This driver is responsible for overall management of various - * Per File Encryption variants that work on top of or as part of different - * file systems. - * - * The driver has the following purpose : - * 1) Define priorities between PFE's if more than one is enabled - * 2) Extract key information from inode - * 3) Load and manage various keys in ICE HW engine - * 4) It should be invoked from various layers in FS/BLOCK/STORAGE DRIVER - * that need to take decision on HW encryption management of the data - * Some examples: - * BLOCK LAYER: when it takes decision on whether 2 chunks can be united - * to one encryption / decryption request sent to the HW - * - * UFS DRIVER: when it need to configure ICE HW with a particular key slot - * to be used for encryption / decryption - * - * PFE variants can differ on particular way of storing the cryptographic info - * inside inode, actions to be taken upon file operations, etc., but the common - * properties are described above - * - */ - - -/* Uncomment the line below to enable debug messages */ -/* #define DEBUG 1 */ -#define pr_fmt(fmt) "pfk [%s]: " fmt, __func__ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "pfk_kc.h" -#include "objsec.h" -#include "pfk_ice.h" -#include "pfk_ext4.h" -#include "pfk_f2fs.h" -#include "pfk_internal.h" - -static bool pfk_ready; - - -/* might be replaced by a table when more than one cipher is supported */ -#define PFK_SUPPORTED_KEY_SIZE 32 -#define PFK_SUPPORTED_SALT_SIZE 32 - -/* Various PFE types and function tables to support each one of them */ -enum pfe_type {EXT4_CRYPT_PFE, F2FS_CRYPT_PFE, INVALID_PFE}; - -typedef int 
(*pfk_parse_inode_type)(const struct bio *bio, - const struct inode *inode, - struct pfk_key_info *key_info, - enum ice_cryto_algo_mode *algo, - bool *is_pfe); - -typedef bool (*pfk_allow_merge_bio_type)(const struct bio *bio1, - const struct bio *bio2, const struct inode *inode1, - const struct inode *inode2); - -static const pfk_parse_inode_type pfk_parse_inode_ftable[] = { - /* EXT4_CRYPT_PFE */ &pfk_ext4_parse_inode, - /* F2FS_CRYPT_PFE */ &pfk_f2fs_parse_inode, -}; - -static const pfk_allow_merge_bio_type pfk_allow_merge_bio_ftable[] = { - /* EXT4_CRYPT_PFE */ &pfk_ext4_allow_merge_bio, - /* F2FS_CRYPT_PFE */ &pfk_f2fs_allow_merge_bio, -}; - -static void __exit pfk_exit(void) -{ - pfk_ready = false; - pfk_ext4_deinit(); - pfk_f2fs_deinit(); - pfk_kc_deinit(); -} - -static int __init pfk_init(void) -{ - - int ret = 0; - - ret = pfk_ext4_init(); - if (ret != 0) - goto fail; - - ret = pfk_f2fs_init(); - if (ret != 0) - goto fail; - - ret = pfk_kc_init(true); - if (ret != 0 && ret != -EAGAIN) { - pr_err("could init pfk key cache, error %d\n", ret); - pfk_ext4_deinit(); - pfk_f2fs_deinit(); - goto fail; - } - - pfk_ready = true; - pr_info("Driver initialized successfully\n"); - - return 0; - -fail: - pr_err("Failed to init driver\n"); - return -ENODEV; -} - -/* - * If more than one type is supported simultaneously, this function will also - * set the priority between them - */ -static enum pfe_type pfk_get_pfe_type(const struct inode *inode) -{ - if (!inode) - return INVALID_PFE; - - if (pfk_is_ext4_type(inode)) - return EXT4_CRYPT_PFE; - - if (pfk_is_f2fs_type(inode)) - return F2FS_CRYPT_PFE; - - return INVALID_PFE; -} - -/** - * inode_to_filename() - get the filename from inode pointer. - * @inode: inode pointer - * - * it is used for debug prints. - * - * Return: filename string or "unknown". 
- */ -char *inode_to_filename(const struct inode *inode) -{ - struct dentry *dentry = NULL; - char *filename = NULL; - - if (!inode) - return "NULL"; - - if (hlist_empty(&inode->i_dentry)) - return "unknown"; - - dentry = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); - filename = dentry->d_iname; - - return filename; -} - -/** - * pfk_is_ready() - driver is initialized and ready. - * - * Return: true if the driver is ready. - */ -static inline bool pfk_is_ready(void) -{ - return pfk_ready; -} - -/** - * pfk_bio_get_inode() - get the inode from a bio. - * @bio: Pointer to BIO structure. - * - * Walk the bio struct links to get the inode. - * Please note, that in general bio may consist of several pages from - * several files, but in our case we always assume that all pages come - * from the same file, since our logic ensures it. That is why we only - * walk through the first page to look for inode. - * - * Return: pointer to the inode struct if successful, or NULL otherwise. 
- * - */ -static struct inode *pfk_bio_get_inode(const struct bio *bio) -{ - struct address_space *mapping = NULL; - - if (!bio) - return NULL; - if (!bio_has_data((struct bio *)bio)) - return NULL; - if (!bio->bi_io_vec) - return NULL; - if (!bio->bi_io_vec->bv_page) - return NULL; - - if (PageAnon(bio->bi_io_vec->bv_page)) { - struct inode *inode; - - /* Using direct-io (O_DIRECT) without page cache */ - inode = dio_bio_get_inode((struct bio *)bio); - pr_debug("inode on direct-io, inode = 0x%pK.\n", inode); - - return inode; - } - - mapping = page_mapping(bio->bi_io_vec->bv_page); - if (!mapping) - return NULL; - - return mapping->host; -} - -/** - * pfk_key_size_to_key_type() - translate key size to key size enum - * @key_size: key size in bytes - * @key_size_type: pointer to store the output enum (can be null) - * - * return 0 in case of success, error otherwise (i.e not supported key size) - */ -int pfk_key_size_to_key_type(size_t key_size, - enum ice_crpto_key_size *key_size_type) -{ - /* - * currently only 32 bit key size is supported - * in the future, table with supported key sizes might - * be introduced - */ - - if (key_size != PFK_SUPPORTED_KEY_SIZE) { - pr_err("not supported key size %zu\n", key_size); - return -EINVAL; - } - - if (key_size_type) - *key_size_type = ICE_CRYPTO_KEY_SIZE_256; - - return 0; -} - -/* - * Retrieves filesystem type from inode's superblock - */ -bool pfe_is_inode_filesystem_type(const struct inode *inode, - const char *fs_type) -{ - if (!inode || !fs_type) - return false; - - if (!inode->i_sb) - return false; - - if (!inode->i_sb->s_type) - return false; - - return (strcmp(inode->i_sb->s_type->name, fs_type) == 0); -} - -/** - * pfk_get_key_for_bio() - get the encryption key to be used for a bio - * - * @bio: pointer to the BIO - * @key_info: pointer to the key information which will be filled in - * @algo_mode: optional pointer to the algorithm identifier which will be set - * @is_pfe: will be set to false if the BIO should 
be left unencrypted - * - * Return: 0 if a key is being used, otherwise a -errno value - */ -static int pfk_get_key_for_bio(const struct bio *bio, - struct pfk_key_info *key_info, - enum ice_cryto_algo_mode *algo_mode, - bool *is_pfe, unsigned int *data_unit) -{ - const struct inode *inode; - enum pfe_type which_pfe; - const struct blk_encryption_key *key = NULL; - char *s_type = NULL; - - inode = pfk_bio_get_inode(bio); - which_pfe = pfk_get_pfe_type(inode); - s_type = (char *)pfk_kc_get_storage_type(); - - /* - * Update dun based on storage type. - * 512 byte dun - For ext4 emmc - * 4K dun - For ext4 ufs, f2fs ufs and f2fs emmc - */ - - if (data_unit && bio) { - if (!bio_dun(bio) && !memcmp(s_type, "sdcc", strlen("sdcc"))) - *data_unit = 1 << ICE_CRYPTO_DATA_UNIT_512_B; - else - *data_unit = 1 << ICE_CRYPTO_DATA_UNIT_4_KB; - } - - if (which_pfe != INVALID_PFE) { - /* Encrypted file; override ->bi_crypt_key */ - pr_debug("parsing inode %lu with PFE type %d\n", - inode->i_ino, which_pfe); - return (*(pfk_parse_inode_ftable[which_pfe])) - (bio, inode, key_info, algo_mode, is_pfe); - } - - /* - * bio is not for an encrypted file. Use ->bi_crypt_key if it was set. - * Otherwise, don't encrypt/decrypt the bio. - */ -#ifdef CONFIG_DM_DEFAULT_KEY - key = bio->bi_crypt_key; -#endif - if (!key) { - *is_pfe = false; - return -EINVAL; - } - - /* Note: the "salt" is really just the second half of the XTS key. 
*/ - BUILD_BUG_ON(sizeof(key->raw) != - PFK_SUPPORTED_KEY_SIZE + PFK_SUPPORTED_SALT_SIZE); - key_info->key = &key->raw[0]; - key_info->key_size = PFK_SUPPORTED_KEY_SIZE; - key_info->salt = &key->raw[PFK_SUPPORTED_KEY_SIZE]; - key_info->salt_size = PFK_SUPPORTED_SALT_SIZE; - if (algo_mode) - *algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS; - return 0; -} - - -/** - * pfk_load_key_start() - loads PFE encryption key to the ICE - * Can also be invoked from non - * PFE context, in this case it - * is not relevant and is_pfe - * flag is set to false - * - * @bio: Pointer to the BIO structure - * @ice_setting: Pointer to ice setting structure that will be filled with - * ice configuration values, including the index to which the key was loaded - * @is_pfe: will be false if inode is not relevant to PFE, in such a case - * it should be treated as non PFE by the block layer - * - * Returns the index where the key is stored in encryption hw and additional - * information that will be used later for configuration of the encryption hw. 
- * - * Must be followed by pfk_load_key_end when key is no longer used by ice - * - */ -int pfk_load_key_start(const struct bio *bio, - struct ice_crypto_setting *ice_setting, bool *is_pfe, - bool async) -{ - int ret = 0; - struct pfk_key_info key_info = {NULL, NULL, 0, 0}; - enum ice_cryto_algo_mode algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS; - enum ice_crpto_key_size key_size_type = 0; - unsigned int data_unit = 1 << ICE_CRYPTO_DATA_UNIT_512_B; - u32 key_index = 0; - - if (!is_pfe) { - pr_err("is_pfe is NULL\n"); - return -EINVAL; - } - - /* - * only a few errors below can indicate that - * this function was not invoked within PFE context, - * otherwise we will consider it PFE - */ - *is_pfe = true; - - if (!pfk_is_ready()) - return -ENODEV; - - if (!ice_setting) { - pr_err("ice setting is NULL\n"); - return -EINVAL; - } - - ret = pfk_get_key_for_bio(bio, &key_info, &algo_mode, is_pfe, - &data_unit); - - if (ret != 0) - return ret; - - ret = pfk_key_size_to_key_type(key_info.key_size, &key_size_type); - if (ret != 0) - return ret; - - ret = pfk_kc_load_key_start(key_info.key, key_info.key_size, - key_info.salt, key_info.salt_size, &key_index, async, - data_unit); - if (ret) { - if (ret != -EBUSY && ret != -EAGAIN) - pr_err("start: could not load key into pfk key cache, error %d\n", - ret); - - return ret; - } - - ice_setting->key_size = key_size_type; - ice_setting->algo_mode = algo_mode; - /* hardcoded for now */ - ice_setting->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY; - ice_setting->key_index = key_index; - - pr_debug("loaded key for file %s key_index %d\n", - inode_to_filename(pfk_bio_get_inode(bio)), key_index); - - return 0; -} - -/** - * pfk_load_key_end() - marks the PFE key as no longer used by ICE - * Can also be invoked from non - * PFE context, in this case it is not - * relevant and is_pfe flag is - * set to false - * - * @bio: Pointer to the BIO structure - * @is_pfe: Pointer to is_pfe flag, which will be true if function was invoked - * from PFE context 
- */ -int pfk_load_key_end(const struct bio *bio, bool *is_pfe) -{ - int ret = 0; - struct pfk_key_info key_info = {NULL, NULL, 0, 0}; - - if (!is_pfe) { - pr_err("is_pfe is NULL\n"); - return -EINVAL; - } - - /* only a few errors below can indicate that - * this function was not invoked within PFE context, - * otherwise we will consider it PFE - */ - *is_pfe = true; - - if (!pfk_is_ready()) - return -ENODEV; - - ret = pfk_get_key_for_bio(bio, &key_info, NULL, is_pfe, NULL); - if (ret != 0) - return ret; - - pfk_kc_load_key_end(key_info.key, key_info.key_size, - key_info.salt, key_info.salt_size); - - pr_debug("finished using key for file %s\n", - inode_to_filename(pfk_bio_get_inode(bio))); - - return 0; -} - -/** - * pfk_allow_merge_bio() - Check if 2 BIOs can be merged. - * @bio1: Pointer to first BIO structure. - * @bio2: Pointer to second BIO structure. - * - * Prevent merging of BIOs from encrypted and non-encrypted - * files, or files encrypted with different key. - * Also prevent non encrypted and encrypted data from the same file - * to be merged (ecryptfs header if stored inside file should be non - * encrypted) - * This API is called by the file system block layer. - * - * Return: true if the BIOs allowed to be merged, false - * otherwise. 
- */ -bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2) -{ - const struct blk_encryption_key *key1 = NULL; - const struct blk_encryption_key *key2 = NULL; - const struct inode *inode1; - const struct inode *inode2; - enum pfe_type which_pfe1; - enum pfe_type which_pfe2; - -#ifdef CONFIG_DM_DEFAULT_KEY - key1 = bio1->bi_crypt_key; - key2 = bio2->bi_crypt_key; -#endif - - if (!pfk_is_ready()) - return false; - - if (!bio1 || !bio2) - return false; - - if (bio1 == bio2) - return true; - - key1 = bio1->bi_crypt_key; - key2 = bio2->bi_crypt_key; - - inode1 = pfk_bio_get_inode(bio1); - inode2 = pfk_bio_get_inode(bio2); - - which_pfe1 = pfk_get_pfe_type(inode1); - which_pfe2 = pfk_get_pfe_type(inode2); - - /* - * If one bio is for an encrypted file and the other is for a different - * type of encrypted file or for blocks that are not part of an - * encrypted file, do not merge. - */ - if (which_pfe1 != which_pfe2) - return false; - - if (which_pfe1 != INVALID_PFE) { - /* Both bios are for the same type of encrypted file. */ - return (*(pfk_allow_merge_bio_ftable[which_pfe1]))(bio1, bio2, - inode1, inode2); - } - - /* - * Neither bio is for an encrypted file. Merge only if the default keys - * are the same (or both are NULL). - */ - return key1 == key2 || - (key1 && key2 && - !crypto_memneq(key1->raw, key2->raw, sizeof(key1->raw))); -} - -int pfk_fbe_clear_key(const unsigned char *key, size_t key_size, - const unsigned char *salt, size_t salt_size) -{ - int ret = -EINVAL; - - if (!key || !salt) - return ret; - - ret = pfk_kc_remove_key_with_salt(key, key_size, salt, salt_size); - if (ret) - pr_err("Clear key error: ret value %d\n", ret); - return ret; -} - -/** - * Flush key table on storage core reset. During core reset key configuration - * is lost in ICE. 
We need to flash the cache, so that the keys will be - * reconfigured again for every subsequent transaction - */ -void pfk_clear_on_reset(void) -{ - if (!pfk_is_ready()) - return; - - pfk_kc_clear_on_reset(); -} - -module_init(pfk_init); -module_exit(pfk_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("Per-File-Key driver"); diff --git a/security/pfe/pfk_ext4.c b/security/pfe/pfk_ext4.c deleted file mode 100644 index 0eb122565ecc..000000000000 --- a/security/pfe/pfk_ext4.c +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/* - * Per-File-Key (PFK) - EXT4 - * - * This driver is used for working with EXT4 crypt extension - * - * The key information is stored in node by EXT4 when file is first opened - * and will be later accessed by Block Device Driver to actually load the key - * to encryption hw. - * - * PFK exposes API's for loading and removing keys from encryption hw - * and also API to determine whether 2 adjacent blocks can be agregated by - * Block Layer in one request to encryption hw. 
- * - */ - - -/* Uncomment the line below to enable debug messages */ -/* #define DEBUG 1 */ -#define pr_fmt(fmt) "pfk_ext4 [%s]: " fmt, __func__ - -#include -#include -#include -#include - -#include "fscrypt_ice.h" -#include "pfk_ext4.h" -//#include "ext4_ice.h" - -static bool pfk_ext4_ready; - -/* - * pfk_ext4_deinit() - Deinit function, should be invoked by upper PFK layer - */ -void pfk_ext4_deinit(void) -{ - pfk_ext4_ready = false; -} - -/* - * pfk_ecryptfs_init() - Init function, should be invoked by upper PFK layer - */ -int __init pfk_ext4_init(void) -{ - pfk_ext4_ready = true; - pr_info("PFK EXT4 inited successfully\n"); - - return 0; -} - -/** - * pfk_ecryptfs_is_ready() - driver is initialized and ready. - * - * Return: true if the driver is ready. - */ -static inline bool pfk_ext4_is_ready(void) -{ - return pfk_ext4_ready; -} - -/** - * pfk_ext4_dump_inode() - dumps all interesting info about inode to the screen - * - * - */ -/* - * static void pfk_ext4_dump_inode(const struct inode* inode) - * { - * struct ext4_crypt_info *ci = ext4_encryption_info((struct inode*)inode); - * - * pr_debug("dumping inode with address 0x%p\n", inode); - * pr_debug("S_ISREG is %d\n", S_ISREG(inode->i_mode)); - * pr_debug("EXT4_INODE_ENCRYPT flag is %d\n", - * ext4_test_inode_flag((struct inode*)inode, EXT4_INODE_ENCRYPT)); - * if (ci) { - * pr_debug("crypt_info address 0x%p\n", ci); - * pr_debug("ci->ci_data_mode %d\n", ci->ci_data_mode); - * } else { - * pr_debug("crypt_info is NULL\n"); - * } - * } - */ - -/** - * pfk_is_ext4_type() - return true if inode belongs to ICE EXT4 PFE - * @inode: inode pointer - */ -bool pfk_is_ext4_type(const struct inode *inode) -{ - if (!pfe_is_inode_filesystem_type(inode, "ext4")) - return false; - - return fscrypt_should_be_processed_by_ice(inode); -} - -/** - * pfk_ext4_parse_cipher() - parse cipher from inode to enum - * @inode: inode - * @algo: pointer to store the output enum (can be null) - * - * return 0 in case of success, error 
otherwise (i.e not supported cipher) - */ -static int pfk_ext4_parse_cipher(const struct inode *inode, - enum ice_cryto_algo_mode *algo) -{ - /* - * currently only AES XTS algo is supported - * in the future, table with supported ciphers might - * be introduced - */ - - if (!inode) - return -EINVAL; - - if (!fscrypt_is_aes_xts_cipher(inode)) { - pr_err("ext4 alghoritm is not supported by pfk\n"); - return -EINVAL; - } - - if (algo) - *algo = ICE_CRYPTO_ALGO_MODE_AES_XTS; - - return 0; -} - - -int pfk_ext4_parse_inode(const struct bio *bio, - const struct inode *inode, - struct pfk_key_info *key_info, - enum ice_cryto_algo_mode *algo, - bool *is_pfe) -{ - int ret = 0; - - if (!is_pfe) - return -EINVAL; - - /* - * only a few errors below can indicate that - * this function was not invoked within PFE context, - * otherwise we will consider it PFE - */ - *is_pfe = true; - - if (!pfk_ext4_is_ready()) - return -ENODEV; - - if (!inode) - return -EINVAL; - - if (!key_info) - return -EINVAL; - - key_info->key = fscrypt_get_ice_encryption_key(inode); - if (!key_info->key) { - pr_err("could not parse key from ext4\n"); - return -EINVAL; - } - - key_info->key_size = fscrypt_get_ice_encryption_key_size(inode); - if (!key_info->key_size) { - pr_err("could not parse key size from ext4\n"); - return -EINVAL; - } - - key_info->salt = fscrypt_get_ice_encryption_salt(inode); - if (!key_info->salt) { - pr_err("could not parse salt from ext4\n"); - return -EINVAL; - } - - key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode); - if (!key_info->salt_size) { - pr_err("could not parse salt size from ext4\n"); - return -EINVAL; - } - - ret = pfk_ext4_parse_cipher(inode, algo); - if (ret != 0) { - pr_err("not supported cipher\n"); - return ret; - } - - return 0; -} - -bool pfk_ext4_allow_merge_bio(const struct bio *bio1, - const struct bio *bio2, const struct inode *inode1, - const struct inode *inode2) -{ - /* if there is no ext4 pfk, don't disallow merging blocks */ - if 
(!pfk_ext4_is_ready()) - return true; - - if (!inode1 || !inode2) - return false; - - return fscrypt_is_ice_encryption_info_equal(inode1, inode2); -} diff --git a/security/pfe/pfk_ext4.h b/security/pfe/pfk_ext4.h deleted file mode 100644 index c33232f35a14..000000000000 --- a/security/pfe/pfk_ext4.h +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _PFK_EXT4_H_ -#define _PFK_EXT4_H_ - -#include -#include -#include -#include "pfk_internal.h" - -bool pfk_is_ext4_type(const struct inode *inode); - -int pfk_ext4_parse_inode(const struct bio *bio, - const struct inode *inode, - struct pfk_key_info *key_info, - enum ice_cryto_algo_mode *algo, - bool *is_pfe); - -bool pfk_ext4_allow_merge_bio(const struct bio *bio1, - const struct bio *bio2, const struct inode *inode1, - const struct inode *inode2); - -int __init pfk_ext4_init(void); - -void pfk_ext4_deinit(void); - -#endif /* _PFK_EXT4_H_ */ diff --git a/security/pfe/pfk_f2fs.c b/security/pfe/pfk_f2fs.c deleted file mode 100644 index 8b9d515043e8..000000000000 --- a/security/pfe/pfk_f2fs.c +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/* - * Per-File-Key (PFK) - f2fs - * - * This driver is used for working with EXT4/F2FS crypt extension - * - * The key information is stored in node by EXT4/F2FS when file is first opened - * and will be later accessed by Block Device Driver to actually load the key - * to encryption hw. - * - * PFK exposes API's for loading and removing keys from encryption hw - * and also API to determine whether 2 adjacent blocks can be agregated by - * Block Layer in one request to encryption hw. - * - */ - - -/* Uncomment the line below to enable debug messages */ -#define DEBUG 1 -#define pr_fmt(fmt) "pfk_f2fs [%s]: " fmt, __func__ - -#include -#include -#include -#include - -#include "fscrypt_ice.h" -#include "pfk_f2fs.h" - -static bool pfk_f2fs_ready; - -/* - * pfk_f2fs_deinit() - Deinit function, should be invoked by upper PFK layer - */ -void pfk_f2fs_deinit(void) -{ - pfk_f2fs_ready = false; -} - -/* - * pfk_f2fs_init() - Init function, should be invoked by upper PFK layer - */ -int __init pfk_f2fs_init(void) -{ - pfk_f2fs_ready = true; - pr_info("PFK F2FS inited successfully\n"); - - return 0; -} - -/** - * pfk_f2fs_is_ready() - driver is initialized and ready. - * - * Return: true if the driver is ready. 
- */ -static inline bool pfk_f2fs_is_ready(void) -{ - return pfk_f2fs_ready; -} - -/** - * pfk_is_f2fs_type() - return true if inode belongs to ICE F2FS PFE - * @inode: inode pointer - */ -bool pfk_is_f2fs_type(const struct inode *inode) -{ - if (!pfe_is_inode_filesystem_type(inode, "f2fs")) - return false; - - return fscrypt_should_be_processed_by_ice(inode); -} - -/** - * pfk_f2fs_parse_cipher() - parse cipher from inode to enum - * @inode: inode - * @algo: pointer to store the output enum (can be null) - * - * return 0 in case of success, error otherwise (i.e not supported cipher) - */ -static int pfk_f2fs_parse_cipher(const struct inode *inode, - enum ice_cryto_algo_mode *algo) -{ - /* - * currently only AES XTS algo is supported - * in the future, table with supported ciphers might - * be introduced - */ - if (!inode) - return -EINVAL; - - if (!fscrypt_is_aes_xts_cipher(inode)) { - pr_err("f2fs alghoritm is not supported by pfk\n"); - return -EINVAL; - } - - if (algo) - *algo = ICE_CRYPTO_ALGO_MODE_AES_XTS; - - return 0; -} - - -int pfk_f2fs_parse_inode(const struct bio *bio, - const struct inode *inode, - struct pfk_key_info *key_info, - enum ice_cryto_algo_mode *algo, - bool *is_pfe) -{ - int ret = 0; - - if (!is_pfe) - return -EINVAL; - - /* - * only a few errors below can indicate that - * this function was not invoked within PFE context, - * otherwise we will consider it PFE - */ - *is_pfe = true; - - if (!pfk_f2fs_is_ready()) - return -ENODEV; - - if (!inode) - return -EINVAL; - - if (!key_info) - return -EINVAL; - - key_info->key = fscrypt_get_ice_encryption_key(inode); - if (!key_info->key) { - pr_err("could not parse key from f2fs\n"); - return -EINVAL; - } - - key_info->key_size = fscrypt_get_ice_encryption_key_size(inode); - if (!key_info->key_size) { - pr_err("could not parse key size from f2fs\n"); - return -EINVAL; - } - - key_info->salt = fscrypt_get_ice_encryption_salt(inode); - if (!key_info->salt) { - pr_err("could not parse salt from 
f2fs\n"); - return -EINVAL; - } - - key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode); - if (!key_info->salt_size) { - pr_err("could not parse salt size from f2fs\n"); - return -EINVAL; - } - - ret = pfk_f2fs_parse_cipher(inode, algo); - if (ret != 0) { - pr_err("not supported cipher\n"); - return ret; - } - - return 0; -} - -bool pfk_f2fs_allow_merge_bio(const struct bio *bio1, - const struct bio *bio2, const struct inode *inode1, - const struct inode *inode2) -{ - bool mergeable; - - /* if there is no f2fs pfk, don't disallow merging blocks */ - if (!pfk_f2fs_is_ready()) - return true; - - if (!inode1 || !inode2) - return false; - - mergeable = fscrypt_is_ice_encryption_info_equal(inode1, inode2); - if (!mergeable) - return false; - - - /* ICE allows only consecutive iv_key stream. */ - if (!bio_dun(bio1) && !bio_dun(bio2)) - return true; - else if (!bio_dun(bio1) || !bio_dun(bio2)) - return false; - - return bio_end_dun(bio1) == bio_dun(bio2); -} diff --git a/security/pfe/pfk_f2fs.h b/security/pfe/pfk_f2fs.h deleted file mode 100644 index 551d529bced6..000000000000 --- a/security/pfe/pfk_f2fs.h +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef _PFK_F2FS_H_ -#define _PFK_F2FS_H_ - -#include -#include -#include -#include "pfk_internal.h" - -bool pfk_is_f2fs_type(const struct inode *inode); - -int pfk_f2fs_parse_inode(const struct bio *bio, - const struct inode *inode, - struct pfk_key_info *key_info, - enum ice_cryto_algo_mode *algo, - bool *is_pfe); - -bool pfk_f2fs_allow_merge_bio(const struct bio *bio1, - const struct bio *bio2, const struct inode *inode1, - const struct inode *inode2); - -int __init pfk_f2fs_init(void); - -void pfk_f2fs_deinit(void); - -#endif /* _PFK_F2FS_H_ */ diff --git a/security/pfe/pfk_ice.c b/security/pfe/pfk_ice.c deleted file mode 100644 index b627c92aaf5d..000000000000 --- a/security/pfe/pfk_ice.c +++ /dev/null @@ -1,216 +0,0 @@ -/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "pfk_ice.h" - -/**********************************/ -/** global definitions **/ -/**********************************/ - -#define TZ_ES_INVALIDATE_ICE_KEY 0x3 -#define TZ_ES_CONFIG_SET_ICE_KEY 0x4 - -/* index 0 and 1 is reserved for FDE */ -#define MIN_ICE_KEY_INDEX 2 -#define NUM_ICE_SLOTS 32 -#define MAX_ICE_KEY_INDEX (NUM_ICE_SLOTS - 1) - -#define TZ_ES_CONFIG_SET_ICE_KEY_ID \ - TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, \ - TZ_ES_CONFIG_SET_ICE_KEY) - -#define TZ_ES_INVALIDATE_ICE_KEY_ID \ - TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \ - TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY) - -#define TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID \ - TZ_SYSCALL_CREATE_PARAM_ID_1( \ - TZ_SYSCALL_PARAM_TYPE_VAL) - -#define TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID \ - TZ_SYSCALL_CREATE_PARAM_ID_5( \ - TZ_SYSCALL_PARAM_TYPE_VAL, \ - TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \ - TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL) - -#define CONTEXT_SIZE 0x1000 - -#define ICE_BUFFER_SIZE 64 - -static uint8_t ice_buffer[ICE_BUFFER_SIZE]; - -enum { - ICE_CIPHER_MODE_XTS_128 = 0, - ICE_CIPHER_MODE_CBC_128 = 1, - ICE_CIPHER_MODE_XTS_256 = 3, - ICE_CIPHER_MODE_CBC_256 = 4 -}; - -static int set_key(uint32_t index, const uint8_t *key, const uint8_t *salt, - unsigned int data_unit) -{ - struct scm_desc desc = {0}; - int ret = 0; - uint32_t smc_id = 0; - char *tzbuf = (char *)ice_buffer; - uint32_t size = ICE_BUFFER_SIZE / 2; - - if (!tzbuf) { - pr_err("%s No Memory\n", __func__); - return -ENOMEM; - } - - memset(tzbuf, 0, ICE_BUFFER_SIZE); - - memcpy(ice_buffer, key, size); - memcpy(ice_buffer+size, salt, size); - - dmac_flush_range(tzbuf, tzbuf + ICE_BUFFER_SIZE); - - smc_id = TZ_ES_CONFIG_SET_ICE_KEY_ID; - - desc.arginfo = TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID; - desc.args[0] = index; - desc.args[1] = virt_to_phys(tzbuf); - desc.args[2] = 
ICE_BUFFER_SIZE; - desc.args[3] = ICE_CIPHER_MODE_XTS_256; - desc.args[4] = data_unit; - - ret = scm_call2_noretry(smc_id, &desc); - if (ret) - pr_err("%s:SCM call Error: 0x%x\n", __func__, ret); - - return ret; -} - -static int clear_key(uint32_t index) -{ - struct scm_desc desc = {0}; - int ret = 0; - uint32_t smc_id = 0; - - smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID; - - desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID; - desc.args[0] = index; - - ret = scm_call2_noretry(smc_id, &desc); - if (ret) - pr_err("%s:SCM call Error: 0x%x\n", __func__, ret); - return ret; -} - -int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt, - char *storage_type, unsigned int data_unit) -{ - int ret = 0, ret1 = 0; - char *s_type = storage_type; - - if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) { - pr_err("%s Invalid index %d\n", __func__, index); - return -EINVAL; - } - if (!key || !salt) { - pr_err("%s Invalid key/salt\n", __func__); - return -EINVAL; - } - - if (s_type == NULL) { - pr_err("%s Invalid Storage type\n", __func__); - return -EINVAL; - } - - ret = qcom_ice_setup_ice_hw((const char *)s_type, true); - if (ret) { - pr_err("%s: could not enable clocks: %d\n", __func__, ret); - goto out; - } - - ret = set_key(index, key, salt, data_unit); - if (ret) { - pr_err("%s: Set Key Error: %d\n", __func__, ret); - if (ret == -EBUSY) { - if (qcom_ice_setup_ice_hw((const char *)s_type, false)) - pr_err("%s: clock disable failed\n", __func__); - goto out; - } - /* Try to invalidate the key to keep ICE in proper state */ - ret1 = clear_key(index); - if (ret1) - pr_err("%s: Invalidate key error: %d\n", __func__, ret); - } - - ret1 = qcom_ice_setup_ice_hw((const char *)s_type, false); - if (ret) - pr_err("%s: Error %d disabling clocks\n", __func__, ret); - -out: - return ret; -} - -int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type) -{ - int ret = 0; - - if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) { - pr_err("%s Invalid index %d\n", 
__func__, index); - return -EINVAL; - } - - if (storage_type == NULL) { - pr_err("%s Invalid Storage type\n", __func__); - return -EINVAL; - } - - ret = qcom_ice_setup_ice_hw((const char *)storage_type, true); - if (ret) { - pr_err("%s: could not enable clocks: 0x%x\n", __func__, ret); - return ret; - } - - ret = clear_key(index); - if (ret) - pr_err("%s: Invalidate key error: %d\n", __func__, ret); - - if (qcom_ice_setup_ice_hw((const char *)storage_type, false)) - pr_err("%s: could not disable clocks\n", __func__); - - return ret; -} - -int qti_pfk_ice_get_info(uint32_t *min_slot_index, uint32_t *total_num_slots, - bool async) -{ - - if (!min_slot_index || !total_num_slots) { - pr_err("%s Null input\n", __func__); - return -EINVAL; - } - - *min_slot_index = MIN_ICE_KEY_INDEX; - *total_num_slots = NUM_ICE_SLOTS - MIN_ICE_KEY_INDEX; - - return 0; -} diff --git a/security/pfe/pfk_ice.h b/security/pfe/pfk_ice.h deleted file mode 100644 index bc919744e7a2..000000000000 --- a/security/pfe/pfk_ice.h +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef PFK_ICE_H_ -#define PFK_ICE_H_ - -/* - * PFK ICE - * - * ICE keys configuration through scm calls. 
- * - */ - -#include - -int pfk_ice_init(void); -int pfk_ice_deinit(void); - -int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt, - char *storage_type, unsigned int data_unit); -int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type); -int qti_pfk_ice_get_info(uint32_t *min_slot_index, uint32_t *total_num_slots, - bool async); - -#endif /* PFK_ICE_H_ */ diff --git a/security/pfe/pfk_internal.h b/security/pfe/pfk_internal.h deleted file mode 100644 index 3214327b8bcd..000000000000 --- a/security/pfe/pfk_internal.h +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _PFK_INTERNAL_H_ -#define _PFK_INTERNAL_H_ - -#include -#include - -struct pfk_key_info { - const unsigned char *key; - const unsigned char *salt; - size_t key_size; - size_t salt_size; -}; - -int pfk_key_size_to_key_type(size_t key_size, - enum ice_crpto_key_size *key_size_type); - -bool pfe_is_inode_filesystem_type(const struct inode *inode, - const char *fs_type); - -char *inode_to_filename(const struct inode *inode); - -#endif /* _PFK_INTERNAL_H_ */ diff --git a/security/pfe/pfk_kc.c b/security/pfe/pfk_kc.c deleted file mode 100644 index c07c82c5260d..000000000000 --- a/security/pfe/pfk_kc.c +++ /dev/null @@ -1,951 +0,0 @@ -/* - * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/* - * PFK Key Cache - * - * Key Cache used internally in PFK. - * The purpose of the cache is to save access time to QSEE when loading keys. - * Currently the cache is the same size as the total number of keys that can - * be loaded to ICE. Since this number is relatively small, the algorithms for - * cache eviction are simple, linear and based on last usage timestamp, i.e - * the node that will be evicted is the one with the oldest timestamp. - * Empty entries always have the oldest timestamp. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "pfk_kc.h" -#include "pfk_ice.h" - -/** currently the only supported key and salt sizes */ -#define PFK_KC_KEY_SIZE 32 -#define PFK_KC_SALT_SIZE 32 - -/** Table size limitations */ -#define PFK_KC_MAX_TABLE_SIZE (32) -#define PFK_KC_MIN_TABLE_SIZE (1) - -/** The maximum key and salt size */ -#define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE -#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE -#define PFK_UFS "ufs" - -static DEFINE_SPINLOCK(kc_lock); -static unsigned long flags; -static bool kc_ready; -static char *s_type = "sdcc"; - - -/** Actual table size */ -static uint32_t kc_table_size; - -/** - * enum pfk_kc_entry_state - state of the entry inside kc table - * - * @FREE: entry is free - * @ACTIVE_ICE_PRELOAD: entry is actively used by ICE engine - and cannot be used by others. 
SCM call - to load key to ICE is pending to be performed - * @ACTIVE_ICE_LOADED: entry is actively used by ICE engine and - cannot be used by others. SCM call to load the - key to ICE was successfully executed and key is - now loaded - * @INACTIVE_INVALIDATING: entry is being invalidated during file close - and cannot be used by others until invalidation - is complete - * @INACTIVE: entry's key is already loaded, but is not - currently being used. It can be re-used for - optimization and to avoid SCM call cost or - it can be taken by another key if there are - no FREE entries - * @SCM_ERROR: error occurred while scm call was performed to - load the key to ICE - */ -enum pfk_kc_entry_state { - FREE, - ACTIVE_ICE_PRELOAD, - ACTIVE_ICE_LOADED, - INACTIVE_INVALIDATING, - INACTIVE, - SCM_ERROR -}; - -struct kc_entry { - unsigned char key[PFK_MAX_KEY_SIZE]; - size_t key_size; - - unsigned char salt[PFK_MAX_SALT_SIZE]; - size_t salt_size; - - u64 time_stamp; - u32 key_index; - - struct task_struct *thread_pending; - - enum pfk_kc_entry_state state; - - /* ref count for the number of requests in the HW queue for this key */ - int loaded_ref_cnt; - int scm_error; -}; - -static struct kc_entry kc_table[PFK_KC_MAX_TABLE_SIZE]; - - - -static inline void kc_spin_lock(void) -{ - spin_lock_irqsave(&kc_lock, flags); -} - -static inline void kc_spin_unlock(void) -{ - spin_unlock_irqrestore(&kc_lock, flags); -} -/** - * kc_is_ready() - driver is initialized and ready. - * - * Return: true if the key cache is ready. - */ -static inline bool kc_is_ready(void) -{ - bool res; - - kc_spin_lock(); - res = kc_ready; - kc_spin_unlock(); - return res; -} -/** - * pfk_kc_get_storage_type() - return the hardware storage type. - * - * Return: storage type queried during bootup. 
- */ -const char *pfk_kc_get_storage_type(void) -{ - return s_type; -} - -/** - * kc_entry_is_available() - checks whether the entry is available - * - * Return true if it is , false otherwise or if invalid - * Should be invoked under spinlock - */ -static bool kc_entry_is_available(const struct kc_entry *entry) -{ - if (!entry) - return false; - - return (entry->state == FREE || entry->state == INACTIVE); -} - -/** - * kc_entry_wait_till_available() - waits till entry is available - * - * Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted - * by signal - * - * Should be invoked under spinlock - */ -static int kc_entry_wait_till_available(struct kc_entry *entry) -{ - int res = 0; - - while (!kc_entry_is_available(entry)) { - set_current_state(TASK_INTERRUPTIBLE); - if (signal_pending(current)) { - res = -ERESTARTSYS; - break; - } - /* assuming only one thread can try to invalidate - * the same entry - */ - entry->thread_pending = current; - kc_spin_unlock(); - schedule(); - kc_spin_lock(); - } - set_current_state(TASK_RUNNING); - - return res; -} - -/** - * kc_entry_start_invalidating() - moves entry to state - * INACTIVE_INVALIDATING - * If entry is in use, waits till - * it gets available - * @entry: pointer to entry - * - * Return 0 in case of success, otherwise error - * Should be invoked under spinlock - */ -static int kc_entry_start_invalidating(struct kc_entry *entry) -{ - int res; - - res = kc_entry_wait_till_available(entry); - if (res) - return res; - - entry->state = INACTIVE_INVALIDATING; - - return 0; -} - -/** - * kc_entry_finish_invalidating() - moves entry to state FREE - * wakes up all the tasks waiting - * on it - * - * @entry: pointer to entry - * - * Return 0 in case of success, otherwise error - * Should be invoked under spinlock - */ -static void kc_entry_finish_invalidating(struct kc_entry *entry) -{ - if (!entry) - return; - - if (entry->state != INACTIVE_INVALIDATING) - return; - - entry->state = FREE; -} - -/** - * 
kc_min_entry() - compare two entries to find one with minimal time - * @a: ptr to the first entry. If NULL the other entry will be returned - * @b: pointer to the second entry - * - * Return the entry which timestamp is the minimal, or b if a is NULL - */ -static inline struct kc_entry *kc_min_entry(struct kc_entry *a, - struct kc_entry *b) -{ - if (!a) - return b; - - if (time_before64(b->time_stamp, a->time_stamp)) - return b; - - return a; -} - -/** - * kc_entry_at_index() - return entry at specific index - * @index: index of entry to be accessed - * - * Return entry - * Should be invoked under spinlock - */ -static struct kc_entry *kc_entry_at_index(int index) -{ - return &(kc_table[index]); -} - -/** - * kc_find_key_at_index() - find kc entry starting at specific index - * @key: key to look for - * @key_size: the key size - * @salt: salt to look for - * @salt_size: the salt size - * @sarting_index: index to start search with, if entry found, updated with - * index of that entry - * - * Return entry or NULL in case of error - * Should be invoked under spinlock - */ -static struct kc_entry *kc_find_key_at_index(const unsigned char *key, - size_t key_size, const unsigned char *salt, size_t salt_size, - int *starting_index) -{ - struct kc_entry *entry = NULL; - int i = 0; - - for (i = *starting_index; i < kc_table_size; i++) { - entry = kc_entry_at_index(i); - - if (salt != NULL) { - if (entry->salt_size != salt_size) - continue; - - if (memcmp(entry->salt, salt, salt_size) != 0) - continue; - } - - if (entry->key_size != key_size) - continue; - - if (memcmp(entry->key, key, key_size) == 0) { - *starting_index = i; - return entry; - } - } - - return NULL; -} - -/** - * kc_find_key() - find kc entry - * @key: key to look for - * @key_size: the key size - * @salt: salt to look for - * @salt_size: the salt size - * - * Return entry or NULL in case of error - * Should be invoked under spinlock - */ -static struct kc_entry *kc_find_key(const unsigned char *key, size_t 
key_size, - const unsigned char *salt, size_t salt_size) -{ - int index = 0; - - return kc_find_key_at_index(key, key_size, salt, salt_size, &index); -} - -/** - * kc_find_oldest_entry_non_locked() - finds the entry with minimal timestamp - * that is not locked - * - * Returns entry with minimal timestamp. Empty entries have timestamp - * of 0, therefore they are returned first. - * If all the entries are locked, will return NULL - * Should be invoked under spin lock - */ -static struct kc_entry *kc_find_oldest_entry_non_locked(void) -{ - struct kc_entry *curr_min_entry = NULL; - struct kc_entry *entry = NULL; - int i = 0; - - for (i = 0; i < kc_table_size; i++) { - entry = kc_entry_at_index(i); - - if (entry->state == FREE) - return entry; - - if (entry->state == INACTIVE) - curr_min_entry = kc_min_entry(curr_min_entry, entry); - } - - return curr_min_entry; -} - -/** - * kc_update_timestamp() - updates timestamp of entry to current - * - * @entry: entry to update - * - */ -static void kc_update_timestamp(struct kc_entry *entry) -{ - if (!entry) - return; - - entry->time_stamp = get_jiffies_64(); -} - -/** - * kc_clear_entry() - clear the key from entry and mark entry not in use - * - * @entry: pointer to entry - * - * Should be invoked under spinlock - */ -static void kc_clear_entry(struct kc_entry *entry) -{ - if (!entry) - return; - - memset(entry->key, 0, entry->key_size); - memset(entry->salt, 0, entry->salt_size); - - entry->key_size = 0; - entry->salt_size = 0; - - entry->time_stamp = 0; - entry->scm_error = 0; - - entry->state = FREE; - - entry->loaded_ref_cnt = 0; - entry->thread_pending = NULL; -} - -/** - * kc_update_entry() - replaces the key in given entry and - * loads the new key to ICE - * - * @entry: entry to replace key in - * @key: key - * @key_size: key_size - * @salt: salt - * @salt_size: salt_size - * @data_unit: dun size - * - * The previous key is securely released and wiped, the new one is loaded - * to ICE. 
- * Should be invoked under spinlock - */ -static int kc_update_entry(struct kc_entry *entry, const unsigned char *key, - size_t key_size, const unsigned char *salt, size_t salt_size, - unsigned int data_unit) -{ - int ret; - kc_clear_entry(entry); - - memcpy(entry->key, key, key_size); - entry->key_size = key_size; - - memcpy(entry->salt, salt, salt_size); - entry->salt_size = salt_size; - - /* Mark entry as no longer free before releasing the lock */ - entry->state = ACTIVE_ICE_PRELOAD; - kc_spin_unlock(); - ret = qti_pfk_ice_set_key(entry->key_index, entry->key, - entry->salt, s_type, data_unit); - kc_spin_lock(); - return ret; -} - -/** - * pfk_kc_init() - init function - * - * Return 0 in case of success, error otherwise - */ -int pfk_kc_init(bool async) -{ - int ret = 0; - struct kc_entry *entry = NULL; - uint32_t i = 0, num_ice_slots = 0, kc_starting_index = 0; - - if (kc_is_ready()) - return 0; - - ret = qti_pfk_ice_get_info(&kc_starting_index, &num_ice_slots, async); - if (ret) { - pr_err("qti_pfk_ice_get_info failed ret = %d\n", ret); - return ret; - } - if (num_ice_slots > PFK_KC_MAX_TABLE_SIZE || - num_ice_slots < PFK_KC_MIN_TABLE_SIZE) { - pr_err("Received ICE num slots = %u not in [%u,%u]\n", - num_ice_slots, PFK_KC_MAX_TABLE_SIZE, - PFK_KC_MIN_TABLE_SIZE); - return -E2BIG; - } - - kc_spin_lock(); - if (!kc_ready) { - kc_table_size = num_ice_slots; - for (i = 0; i < kc_table_size; i++) { - entry = kc_entry_at_index(i); - entry->key_index = kc_starting_index + i; - } - kc_ready = true; - } - kc_spin_unlock(); - - return ret; -} - -/** - * pfk_kc_denit() - deinit function - * - * Return 0 in case of success, error otherwise - */ -int pfk_kc_deinit(void) -{ - int res = pfk_kc_clear(); - kc_spin_lock(); - kc_ready = false; - kc_spin_unlock(); - kc_table_size = 0; - - return res; -} - -/** - * pfk_kc_load_key_start() - retrieve the key from cache or add it if - * it's not there and return the ICE hw key index in @key_index. 
- * @key: pointer to the key - * @key_size: the size of the key - * @salt: pointer to the salt - * @salt_size: the size of the salt - * @key_index: the pointer to key_index where the output will be stored - * @async: whether scm calls are allowed in the caller context - * - * If key is present in cache, than the key_index will be retrieved from cache. - * If it is not present, the oldest entry from kc table will be evicted, - * the key will be loaded to ICE via QSEE to the index that is the evicted - * entry number and stored in cache. - * Entry that is going to be used is marked as being used, it will mark - * as not being used when ICE finishes using it and pfk_kc_load_key_end - * will be invoked. - * As QSEE calls can only be done from a non-atomic context, when @async flag - * is set to 'false', it specifies that it is ok to make the calls in the - * current context. Otherwise, when @async is set, the caller should retry the - * call again from a different context, and -EAGAIN error will be returned. 
- * - * Return 0 in case of success, error otherwise - */ -int pfk_kc_load_key_start(const unsigned char *key, size_t key_size, - const unsigned char *salt, size_t salt_size, u32 *key_index, - bool async, unsigned int data_unit) -{ - int ret = 0; - struct kc_entry *entry = NULL; - bool entry_exists = false; - - ret = pfk_kc_init(async); - if (ret) - return ret; - - if (!key || !salt || !key_index) { - pr_err("%s key/salt/key_index NULL\n", __func__); - return -EINVAL; - } - - if (key_size != PFK_KC_KEY_SIZE) { - pr_err("unsupported key size %zu\n", key_size); - return -EINVAL; - } - - if (salt_size != PFK_KC_SALT_SIZE) { - pr_err("unsupported salt size %zu\n", salt_size); - return -EINVAL; - } - - kc_spin_lock(); - - entry = kc_find_key(key, key_size, salt, salt_size); - if (!entry) { - if (async) { - pr_debug("%s task will populate entry\n", __func__); - kc_spin_unlock(); - return -EAGAIN; - } - - entry = kc_find_oldest_entry_non_locked(); - if (!entry) { - /* could not find a single non locked entry, - * return EBUSY to upper layers so that the - * request will be rescheduled - */ - kc_spin_unlock(); - return -EBUSY; - } - } else { - entry_exists = true; - } - - pr_debug("entry with index %d is in state %d\n", - entry->key_index, entry->state); - - switch (entry->state) { - case (INACTIVE): - if (entry_exists) { - kc_update_timestamp(entry); - entry->state = ACTIVE_ICE_LOADED; - - if (!strcmp(s_type, (char *)PFK_UFS)) { - if (async) - entry->loaded_ref_cnt++; - } else { - entry->loaded_ref_cnt++; - } - break; - } - case (FREE): - ret = kc_update_entry(entry, key, key_size, salt, salt_size, - data_unit); - if (ret) { - entry->state = SCM_ERROR; - entry->scm_error = ret; - pr_err("%s: key load error (%d)\n", __func__, ret); - } else { - kc_update_timestamp(entry); - entry->state = ACTIVE_ICE_LOADED; - - /* - * In case of UFS only increase ref cnt for async calls, - * sync calls from within work thread do not pass - * requests further to HW - */ - if 
(!strcmp(s_type, (char *)PFK_UFS)) { - if (async) - entry->loaded_ref_cnt++; - } else { - entry->loaded_ref_cnt++; - } - } - break; - case (ACTIVE_ICE_PRELOAD): - case (INACTIVE_INVALIDATING): - ret = -EAGAIN; - break; - case (ACTIVE_ICE_LOADED): - kc_update_timestamp(entry); - - if (!strcmp(s_type, (char *)PFK_UFS)) { - if (async) - entry->loaded_ref_cnt++; - } else { - entry->loaded_ref_cnt++; - } - break; - case(SCM_ERROR): - ret = entry->scm_error; - kc_clear_entry(entry); - entry->state = FREE; - break; - default: - pr_err("invalid state %d for entry with key index %d\n", - entry->state, entry->key_index); - ret = -EINVAL; - } - - *key_index = entry->key_index; - kc_spin_unlock(); - - return ret; -} - -/** - * pfk_kc_load_key_end() - finish the process of key loading that was started - * by pfk_kc_load_key_start - * by marking the entry as not - * being in use - * @key: pointer to the key - * @key_size: the size of the key - * @salt: pointer to the salt - * @salt_size: the size of the salt - * - */ -void pfk_kc_load_key_end(const unsigned char *key, size_t key_size, - const unsigned char *salt, size_t salt_size) -{ - struct kc_entry *entry = NULL; - struct task_struct *tmp_pending = NULL; - int ref_cnt = 0; - - if (!kc_is_ready()) - return; - - if (!key || !salt) - return; - - if (key_size != PFK_KC_KEY_SIZE) - return; - - if (salt_size != PFK_KC_SALT_SIZE) - return; - - kc_spin_lock(); - - entry = kc_find_key(key, key_size, salt, salt_size); - if (!entry) { - kc_spin_unlock(); - pr_err("internal error, there should an entry to unlock\n"); - - return; - } - ref_cnt = --entry->loaded_ref_cnt; - - if (ref_cnt < 0) - pr_err("internal error, ref count should never be negative\n"); - - if (!ref_cnt) { - entry->state = INACTIVE; - /* - * wake-up invalidation if it's waiting - * for the entry to be released - */ - if (entry->thread_pending) { - tmp_pending = entry->thread_pending; - entry->thread_pending = NULL; - - kc_spin_unlock(); - wake_up_process(tmp_pending); - 
return; - } - } - - kc_spin_unlock(); -} - -/** - * pfk_kc_remove_key() - remove the key from cache and from ICE engine - * @key: pointer to the key - * @key_size: the size of the key - * @salt: pointer to the key - * @salt_size: the size of the key - * - * Return 0 in case of success, error otherwise (also in case of non - * (existing key) - */ -int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size, - const unsigned char *salt, size_t salt_size) -{ - struct kc_entry *entry = NULL; - int res = 0; - - if (!kc_is_ready()) - return -ENODEV; - - if (!key) - return -EINVAL; - - if (!salt) - return -EINVAL; - - if (key_size != PFK_KC_KEY_SIZE) - return -EINVAL; - - if (salt_size != PFK_KC_SALT_SIZE) - return -EINVAL; - - kc_spin_lock(); - - entry = kc_find_key(key, key_size, salt, salt_size); - if (!entry) { - pr_debug("%s: key does not exist\n", __func__); - kc_spin_unlock(); - return -EINVAL; - } - - res = kc_entry_start_invalidating(entry); - if (res != 0) { - kc_spin_unlock(); - return res; - } - kc_clear_entry(entry); - - kc_spin_unlock(); - - qti_pfk_ice_invalidate_key(entry->key_index, s_type); - - kc_spin_lock(); - kc_entry_finish_invalidating(entry); - kc_spin_unlock(); - - return 0; -} - -/** - * pfk_kc_remove_key() - remove the key from cache and from ICE engine - * when no salt is available. 
Will only search key part, if there are several, - * all will be removed - * - * @key: pointer to the key - * @key_size: the size of the key - * - * Return 0 in case of success, error otherwise (also for non-existing key) - */ -int pfk_kc_remove_key(const unsigned char *key, size_t key_size) -{ - struct kc_entry *entry = NULL; - int index = 0; - int temp_indexes[PFK_KC_MAX_TABLE_SIZE] = {0}; - int temp_indexes_size = 0; - int i = 0; - int res = 0; - - if (!kc_is_ready()) - return -ENODEV; - - if (!key) - return -EINVAL; - - if (key_size != PFK_KC_KEY_SIZE) - return -EINVAL; - - memset(temp_indexes, -1, sizeof(temp_indexes)); - - kc_spin_lock(); - - entry = kc_find_key_at_index(key, key_size, NULL, 0, &index); - if (!entry) { - pr_err("%s: key does not exist\n", __func__); - kc_spin_unlock(); - return -EINVAL; - } - - res = kc_entry_start_invalidating(entry); - if (res != 0) { - kc_spin_unlock(); - return res; - } - - temp_indexes[temp_indexes_size++] = index; - kc_clear_entry(entry); - - /* let's clean additional entries with the same key if there are any */ - do { - index++; - entry = kc_find_key_at_index(key, key_size, NULL, 0, &index); - if (!entry) - break; - - res = kc_entry_start_invalidating(entry); - if (res != 0) { - kc_spin_unlock(); - goto out; - } - - temp_indexes[temp_indexes_size++] = index; - - kc_clear_entry(entry); - - - } while (true); - - kc_spin_unlock(); - - temp_indexes_size--; - for (i = temp_indexes_size; i >= 0 ; i--) - qti_pfk_ice_invalidate_key( - kc_entry_at_index(temp_indexes[i])->key_index, - s_type); - - /* fall through */ - res = 0; - -out: - kc_spin_lock(); - for (i = temp_indexes_size; i >= 0 ; i--) - kc_entry_finish_invalidating( - kc_entry_at_index(temp_indexes[i])); - kc_spin_unlock(); - - return res; -} - -/** - * pfk_kc_clear() - clear the table and remove all keys from ICE - * - * Return 0 on success, error otherwise - * - */ -int pfk_kc_clear(void) -{ - struct kc_entry *entry = NULL; - int i = 0; - int res = 0; - - if 
(!kc_is_ready()) - return -ENODEV; - - kc_spin_lock(); - for (i = 0; i < kc_table_size; i++) { - entry = kc_entry_at_index(i); - res = kc_entry_start_invalidating(entry); - if (res != 0) { - kc_spin_unlock(); - goto out; - } - kc_clear_entry(entry); - } - kc_spin_unlock(); - - for (i = 0; i < kc_table_size; i++) - qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index, - s_type); - - /* fall through */ - res = 0; -out: - kc_spin_lock(); - for (i = 0; i < kc_table_size; i++) - kc_entry_finish_invalidating(kc_entry_at_index(i)); - kc_spin_unlock(); - - return res; -} - -/** - * pfk_kc_clear_on_reset() - clear the table and remove all keys from ICE - * The assumption is that at this point we don't have any pending transactions - * Also, there is no need to clear keys from ICE - * - * Return 0 on success, error otherwise - * - */ -void pfk_kc_clear_on_reset(void) -{ - struct kc_entry *entry = NULL; - int i = 0; - - if (!kc_is_ready()) - return; - - kc_spin_lock(); - for (i = 0; i < kc_table_size; i++) { - entry = kc_entry_at_index(i); - kc_clear_entry(entry); - } - kc_spin_unlock(); -} - -static int pfk_kc_find_storage_type(char **device) -{ - -#ifdef CONFIG_PFK_VIRTUALIZED - *device = PFK_UFS; - return 0; -#else - char boot[20] = {'\0'}; - char *match = (char *)strnstr(saved_command_line, - "androidboot.bootdevice=", - strlen(saved_command_line)); - if (match) { - memcpy(boot, (match + strlen("androidboot.bootdevice=")), - sizeof(boot) - 1); - if (strnstr(boot, PFK_UFS, strlen(boot))) - *device = PFK_UFS; - - return 0; - } - return -EINVAL; -#endif -} - -static int __init pfk_kc_pre_init(void) -{ - return pfk_kc_find_storage_type(&s_type); -} - -static void __exit pfk_kc_exit(void) -{ - s_type = NULL; -} - -module_init(pfk_kc_pre_init); -module_exit(pfk_kc_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("Per-File-Key-KC driver"); diff --git a/security/pfe/pfk_kc.h b/security/pfe/pfk_kc.h deleted file mode 100644 index dc00d286377a..000000000000 --- 
a/security/pfe/pfk_kc.h +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef PFK_KC_H_ -#define PFK_KC_H_ - -#include - -int pfk_kc_init(bool async); -int pfk_kc_deinit(void); -int pfk_kc_load_key_start(const unsigned char *key, size_t key_size, - const unsigned char *salt, size_t salt_size, u32 *key_index, - bool async, unsigned int data_unit); -void pfk_kc_load_key_end(const unsigned char *key, size_t key_size, - const unsigned char *salt, size_t salt_size); -int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size, - const unsigned char *salt, size_t salt_size); -int pfk_kc_remove_key(const unsigned char *key, size_t key_size); -int pfk_kc_clear(void); -void pfk_kc_clear_on_reset(void); -const char *pfk_kc_get_storage_type(void); -extern char *saved_command_line; - - -#endif /* PFK_KC_H_ */ diff --git a/security/security.c b/security/security.c index 2655987c9638..5afd1dc81511 100644 --- a/security/security.c +++ b/security/security.c @@ -614,14 +614,6 @@ int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode } EXPORT_SYMBOL_GPL(security_inode_create); -int security_inode_post_create(struct inode *dir, struct dentry *dentry, - umode_t mode) -{ - if (unlikely(IS_PRIVATE(dir))) - return 0; - return call_int_hook(inode_post_create, 0, dir, dentry, mode); -} - int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) { diff --git 
a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h index 2ac6edc1d131..512908b55ca3 100644 --- a/security/selinux/include/objsec.h +++ b/security/selinux/include/objsec.h @@ -26,9 +26,8 @@ #include #include #include -//#include "flask.h" -//#include "avc.h" -#include "security.h" +#include "flask.h" +#include "avc.h" struct task_security_struct { u32 osid; /* SID prior to last execve */ @@ -65,8 +64,6 @@ struct inode_security_struct { u32 sid; /* SID of this object */ u16 sclass; /* security class of this object */ unsigned char initialized; /* initialization flag */ - u32 tag; /* Per-File-Encryption tag */ - void *pfk_data; /* Per-File-Key data from ecryptfs */ spinlock_t lock; }; From 88205c5d9959b8ca4fb63bed60d0775b663b94c0 Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Sat, 18 Jul 2020 19:59:05 +0530 Subject: [PATCH 034/141] Integrate the new file encryption framework These changes integrate new file encryption framework to use new V2 encryption policies. These changes were earlier reverted in 'commit 4211691d298c ("Reverting crypto and incrementalfs changes")', as part of android-4.14.171 merge from Android common kernel. This patch attempts to bring them back post validation. 
commit a9a5450 ANDROID: dm: prevent default-key from being enabled without needed hooks commit e1a94e6 ANDROID: dm: add dm-default-key target for metadata encryption commit commit 232fd35 ANDROID: dm: enable may_passthrough_inline_crypto on some targets commit 53bc059 ANDROID: dm: add support for passing through inline crypto support commit aeed6db ANDROID: block: Introduce passthrough keyslot manager commit 4f27c8b ANDROID: ext4, f2fs: enable direct I/O with inline encryption commit c91db46 BACKPORT: FROMLIST: scsi: ufs: add program_key() variant op commit f9a8e4a ANDROID: block: export symbols needed for modules to use inline crypto commit 75fea5f ANDROID: block: fix some inline crypto bugs commit 2871f73 ANDROID: fscrypt: add support for hardware-wrapped keys commit bb5a657 ANDROID: block: add KSM op to derive software secret from wrapped key commit d42ba87 ANDROID: block: provide key size as input to inline crypto APIs commit 86646eb ANDROID: ufshcd-crypto: export cap find API commit 83bc20e ANDROID: scsi: ufs-qcom: Enable BROKEN_CRYPTO quirk flag commit c266a13 ANDROID: scsi: ufs: Add quirk bit for controllers that don't play well with inline crypto commit ea09b99 ANDROID: cuttlefish_defconfig: Enable blk-crypto fallback commit e12563c BACKPORT: FROMLIST: Update Inline Encryption from v5 to v6 of patch series commit 8e8f55d ANDROID: scsi: ufs: UFS init should not require inline crypto commit dae9899 ANDROID: scsi: ufs: UFS crypto variant operations API commit a69516d ANDROID: cuttlefish_defconfig: enable inline encryption commit b8f7b23 BACKPORT: FROMLIST: ext4: add inline encryption support commit e64327f BACKPORT: FROMLIST: f2fs: add inline encryption support commit a0dc8da BACKPORT: FROMLIST: fscrypt: add inline encryption support commit 19c3c62 BACKPORT: FROMLIST: scsi: ufs: Add inline encryption support to UFS commit f858a99 BACKPORT: FROMLIST: scsi: ufs: UFS crypto API commit 011b834 BACKPORT: FROMLIST: scsi: ufs: UFS driver v2.1 spec crypto additions 
commit ec0b569 BACKPORT: FROMLIST: block: blk-crypto for Inline Encryption commit 760b328 ANDROID: block: Fix bio_crypt_should_process WARN_ON commit 138adbb BACKPORT: FROMLIST: block: Add encryption context to struct bio commit 66b5609 BACKPORT: FROMLIST: block: Keyslot Manager for Inline Encryption Git-repo: https://android.googlesource.com/kernel/common/+/refs/heads/android-4.14-stable Git-commit: a9a545067a93d9821f965989b8eaea6fba7d27f7 Git-commit: e1a94e6b17e2610b56c5740b763df7858dad40f0 Git-commit: 232fd353e45d13576d507a011b5dac17e3c320ab Git-commit: 53bc059bc6d98631e8936ab9eeb7ac780c9ab2c3 Git-commit: aeed6db424b22148964d9788d4f9abac6e6cd7d8 Git-commit: 4f27c8b90bd223e967c98dc658961e67b9b864ae Git-commit: c91db466b51479ae761becc233d79c50ca3748a5 Git-commit: f9a8e4a5c5455a6bada70ed6d2f0af8900a872cb Git-commit: 75fea5f6057df78af1655f2f79a9c66a94bc838f Git-commit: 2871f731940165ed4042001a36bbe7d58f9d983b Git-commit: bb5a65771a206ae39086af1a9e78afeaf654cf03 Git-commit: d42ba87e29ab44aac446b5434298d1369c44fe3c Git-commit: 86646ebb1742a663c4c9c39c06d58dcb3f8f89e5 Git-commit: 83bc20ed4ba7dbf76964fd68905fde591b5de8b2 Git-commit: c266a1311e74b3ae1047a9d6abd6c6044059995c Git-commit: ea09b9954cc40b3088b8b2778b2daab12820a7e6 Git-commit: e12563c18d484e6379d03105b4565db7bb3a7975 Git-commit: 8e8f55d1a7e865562d2e3e022a7fcf13753a9c8e Git-commit: dae9899044f320bb119e02b45d816a493b1488ae Git-commit: a69516d0913e7f2c9bdde17c2ea6a793bb474830 Git-commit: b8f7b236748261bec545b69b39d7fb75e519f4ed Git-commit: e64327f5719b4a41e0de341ead7d48ed73216a23 Git-commit: a0dc8da519ccf2040af2dbbd6b4f688b50eb1755 Git-commit: 19c3c62836e5dbc9ceb620ecef0aa0c81578ed43 Git-commit: f858a9981a94a4e1d1b77b00bc05ab61b8431bce Git-commit: 011b8344c36d39255b8057c63d98e593e364ed7f Git-commit: ec0b569b5cc89391d9d6c90d2f76dc0a4db03e57 Git-commit: 760b3283e8056ffa6382722457c2e0cf08328629 Git-commit: 138adbbe5e4bfb6dee0571261f4d96a98f71d228 Git-commit: 66b5609826d60f80623643f1a7a1d865b5233f19 Change-Id: 
I171d90de41185824e0c7515f3a3b43ab88f4e058 Signed-off-by: Neeraj Soni --- Documentation/block/00-INDEX | 2 + Documentation/block/index.rst | 26 + Documentation/block/inline-encryption.rst | 183 ++++++ arch/arm64/configs/cuttlefish_defconfig | 3 + arch/x86/configs/x86_64_cuttlefish_defconfig | 3 + block/Kconfig | 17 + block/Makefile | 3 + block/bio-crypt-ctx.c | 142 ++++ block/bio.c | 23 +- block/blk-core.c | 11 +- block/blk-crypto-fallback.c | 650 +++++++++++++++++++ block/blk-crypto-internal.h | 58 ++ block/blk-crypto.c | 251 +++++++ block/blk-merge.c | 11 + block/keyslot-manager.c | 560 ++++++++++++++++ drivers/md/Kconfig | 21 + drivers/md/Makefile | 1 + drivers/md/dm-bow.c | 1 + drivers/md/dm-default-key.c | 403 ++++++++++++ drivers/md/dm-linear.c | 1 + drivers/md/dm-table.c | 52 ++ drivers/md/dm.c | 100 ++- drivers/scsi/ufs/Kconfig | 9 + drivers/scsi/ufs/Makefile | 4 +- drivers/scsi/ufs/ufs-qcom.c | 6 + drivers/scsi/ufs/ufshcd-crypto.c | 499 ++++++++++++++ drivers/scsi/ufs/ufshcd-crypto.h | 167 +++++ drivers/scsi/ufs/ufshcd.c | 67 +- drivers/scsi/ufs/ufshcd.h | 59 ++ drivers/scsi/ufs/ufshci.h | 56 ++ fs/buffer.c | 3 + fs/crypto/Kconfig | 6 + fs/crypto/Makefile | 1 + fs/crypto/bio.c | 28 +- fs/crypto/crypto.c | 2 +- fs/crypto/fname.c | 4 +- fs/crypto/fscrypt_private.h | 155 ++++- fs/crypto/inline_crypt.c | 353 ++++++++++ fs/crypto/keyring.c | 61 +- fs/crypto/keysetup.c | 165 +++-- fs/crypto/keysetup_v1.c | 17 +- fs/direct-io.c | 5 + fs/ext4/ext4.h | 1 + fs/ext4/inode.c | 16 +- fs/ext4/page-io.c | 6 +- fs/ext4/readpage.c | 11 +- fs/ext4/super.c | 13 + fs/f2fs/data.c | 71 +- fs/f2fs/f2fs.h | 11 +- fs/f2fs/super.c | 41 ++ fs/iomap.c | 6 + include/linux/bio-crypt-ctx.h | 228 +++++++ include/linux/bio.h | 1 + include/linux/blk-crypto.h | 66 ++ include/linux/blk_types.h | 9 + include/linux/blkdev.h | 6 + include/linux/device-mapper.h | 6 + include/linux/fscrypt.h | 72 ++ include/linux/keyslot-manager.h | 84 +++ include/uapi/linux/fscrypt.h | 2 + 60 files changed, 4696 
insertions(+), 143 deletions(-) create mode 100644 Documentation/block/index.rst create mode 100644 Documentation/block/inline-encryption.rst create mode 100644 block/bio-crypt-ctx.c create mode 100644 block/blk-crypto-fallback.c create mode 100644 block/blk-crypto-internal.h create mode 100644 block/blk-crypto.c create mode 100644 block/keyslot-manager.c create mode 100644 drivers/md/dm-default-key.c create mode 100644 drivers/scsi/ufs/ufshcd-crypto.c create mode 100644 drivers/scsi/ufs/ufshcd-crypto.h create mode 100644 fs/crypto/inline_crypt.c create mode 100644 include/linux/bio-crypt-ctx.h create mode 100644 include/linux/blk-crypto.h create mode 100644 include/linux/keyslot-manager.h diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX index f8614b3d49f9..1c63f2cba97e 100644 --- a/Documentation/block/00-INDEX +++ b/Documentation/block/00-INDEX @@ -16,6 +16,8 @@ data-integrity.txt - Block data integrity deadline-iosched.txt - Deadline IO scheduler tunables +inline-encryption.rst + - Blk-crypto internals and inline encryption ioprio.txt - Block io priorities (in CFQ scheduler) pr.txt diff --git a/Documentation/block/index.rst b/Documentation/block/index.rst new file mode 100644 index 000000000000..026addfc69bc --- /dev/null +++ b/Documentation/block/index.rst @@ -0,0 +1,26 @@ +.. SPDX-License-Identifier: GPL-2.0 + +===== +Block +===== + +.. toctree:: + :maxdepth: 1 + + bfq-iosched + biodoc + biovecs + capability + cmdline-partition + data-integrity + deadline-iosched + inline-encryption + ioprio + kyber-iosched + null_blk + pr + queue-sysfs + request + stat + switching-sched + writeback_cache_control diff --git a/Documentation/block/inline-encryption.rst b/Documentation/block/inline-encryption.rst new file mode 100644 index 000000000000..330106b23c09 --- /dev/null +++ b/Documentation/block/inline-encryption.rst @@ -0,0 +1,183 @@ +.. 
SPDX-License-Identifier: GPL-2.0 + +================= +Inline Encryption +================= + +Objective +========= + +We want to support inline encryption (IE) in the kernel. +To allow for testing, we also want a crypto API fallback when actual +IE hardware is absent. We also want IE to work with layered devices +like dm and loopback (i.e. we want to be able to use the IE hardware +of the underlying devices if present, or else fall back to crypto API +en/decryption). + + +Constraints and notes +===================== + +- IE hardware have a limited number of "keyslots" that can be programmed + with an encryption context (key, algorithm, data unit size, etc.) at any time. + One can specify a keyslot in a data request made to the device, and the + device will en/decrypt the data using the encryption context programmed into + that specified keyslot. When possible, we want to make multiple requests with + the same encryption context share the same keyslot. + +- We need a way for filesystems to specify an encryption context to use for + en/decrypting a struct bio, and a device driver (like UFS) needs to be able + to use that encryption context when it processes the bio. + +- We need a way for device drivers to expose their capabilities in a unified + way to the upper layers. + + +Design +====== + +We add a struct bio_crypt_ctx to struct bio that can represent an +encryption context, because we need to be able to pass this encryption +context from the FS layer to the device driver to act upon. + +While IE hardware works on the notion of keyslots, the FS layer has no +knowledge of keyslots - it simply wants to specify an encryption context to +use while en/decrypting a bio. + +We introduce a keyslot manager (KSM) that handles the translation from +encryption contexts specified by the FS to keyslots on the IE hardware. +This KSM also serves as the way IE hardware can expose their capabilities to +upper layers. 
The generic mode of operation is: each device driver that wants +to support IE will construct a KSM and set it up in its struct request_queue. +Upper layers that want to use IE on this device can then use this KSM in +the device's struct request_queue to translate an encryption context into +a keyslot. The presence of the KSM in the request queue shall be used to mean +that the device supports IE. + +On the device driver end of the interface, the device driver needs to tell the +KSM how to actually manipulate the IE hardware in the device to do things like +programming the crypto key into the IE hardware into a particular keyslot. All +this is achieved through the :c:type:`struct keyslot_mgmt_ll_ops` that the +device driver passes to the KSM when creating it. + +It uses refcounts to track which keyslots are idle (either they have no +encryption context programmed, or there are no in-flight struct bios +referencing that keyslot). When a new encryption context needs a keyslot, it +tries to find a keyslot that has already been programmed with the same +encryption context, and if there is no such keyslot, it evicts the least +recently used idle keyslot and programs the new encryption context into that +one. If no idle keyslots are available, then the caller will sleep until there +is at least one. + + +Blk-crypto +========== + +The above is sufficient for simple cases, but does not work if there is a +need for a crypto API fallback, or if we are want to use IE with layered +devices. To these ends, we introduce blk-crypto. Blk-crypto allows us to +present a unified view of encryption to the FS (so FS only needs to specify +an encryption context and not worry about keyslots at all), and blk-crypto +can decide whether to delegate the en/decryption to IE hardware or to the +crypto API. Blk-crypto maintains an internal KSM that serves as the crypto +API fallback. 
+ +Blk-crypto needs to ensure that the encryption context is programmed into the +"correct" keyslot manager for IE. If a bio is submitted to a layered device +that eventually passes the bio down to a device that really does support IE, we +want the encryption context to be programmed into a keyslot for the KSM of the +device with IE support. However, blk-crypto does not know a priori whether a +particular device is the final device in the layering structure for a bio or +not. So in the case that a particular device does not support IE, since it is +possibly the final destination device for the bio, if the bio requires +encryption (i.e. the bio is doing a write operation), blk-crypto must fallback +to the crypto API *before* sending the bio to the device. + +Blk-crypto ensures that: + +- The bio's encryption context is programmed into a keyslot in the KSM of the + request queue that the bio is being submitted to (or the crypto API fallback + KSM if the request queue doesn't have a KSM), and that the ``bc_ksm`` + in the ``bi_crypt_context`` is set to this KSM + +- That the bio has its own individual reference to the keyslot in this KSM. + Once the bio passes through blk-crypto, its encryption context is programmed + in some KSM. The "its own individual reference to the keyslot" ensures that + keyslots can be released by each bio independently of other bios while + ensuring that the bio has a valid reference to the keyslot when, for e.g., the + crypto API fallback KSM in blk-crypto performs crypto on the device's behalf. + The individual references are ensured by increasing the refcount for the + keyslot in the ``bc_ksm`` when a bio with a programmed encryption + context is cloned. + + +What blk-crypto does on bio submission +-------------------------------------- + +**Case 1:** blk-crypto is given a bio with only an encryption context that hasn't +been programmed into any keyslot in any KSM (for e.g. a bio from the FS). 
+ In this case, blk-crypto will program the encryption context into the KSM of the + request queue the bio is being submitted to (and if this KSM does not exist, + then it will program it into blk-crypto's internal KSM for crypto API + fallback). The KSM that this encryption context was programmed into is stored + as the ``bc_ksm`` in the bio's ``bi_crypt_context``. + +**Case 2:** blk-crypto is given a bio whose encryption context has already been +programmed into a keyslot in the *crypto API fallback* KSM. + In this case, blk-crypto does nothing; it treats the bio as not having + specified an encryption context. Note that we cannot do here what we will do + in Case 3 because we would have already encrypted the bio via the crypto API + by this point. + +**Case 3:** blk-crypto is given a bio whose encryption context has already been +programmed into a keyslot in some KSM (that is *not* the crypto API fallback +KSM). + In this case, blk-crypto first releases that keyslot from that KSM and then + treats the bio as in Case 1. + +This way, when a device driver is processing a bio, it can be sure that +the bio's encryption context has been programmed into some KSM (either the +device driver's request queue's KSM, or blk-crypto's crypto API fallback KSM). +It then simply needs to check if the bio's ``bc_ksm`` is the device's +request queue's KSM. If so, then it should proceed with IE. If not, it should +simply do nothing with respect to crypto, because some other KSM (perhaps the +blk-crypto crypto API fallback KSM) is handling the en/decryption. + +Blk-crypto will release the keyslot that is being held by the bio (and also +decrypt it if the bio is using the crypto API fallback KSM) once +``bio_remaining_done`` returns true for the bio. + + +Layered Devices +=============== + +Layered devices that wish to support IE need to create their own keyslot +manager for their request queue, and expose whatever functionality they choose. 
+When a layered device wants to pass a bio to another layer (either by +resubmitting the same bio, or by submitting a clone), it doesn't need to do +anything special because the bio (or the clone) will once again pass through +blk-crypto, which will work as described in Case 3. If a layered device wants +for some reason to do the IO by itself instead of passing it on to a child +device, but it also chose to expose IE capabilities by setting up a KSM in its +request queue, it is then responsible for en/decrypting the data itself. In +such cases, the device can choose to call the blk-crypto function +``blk_crypto_fallback_to_kernel_crypto_api`` (TODO: Not yet implemented), which will +cause the en/decryption to be done via the crypto API fallback. + + +Future Optimizations for layered devices +======================================== + +Creating a keyslot manager for the layered device uses up memory for each +keyslot, and in general, a layered device (like dm-linear) merely passes the +request on to a "child" device, so the keyslots in the layered device itself +might be completely unused. We can instead define a new type of KSM; the +"passthrough KSM", that layered devices can use to let blk-crypto know that +this layered device *will* pass the bio to some child device (and hence +through blk-crypto again, at which point blk-crypto can program the encryption +context, instead of programming it into the layered device's KSM). Again, if +the device "lies" and decides to do the IO itself instead of passing it on to +a child device, it is responsible for doing the en/decryption (and can choose +to call ``blk_crypto_fallback_to_kernel_crypto_api``). Another use case for the +"passthrough KSM" is for IE devices that want to manage their own keyslots/do +not have a limited number of keyslots. 
diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig index ba7cfa923d70..56e652620541 100644 --- a/arch/arm64/configs/cuttlefish_defconfig +++ b/arch/arm64/configs/cuttlefish_defconfig @@ -47,6 +47,8 @@ CONFIG_REFCOUNT_FULL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PCI=y CONFIG_PCI_HOST_GENERIC=y CONFIG_PREEMPT=y @@ -431,6 +433,7 @@ CONFIG_EXT4_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_FS_VERITY=y CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y # CONFIG_DNOTIFY is not set diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig index a7f60e3d5dde..5a03ff9b45db 100644 --- a/arch/x86/configs/x86_64_cuttlefish_defconfig +++ b/arch/x86/configs/x86_64_cuttlefish_defconfig @@ -46,6 +46,8 @@ CONFIG_REFCOUNT_FULL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y CONFIG_SMP=y CONFIG_HYPERVISOR_GUEST=y @@ -449,6 +451,7 @@ CONFIG_EXT4_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_FS_VERITY=y CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y CONFIG_QUOTA=y diff --git a/block/Kconfig b/block/Kconfig index 28ec55752b68..4d9bcb951d83 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -184,6 +184,23 @@ config BLK_SED_OPAL Enabling this option enables users to setup/unlock/lock Locking ranges for SED devices using the Opal protocol. +config BLK_INLINE_ENCRYPTION + bool "Enable inline encryption support in block layer" + help + Build the blk-crypto subsystem. Enabling this lets the + block layer handle encryption, so users can take + advantage of inline encryption hardware if present. 
+ +config BLK_INLINE_ENCRYPTION_FALLBACK + bool "Enable crypto API fallback for blk-crypto" + depends on BLK_INLINE_ENCRYPTION + select CRYPTO + select CRYPTO_BLKCIPHER + help + Enabling this lets the block layer handle inline encryption + by falling back to the kernel crypto API when inline + encryption hardware is not present. + menu "Partition Types" source "block/partitions/Kconfig" diff --git a/block/Makefile b/block/Makefile index 6a56303b9925..ab14055d8222 100644 --- a/block/Makefile +++ b/block/Makefile @@ -35,3 +35,6 @@ obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o obj-$(CONFIG_BLK_WBT) += blk-wbt.o obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o +obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += keyslot-manager.o bio-crypt-ctx.o \ + blk-crypto.o +obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o \ No newline at end of file diff --git a/block/bio-crypt-ctx.c b/block/bio-crypt-ctx.c new file mode 100644 index 000000000000..75008b2afea2 --- /dev/null +++ b/block/bio-crypt-ctx.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 Google LLC + */ + +#include +#include +#include +#include +#include + +#include "blk-crypto-internal.h" + +static int num_prealloc_crypt_ctxs = 128; + +module_param(num_prealloc_crypt_ctxs, int, 0444); +MODULE_PARM_DESC(num_prealloc_crypt_ctxs, + "Number of bio crypto contexts to preallocate"); + +static struct kmem_cache *bio_crypt_ctx_cache; +static mempool_t *bio_crypt_ctx_pool; + +int __init bio_crypt_ctx_init(void) +{ + size_t i; + + bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0); + if (!bio_crypt_ctx_cache) + return -ENOMEM; + + bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs, + bio_crypt_ctx_cache); + if (!bio_crypt_ctx_pool) + return -ENOMEM; + + /* This is assumed in various places. */ + BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0); + + /* Sanity check that no algorithm exceeds the defined limits. 
*/ + for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) { + BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE); + BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE); + } + + return 0; +} + +struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask) +{ + return mempool_alloc(bio_crypt_ctx_pool, gfp_mask); +} +EXPORT_SYMBOL_GPL(bio_crypt_alloc_ctx); + +void bio_crypt_free_ctx(struct bio *bio) +{ + mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool); + bio->bi_crypt_context = NULL; +} + +void bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask) +{ + const struct bio_crypt_ctx *src_bc = src->bi_crypt_context; + + bio_clone_skip_dm_default_key(dst, src); + + /* + * If a bio is fallback_crypted, then it will be decrypted when + * bio_endio is called. As we only want the data to be decrypted once, + * copies of the bio must not have have a crypt context. + */ + if (!src_bc || bio_crypt_fallback_crypted(src_bc)) + return; + + dst->bi_crypt_context = bio_crypt_alloc_ctx(gfp_mask); + *dst->bi_crypt_context = *src_bc; + + if (src_bc->bc_keyslot >= 0) + keyslot_manager_get_slot(src_bc->bc_ksm, src_bc->bc_keyslot); +} +EXPORT_SYMBOL_GPL(bio_crypt_clone); + +bool bio_crypt_should_process(struct request *rq) +{ + struct bio *bio = rq->bio; + + if (!bio || !bio->bi_crypt_context) + return false; + + return rq->q->ksm == bio->bi_crypt_context->bc_ksm; +} +EXPORT_SYMBOL_GPL(bio_crypt_should_process); + +/* + * Checks that two bio crypt contexts are compatible - i.e. that + * they are mergeable except for data_unit_num continuity. 
+ */ +bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2) +{ + struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context; + struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context; + + if (!bc1) + return !bc2; + return bc2 && bc1->bc_key == bc2->bc_key; +} + +/* + * Checks that two bio crypt contexts are compatible, and also + * that their data_unit_nums are continuous (and can hence be merged) + * in the order b_1 followed by b_2. + */ +bool bio_crypt_ctx_mergeable(struct bio *b_1, unsigned int b1_bytes, + struct bio *b_2) +{ + struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context; + struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context; + + if (!bio_crypt_ctx_compatible(b_1, b_2)) + return false; + + return !bc1 || bio_crypt_dun_is_contiguous(bc1, b1_bytes, bc2->bc_dun); +} + +void bio_crypt_ctx_release_keyslot(struct bio_crypt_ctx *bc) +{ + keyslot_manager_put_slot(bc->bc_ksm, bc->bc_keyslot); + bc->bc_ksm = NULL; + bc->bc_keyslot = -1; +} + +int bio_crypt_ctx_acquire_keyslot(struct bio_crypt_ctx *bc, + struct keyslot_manager *ksm) +{ + int slot = keyslot_manager_get_slot_for_key(ksm, bc->bc_key); + + if (slot < 0) + return slot; + + bc->bc_keyslot = slot; + bc->bc_ksm = ksm; + return 0; +} diff --git a/block/bio.c b/block/bio.c index a3c4fd9ec478..6ef2e22d2bf3 100644 --- a/block/bio.c +++ b/block/bio.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include "blk.h" @@ -243,6 +244,8 @@ fallback: void bio_uninit(struct bio *bio) { bio_disassociate_task(bio); + + bio_crypt_free_ctx(bio); } EXPORT_SYMBOL(bio_uninit); @@ -628,15 +631,12 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs) __bio_clone_fast(b, bio); - if (bio_integrity(bio)) { - int ret; + bio_crypt_clone(b, bio, gfp_mask); - ret = bio_integrity_clone(b, bio, gfp_mask); - - if (ret < 0) { - bio_put(b); - return NULL; - } + if (bio_integrity(bio) && + bio_integrity_clone(b, bio, gfp_mask) < 0) { + bio_put(b); + return NULL; } return b; @@ -704,6 +704,8 @@ struct bio 
*bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, break; } + bio_crypt_clone(bio, bio_src, gfp_mask); + if (bio_integrity(bio_src)) { int ret; @@ -1035,6 +1037,7 @@ void bio_advance(struct bio *bio, unsigned bytes) if (bio_integrity(bio)) bio_integrity_advance(bio, bytes); + bio_crypt_advance(bio, bytes); bio_advance_iter(bio, &bio->bi_iter, bytes); } EXPORT_SYMBOL(bio_advance); @@ -1892,6 +1895,10 @@ void bio_endio(struct bio *bio) again: if (!bio_remaining_done(bio)) return; + + if (!blk_crypto_endio(bio)) + return; + if (!bio_integrity_endio(bio)) return; diff --git a/block/blk-core.c b/block/blk-core.c index 52490014818f..e90d2e3644c0 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -35,6 +35,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -2284,7 +2285,9 @@ blk_qc_t generic_make_request(struct bio *bio) /* Create a fresh bio_list for all subordinate requests */ bio_list_on_stack[1] = bio_list_on_stack[0]; bio_list_init(&bio_list_on_stack[0]); - ret = q->make_request_fn(q, bio); + + if (!blk_crypto_submit_bio(&bio)) + ret = q->make_request_fn(q, bio); /* sort new bios into those for a lower level * and those for the same level @@ -3728,6 +3731,12 @@ int __init blk_dev_init(void) blk_debugfs_root = debugfs_create_dir("block", NULL); #endif + if (bio_crypt_ctx_init() < 0) + panic("Failed to allocate mem for bio crypt ctxs\n"); + + if (blk_crypto_fallback_init() < 0) + panic("Failed to init blk-crypto-fallback\n"); + return 0; } diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c new file mode 100644 index 000000000000..cce3317cba80 --- /dev/null +++ b/block/blk-crypto-fallback.c @@ -0,0 +1,650 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 Google LLC + */ + +/* + * Refer to Documentation/block/inline-encryption.rst for detailed explanation. 
+ */ + +#define pr_fmt(fmt) "blk-crypto-fallback: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "blk-crypto-internal.h" + +static unsigned int num_prealloc_bounce_pg = 32; +module_param(num_prealloc_bounce_pg, uint, 0); +MODULE_PARM_DESC(num_prealloc_bounce_pg, + "Number of preallocated bounce pages for the blk-crypto crypto API fallback"); + +static unsigned int blk_crypto_num_keyslots = 100; +module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0); +MODULE_PARM_DESC(num_keyslots, + "Number of keyslots for the blk-crypto crypto API fallback"); + +static unsigned int num_prealloc_fallback_crypt_ctxs = 128; +module_param(num_prealloc_fallback_crypt_ctxs, uint, 0); +MODULE_PARM_DESC(num_prealloc_crypt_fallback_ctxs, + "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback"); + +struct bio_fallback_crypt_ctx { + struct bio_crypt_ctx crypt_ctx; + /* + * Copy of the bvec_iter when this bio was submitted. + * We only want to en/decrypt the part of the bio as described by the + * bvec_iter upon submission because bio might be split before being + * resubmitted + */ + struct bvec_iter crypt_iter; + u64 fallback_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; +}; + +/* The following few vars are only used during the crypto API fallback */ +static struct kmem_cache *bio_fallback_crypt_ctx_cache; +static mempool_t *bio_fallback_crypt_ctx_pool; + +/* + * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate + * all of a mode's tfms when that mode starts being used. Since each mode may + * need all the keyslots at some point, each mode needs its own tfm for each + * keyslot; thus, a keyslot may contain tfms for multiple modes. 
However, to + * match the behavior of real inline encryption hardware (which only supports a + * single encryption context per keyslot), we only allow one tfm per keyslot to + * be used at a time - the rest of the unused tfms have their keys cleared. + */ +static DEFINE_MUTEX(tfms_init_lock); +static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX]; + +struct blk_crypto_decrypt_work { + struct work_struct work; + struct bio *bio; +}; + +static struct blk_crypto_keyslot { + struct crypto_skcipher *tfm; + enum blk_crypto_mode_num crypto_mode; + struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX]; +} *blk_crypto_keyslots; + +/* The following few vars are only used during the crypto API fallback */ +static struct keyslot_manager *blk_crypto_ksm; +static struct workqueue_struct *blk_crypto_wq; +static mempool_t *blk_crypto_bounce_page_pool; +static struct kmem_cache *blk_crypto_decrypt_work_cache; + +bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc) +{ + return bc && bc->bc_ksm == blk_crypto_ksm; +} + +/* + * This is the key we set when evicting a keyslot. This *should* be the all 0's + * key, but AES-XTS rejects that key, so we use some random bytes instead. 
+ */ +static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE]; + +static void blk_crypto_evict_keyslot(unsigned int slot) +{ + struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot]; + enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode; + int err; + + WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID); + + /* Clear the key in the skcipher */ + err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key, + blk_crypto_modes[crypto_mode].keysize); + WARN_ON(err); + slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID; +} + +static int blk_crypto_keyslot_program(struct keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot) +{ + struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot]; + const enum blk_crypto_mode_num crypto_mode = key->crypto_mode; + int err; + + if (crypto_mode != slotp->crypto_mode && + slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID) { + blk_crypto_evict_keyslot(slot); + } + + if (!slotp->tfms[crypto_mode]) + return -ENOMEM; + slotp->crypto_mode = crypto_mode; + err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw, + key->size); + if (err) { + blk_crypto_evict_keyslot(slot); + return err; + } + return 0; +} + +static int blk_crypto_keyslot_evict(struct keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot) +{ + blk_crypto_evict_keyslot(slot); + return 0; +} + +/* + * The crypto API fallback KSM ops - only used for a bio when it specifies a + * blk_crypto_mode for which we failed to get a keyslot in the device's inline + * encryption hardware (which probably means the device doesn't have inline + * encryption hardware that supports that crypto mode). 
+ */ +static const struct keyslot_mgmt_ll_ops blk_crypto_ksm_ll_ops = { + .keyslot_program = blk_crypto_keyslot_program, + .keyslot_evict = blk_crypto_keyslot_evict, +}; + +static void blk_crypto_encrypt_endio(struct bio *enc_bio) +{ + struct bio *src_bio = enc_bio->bi_private; + int i; + + for (i = 0; i < enc_bio->bi_vcnt; i++) + mempool_free(enc_bio->bi_io_vec[i].bv_page, + blk_crypto_bounce_page_pool); + + src_bio->bi_status = enc_bio->bi_status; + + bio_put(enc_bio); + bio_endio(src_bio); +} + +static struct bio *blk_crypto_clone_bio(struct bio *bio_src) +{ + struct bvec_iter iter; + struct bio_vec bv; + struct bio *bio; + + bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL); + if (!bio) + return NULL; + bio->bi_disk = bio_src->bi_disk; + bio->bi_opf = bio_src->bi_opf; + bio->bi_ioprio = bio_src->bi_ioprio; + bio->bi_write_hint = bio_src->bi_write_hint; + bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; + bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; + + bio_for_each_segment(bv, bio_src, iter) + bio->bi_io_vec[bio->bi_vcnt++] = bv; + + if (bio_integrity(bio_src) && + bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0) { + bio_put(bio); + return NULL; + } + + bio_clone_blkcg_association(bio, bio_src); + + bio_clone_skip_dm_default_key(bio, bio_src); + + return bio; +} + +static int blk_crypto_alloc_cipher_req(struct bio *src_bio, + struct skcipher_request **ciph_req_ret, + struct crypto_wait *wait) +{ + struct skcipher_request *ciph_req; + const struct blk_crypto_keyslot *slotp; + + slotp = &blk_crypto_keyslots[src_bio->bi_crypt_context->bc_keyslot]; + ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode], + GFP_NOIO); + if (!ciph_req) { + src_bio->bi_status = BLK_STS_RESOURCE; + return -ENOMEM; + } + + skcipher_request_set_callback(ciph_req, + CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, wait); + *ciph_req_ret = ciph_req; + return 0; +} + +static int blk_crypto_split_bio_if_needed(struct bio **bio_ptr) 
+{ + struct bio *bio = *bio_ptr; + unsigned int i = 0; + unsigned int num_sectors = 0; + struct bio_vec bv; + struct bvec_iter iter; + + bio_for_each_segment(bv, bio, iter) { + num_sectors += bv.bv_len >> SECTOR_SHIFT; + if (++i == BIO_MAX_PAGES) + break; + } + if (num_sectors < bio_sectors(bio)) { + struct bio *split_bio; + + split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL); + if (!split_bio) { + bio->bi_status = BLK_STS_RESOURCE; + return -ENOMEM; + } + bio_chain(split_bio, bio); + generic_make_request(bio); + *bio_ptr = split_bio; + } + return 0; +} + +union blk_crypto_iv { + __le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; + u8 bytes[BLK_CRYPTO_MAX_IV_SIZE]; +}; + +static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], + union blk_crypto_iv *iv) +{ + int i; + + for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) + iv->dun[i] = cpu_to_le64(dun[i]); +} + +/* + * The crypto API fallback's encryption routine. + * Allocate a bounce bio for encryption, encrypt the input bio using crypto API, + * and replace *bio_ptr with the bounce bio. May split input bio if it's too + * large. 
+ */ +static int blk_crypto_encrypt_bio(struct bio **bio_ptr) +{ + struct bio *src_bio; + struct skcipher_request *ciph_req = NULL; + DECLARE_CRYPTO_WAIT(wait); + u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; + union blk_crypto_iv iv; + struct scatterlist src, dst; + struct bio *enc_bio; + unsigned int i, j; + int data_unit_size; + struct bio_crypt_ctx *bc; + int err = 0; + + /* Split the bio if it's too big for single page bvec */ + err = blk_crypto_split_bio_if_needed(bio_ptr); + if (err) + return err; + + src_bio = *bio_ptr; + bc = src_bio->bi_crypt_context; + data_unit_size = bc->bc_key->data_unit_size; + + /* Allocate bounce bio for encryption */ + enc_bio = blk_crypto_clone_bio(src_bio); + if (!enc_bio) { + src_bio->bi_status = BLK_STS_RESOURCE; + return -ENOMEM; + } + + /* + * Use the crypto API fallback keyslot manager to get a crypto_skcipher + * for the algorithm and key specified for this bio. + */ + err = bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm); + if (err) { + src_bio->bi_status = BLK_STS_IOERR; + goto out_put_enc_bio; + } + + /* and then allocate an skcipher_request for it */ + err = blk_crypto_alloc_cipher_req(src_bio, &ciph_req, &wait); + if (err) + goto out_release_keyslot; + + memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun)); + sg_init_table(&src, 1); + sg_init_table(&dst, 1); + + skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size, + iv.bytes); + + /* Encrypt each page in the bounce bio */ + for (i = 0; i < enc_bio->bi_vcnt; i++) { + struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i]; + struct page *plaintext_page = enc_bvec->bv_page; + struct page *ciphertext_page = + mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO); + + enc_bvec->bv_page = ciphertext_page; + + if (!ciphertext_page) { + src_bio->bi_status = BLK_STS_RESOURCE; + err = -ENOMEM; + goto out_free_bounce_pages; + } + + sg_set_page(&src, plaintext_page, data_unit_size, + enc_bvec->bv_offset); + sg_set_page(&dst, ciphertext_page, data_unit_size, + enc_bvec->bv_offset); + 
+ /* Encrypt each data unit in this page */ + for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) { + blk_crypto_dun_to_iv(curr_dun, &iv); + err = crypto_wait_req(crypto_skcipher_encrypt(ciph_req), + &wait); + if (err) { + i++; + src_bio->bi_status = BLK_STS_RESOURCE; + goto out_free_bounce_pages; + } + bio_crypt_dun_increment(curr_dun, 1); + src.offset += data_unit_size; + dst.offset += data_unit_size; + } + } + + enc_bio->bi_private = src_bio; + enc_bio->bi_end_io = blk_crypto_encrypt_endio; + *bio_ptr = enc_bio; + + enc_bio = NULL; + err = 0; + goto out_free_ciph_req; + +out_free_bounce_pages: + while (i > 0) + mempool_free(enc_bio->bi_io_vec[--i].bv_page, + blk_crypto_bounce_page_pool); +out_free_ciph_req: + skcipher_request_free(ciph_req); +out_release_keyslot: + bio_crypt_ctx_release_keyslot(bc); +out_put_enc_bio: + if (enc_bio) + bio_put(enc_bio); + + return err; +} + +static void blk_crypto_free_fallback_crypt_ctx(struct bio *bio) +{ + mempool_free(container_of(bio->bi_crypt_context, + struct bio_fallback_crypt_ctx, + crypt_ctx), + bio_fallback_crypt_ctx_pool); + bio->bi_crypt_context = NULL; +} + +/* + * The crypto API fallback's main decryption routine. + * Decrypts input bio in place. 
+ */ +static void blk_crypto_decrypt_bio(struct work_struct *work) +{ + struct blk_crypto_decrypt_work *decrypt_work = + container_of(work, struct blk_crypto_decrypt_work, work); + struct bio *bio = decrypt_work->bio; + struct skcipher_request *ciph_req = NULL; + DECLARE_CRYPTO_WAIT(wait); + struct bio_vec bv; + struct bvec_iter iter; + u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; + union blk_crypto_iv iv; + struct scatterlist sg; + struct bio_crypt_ctx *bc = bio->bi_crypt_context; + struct bio_fallback_crypt_ctx *f_ctx = + container_of(bc, struct bio_fallback_crypt_ctx, crypt_ctx); + const int data_unit_size = bc->bc_key->data_unit_size; + unsigned int i; + int err; + + /* + * Use the crypto API fallback keyslot manager to get a crypto_skcipher + * for the algorithm and key specified for this bio. + */ + if (bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm)) { + bio->bi_status = BLK_STS_RESOURCE; + goto out_no_keyslot; + } + + /* and then allocate an skcipher_request for it */ + err = blk_crypto_alloc_cipher_req(bio, &ciph_req, &wait); + if (err) + goto out; + + memcpy(curr_dun, f_ctx->fallback_dun, sizeof(curr_dun)); + sg_init_table(&sg, 1); + skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size, + iv.bytes); + + /* Decrypt each segment in the bio */ + __bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) { + struct page *page = bv.bv_page; + + sg_set_page(&sg, page, data_unit_size, bv.bv_offset); + + /* Decrypt each data unit in the segment */ + for (i = 0; i < bv.bv_len; i += data_unit_size) { + blk_crypto_dun_to_iv(curr_dun, &iv); + if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req), + &wait)) { + bio->bi_status = BLK_STS_IOERR; + goto out; + } + bio_crypt_dun_increment(curr_dun, 1); + sg.offset += data_unit_size; + } + } + +out: + skcipher_request_free(ciph_req); + bio_crypt_ctx_release_keyslot(bc); +out_no_keyslot: + kmem_cache_free(blk_crypto_decrypt_work_cache, decrypt_work); + blk_crypto_free_fallback_crypt_ctx(bio); + bio_endio(bio); +} + +/* 
+ * Queue bio for decryption. + * Returns true iff bio was queued for decryption. + */ +bool blk_crypto_queue_decrypt_bio(struct bio *bio) +{ + struct blk_crypto_decrypt_work *decrypt_work; + + /* If there was an IO error, don't queue for decrypt. */ + if (bio->bi_status) + goto out; + + decrypt_work = kmem_cache_zalloc(blk_crypto_decrypt_work_cache, + GFP_ATOMIC); + if (!decrypt_work) { + bio->bi_status = BLK_STS_RESOURCE; + goto out; + } + + INIT_WORK(&decrypt_work->work, blk_crypto_decrypt_bio); + decrypt_work->bio = bio; + queue_work(blk_crypto_wq, &decrypt_work->work); + + return true; +out: + blk_crypto_free_fallback_crypt_ctx(bio); + return false; +} + +/** + * blk_crypto_start_using_mode() - Start using a crypto algorithm on a device + * @mode_num: the blk_crypto_mode we want to allocate ciphers for. + * @data_unit_size: the data unit size that will be used + * @q: the request queue for the device + * + * Upper layers must call this function to ensure that a the crypto API fallback + * has transforms for this algorithm, if they become necessary. + * + * Return: 0 on success and -err on error. + */ +int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num, + unsigned int data_unit_size, + struct request_queue *q) +{ + struct blk_crypto_keyslot *slotp; + unsigned int i; + int err = 0; + + /* + * Fast path + * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num] + * for each i are visible before we try to access them. + */ + if (likely(smp_load_acquire(&tfms_inited[mode_num]))) + return 0; + + /* + * If the keyslot manager of the request queue supports this + * crypto mode, then we don't need to allocate this mode. 
+ */ + if (keyslot_manager_crypto_mode_supported(q->ksm, mode_num, + data_unit_size)) + return 0; + + mutex_lock(&tfms_init_lock); + if (likely(tfms_inited[mode_num])) + goto out; + + for (i = 0; i < blk_crypto_num_keyslots; i++) { + slotp = &blk_crypto_keyslots[i]; + slotp->tfms[mode_num] = crypto_alloc_skcipher( + blk_crypto_modes[mode_num].cipher_str, + 0, 0); + if (IS_ERR(slotp->tfms[mode_num])) { + err = PTR_ERR(slotp->tfms[mode_num]); + slotp->tfms[mode_num] = NULL; + goto out_free_tfms; + } + + crypto_skcipher_set_flags(slotp->tfms[mode_num], + CRYPTO_TFM_REQ_WEAK_KEY); + } + + /* + * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num] + * for each i are visible before we set tfms_inited[mode_num]. + */ + smp_store_release(&tfms_inited[mode_num], true); + goto out; + +out_free_tfms: + for (i = 0; i < blk_crypto_num_keyslots; i++) { + slotp = &blk_crypto_keyslots[i]; + crypto_free_skcipher(slotp->tfms[mode_num]); + slotp->tfms[mode_num] = NULL; + } +out: + mutex_unlock(&tfms_init_lock); + return err; +} +EXPORT_SYMBOL_GPL(blk_crypto_start_using_mode); + +int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key) +{ + return keyslot_manager_evict_key(blk_crypto_ksm, key); +} + +int blk_crypto_fallback_submit_bio(struct bio **bio_ptr) +{ + struct bio *bio = *bio_ptr; + struct bio_crypt_ctx *bc = bio->bi_crypt_context; + struct bio_fallback_crypt_ctx *f_ctx; + + if (!tfms_inited[bc->bc_key->crypto_mode]) { + bio->bi_status = BLK_STS_IOERR; + return -EIO; + } + + if (bio_data_dir(bio) == WRITE) + return blk_crypto_encrypt_bio(bio_ptr); + + /* + * Mark bio as fallback crypted and replace the bio_crypt_ctx with + * another one contained in a bio_fallback_crypt_ctx, so that the + * fallback has space to store the info it needs for decryption. 
+ */ + bc->bc_ksm = blk_crypto_ksm; + f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO); + f_ctx->crypt_ctx = *bc; + memcpy(f_ctx->fallback_dun, bc->bc_dun, sizeof(f_ctx->fallback_dun)); + f_ctx->crypt_iter = bio->bi_iter; + + bio_crypt_free_ctx(bio); + bio->bi_crypt_context = &f_ctx->crypt_ctx; + + return 0; +} + +int __init blk_crypto_fallback_init(void) +{ + int i; + unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX]; + + prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE); + + /* All blk-crypto modes have a crypto API fallback. */ + for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) + crypto_mode_supported[i] = 0xFFFFFFFF; + crypto_mode_supported[BLK_ENCRYPTION_MODE_INVALID] = 0; + + blk_crypto_ksm = keyslot_manager_create(blk_crypto_num_keyslots, + &blk_crypto_ksm_ll_ops, + crypto_mode_supported, NULL); + if (!blk_crypto_ksm) + return -ENOMEM; + + blk_crypto_wq = alloc_workqueue("blk_crypto_wq", + WQ_UNBOUND | WQ_HIGHPRI | + WQ_MEM_RECLAIM, num_online_cpus()); + if (!blk_crypto_wq) + return -ENOMEM; + + blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots, + sizeof(blk_crypto_keyslots[0]), + GFP_KERNEL); + if (!blk_crypto_keyslots) + return -ENOMEM; + + blk_crypto_bounce_page_pool = + mempool_create_page_pool(num_prealloc_bounce_pg, 0); + if (!blk_crypto_bounce_page_pool) + return -ENOMEM; + + blk_crypto_decrypt_work_cache = KMEM_CACHE(blk_crypto_decrypt_work, + SLAB_RECLAIM_ACCOUNT); + if (!blk_crypto_decrypt_work_cache) + return -ENOMEM; + + bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0); + if (!bio_fallback_crypt_ctx_cache) + return -ENOMEM; + + bio_fallback_crypt_ctx_pool = + mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs, + bio_fallback_crypt_ctx_cache); + if (!bio_fallback_crypt_ctx_pool) + return -ENOMEM; + + return 0; +} diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h new file mode 100644 index 000000000000..40d826b743da --- /dev/null +++ b/block/blk-crypto-internal.h @@ -0,0 +1,58 
@@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
+#define __LINUX_BLK_CRYPTO_INTERNAL_H
+
+#include <linux/bio.h>
+
+/* Represents a crypto mode supported by blk-crypto */
+struct blk_crypto_mode {
+	const char *cipher_str; /* crypto API name (for fallback case) */
+	unsigned int keysize; /* key size in bytes */
+	unsigned int ivsize; /* iv size in bytes */
+};
+
+extern const struct blk_crypto_mode blk_crypto_modes[];
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK
+
+int blk_crypto_fallback_submit_bio(struct bio **bio_ptr);
+
+bool blk_crypto_queue_decrypt_bio(struct bio *bio);
+
+int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);
+
+bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc);
+
+#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
+
+static inline bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc)
+{
+	return false;
+}
+
+static inline int blk_crypto_fallback_submit_bio(struct bio **bio_ptr)
+{
+	pr_warn_once("crypto API fallback disabled; failing request\n");
+	(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
+	return -EIO;
+}
+
+static inline bool blk_crypto_queue_decrypt_bio(struct bio *bio)
+{
+	WARN_ON(1);
+	return false;
+}
+
+static inline int
+blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
+{
+	return 0;
+}
+
+#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
+
+#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
new file mode 100644
index 000000000000..a8de0d9680e0
--- /dev/null
+++ b/block/blk-crypto.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Google LLC
+ */
+
+/*
+ * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
+ */
+
+#define pr_fmt(fmt) "blk-crypto: " fmt
+
+#include <linux/blk-crypto.h>
+#include <linux/blkdev.h>
+#include <linux/keyslot-manager.h>
+#include <linux/random.h>
+#include <linux/siphash.h>
+
+#include "blk-crypto-internal.h"
+
+const struct blk_crypto_mode blk_crypto_modes[] = {
+	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
+		.cipher_str = "xts(aes)",
+		.keysize = 64,
+		.ivsize = 16,
+	},
+	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
+		.cipher_str = "essiv(cbc(aes),sha256)",
+		.keysize = 16,
+		.ivsize = 16,
+	},
+	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
+		.cipher_str = "adiantum(xchacha12,aes)",
+		.keysize = 32,
+		.ivsize = 32,
+	},
+};
+
+/* Check that all I/O segments are data unit aligned */
+static int bio_crypt_check_alignment(struct bio *bio)
+{
+	const unsigned int data_unit_size =
+		bio->bi_crypt_context->bc_key->data_unit_size;
+	struct bvec_iter iter;
+	struct bio_vec bv;
+
+	bio_for_each_segment(bv, bio, iter) {
+		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
+			return -EIO;
+	}
+	return 0;
+}
+
+/**
+ * blk_crypto_submit_bio - handle submitting bio for inline encryption
+ *
+ * @bio_ptr: pointer to original bio pointer
+ *
+ * If the bio doesn't have inline encryption enabled or the submitter already
+ * specified a keyslot for the target device, do nothing. Else, a raw key must
+ * have been provided, so acquire a device keyslot for it if supported. Else,
+ * use the crypto API fallback.
+ *
+ * When the crypto API fallback is used for encryption, blk-crypto may choose to
+ * split the bio into 2 - the first one that will continue to be processed and
+ * the second one that will be resubmitted via generic_make_request.
+ * A bounce bio will be allocated to encrypt the contents of the aforementioned
+ * "first one", and *bio_ptr will be updated to this bounce bio.
+ *
+ * Return: 0 if bio submission should continue; nonzero if bio_endio() was
+ * already called so bio submission should abort.
+ */ +int blk_crypto_submit_bio(struct bio **bio_ptr) +{ + struct bio *bio = *bio_ptr; + struct request_queue *q; + struct bio_crypt_ctx *bc = bio->bi_crypt_context; + int err; + + if (!bc || !bio_has_data(bio)) + return 0; + + /* + * When a read bio is marked for fallback decryption, its bi_iter is + * saved so that when we decrypt the bio later, we know what part of it + * was marked for fallback decryption (when the bio is passed down after + * blk_crypto_submit bio, it may be split or advanced so we cannot rely + * on the bi_iter while decrypting in blk_crypto_endio) + */ + if (bio_crypt_fallback_crypted(bc)) + return 0; + + err = bio_crypt_check_alignment(bio); + if (err) { + bio->bi_status = BLK_STS_IOERR; + goto out; + } + + q = bio->bi_disk->queue; + + if (bc->bc_ksm) { + /* Key already programmed into device? */ + if (q->ksm == bc->bc_ksm) + return 0; + + /* Nope, release the existing keyslot. */ + bio_crypt_ctx_release_keyslot(bc); + } + + /* Get device keyslot if supported */ + if (keyslot_manager_crypto_mode_supported(q->ksm, + bc->bc_key->crypto_mode, + bc->bc_key->data_unit_size)) { + err = bio_crypt_ctx_acquire_keyslot(bc, q->ksm); + if (!err) + return 0; + + pr_warn_once("Failed to acquire keyslot for %s (err=%d). Falling back to crypto API.\n", + bio->bi_disk->disk_name, err); + } + + /* Fallback to crypto API */ + err = blk_crypto_fallback_submit_bio(bio_ptr); + if (err) + goto out; + + return 0; +out: + bio_endio(*bio_ptr); + return err; +} + +/** + * blk_crypto_endio - clean up bio w.r.t inline encryption during bio_endio + * + * @bio: the bio to clean up + * + * If blk_crypto_submit_bio decided to fallback to crypto API for this bio, + * we queue the bio for decryption into a workqueue and return false, + * and call bio_endio(bio) at a later time (after the bio has been decrypted). + * + * If the bio is not to be decrypted by the crypto API, this function releases + * the reference to the keyslot that blk_crypto_submit_bio got. 
+ * + * Return: true if bio_endio should continue; false otherwise (bio_endio will + * be called again when bio has been decrypted). + */ +bool blk_crypto_endio(struct bio *bio) +{ + struct bio_crypt_ctx *bc = bio->bi_crypt_context; + + if (!bc) + return true; + + if (bio_crypt_fallback_crypted(bc)) { + /* + * The only bios who's crypto is handled by the blk-crypto + * fallback when they reach here are those with + * bio_data_dir(bio) == READ, since WRITE bios that are + * encrypted by the crypto API fallback are handled by + * blk_crypto_encrypt_endio(). + */ + return !blk_crypto_queue_decrypt_bio(bio); + } + + if (bc->bc_keyslot >= 0) + bio_crypt_ctx_release_keyslot(bc); + + return true; +} + +/** + * blk_crypto_init_key() - Prepare a key for use with blk-crypto + * @blk_key: Pointer to the blk_crypto_key to initialize. + * @raw_key: Pointer to the raw key. + * @raw_key_size: Size of raw key. Must be at least the required size for the + * chosen @crypto_mode; see blk_crypto_modes[]. (It's allowed + * to be longer than the mode's actual key size, in order to + * support inline encryption hardware that accepts wrapped keys.) + * @crypto_mode: identifier for the encryption algorithm to use + * @data_unit_size: the data unit size to use for en/decryption + * + * Return: The blk_crypto_key that was prepared, or an ERR_PTR() on error. When + * done using the key, it must be freed with blk_crypto_free_key(). 
+ */ +int blk_crypto_init_key(struct blk_crypto_key *blk_key, + const u8 *raw_key, unsigned int raw_key_size, + enum blk_crypto_mode_num crypto_mode, + unsigned int data_unit_size) +{ + const struct blk_crypto_mode *mode; + static siphash_key_t hash_key; + + memset(blk_key, 0, sizeof(*blk_key)); + + if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes)) + return -EINVAL; + + BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE); + + mode = &blk_crypto_modes[crypto_mode]; + if (raw_key_size < mode->keysize || + raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE) + return -EINVAL; + + if (!is_power_of_2(data_unit_size)) + return -EINVAL; + + blk_key->crypto_mode = crypto_mode; + blk_key->data_unit_size = data_unit_size; + blk_key->data_unit_size_bits = ilog2(data_unit_size); + blk_key->size = raw_key_size; + memcpy(blk_key->raw, raw_key, raw_key_size); + + /* + * The keyslot manager uses the SipHash of the key to implement O(1) key + * lookups while avoiding leaking information about the keys. It's + * precomputed here so that it only needs to be computed once per key. + */ + get_random_once(&hash_key, sizeof(hash_key)); + blk_key->hash = siphash(raw_key, raw_key_size, &hash_key); + + return 0; +} +EXPORT_SYMBOL_GPL(blk_crypto_init_key); + +/** + * blk_crypto_evict_key() - Evict a key from any inline encryption hardware + * it may have been programmed into + * @q: The request queue who's keyslot manager this key might have been + * programmed into + * @key: The key to evict + * + * Upper layers (filesystems) should call this function to ensure that a key + * is evicted from hardware that it might have been programmed into. This + * will call keyslot_manager_evict_key on the queue's keyslot manager, if one + * exists, and supports the crypto algorithm with the specified data unit size. + * Otherwise, it will evict the key from the blk-crypto-fallback's ksm. + * + * Return: 0 on success, -err on error. 
+ */ +int blk_crypto_evict_key(struct request_queue *q, + const struct blk_crypto_key *key) +{ + if (q->ksm && + keyslot_manager_crypto_mode_supported(q->ksm, key->crypto_mode, + key->data_unit_size)) + return keyslot_manager_evict_key(q->ksm, key); + + return blk_crypto_fallback_evict_key(key); +} +EXPORT_SYMBOL_GPL(blk_crypto_evict_key); diff --git a/block/blk-merge.c b/block/blk-merge.c index de29a4054666..9e322d62d9f4 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -514,6 +514,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, req_set_nomerge(q, req); return 0; } + if (!bio_crypt_ctx_mergeable(req->bio, blk_rq_bytes(req), bio)) + return 0; if (!bio_flagged(req->biotail, BIO_SEG_VALID)) blk_recount_segments(q, req->biotail); if (!bio_flagged(bio, BIO_SEG_VALID)) @@ -536,6 +538,8 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req, req_set_nomerge(q, req); return 0; } + if (!bio_crypt_ctx_mergeable(bio, bio->bi_iter.bi_size, req->bio)) + return 0; if (!bio_flagged(bio, BIO_SEG_VALID)) blk_recount_segments(q, bio); if (!bio_flagged(req->bio, BIO_SEG_VALID)) @@ -612,6 +616,9 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, if (blk_integrity_merge_rq(q, req, next) == false) return 0; + if (!bio_crypt_ctx_mergeable(req->bio, blk_rq_bytes(req), next->bio)) + return 0; + /* Merge is OK... 
*/ req->nr_phys_segments = total_phys_segments; return 1; @@ -833,6 +840,10 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) if (rq->write_hint != bio->bi_write_hint) return false; + /* Only merge if the crypt contexts are compatible */ + if (!bio_crypt_ctx_compatible(bio, rq->bio)) + return false; + return true; } diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c new file mode 100644 index 000000000000..7e42813c9de0 --- /dev/null +++ b/block/keyslot-manager.c @@ -0,0 +1,560 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 Google LLC + */ + +/** + * DOC: The Keyslot Manager + * + * Many devices with inline encryption support have a limited number of "slots" + * into which encryption contexts may be programmed, and requests can be tagged + * with a slot number to specify the key to use for en/decryption. + * + * As the number of slots are limited, and programming keys is expensive on + * many inline encryption hardware, we don't want to program the same key into + * multiple slots - if multiple requests are using the same key, we want to + * program just one slot with that key and use that slot for all requests. + * + * The keyslot manager manages these keyslots appropriately, and also acts as + * an abstraction between the inline encryption hardware and the upper layers. + * + * Lower layer devices will set up a keyslot manager in their request queue + * and tell it how to perform device specific operations like programming/ + * evicting keys from keyslots. + * + * Upper layers will call keyslot_manager_get_slot_for_key() to program a + * key into some slot in the inline encryption hardware. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +struct keyslot { + atomic_t slot_refs; + struct list_head idle_slot_node; + struct hlist_node hash_node; + struct blk_crypto_key key; +}; + +struct keyslot_manager { + unsigned int num_slots; + struct keyslot_mgmt_ll_ops ksm_ll_ops; + unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX]; + void *ll_priv_data; + + /* Protects programming and evicting keys from the device */ + struct rw_semaphore lock; + + /* List of idle slots, with least recently used slot at front */ + wait_queue_head_t idle_slots_wait_queue; + struct list_head idle_slots; + spinlock_t idle_slots_lock; + + /* + * Hash table which maps key hashes to keyslots, so that we can find a + * key's keyslot in O(1) time rather than O(num_slots). Protected by + * 'lock'. A cryptographic hash function is used so that timing attacks + * can't leak information about the raw keys. + */ + struct hlist_head *slot_hashtable; + unsigned int slot_hashtable_size; + + /* Per-keyslot data */ + struct keyslot slots[]; +}; + +static inline bool keyslot_manager_is_passthrough(struct keyslot_manager *ksm) +{ + return ksm->num_slots == 0; +} + +/** + * keyslot_manager_create() - Create a keyslot manager + * @num_slots: The number of key slots to manage. + * @ksm_ll_ops: The struct keyslot_mgmt_ll_ops for the device that this keyslot + * manager will use to perform operations like programming and + * evicting keys. + * @crypto_mode_supported: Array of size BLK_ENCRYPTION_MODE_MAX of + * bitmasks that represents whether a crypto mode + * and data unit size are supported. The i'th bit + * of crypto_mode_supported[crypto_mode] is set iff + * a data unit size of (1 << i) is supported. We + * only support data unit sizes that are powers of + * 2. + * @ll_priv_data: Private data passed as is to the functions in ksm_ll_ops. + * + * Allocate memory for and initialize a keyslot manager. Called by e.g. 
+ * storage drivers to set up a keyslot manager in their request_queue. + * + * Context: May sleep + * Return: Pointer to constructed keyslot manager or NULL on error. + */ +struct keyslot_manager *keyslot_manager_create(unsigned int num_slots, + const struct keyslot_mgmt_ll_ops *ksm_ll_ops, + const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], + void *ll_priv_data) +{ + struct keyslot_manager *ksm; + unsigned int slot; + unsigned int i; + + if (num_slots == 0) + return NULL; + + /* Check that all ops are specified */ + if (ksm_ll_ops->keyslot_program == NULL || + ksm_ll_ops->keyslot_evict == NULL) + return NULL; + + ksm = kvzalloc(struct_size(ksm, slots, num_slots), GFP_KERNEL); + if (!ksm) + return NULL; + + ksm->num_slots = num_slots; + ksm->ksm_ll_ops = *ksm_ll_ops; + memcpy(ksm->crypto_mode_supported, crypto_mode_supported, + sizeof(ksm->crypto_mode_supported)); + ksm->ll_priv_data = ll_priv_data; + + init_rwsem(&ksm->lock); + + init_waitqueue_head(&ksm->idle_slots_wait_queue); + INIT_LIST_HEAD(&ksm->idle_slots); + + for (slot = 0; slot < num_slots; slot++) { + list_add_tail(&ksm->slots[slot].idle_slot_node, + &ksm->idle_slots); + } + + spin_lock_init(&ksm->idle_slots_lock); + + ksm->slot_hashtable_size = roundup_pow_of_two(num_slots); + ksm->slot_hashtable = kvmalloc_array(ksm->slot_hashtable_size, + sizeof(ksm->slot_hashtable[0]), + GFP_KERNEL); + if (!ksm->slot_hashtable) + goto err_free_ksm; + for (i = 0; i < ksm->slot_hashtable_size; i++) + INIT_HLIST_HEAD(&ksm->slot_hashtable[i]); + + return ksm; + +err_free_ksm: + keyslot_manager_destroy(ksm); + return NULL; +} +EXPORT_SYMBOL_GPL(keyslot_manager_create); + +static inline struct hlist_head * +hash_bucket_for_key(struct keyslot_manager *ksm, + const struct blk_crypto_key *key) +{ + return &ksm->slot_hashtable[key->hash & (ksm->slot_hashtable_size - 1)]; +} + +static void remove_slot_from_lru_list(struct keyslot_manager *ksm, int slot) +{ + unsigned long flags; + + 
spin_lock_irqsave(&ksm->idle_slots_lock, flags); + list_del(&ksm->slots[slot].idle_slot_node); + spin_unlock_irqrestore(&ksm->idle_slots_lock, flags); +} + +static int find_keyslot(struct keyslot_manager *ksm, + const struct blk_crypto_key *key) +{ + const struct hlist_head *head = hash_bucket_for_key(ksm, key); + const struct keyslot *slotp; + + hlist_for_each_entry(slotp, head, hash_node) { + if (slotp->key.hash == key->hash && + slotp->key.crypto_mode == key->crypto_mode && + slotp->key.size == key->size && + slotp->key.data_unit_size == key->data_unit_size && + !crypto_memneq(slotp->key.raw, key->raw, key->size)) + return slotp - ksm->slots; + } + return -ENOKEY; +} + +static int find_and_grab_keyslot(struct keyslot_manager *ksm, + const struct blk_crypto_key *key) +{ + int slot; + + slot = find_keyslot(ksm, key); + if (slot < 0) + return slot; + if (atomic_inc_return(&ksm->slots[slot].slot_refs) == 1) { + /* Took first reference to this slot; remove it from LRU list */ + remove_slot_from_lru_list(ksm, slot); + } + return slot; +} + +/** + * keyslot_manager_get_slot_for_key() - Program a key into a keyslot. + * @ksm: The keyslot manager to program the key into. + * @key: Pointer to the key object to program, including the raw key, crypto + * mode, and data unit size. + * + * Get a keyslot that's been programmed with the specified key. If one already + * exists, return it with incremented refcount. Otherwise, wait for a keyslot + * to become idle and program it. + * + * Context: Process context. Takes and releases ksm->lock. + * Return: The keyslot on success, else a -errno value. 
+ */ +int keyslot_manager_get_slot_for_key(struct keyslot_manager *ksm, + const struct blk_crypto_key *key) +{ + int slot; + int err; + struct keyslot *idle_slot; + + if (keyslot_manager_is_passthrough(ksm)) + return 0; + + down_read(&ksm->lock); + slot = find_and_grab_keyslot(ksm, key); + up_read(&ksm->lock); + if (slot != -ENOKEY) + return slot; + + for (;;) { + down_write(&ksm->lock); + slot = find_and_grab_keyslot(ksm, key); + if (slot != -ENOKEY) { + up_write(&ksm->lock); + return slot; + } + + /* + * If we're here, that means there wasn't a slot that was + * already programmed with the key. So try to program it. + */ + if (!list_empty(&ksm->idle_slots)) + break; + + up_write(&ksm->lock); + wait_event(ksm->idle_slots_wait_queue, + !list_empty(&ksm->idle_slots)); + } + + idle_slot = list_first_entry(&ksm->idle_slots, struct keyslot, + idle_slot_node); + slot = idle_slot - ksm->slots; + + err = ksm->ksm_ll_ops.keyslot_program(ksm, key, slot); + if (err) { + wake_up(&ksm->idle_slots_wait_queue); + up_write(&ksm->lock); + return err; + } + + /* Move this slot to the hash list for the new key. */ + if (idle_slot->key.crypto_mode != BLK_ENCRYPTION_MODE_INVALID) + hlist_del(&idle_slot->hash_node); + hlist_add_head(&idle_slot->hash_node, hash_bucket_for_key(ksm, key)); + + atomic_set(&idle_slot->slot_refs, 1); + idle_slot->key = *key; + + remove_slot_from_lru_list(ksm, slot); + + up_write(&ksm->lock); + return slot; +} + +/** + * keyslot_manager_get_slot() - Increment the refcount on the specified slot. + * @ksm: The keyslot manager that we want to modify. + * @slot: The slot to increment the refcount of. + * + * This function assumes that there is already an active reference to that slot + * and simply increments the refcount. This is useful when cloning a bio that + * already has a reference to a keyslot, and we want the cloned bio to also have + * its own reference. + * + * Context: Any context. 
+ */ +void keyslot_manager_get_slot(struct keyslot_manager *ksm, unsigned int slot) +{ + if (keyslot_manager_is_passthrough(ksm)) + return; + + if (WARN_ON(slot >= ksm->num_slots)) + return; + + WARN_ON(atomic_inc_return(&ksm->slots[slot].slot_refs) < 2); +} + +/** + * keyslot_manager_put_slot() - Release a reference to a slot + * @ksm: The keyslot manager to release the reference from. + * @slot: The slot to release the reference from. + * + * Context: Any context. + */ +void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot) +{ + unsigned long flags; + + if (keyslot_manager_is_passthrough(ksm)) + return; + + if (WARN_ON(slot >= ksm->num_slots)) + return; + + if (atomic_dec_and_lock_irqsave(&ksm->slots[slot].slot_refs, + &ksm->idle_slots_lock, flags)) { + list_add_tail(&ksm->slots[slot].idle_slot_node, + &ksm->idle_slots); + spin_unlock_irqrestore(&ksm->idle_slots_lock, flags); + wake_up(&ksm->idle_slots_wait_queue); + } +} + +/** + * keyslot_manager_crypto_mode_supported() - Find out if a crypto_mode/data + * unit size combination is supported + * by a ksm. + * @ksm: The keyslot manager to check + * @crypto_mode: The crypto mode to check for. + * @data_unit_size: The data_unit_size for the mode. + * + * Calls and returns the result of the crypto_mode_supported function specified + * by the ksm. + * + * Context: Process context. + * Return: Whether or not this ksm supports the specified crypto_mode/ + * data_unit_size combo. + */ +bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, + enum blk_crypto_mode_num crypto_mode, + unsigned int data_unit_size) +{ + if (!ksm) + return false; + if (WARN_ON(crypto_mode >= BLK_ENCRYPTION_MODE_MAX)) + return false; + if (WARN_ON(!is_power_of_2(data_unit_size))) + return false; + return ksm->crypto_mode_supported[crypto_mode] & data_unit_size; +} + +/** + * keyslot_manager_evict_key() - Evict a key from the lower layer device. 
+ * @ksm: The keyslot manager to evict from + * @key: The key to evict + * + * Find the keyslot that the specified key was programmed into, and evict that + * slot from the lower layer device if that slot is not currently in use. + * + * Context: Process context. Takes and releases ksm->lock. + * Return: 0 on success, -EBUSY if the key is still in use, or another + * -errno value on other error. + */ +int keyslot_manager_evict_key(struct keyslot_manager *ksm, + const struct blk_crypto_key *key) +{ + int slot; + int err; + struct keyslot *slotp; + + if (keyslot_manager_is_passthrough(ksm)) { + if (ksm->ksm_ll_ops.keyslot_evict) { + down_write(&ksm->lock); + err = ksm->ksm_ll_ops.keyslot_evict(ksm, key, -1); + up_write(&ksm->lock); + return err; + } + return 0; + } + + down_write(&ksm->lock); + slot = find_keyslot(ksm, key); + if (slot < 0) { + err = slot; + goto out_unlock; + } + slotp = &ksm->slots[slot]; + + if (atomic_read(&slotp->slot_refs) != 0) { + err = -EBUSY; + goto out_unlock; + } + err = ksm->ksm_ll_ops.keyslot_evict(ksm, key, slot); + if (err) + goto out_unlock; + + hlist_del(&slotp->hash_node); + memzero_explicit(&slotp->key, sizeof(slotp->key)); + err = 0; +out_unlock: + up_write(&ksm->lock); + return err; +} + +/** + * keyslot_manager_reprogram_all_keys() - Re-program all keyslots. + * @ksm: The keyslot manager + * + * Re-program all keyslots that are supposed to have a key programmed. This is + * intended only for use by drivers for hardware that loses its keys on reset. + * + * Context: Process context. Takes and releases ksm->lock. 
+ */ +void keyslot_manager_reprogram_all_keys(struct keyslot_manager *ksm) +{ + unsigned int slot; + + if (WARN_ON(keyslot_manager_is_passthrough(ksm))) + return; + + down_write(&ksm->lock); + for (slot = 0; slot < ksm->num_slots; slot++) { + const struct keyslot *slotp = &ksm->slots[slot]; + int err; + + if (slotp->key.crypto_mode == BLK_ENCRYPTION_MODE_INVALID) + continue; + + err = ksm->ksm_ll_ops.keyslot_program(ksm, &slotp->key, slot); + WARN_ON(err); + } + up_write(&ksm->lock); +} +EXPORT_SYMBOL_GPL(keyslot_manager_reprogram_all_keys); + +/** + * keyslot_manager_private() - return the private data stored with ksm + * @ksm: The keyslot manager + * + * Returns the private data passed to the ksm when it was created. + */ +void *keyslot_manager_private(struct keyslot_manager *ksm) +{ + return ksm->ll_priv_data; +} +EXPORT_SYMBOL_GPL(keyslot_manager_private); + +void keyslot_manager_destroy(struct keyslot_manager *ksm) +{ + if (ksm) { + kvfree(ksm->slot_hashtable); + memzero_explicit(ksm, struct_size(ksm, slots, ksm->num_slots)); + kvfree(ksm); + } +} +EXPORT_SYMBOL_GPL(keyslot_manager_destroy); + +/** + * keyslot_manager_create_passthrough() - Create a passthrough keyslot manager + * @ksm_ll_ops: The struct keyslot_mgmt_ll_ops + * @crypto_mode_supported: Bitmasks for supported encryption modes + * @ll_priv_data: Private data passed as is to the functions in ksm_ll_ops. + * + * Allocate memory for and initialize a passthrough keyslot manager. + * Called by e.g. storage drivers to set up a keyslot manager in their + * request_queue, when the storage driver wants to manage its keys by itself. + * This is useful for inline encryption hardware that don't have a small fixed + * number of keyslots, and for layered devices. + * + * See keyslot_manager_create() for more details about the parameters. + * + * Context: This function may sleep + * Return: Pointer to constructed keyslot manager or NULL on error. 
+ */ +struct keyslot_manager *keyslot_manager_create_passthrough( + const struct keyslot_mgmt_ll_ops *ksm_ll_ops, + const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], + void *ll_priv_data) +{ + struct keyslot_manager *ksm; + + ksm = kzalloc(sizeof(*ksm), GFP_KERNEL); + if (!ksm) + return NULL; + + ksm->ksm_ll_ops = *ksm_ll_ops; + memcpy(ksm->crypto_mode_supported, crypto_mode_supported, + sizeof(ksm->crypto_mode_supported)); + ksm->ll_priv_data = ll_priv_data; + + init_rwsem(&ksm->lock); + + return ksm; +} +EXPORT_SYMBOL_GPL(keyslot_manager_create_passthrough); + +/** + * keyslot_manager_intersect_modes() - restrict supported modes by child device + * @parent: The keyslot manager for parent device + * @child: The keyslot manager for child device, or NULL + * + * Clear any crypto mode support bits in @parent that aren't set in @child. + * If @child is NULL, then all parent bits are cleared. + * + * Only use this when setting up the keyslot manager for a layered device, + * before it's been exposed yet. + */ +void keyslot_manager_intersect_modes(struct keyslot_manager *parent, + const struct keyslot_manager *child) +{ + if (child) { + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(child->crypto_mode_supported); i++) { + parent->crypto_mode_supported[i] &= + child->crypto_mode_supported[i]; + } + } else { + memset(parent->crypto_mode_supported, 0, + sizeof(parent->crypto_mode_supported)); + } +} +EXPORT_SYMBOL_GPL(keyslot_manager_intersect_modes); + +/** + * keyslot_manager_derive_raw_secret() - Derive software secret from wrapped key + * @ksm: The keyslot manager + * @wrapped_key: The wrapped key + * @wrapped_key_size: Size of the wrapped key in bytes + * @secret: (output) the software secret + * @secret_size: (output) the number of secret bytes to derive + * + * Given a hardware-wrapped key, ask the hardware to derive a secret which + * software can use for cryptographic tasks other than inline encryption. 
The + * derived secret is guaranteed to be cryptographically isolated from the key + * with which any inline encryption with this wrapped key would actually be + * done. I.e., both will be derived from the unwrapped key. + * + * Return: 0 on success, -EOPNOTSUPP if hardware-wrapped keys are unsupported, + * or another -errno code. + */ +int keyslot_manager_derive_raw_secret(struct keyslot_manager *ksm, + const u8 *wrapped_key, + unsigned int wrapped_key_size, + u8 *secret, unsigned int secret_size) +{ + int err; + + down_write(&ksm->lock); + if (ksm->ksm_ll_ops.derive_raw_secret) { + err = ksm->ksm_ll_ops.derive_raw_secret(ksm, wrapped_key, + wrapped_key_size, + secret, secret_size); + } else { + err = -EOPNOTSUPP; + } + up_write(&ksm->lock); + + return err; +} +EXPORT_SYMBOL_GPL(keyslot_manager_derive_raw_secret); diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 747edadb39ae..f3d7db1cc828 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -286,6 +286,27 @@ config DM_CRYPT If unsure, say N. +config DM_DEFAULT_KEY + tristate "Default-key target support" + depends on BLK_DEV_DM + depends on BLK_INLINE_ENCRYPTION + # dm-default-key doesn't require -o inlinecrypt, but it does currently + # rely on the inline encryption hooks being built into the kernel. + depends on FS_ENCRYPTION_INLINE_CRYPT + help + This device-mapper target allows you to create a device that + assigns a default encryption key to bios that aren't for the + contents of an encrypted file. + + This ensures that all blocks on-disk will be encrypted with + some key, without the performance hit of file contents being + encrypted twice when fscrypt (File-Based Encryption) is used. + + It is only appropriate to use dm-default-key when key + configuration is tightly controlled, like it is in Android, + such that all fscrypt keys are at least as hard to compromise + as the default key. 
+ config DM_SNAPSHOT tristate "Snapshot target" depends on BLK_DEV_DM diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 27962abad668..1a03ebd1cee7 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o obj-$(CONFIG_DM_BUFIO) += dm-bufio.o obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o obj-$(CONFIG_DM_CRYPT) += dm-crypt.o +obj-$(CONFIG_DM_DEFAULT_KEY) += dm-default-key.o obj-$(CONFIG_DM_DELAY) += dm-delay.o obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o diff --git a/drivers/md/dm-bow.c b/drivers/md/dm-bow.c index 47289850c445..96ddba82ed24 100644 --- a/drivers/md/dm-bow.c +++ b/drivers/md/dm-bow.c @@ -789,6 +789,7 @@ static int dm_bow_ctr(struct dm_target *ti, unsigned int argc, char **argv) rb_insert_color(&br->node, &bc->ranges); ti->discards_supported = true; + ti->may_passthrough_inline_crypto = true; return 0; diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c new file mode 100644 index 000000000000..43a30c076aa6 --- /dev/null +++ b/drivers/md/dm-default-key.c @@ -0,0 +1,403 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2017 Google, Inc. + */ + +#include +#include +#include + +#define DM_MSG_PREFIX "default-key" + +#define DM_DEFAULT_KEY_MAX_KEY_SIZE 64 + +#define SECTOR_SIZE (1 << SECTOR_SHIFT) + +static const struct dm_default_key_cipher { + const char *name; + enum blk_crypto_mode_num mode_num; + int key_size; +} dm_default_key_ciphers[] = { + { + .name = "aes-xts-plain64", + .mode_num = BLK_ENCRYPTION_MODE_AES_256_XTS, + .key_size = 64, + }, { + .name = "xchacha12,aes-adiantum-plain64", + .mode_num = BLK_ENCRYPTION_MODE_ADIANTUM, + .key_size = 32, + }, +}; + +/** + * struct dm_default_c - private data of a default-key target + * @dev: the underlying device + * @start: starting sector of the range of @dev which this target actually maps. + * For this purpose a "sector" is 512 bytes. 
+ * @cipher_string: the name of the encryption algorithm being used + * @iv_offset: starting offset for IVs. IVs are generated as if the target were + * preceded by @iv_offset 512-byte sectors. + * @sector_size: crypto sector size in bytes (usually 4096) + * @sector_bits: log2(sector_size) + * @key: the encryption key to use + */ +struct default_key_c { + struct dm_dev *dev; + sector_t start; + const char *cipher_string; + u64 iv_offset; + unsigned int sector_size; + unsigned int sector_bits; + struct blk_crypto_key key; +}; + +static const struct dm_default_key_cipher * +lookup_cipher(const char *cipher_string) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dm_default_key_ciphers); i++) { + if (strcmp(cipher_string, dm_default_key_ciphers[i].name) == 0) + return &dm_default_key_ciphers[i]; + } + return NULL; +} + +static void default_key_dtr(struct dm_target *ti) +{ + struct default_key_c *dkc = ti->private; + int err; + + if (dkc->dev) { + err = blk_crypto_evict_key(dkc->dev->bdev->bd_queue, &dkc->key); + if (err && err != -ENOKEY) + DMWARN("Failed to evict crypto key: %d", err); + dm_put_device(ti, dkc->dev); + } + kzfree(dkc->cipher_string); + kzfree(dkc); +} + +static int default_key_ctr_optional(struct dm_target *ti, + unsigned int argc, char **argv) +{ + struct default_key_c *dkc = ti->private; + struct dm_arg_set as; + static const struct dm_arg _args[] = { + {0, 3, "Invalid number of feature args"}, + }; + unsigned int opt_params; + const char *opt_string; + bool iv_large_sectors = false; + char dummy; + int err; + + as.argc = argc; + as.argv = argv; + + err = dm_read_arg_group(_args, &as, &opt_params, &ti->error); + if (err) + return err; + + while (opt_params--) { + opt_string = dm_shift_arg(&as); + if (!opt_string) { + ti->error = "Not enough feature arguments"; + return -EINVAL; + } + if (!strcmp(opt_string, "allow_discards")) { + ti->num_discard_bios = 1; + } else if (sscanf(opt_string, "sector_size:%u%c", + &dkc->sector_size, &dummy) == 1) { + if 
(dkc->sector_size < SECTOR_SIZE || + dkc->sector_size > 4096 || + !is_power_of_2(dkc->sector_size)) { + ti->error = "Invalid sector_size"; + return -EINVAL; + } + } else if (!strcmp(opt_string, "iv_large_sectors")) { + iv_large_sectors = true; + } else { + ti->error = "Invalid feature arguments"; + return -EINVAL; + } + } + + /* dm-default-key doesn't implement iv_large_sectors=false. */ + if (dkc->sector_size != SECTOR_SIZE && !iv_large_sectors) { + ti->error = "iv_large_sectors must be specified"; + return -EINVAL; + } + + return 0; +} + +/* + * Construct a default-key mapping: + * + * + * This syntax matches dm-crypt's, but lots of unneeded functionality has been + * removed. Also, dm-default-key requires that the "iv_large_sectors" option be + * given whenever a non-default sector size is used. + */ +static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + struct default_key_c *dkc; + const struct dm_default_key_cipher *cipher; + u8 raw_key[DM_DEFAULT_KEY_MAX_KEY_SIZE]; + unsigned long long tmpll; + char dummy; + int err; + + if (argc < 5) { + ti->error = "Not enough arguments"; + return -EINVAL; + } + + dkc = kzalloc(sizeof(*dkc), GFP_KERNEL); + if (!dkc) { + ti->error = "Out of memory"; + return -ENOMEM; + } + ti->private = dkc; + + /* */ + dkc->cipher_string = kstrdup(argv[0], GFP_KERNEL); + if (!dkc->cipher_string) { + ti->error = "Out of memory"; + err = -ENOMEM; + goto bad; + } + cipher = lookup_cipher(dkc->cipher_string); + if (!cipher) { + ti->error = "Unsupported cipher"; + err = -EINVAL; + goto bad; + } + + /* */ + if (strlen(argv[1]) != 2 * cipher->key_size) { + ti->error = "Incorrect key size for cipher"; + err = -EINVAL; + goto bad; + } + if (hex2bin(raw_key, argv[1], cipher->key_size) != 0) { + ti->error = "Malformed key string"; + err = -EINVAL; + goto bad; + } + + /* */ + if (sscanf(argv[2], "%llu%c", &dkc->iv_offset, &dummy) != 1) { + ti->error = "Invalid iv_offset sector"; + err = -EINVAL; + goto bad; + } + + /* */ 
+ err = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), + &dkc->dev); + if (err) { + ti->error = "Device lookup failed"; + goto bad; + } + + /* */ + if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || + tmpll != (sector_t)tmpll) { + ti->error = "Invalid start sector"; + err = -EINVAL; + goto bad; + } + dkc->start = tmpll; + + /* optional arguments */ + dkc->sector_size = SECTOR_SIZE; + if (argc > 5) { + err = default_key_ctr_optional(ti, argc - 5, &argv[5]); + if (err) + goto bad; + } + dkc->sector_bits = ilog2(dkc->sector_size); + if (ti->len & ((dkc->sector_size >> SECTOR_SHIFT) - 1)) { + ti->error = "Device size is not a multiple of sector_size"; + err = -EINVAL; + goto bad; + } + + err = blk_crypto_init_key(&dkc->key, raw_key, cipher->key_size, + cipher->mode_num, dkc->sector_size); + if (err) { + ti->error = "Error initializing blk-crypto key"; + goto bad; + } + + err = blk_crypto_start_using_mode(cipher->mode_num, dkc->sector_size, + dkc->dev->bdev->bd_queue); + if (err) { + ti->error = "Error starting to use blk-crypto"; + goto bad; + } + + ti->num_flush_bios = 1; + + ti->may_passthrough_inline_crypto = true; + + err = 0; + goto out; + +bad: + default_key_dtr(ti); +out: + memzero_explicit(raw_key, sizeof(raw_key)); + return err; +} + +static int default_key_map(struct dm_target *ti, struct bio *bio) +{ + const struct default_key_c *dkc = ti->private; + sector_t sector_in_target; + u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { 0 }; + + bio_set_dev(bio, dkc->dev->bdev); + + /* + * If the bio is a device-level request which doesn't target a specific + * sector, there's nothing more to do. + */ + if (bio_sectors(bio) == 0) + return DM_MAPIO_REMAPPED; + + /* Map the bio's sector to the underlying device. (512-byte sectors) */ + sector_in_target = dm_target_offset(ti, bio->bi_iter.bi_sector); + bio->bi_iter.bi_sector = dkc->start + sector_in_target; + + /* + * If the bio should skip dm-default-key (i.e. 
if it's for an encrypted + * file's contents), or if it doesn't have any data (e.g. if it's a + * DISCARD request), there's nothing more to do. + */ + if (bio_should_skip_dm_default_key(bio) || !bio_has_data(bio)) + return DM_MAPIO_REMAPPED; + + /* + * Else, dm-default-key needs to set this bio's encryption context. + * It must not already have one. + */ + if (WARN_ON_ONCE(bio_has_crypt_ctx(bio))) + return DM_MAPIO_KILL; + + /* Calculate the DUN and enforce data-unit (crypto sector) alignment. */ + dun[0] = dkc->iv_offset + sector_in_target; /* 512-byte sectors */ + if (dun[0] & ((dkc->sector_size >> SECTOR_SHIFT) - 1)) + return DM_MAPIO_KILL; + dun[0] >>= dkc->sector_bits - SECTOR_SHIFT; /* crypto sectors */ + + bio_crypt_set_ctx(bio, &dkc->key, dun, GFP_NOIO); + + return DM_MAPIO_REMAPPED; +} + +static void default_key_status(struct dm_target *ti, status_type_t type, + unsigned int status_flags, char *result, + unsigned int maxlen) +{ + const struct default_key_c *dkc = ti->private; + unsigned int sz = 0; + int num_feature_args = 0; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + /* Omit the key for now. */ + DMEMIT("%s - %llu %s %llu", dkc->cipher_string, dkc->iv_offset, + dkc->dev->name, (unsigned long long)dkc->start); + + num_feature_args += !!ti->num_discard_bios; + if (dkc->sector_size != SECTOR_SIZE) + num_feature_args += 2; + if (num_feature_args != 0) { + DMEMIT(" %d", num_feature_args); + if (ti->num_discard_bios) + DMEMIT(" allow_discards"); + if (dkc->sector_size != SECTOR_SIZE) { + DMEMIT(" sector_size:%u", dkc->sector_size); + DMEMIT(" iv_large_sectors"); + } + } + break; + } +} + +static int default_key_prepare_ioctl(struct dm_target *ti, + struct block_device **bdev, + fmode_t *mode) +{ + const struct default_key_c *dkc = ti->private; + const struct dm_dev *dev = dkc->dev; + + *bdev = dev->bdev; + + /* Only pass ioctls through if the device sizes match exactly. 
*/ + if (dkc->start != 0 || + ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) + return 1; + return 0; +} + +static int default_key_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, + void *data) +{ + const struct default_key_c *dkc = ti->private; + + return fn(ti, dkc->dev, dkc->start, ti->len, data); +} + +static void default_key_io_hints(struct dm_target *ti, + struct queue_limits *limits) +{ + const struct default_key_c *dkc = ti->private; + const unsigned int sector_size = dkc->sector_size; + + limits->logical_block_size = + max_t(unsigned short, limits->logical_block_size, sector_size); + limits->physical_block_size = + max_t(unsigned int, limits->physical_block_size, sector_size); + limits->io_min = max_t(unsigned int, limits->io_min, sector_size); +} + +static struct target_type default_key_target = { + .name = "default-key", + .version = {2, 0, 0}, + .module = THIS_MODULE, + .ctr = default_key_ctr, + .dtr = default_key_dtr, + .map = default_key_map, + .status = default_key_status, + .prepare_ioctl = default_key_prepare_ioctl, + .iterate_devices = default_key_iterate_devices, + .io_hints = default_key_io_hints, +}; + +static int __init dm_default_key_init(void) +{ + return dm_register_target(&default_key_target); +} + +static void __exit dm_default_key_exit(void) +{ + dm_unregister_target(&default_key_target); +} + +module_init(dm_default_key_init); +module_exit(dm_default_key_exit); + +MODULE_AUTHOR("Paul Lawrence "); +MODULE_AUTHOR("Paul Crowley "); +MODULE_AUTHOR("Eric Biggers "); +MODULE_DESCRIPTION(DM_NAME " target for encrypting filesystem metadata"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index c06517031592..d1fbf3d8b4cc 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -61,6 +61,7 @@ int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->num_discard_bios = 1; ti->num_write_same_bios = 1; ti->num_write_zeroes_bios = 1; + 
ti->may_passthrough_inline_crypto = true; ti->private = lc; return 0; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 852350e3cfe7..f96075563620 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include #define DM_MSG_PREFIX "table" @@ -1597,6 +1599,54 @@ static void dm_table_verify_integrity(struct dm_table *t) } } +#ifdef CONFIG_BLK_INLINE_ENCRYPTION +static int device_intersect_crypto_modes(struct dm_target *ti, + struct dm_dev *dev, sector_t start, + sector_t len, void *data) +{ + struct keyslot_manager *parent = data; + struct keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm; + + keyslot_manager_intersect_modes(parent, child); + return 0; +} + +/* + * Update the inline crypto modes supported by 'q->ksm' to be the intersection + * of the modes supported by all targets in the table. + * + * For any mode to be supported at all, all targets must have explicitly + * declared that they can pass through inline crypto support. For a particular + * mode to be supported, all underlying devices must also support it. + * + * Assume that 'q->ksm' initially declares all modes to be supported. 
+ */ +static void dm_calculate_supported_crypto_modes(struct dm_table *t, + struct request_queue *q) +{ + struct dm_target *ti; + unsigned int i; + + for (i = 0; i < dm_table_get_num_targets(t); i++) { + ti = dm_table_get_target(t, i); + + if (!ti->may_passthrough_inline_crypto) { + keyslot_manager_intersect_modes(q->ksm, NULL); + return; + } + if (!ti->type->iterate_devices) + continue; + ti->type->iterate_devices(ti, device_intersect_crypto_modes, + q->ksm); + } +} +#else /* CONFIG_BLK_INLINE_ENCRYPTION */ +static inline void dm_calculate_supported_crypto_modes(struct dm_table *t, + struct request_queue *q) +{ +} +#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ + static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { @@ -1871,6 +1921,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, dm_table_verify_integrity(t); + dm_calculate_supported_crypto_modes(t, q); + /* * Some devices don't use blk_integrity but still want stable pages * because they do their own checksumming. 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 02ba6849f89d..0189f70e87a0 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -24,6 +24,8 @@ #include #include #include +#include +#include #define DM_MSG_PREFIX "core" @@ -1249,9 +1251,10 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio, __bio_clone_fast(clone, bio); + bio_crypt_clone(clone, bio, GFP_NOIO); + if (unlikely(bio_integrity(bio) != NULL)) { int r; - if (unlikely(!dm_target_has_integrity(tio->ti->type) && !dm_target_passes_integrity(tio->ti->type))) { DMWARN("%s: the target %s doesn't support integrity data.", @@ -1661,6 +1664,8 @@ void dm_init_normal_md_queue(struct mapped_device *md) md->queue->backing_dev_info->congested_fn = dm_any_congested; } +static void dm_destroy_inline_encryption(struct request_queue *q); + static void cleanup_mapped_device(struct mapped_device *md) { if (md->wq) @@ -1685,8 +1690,10 @@ static void cleanup_mapped_device(struct mapped_device *md) put_disk(md->disk); } - if (md->queue) + if (md->queue) { + dm_destroy_inline_encryption(md->queue); blk_cleanup_queue(md->queue); + } cleanup_srcu_struct(&md->io_barrier); @@ -2035,6 +2042,89 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md) } EXPORT_SYMBOL_GPL(dm_get_queue_limits); +#ifdef CONFIG_BLK_INLINE_ENCRYPTION +struct dm_keyslot_evict_args { + const struct blk_crypto_key *key; + int err; +}; + +static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct dm_keyslot_evict_args *args = data; + int err; + + err = blk_crypto_evict_key(dev->bdev->bd_queue, args->key); + if (!args->err) + args->err = err; + /* Always try to evict the key from all devices. */ + return 0; +} + +/* + * When an inline encryption key is evicted from a device-mapper device, evict + * it from all the underlying devices. 
+ */ +static int dm_keyslot_evict(struct keyslot_manager *ksm, + const struct blk_crypto_key *key, unsigned int slot) +{ + struct mapped_device *md = keyslot_manager_private(ksm); + struct dm_keyslot_evict_args args = { key }; + struct dm_table *t; + int srcu_idx; + int i; + struct dm_target *ti; + + t = dm_get_live_table(md, &srcu_idx); + if (!t) + return 0; + for (i = 0; i < dm_table_get_num_targets(t); i++) { + ti = dm_table_get_target(t, i); + if (!ti->type->iterate_devices) + continue; + ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args); + } + dm_put_live_table(md, srcu_idx); + return args.err; +} + +static struct keyslot_mgmt_ll_ops dm_ksm_ll_ops = { + .keyslot_evict = dm_keyslot_evict, +}; + +static int dm_init_inline_encryption(struct mapped_device *md) +{ + unsigned int mode_masks[BLK_ENCRYPTION_MODE_MAX]; + + /* + * Start out with all crypto mode support bits set. Any unsupported + * bits will be cleared later when calculating the device restrictions. + */ + memset(mode_masks, 0xFF, sizeof(mode_masks)); + + md->queue->ksm = keyslot_manager_create_passthrough(&dm_ksm_ll_ops, + mode_masks, md); + if (!md->queue->ksm) + return -ENOMEM; + return 0; +} + +static void dm_destroy_inline_encryption(struct request_queue *q) +{ + keyslot_manager_destroy(q->ksm); + q->ksm = NULL; +} +#else /* CONFIG_BLK_INLINE_ENCRYPTION */ +static inline int dm_init_inline_encryption(struct mapped_device *md) +{ + return 0; +} + +static inline void dm_destroy_inline_encryption(struct request_queue *q) +{ +} +#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ + /* * Setup the DM device's queue based on md's type */ @@ -2073,6 +2163,12 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) break; } + r = dm_init_inline_encryption(md); + if (r) { + DMERR("Cannot initialize inline encryption"); + return r; + } + return 0; } diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 8d4ef369aa15..e63ed53620d7 100644 --- a/drivers/scsi/ufs/Kconfig +++ 
b/drivers/scsi/ufs/Kconfig @@ -122,3 +122,12 @@ config SCSI_UFSHCD_CMD_LOGGING Select this if you want above mentioned debug information captured. If unsure, say N. + +config SCSI_UFS_CRYPTO + bool "UFS Crypto Engine Support" + depends on SCSI_UFSHCD && BLK_INLINE_ENCRYPTION + help + Enable Crypto Engine Support in UFS. + Enabling this makes it possible for the kernel to use the crypto + capabilities of the UFS device (if present) to perform crypto + operations on data being transferred to/from the device. diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index bf374ee1f6e2..93a2e1a10335 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -3,8 +3,10 @@ obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o -obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o +obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o +ufshcd-core-y := ufshcd.o obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o obj-$(CONFIG_SCSI_UFS_TEST) += ufs_test.o obj-$(CONFIG_DEBUG_FS) += ufs-debugfs.o ufs-qcom-debugfs.o +ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index ff66f7c5893a..c93a6f5048d4 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1452,6 +1452,12 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) if (host->disable_lpm) hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; + /* + * Inline crypto is currently broken with ufs-qcom at least because the + * device tree doesn't include the crypto registers. There are likely + * to be other issues that will need to be addressed too. 
+ */ + //hba->quirks |= UFSHCD_QUIRK_BROKEN_CRYPTO; } static void ufs_qcom_set_caps(struct ufs_hba *hba) diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c new file mode 100644 index 000000000000..276b49ad13be --- /dev/null +++ b/drivers/scsi/ufs/ufshcd-crypto.c @@ -0,0 +1,499 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 Google LLC + */ + +#include +#include "ufshcd.h" +#include "ufshcd-crypto.h" + +static bool ufshcd_cap_idx_valid(struct ufs_hba *hba, unsigned int cap_idx) +{ + return cap_idx < hba->crypto_capabilities.num_crypto_cap; +} + +static u8 get_data_unit_size_mask(unsigned int data_unit_size) +{ + if (data_unit_size < 512 || data_unit_size > 65536 || + !is_power_of_2(data_unit_size)) + return 0; + + return data_unit_size / 512; +} + +static size_t get_keysize_bytes(enum ufs_crypto_key_size size) +{ + switch (size) { + case UFS_CRYPTO_KEY_SIZE_128: + return 16; + case UFS_CRYPTO_KEY_SIZE_192: + return 24; + case UFS_CRYPTO_KEY_SIZE_256: + return 32; + case UFS_CRYPTO_KEY_SIZE_512: + return 64; + default: + return 0; + } +} + +int ufshcd_crypto_cap_find(struct ufs_hba *hba, + enum blk_crypto_mode_num crypto_mode, + unsigned int data_unit_size) +{ + enum ufs_crypto_alg ufs_alg; + u8 data_unit_mask; + int cap_idx; + enum ufs_crypto_key_size ufs_key_size; + union ufs_crypto_cap_entry *ccap_array = hba->crypto_cap_array; + + if (!ufshcd_hba_is_crypto_supported(hba)) + return -EINVAL; + + switch (crypto_mode) { + case BLK_ENCRYPTION_MODE_AES_256_XTS: + ufs_alg = UFS_CRYPTO_ALG_AES_XTS; + ufs_key_size = UFS_CRYPTO_KEY_SIZE_256; + break; + default: + return -EINVAL; + } + + data_unit_mask = get_data_unit_size_mask(data_unit_size); + + for (cap_idx = 0; cap_idx < hba->crypto_capabilities.num_crypto_cap; + cap_idx++) { + if (ccap_array[cap_idx].algorithm_id == ufs_alg && + (ccap_array[cap_idx].sdus_mask & data_unit_mask) && + ccap_array[cap_idx].key_size == ufs_key_size) + return cap_idx; + } + + return -EINVAL; +} 
+EXPORT_SYMBOL(ufshcd_crypto_cap_find); + +/** + * ufshcd_crypto_cfg_entry_write_key - Write a key into a crypto_cfg_entry + * + * Writes the key with the appropriate format - for AES_XTS, + * the first half of the key is copied as is, the second half is + * copied with an offset halfway into the cfg->crypto_key array. + * For the other supported crypto algs, the key is just copied. + * + * @cfg: The crypto config to write to + * @key: The key to write + * @cap: The crypto capability (which specifies the crypto alg and key size) + * + * Returns 0 on success, or -EINVAL + */ +static int ufshcd_crypto_cfg_entry_write_key(union ufs_crypto_cfg_entry *cfg, + const u8 *key, + union ufs_crypto_cap_entry cap) +{ + size_t key_size_bytes = get_keysize_bytes(cap.key_size); + + if (key_size_bytes == 0) + return -EINVAL; + + switch (cap.algorithm_id) { + case UFS_CRYPTO_ALG_AES_XTS: + key_size_bytes *= 2; + if (key_size_bytes > UFS_CRYPTO_KEY_MAX_SIZE) + return -EINVAL; + + memcpy(cfg->crypto_key, key, key_size_bytes/2); + memcpy(cfg->crypto_key + UFS_CRYPTO_KEY_MAX_SIZE/2, + key + key_size_bytes/2, key_size_bytes/2); + return 0; + case UFS_CRYPTO_ALG_BITLOCKER_AES_CBC: + /* fall through */ + case UFS_CRYPTO_ALG_AES_ECB: + /* fall through */ + case UFS_CRYPTO_ALG_ESSIV_AES_CBC: + memcpy(cfg->crypto_key, key, key_size_bytes); + return 0; + } + + return -EINVAL; +} + +static int ufshcd_program_key(struct ufs_hba *hba, + const union ufs_crypto_cfg_entry *cfg, int slot) +{ + int i; + u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg); + int err; + + pm_runtime_get_sync(hba->dev); + ufshcd_hold(hba, false); + + if (hba->vops->program_key) { + err = hba->vops->program_key(hba, cfg, slot); + goto out; + } + + /* Clear the dword 16 */ + ufshcd_writel(hba, 0, slot_offset + 16 * sizeof(cfg->reg_val[0])); + /* Ensure that CFGE is cleared before programming the key */ + wmb(); + for (i = 0; i < 16; i++) { + ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[i]), + slot_offset + i * 
sizeof(cfg->reg_val[0])); + /* Spec says each dword in key must be written sequentially */ + wmb(); + } + /* Write dword 17 */ + ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[17]), + slot_offset + 17 * sizeof(cfg->reg_val[0])); + /* Dword 16 must be written last */ + wmb(); + /* Write dword 16 */ + ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[16]), + slot_offset + 16 * sizeof(cfg->reg_val[0])); + wmb(); + err = 0; +out: + ufshcd_release(hba); + pm_runtime_put_sync(hba->dev); + return err; +} + +static void ufshcd_clear_keyslot(struct ufs_hba *hba, int slot) +{ + union ufs_crypto_cfg_entry cfg = { 0 }; + int err; + + err = ufshcd_program_key(hba, &cfg, slot); + WARN_ON_ONCE(err); +} + +/* Clear all keyslots at driver init time */ +static void ufshcd_clear_all_keyslots(struct ufs_hba *hba) +{ + int slot; + + for (slot = 0; slot < ufshcd_num_keyslots(hba); slot++) + ufshcd_clear_keyslot(hba, slot); +} + +static int ufshcd_crypto_keyslot_program(struct keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot) +{ + struct ufs_hba *hba = keyslot_manager_private(ksm); + int err = 0; + u8 data_unit_mask; + union ufs_crypto_cfg_entry cfg; + int cap_idx; + + cap_idx = ufshcd_crypto_cap_find(hba, key->crypto_mode, + key->data_unit_size); + + if (!ufshcd_is_crypto_enabled(hba) || + !ufshcd_keyslot_valid(hba, slot) || + !ufshcd_cap_idx_valid(hba, cap_idx)) + return -EINVAL; + + data_unit_mask = get_data_unit_size_mask(key->data_unit_size); + + if (!(data_unit_mask & hba->crypto_cap_array[cap_idx].sdus_mask)) + return -EINVAL; + + memset(&cfg, 0, sizeof(cfg)); + cfg.data_unit_size = data_unit_mask; + cfg.crypto_cap_idx = cap_idx; + cfg.config_enable |= UFS_CRYPTO_CONFIGURATION_ENABLE; + + err = ufshcd_crypto_cfg_entry_write_key(&cfg, key->raw, + hba->crypto_cap_array[cap_idx]); + if (err) + return err; + + err = ufshcd_program_key(hba, &cfg, slot); + + memzero_explicit(&cfg, sizeof(cfg)); + + return err; +} + +static int ufshcd_crypto_keyslot_evict(struct 
keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot) +{ + struct ufs_hba *hba = keyslot_manager_private(ksm); + + if (!ufshcd_is_crypto_enabled(hba) || + !ufshcd_keyslot_valid(hba, slot)) + return -EINVAL; + + /* + * Clear the crypto cfg on the device. Clearing CFGE + * might not be sufficient, so just clear the entire cfg. + */ + ufshcd_clear_keyslot(hba, slot); + + return 0; +} + +/* Functions implementing UFSHCI v2.1 specification behaviour */ +void ufshcd_crypto_enable_spec(struct ufs_hba *hba) +{ + if (!ufshcd_hba_is_crypto_supported(hba)) + return; + + hba->caps |= UFSHCD_CAP_CRYPTO; + + /* Reset might clear all keys, so reprogram all the keys. */ + keyslot_manager_reprogram_all_keys(hba->ksm); +} +EXPORT_SYMBOL_GPL(ufshcd_crypto_enable_spec); + +void ufshcd_crypto_disable_spec(struct ufs_hba *hba) +{ + hba->caps &= ~UFSHCD_CAP_CRYPTO; +} +EXPORT_SYMBOL_GPL(ufshcd_crypto_disable_spec); + +static const struct keyslot_mgmt_ll_ops ufshcd_ksm_ops = { + .keyslot_program = ufshcd_crypto_keyslot_program, + .keyslot_evict = ufshcd_crypto_keyslot_evict, +}; + +enum blk_crypto_mode_num ufshcd_blk_crypto_mode_num_for_alg_dusize( + enum ufs_crypto_alg ufs_crypto_alg, + enum ufs_crypto_key_size key_size) +{ + /* + * This is currently the only mode that UFS and blk-crypto both support. + */ + if (ufs_crypto_alg == UFS_CRYPTO_ALG_AES_XTS && + key_size == UFS_CRYPTO_KEY_SIZE_256) + return BLK_ENCRYPTION_MODE_AES_256_XTS; + + return BLK_ENCRYPTION_MODE_INVALID; +} + +/** + * ufshcd_hba_init_crypto - Read crypto capabilities, init crypto fields in hba + * @hba: Per adapter instance + * + * Return: 0 if crypto was initialized or is not supported, else a -errno value. 
+ */ +int ufshcd_hba_init_crypto_spec(struct ufs_hba *hba, + const struct keyslot_mgmt_ll_ops *ksm_ops) +{ + int cap_idx = 0; + int err = 0; + unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX]; + enum blk_crypto_mode_num blk_mode_num; + + /* Default to disabling crypto */ + hba->caps &= ~UFSHCD_CAP_CRYPTO; + + /* Return 0 if crypto support isn't present */ + if (!(hba->capabilities & MASK_CRYPTO_SUPPORT) || + (hba->quirks & UFSHCD_QUIRK_BROKEN_CRYPTO)) + goto out; + + /* + * Crypto Capabilities should never be 0, because the + * config_array_ptr > 04h. So we use a 0 value to indicate that + * crypto init failed, and can't be enabled. + */ + hba->crypto_capabilities.reg_val = + cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP)); + hba->crypto_cfg_register = + (u32)hba->crypto_capabilities.config_array_ptr * 0x100; + hba->crypto_cap_array = + devm_kcalloc(hba->dev, + hba->crypto_capabilities.num_crypto_cap, + sizeof(hba->crypto_cap_array[0]), + GFP_KERNEL); + if (!hba->crypto_cap_array) { + err = -ENOMEM; + goto out; + } + + memset(crypto_modes_supported, 0, sizeof(crypto_modes_supported)); + /* + * Store all the capabilities now so that we don't need to repeatedly + * access the device each time we want to know its capabilities + */ + for (cap_idx = 0; cap_idx < hba->crypto_capabilities.num_crypto_cap; + cap_idx++) { + hba->crypto_cap_array[cap_idx].reg_val = + cpu_to_le32(ufshcd_readl(hba, + REG_UFS_CRYPTOCAP + + cap_idx * sizeof(__le32))); + blk_mode_num = ufshcd_blk_crypto_mode_num_for_alg_dusize( + hba->crypto_cap_array[cap_idx].algorithm_id, + hba->crypto_cap_array[cap_idx].key_size); + if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID) + continue; + crypto_modes_supported[blk_mode_num] |= + hba->crypto_cap_array[cap_idx].sdus_mask * 512; + } + + ufshcd_clear_all_keyslots(hba); + + hba->ksm = keyslot_manager_create(ufshcd_num_keyslots(hba), ksm_ops, + crypto_modes_supported, hba); + + if (!hba->ksm) { + err = -ENOMEM; + goto out_free_caps; + } + + return 0; 
+ +out_free_caps: + devm_kfree(hba->dev, hba->crypto_cap_array); +out: + /* Indicate that init failed by setting crypto_capabilities to 0 */ + hba->crypto_capabilities.reg_val = 0; + return err; +} +EXPORT_SYMBOL_GPL(ufshcd_hba_init_crypto_spec); + +void ufshcd_crypto_setup_rq_keyslot_manager_spec(struct ufs_hba *hba, + struct request_queue *q) +{ + if (!ufshcd_hba_is_crypto_supported(hba) || !q) + return; + + q->ksm = hba->ksm; +} +EXPORT_SYMBOL_GPL(ufshcd_crypto_setup_rq_keyslot_manager_spec); + +void ufshcd_crypto_destroy_rq_keyslot_manager_spec(struct ufs_hba *hba, + struct request_queue *q) +{ + keyslot_manager_destroy(hba->ksm); +} +EXPORT_SYMBOL_GPL(ufshcd_crypto_destroy_rq_keyslot_manager_spec); + +int ufshcd_prepare_lrbp_crypto_spec(struct ufs_hba *hba, + struct scsi_cmnd *cmd, + struct ufshcd_lrb *lrbp) +{ + struct bio_crypt_ctx *bc; + + if (!bio_crypt_should_process(cmd->request)) { + lrbp->crypto_enable = false; + return 0; + } + bc = cmd->request->bio->bi_crypt_context; + + if (WARN_ON(!ufshcd_is_crypto_enabled(hba))) { + /* + * Upper layer asked us to do inline encryption + * but that isn't enabled, so we fail this request. 
+ */ + return -EINVAL; + } + if (!ufshcd_keyslot_valid(hba, bc->bc_keyslot)) + return -EINVAL; + + lrbp->crypto_enable = true; + lrbp->crypto_key_slot = bc->bc_keyslot; + lrbp->data_unit_num = bc->bc_dun[0]; + + return 0; +} +EXPORT_SYMBOL_GPL(ufshcd_prepare_lrbp_crypto_spec); + +/* Crypto Variant Ops Support */ + +void ufshcd_crypto_enable(struct ufs_hba *hba) +{ + if (hba->crypto_vops && hba->crypto_vops->enable) + return hba->crypto_vops->enable(hba); + + return ufshcd_crypto_enable_spec(hba); +} + +void ufshcd_crypto_disable(struct ufs_hba *hba) +{ + if (hba->crypto_vops && hba->crypto_vops->disable) + return hba->crypto_vops->disable(hba); + + return ufshcd_crypto_disable_spec(hba); +} + +int ufshcd_hba_init_crypto(struct ufs_hba *hba) +{ + if (hba->crypto_vops && hba->crypto_vops->hba_init_crypto) + return hba->crypto_vops->hba_init_crypto(hba, + &ufshcd_ksm_ops); + + return ufshcd_hba_init_crypto_spec(hba, &ufshcd_ksm_ops); +} + +void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba, + struct request_queue *q) +{ + if (hba->crypto_vops && hba->crypto_vops->setup_rq_keyslot_manager) + return hba->crypto_vops->setup_rq_keyslot_manager(hba, q); + + return ufshcd_crypto_setup_rq_keyslot_manager_spec(hba, q); +} + +void ufshcd_crypto_destroy_rq_keyslot_manager(struct ufs_hba *hba, + struct request_queue *q) +{ + if (hba->crypto_vops && hba->crypto_vops->destroy_rq_keyslot_manager) + return hba->crypto_vops->destroy_rq_keyslot_manager(hba, q); + + return ufshcd_crypto_destroy_rq_keyslot_manager_spec(hba, q); +} + +int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba, + struct scsi_cmnd *cmd, + struct ufshcd_lrb *lrbp) +{ + if (hba->crypto_vops && hba->crypto_vops->prepare_lrbp_crypto) + return hba->crypto_vops->prepare_lrbp_crypto(hba, cmd, lrbp); + + return ufshcd_prepare_lrbp_crypto_spec(hba, cmd, lrbp); +} + +int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba, + struct scsi_cmnd *cmd, + struct ufshcd_lrb *lrbp) +{ + if (hba->crypto_vops && 
hba->crypto_vops->complete_lrbp_crypto) + return hba->crypto_vops->complete_lrbp_crypto(hba, cmd, lrbp); + + return 0; +} + +void ufshcd_crypto_debug(struct ufs_hba *hba) +{ + if (hba->crypto_vops && hba->crypto_vops->debug) + hba->crypto_vops->debug(hba); +} + +int ufshcd_crypto_suspend(struct ufs_hba *hba, + enum ufs_pm_op pm_op) +{ + if (hba->crypto_vops && hba->crypto_vops->suspend) + return hba->crypto_vops->suspend(hba, pm_op); + + return 0; +} + +int ufshcd_crypto_resume(struct ufs_hba *hba, + enum ufs_pm_op pm_op) +{ + if (hba->crypto_vops && hba->crypto_vops->resume) + return hba->crypto_vops->resume(hba, pm_op); + + return 0; +} + +void ufshcd_crypto_set_vops(struct ufs_hba *hba, + struct ufs_hba_crypto_variant_ops *crypto_vops) +{ + hba->crypto_vops = crypto_vops; +} diff --git a/drivers/scsi/ufs/ufshcd-crypto.h b/drivers/scsi/ufs/ufshcd-crypto.h new file mode 100644 index 000000000000..95f37c9f7672 --- /dev/null +++ b/drivers/scsi/ufs/ufshcd-crypto.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2019 Google LLC + */ + +#ifndef _UFSHCD_CRYPTO_H +#define _UFSHCD_CRYPTO_H + +#ifdef CONFIG_SCSI_UFS_CRYPTO +#include +#include "ufshcd.h" +#include "ufshci.h" + +static inline int ufshcd_num_keyslots(struct ufs_hba *hba) +{ + return hba->crypto_capabilities.config_count + 1; +} + +static inline bool ufshcd_keyslot_valid(struct ufs_hba *hba, unsigned int slot) +{ + /* + * The actual number of configurations supported is (CFGC+1), so slot + * numbers range from 0 to config_count inclusive. 
+ */ + return slot < ufshcd_num_keyslots(hba); +} + +static inline bool ufshcd_hba_is_crypto_supported(struct ufs_hba *hba) +{ + return hba->crypto_capabilities.reg_val != 0; +} + +static inline bool ufshcd_is_crypto_enabled(struct ufs_hba *hba) +{ + return hba->caps & UFSHCD_CAP_CRYPTO; +} + +/* Functions implementing UFSHCI v2.1 specification behaviour */ +int ufshcd_crypto_cap_find(struct ufs_hba *hba, + enum blk_crypto_mode_num crypto_mode, + unsigned int data_unit_size); + +int ufshcd_prepare_lrbp_crypto_spec(struct ufs_hba *hba, + struct scsi_cmnd *cmd, + struct ufshcd_lrb *lrbp); + +void ufshcd_crypto_enable_spec(struct ufs_hba *hba); + +void ufshcd_crypto_disable_spec(struct ufs_hba *hba); + +struct keyslot_mgmt_ll_ops; +int ufshcd_hba_init_crypto_spec(struct ufs_hba *hba, + const struct keyslot_mgmt_ll_ops *ksm_ops); + +void ufshcd_crypto_setup_rq_keyslot_manager_spec(struct ufs_hba *hba, + struct request_queue *q); + +void ufshcd_crypto_destroy_rq_keyslot_manager_spec(struct ufs_hba *hba, + struct request_queue *q); + +static inline bool ufshcd_lrbp_crypto_enabled(struct ufshcd_lrb *lrbp) +{ + return lrbp->crypto_enable; +} + +/* Crypto Variant Ops Support */ +void ufshcd_crypto_enable(struct ufs_hba *hba); + +void ufshcd_crypto_disable(struct ufs_hba *hba); + +int ufshcd_hba_init_crypto(struct ufs_hba *hba); + +void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba, + struct request_queue *q); + +void ufshcd_crypto_destroy_rq_keyslot_manager(struct ufs_hba *hba, + struct request_queue *q); + +int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba, + struct scsi_cmnd *cmd, + struct ufshcd_lrb *lrbp); + +int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba, + struct scsi_cmnd *cmd, + struct ufshcd_lrb *lrbp); + +void ufshcd_crypto_debug(struct ufs_hba *hba); + +int ufshcd_crypto_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op); + +int ufshcd_crypto_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op); + +void ufshcd_crypto_set_vops(struct ufs_hba *hba, 
+ struct ufs_hba_crypto_variant_ops *crypto_vops); + +#else /* CONFIG_SCSI_UFS_CRYPTO */ + +static inline bool ufshcd_keyslot_valid(struct ufs_hba *hba, + unsigned int slot) +{ + return false; +} + +static inline bool ufshcd_hba_is_crypto_supported(struct ufs_hba *hba) +{ + return false; +} + +static inline bool ufshcd_is_crypto_enabled(struct ufs_hba *hba) +{ + return false; +} + +static inline void ufshcd_crypto_enable(struct ufs_hba *hba) { } + +static inline void ufshcd_crypto_disable(struct ufs_hba *hba) { } + +static inline int ufshcd_hba_init_crypto(struct ufs_hba *hba) +{ + return 0; +} + +static inline void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba, + struct request_queue *q) { } + +static inline void ufshcd_crypto_destroy_rq_keyslot_manager(struct ufs_hba *hba, + struct request_queue *q) { } + +static inline int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba, + struct scsi_cmnd *cmd, + struct ufshcd_lrb *lrbp) +{ + return 0; +} + +static inline bool ufshcd_lrbp_crypto_enabled(struct ufshcd_lrb *lrbp) +{ + return false; +} + +static inline int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba, + struct scsi_cmnd *cmd, + struct ufshcd_lrb *lrbp) +{ + return 0; +} + +static inline void ufshcd_crypto_debug(struct ufs_hba *hba) { } + +static inline int ufshcd_crypto_suspend(struct ufs_hba *hba, + enum ufs_pm_op pm_op) +{ + return 0; +} + +static inline int ufshcd_crypto_resume(struct ufs_hba *hba, + enum ufs_pm_op pm_op) +{ + return 0; +} + +static inline void ufshcd_crypto_set_vops(struct ufs_hba *hba, + struct ufs_hba_crypto_variant_ops *crypto_vops) { } + +#endif /* CONFIG_SCSI_UFS_CRYPTO */ + +#endif /* _UFSHCD_CRYPTO_H */ diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index d125c70bfe72..55f2f1645c1f 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -197,6 +197,7 @@ static void ufshcd_update_uic_error_cnt(struct ufs_hba *hba, u32 reg, int type) break; } } +#include "ufshcd-crypto.h" #define 
CREATE_TRACE_POINTS #include @@ -918,6 +919,8 @@ static inline void __ufshcd_print_host_regs(struct ufs_hba *hba, bool no_sleep) static void ufshcd_print_host_regs(struct ufs_hba *hba) { __ufshcd_print_host_regs(hba, false); + + ufshcd_crypto_debug(hba); } static @@ -1409,6 +1412,11 @@ static inline void ufshcd_hba_start(struct ufs_hba *hba) { u32 val = CONTROLLER_ENABLE; + if (ufshcd_hba_is_crypto_supported(hba)) { + ufshcd_crypto_enable(hba); + val |= CRYPTO_GENERAL_ENABLE; + } + ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE); } @@ -3391,9 +3399,23 @@ static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, dword_0 |= UTP_REQ_DESC_INT_CMD; /* Transfer request descriptor header fields */ + if (ufshcd_lrbp_crypto_enabled(lrbp)) { +#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO) + dword_0 |= UTP_REQ_DESC_CRYPTO_ENABLE_CMD; + dword_0 |= lrbp->crypto_key_slot; + req_desc->header.dword_1 = + cpu_to_le32(lower_32_bits(lrbp->data_unit_num)); + req_desc->header.dword_3 = + cpu_to_le32(upper_32_bits(lrbp->data_unit_num)); +#endif /* CONFIG_SCSI_UFS_CRYPTO */ + } else { + /* dword_1 and dword_3 are reserved, hence they are set to 0 */ + req_desc->header.dword_1 = 0; + req_desc->header.dword_3 = 0; + } + req_desc->header.dword_0 = cpu_to_le32(dword_0); - /* dword_1 is reserved, hence it is set to 0 */ - req_desc->header.dword_1 = 0; + /* * assigning invalid value for command status. Controller * updates OCS on command completion, with the command @@ -3401,8 +3423,6 @@ static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, */ req_desc->header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS); - /* dword_3 is reserved, hence it is set to 0 */ - req_desc->header.dword_3 = 0; req_desc->prd_table_length = 0; @@ -3780,6 +3800,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) lrbp->task_tag = tag; lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? 
true : false; + + err = ufshcd_prepare_lrbp_crypto(hba, cmd, lrbp); + if (err) { + lrbp->cmd = NULL; + clear_bit_unlock(tag, &hba->lrb_in_use); + goto out; + } lrbp->req_abort_skip = false; err = ufshcd_comp_scsi_upiu(hba, lrbp); @@ -3843,6 +3870,9 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, lrbp->task_tag = tag; lrbp->lun = 0; /* device management cmd is not specific to any LUN */ lrbp->intr_cmd = true; /* No interrupt aggregation */ +#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO) + lrbp->crypto_enable = false; /* No crypto operations */ +#endif hba->dev_cmd.type = cmd_type; return ufshcd_comp_devman_upiu(hba, lrbp); @@ -5696,6 +5726,8 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep) { int err; + ufshcd_crypto_disable(hba); + ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, CONTROLLER_ENABLE, CONTROLLER_DISABLE, @@ -6067,8 +6099,8 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) */ static int ufshcd_slave_configure(struct scsi_device *sdev) { - struct request_queue *q = sdev->request_queue; struct ufs_hba *hba = shost_priv(sdev->host); + struct request_queue *q = sdev->request_queue; blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX); @@ -6080,6 +6112,7 @@ static int ufshcd_slave_configure(struct scsi_device *sdev) sdev->autosuspend_delay = UFSHCD_AUTO_SUSPEND_DELAY_MS; sdev->use_rpm_auto = 1; + ufshcd_crypto_setup_rq_keyslot_manager(hba, q); return 0; } @@ -6091,6 +6124,7 @@ static int ufshcd_slave_configure(struct scsi_device *sdev) static void ufshcd_slave_destroy(struct scsi_device *sdev) { struct ufs_hba *hba; + struct request_queue *q = sdev->request_queue; hba = shost_priv(sdev->host); /* Drop the reference as it won't be needed anymore */ @@ -6101,6 +6135,8 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev) hba->sdev_ufs_device = NULL; 
spin_unlock_irqrestore(hba->host->host_lock, flags); } + + ufshcd_crypto_destroy_rq_keyslot_manager(hba, q); } /** @@ -6376,6 +6412,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, clear_bit_unlock(index, &hba->lrb_in_use); lrbp->complete_time_stamp = ktime_get(); update_req_stats(hba, lrbp); + ufshcd_complete_lrbp_crypto(hba, cmd, lrbp); /* Mark completed command as NULL in LRB */ lrbp->cmd = NULL; hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL; @@ -10105,6 +10142,10 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) req_link_state = UIC_LINK_OFF_STATE; } + ret = ufshcd_crypto_suspend(hba, pm_op); + if (ret) + goto out; + /* * If we can't transition into any of the low power modes * just gate the clocks. @@ -10225,6 +10266,7 @@ enable_gating: hba->hibern8_on_idle.is_suspended = false; hba->clk_gating.is_suspended = false; ufshcd_release_all(hba); + ufshcd_crypto_resume(hba, pm_op); out: hba->pm_op_in_progress = 0; @@ -10248,9 +10290,11 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) { int ret; enum uic_link_state old_link_state; + enum ufs_dev_pwr_mode old_pwr_mode; hba->pm_op_in_progress = 1; old_link_state = hba->uic_link_state; + old_pwr_mode = hba->curr_dev_pwr_mode; ufshcd_hba_vreg_set_hpm(hba); /* Make sure clocks are enabled before accessing controller */ @@ -10327,6 +10371,10 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) goto set_old_link_state; } + ret = ufshcd_crypto_resume(hba, pm_op); + if (ret) + goto set_old_dev_pwr_mode; + if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) ufshcd_enable_auto_bkops(hba); else @@ -10347,6 +10395,9 @@ skip_dev_ops: ufshcd_release_all(hba); goto out; +set_old_dev_pwr_mode: + if (old_pwr_mode != hba->curr_dev_pwr_mode) + ufshcd_set_dev_pwr_mode(hba, old_pwr_mode); set_old_link_state: ufshcd_link_state_transition(hba, old_link_state, 0); if (ufshcd_is_link_hibern8(hba) && @@ -11177,6 +11228,12 @@ int ufshcd_init(struct ufs_hba *hba, void 
__iomem *mmio_base, unsigned int irq) if (hba->force_g4) hba->reinit_g4_rate_A = true; + /* Init crypto */ + err = ufshcd_hba_init_crypto(hba); + if (err) { + dev_err(hba->dev, "crypto setup failed\n"); + goto out_remove_scsi_host; + } /* Host controller enable */ err = ufshcd_hba_enable(hba); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index a51cc94ad603..790e2be33995 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -197,6 +197,9 @@ struct ufs_pm_lvl_states { * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation) * @issue_time_stamp: time stamp for debug purposes * @complete_time_stamp: time stamp for statistics + * @crypto_enable: whether or not the request needs inline crypto operations + * @crypto_key_slot: the key slot to use for inline crypto + * @data_unit_num: the data unit number for the first block for inline crypto * @req_abort_skip: skip request abort task flag */ struct ufshcd_lrb { @@ -221,6 +224,11 @@ struct ufshcd_lrb { bool intr_cmd; ktime_t issue_time_stamp; ktime_t complete_time_stamp; +#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO) + bool crypto_enable; + u8 crypto_key_slot; + u64 data_unit_num; +#endif /* CONFIG_SCSI_UFS_CRYPTO */ bool req_abort_skip; }; @@ -302,6 +310,8 @@ struct ufs_pwr_mode_info { struct ufs_pa_layer_attr info; }; +union ufs_crypto_cfg_entry; + /** * struct ufs_hba_variant_ops - variant specific callbacks * @init: called when the driver is initialized @@ -332,6 +342,7 @@ struct ufs_pwr_mode_info { * scale down * @set_bus_vote: called to vote for the required bus bandwidth * @phy_initialization: used to initialize phys + * @program_key: program an inline encryption key into a keyslot */ struct ufs_hba_variant_ops { int (*init)(struct ufs_hba *); @@ -367,6 +378,8 @@ struct ufs_hba_variant_ops { void (*add_debugfs)(struct ufs_hba *hba, struct dentry *root); void (*remove_debugfs)(struct ufs_hba *hba); #endif + int (*program_key)(struct ufs_hba *hba, + const union 
ufs_crypto_cfg_entry *cfg, int slot); }; /** @@ -388,6 +401,28 @@ struct ufs_hba_variant { struct ufs_hba_pm_qos_variant_ops *pm_qos_vops; }; +struct keyslot_mgmt_ll_ops; +struct ufs_hba_crypto_variant_ops { + void (*setup_rq_keyslot_manager)(struct ufs_hba *hba, + struct request_queue *q); + void (*destroy_rq_keyslot_manager)(struct ufs_hba *hba, + struct request_queue *q); + int (*hba_init_crypto)(struct ufs_hba *hba, + const struct keyslot_mgmt_ll_ops *ksm_ops); + void (*enable)(struct ufs_hba *hba); + void (*disable)(struct ufs_hba *hba); + int (*suspend)(struct ufs_hba *hba, enum ufs_pm_op pm_op); + int (*resume)(struct ufs_hba *hba, enum ufs_pm_op pm_op); + int (*debug)(struct ufs_hba *hba); + int (*prepare_lrbp_crypto)(struct ufs_hba *hba, + struct scsi_cmnd *cmd, + struct ufshcd_lrb *lrbp); + int (*complete_lrbp_crypto)(struct ufs_hba *hba, + struct scsi_cmnd *cmd, + struct ufshcd_lrb *lrbp); + void *priv; +}; + /* clock gating state */ enum clk_gating_state { CLKS_OFF, @@ -749,6 +784,10 @@ enum ufshcd_card_state { * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for * device is known or not. * @scsi_block_reqs_cnt: reference counting for scsi block requests + * @crypto_capabilities: Content of crypto capabilities register (0x100) + * @crypto_cap_array: Array of crypto capabilities + * @crypto_cfg_register: Start of the crypto cfg array + * @ksm: the keyslot manager tied to this hba */ struct ufs_hba { void __iomem *mmio_base; @@ -794,6 +833,7 @@ struct ufs_hba { struct ufs_hba_variant *var; void *priv; size_t sg_entry_size; + const struct ufs_hba_crypto_variant_ops *crypto_vops; unsigned int irq; bool is_irq_enabled; bool crash_on_err; @@ -883,6 +923,12 @@ struct ufs_hba { /* Auto hibern8 support is broken */ #define UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 UFS_BIT(15) + /* + * This quirk needs to be enabled if the host controller advertises + * inline encryption support but it doesn't work correctly. 
+ */ + #define UFSHCD_QUIRK_BROKEN_CRYPTO UFS_BIT(16) + unsigned int quirks; /* Deviations from standard UFSHCI spec. */ wait_queue_head_t tm_wq; @@ -995,6 +1041,11 @@ struct ufs_hba { * in hibern8 then enable this cap. */ #define UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8 (1 << 7) + /* + * This capability allows the host controller driver to use the + * inline crypto engine, if it is present + */ +#define UFSHCD_CAP_CRYPTO (1 << 8) struct devfreq *devfreq; struct ufs_clk_scaling clk_scaling; @@ -1027,6 +1078,14 @@ struct ufs_hba { bool force_g4; /* distinguish between resume and restore */ bool restore; + +#ifdef CONFIG_SCSI_UFS_CRYPTO + /* crypto */ + union ufs_crypto_capabilities crypto_capabilities; + union ufs_crypto_cap_entry *crypto_cap_array; + u32 crypto_cfg_register; + struct keyslot_manager *ksm; +#endif /* CONFIG_SCSI_UFS_CRYPTO */ }; static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba) diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 91f852764932..764662fc685d 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -347,6 +347,61 @@ enum { INTERRUPT_MASK_ALL_VER_21 = 0x71FFF, }; +/* CCAP - Crypto Capability 100h */ +union ufs_crypto_capabilities { + __le32 reg_val; + struct { + u8 num_crypto_cap; + u8 config_count; + u8 reserved; + u8 config_array_ptr; + }; +}; + +enum ufs_crypto_key_size { + UFS_CRYPTO_KEY_SIZE_INVALID = 0x0, + UFS_CRYPTO_KEY_SIZE_128 = 0x1, + UFS_CRYPTO_KEY_SIZE_192 = 0x2, + UFS_CRYPTO_KEY_SIZE_256 = 0x3, + UFS_CRYPTO_KEY_SIZE_512 = 0x4, +}; + +enum ufs_crypto_alg { + UFS_CRYPTO_ALG_AES_XTS = 0x0, + UFS_CRYPTO_ALG_BITLOCKER_AES_CBC = 0x1, + UFS_CRYPTO_ALG_AES_ECB = 0x2, + UFS_CRYPTO_ALG_ESSIV_AES_CBC = 0x3, +}; + +/* x-CRYPTOCAP - Crypto Capability X */ +union ufs_crypto_cap_entry { + __le32 reg_val; + struct { + u8 algorithm_id; + u8 sdus_mask; /* Supported data unit size mask */ + u8 key_size; + u8 reserved; + }; +}; + +#define UFS_CRYPTO_CONFIGURATION_ENABLE (1 << 7) +#define 
UFS_CRYPTO_KEY_MAX_SIZE 64 +/* x-CRYPTOCFG - Crypto Configuration X */ +union ufs_crypto_cfg_entry { + __le32 reg_val[32]; + struct { + u8 crypto_key[UFS_CRYPTO_KEY_MAX_SIZE]; + u8 data_unit_size; + u8 crypto_cap_idx; + u8 reserved_1; + u8 config_enable; + u8 reserved_multi_host; + u8 reserved_2; + u8 vsb[2]; + u8 reserved_3[56]; + }; +}; + /* * Request Descriptor Definitions */ @@ -368,6 +423,7 @@ enum { UTP_NATIVE_UFS_COMMAND = 0x10000000, UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000, UTP_REQ_DESC_INT_CMD = 0x01000000, + UTP_REQ_DESC_CRYPTO_ENABLE_CMD = 0x00800000, }; /* UTP Transfer Request Data Direction (DD) */ diff --git a/fs/buffer.c b/fs/buffer.c index 758b6056ad91..5c85f4ef66bd 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -46,6 +46,7 @@ #include #include #include +#include static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, @@ -3172,6 +3173,8 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, */ bio = bio_alloc(GFP_NOIO, 1); + fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); + if (wbc) { wbc_init_bio(wbc, bio); wbc_account_io(wbc, bh->b_page, bh->b_size); diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig index 4bc66f2c571e..0701bb90f99c 100644 --- a/fs/crypto/Kconfig +++ b/fs/crypto/Kconfig @@ -15,3 +15,9 @@ config FS_ENCRYPTION efficient since it avoids caching the encrypted and decrypted pages in the page cache. Currently Ext4, F2FS and UBIFS make use of this feature. + +config FS_ENCRYPTION_INLINE_CRYPT + bool "Enable fscrypt to use inline crypto" + depends on FS_ENCRYPTION && BLK_INLINE_ENCRYPTION + help + Enable fscrypt to use inline encryption hardware if available. 
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile index 0a78543f6cec..1a6b0774f3ff 100644 --- a/fs/crypto/Makefile +++ b/fs/crypto/Makefile @@ -10,3 +10,4 @@ fscrypto-y := crypto.o \ policy.o fscrypto-$(CONFIG_BLOCK) += bio.o +fscrypto-$(CONFIG_FS_ENCRYPTION_INLINE_CRYPT) += inline_crypt.o diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index 699bb4d426f2..9601e4bfc004 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c @@ -46,26 +46,35 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, { const unsigned int blockbits = inode->i_blkbits; const unsigned int blocksize = 1 << blockbits; + const bool inlinecrypt = fscrypt_inode_uses_inline_crypto(inode); struct page *ciphertext_page; struct bio *bio; int ret, err = 0; - ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT); - if (!ciphertext_page) - return -ENOMEM; + if (inlinecrypt) { + ciphertext_page = ZERO_PAGE(0); + } else { + ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT); + if (!ciphertext_page) + return -ENOMEM; + } while (len--) { - err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk, - ZERO_PAGE(0), ciphertext_page, - blocksize, 0, GFP_NOFS); - if (err) - goto errout; + if (!inlinecrypt) { + err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk, + ZERO_PAGE(0), ciphertext_page, + blocksize, 0, GFP_NOFS); + if (err) + goto errout; + } bio = bio_alloc(GFP_NOWAIT, 1); if (!bio) { err = -ENOMEM; goto errout; } + fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOIO); + bio_set_dev(bio, inode->i_sb->s_bdev); bio->bi_iter.bi_sector = pblk << (blockbits - 9); bio_set_op_attrs(bio, REQ_OP_WRITE, 0); @@ -87,7 +96,8 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, } err = 0; errout: - fscrypt_free_bounce_page(ciphertext_page); + if (!inlinecrypt) + fscrypt_free_bounce_page(ciphertext_page); return err; } EXPORT_SYMBOL(fscrypt_zeroout_range); diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 6e6f39ea18a7..41b4fe15b4b6 100644 --- a/fs/crypto/crypto.c +++ 
b/fs/crypto/crypto.c @@ -96,7 +96,7 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw, DECLARE_CRYPTO_WAIT(wait); struct scatterlist dst, src; struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_skcipher *tfm = ci->ci_ctfm; + struct crypto_skcipher *tfm = ci->ci_key.tfm; int res = 0; if (WARN_ON_ONCE(len <= 0)) diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 3da3707c10e3..3aafddaab703 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -40,7 +40,7 @@ int fname_encrypt(struct inode *inode, const struct qstr *iname, struct skcipher_request *req = NULL; DECLARE_CRYPTO_WAIT(wait); struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_skcipher *tfm = ci->ci_ctfm; + struct crypto_skcipher *tfm = ci->ci_key.tfm; union fscrypt_iv iv; struct scatterlist sg; int res; @@ -93,7 +93,7 @@ static int fname_decrypt(struct inode *inode, DECLARE_CRYPTO_WAIT(wait); struct scatterlist src_sg, dst_sg; struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_skcipher *tfm = ci->ci_ctfm; + struct crypto_skcipher *tfm = ci->ci_key.tfm; union fscrypt_iv iv; int res; diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 94da6bad5f19..739d8a9d24f5 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -13,12 +13,14 @@ #include #include +#include #define CONST_STRLEN(str) (sizeof(str) - 1) #define FS_KEY_DERIVATION_NONCE_SIZE 16 #define FSCRYPT_MIN_KEY_SIZE 16 +#define FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE 128 #define FSCRYPT_CONTEXT_V1 1 #define FSCRYPT_CONTEXT_V2 2 @@ -151,6 +153,20 @@ struct fscrypt_symlink_data { char encrypted_path[1]; } __packed; +/** + * struct fscrypt_prepared_key - a key prepared for actual encryption/decryption + * @tfm: crypto API transform object + * @blk_key: key for blk-crypto + * + * Normally only one of the fields will be non-NULL. 
+ */ +struct fscrypt_prepared_key { + struct crypto_skcipher *tfm; +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + struct fscrypt_blk_crypto_key *blk_key; +#endif +}; + /* * fscrypt_info - the "encryption key" for an inode * @@ -159,15 +175,20 @@ struct fscrypt_symlink_data { * inode is evicted. */ struct fscrypt_info { - /* The actual crypto transform used for encryption and decryption */ - u8 ci_data_mode; - u8 ci_filename_mode; - u8 ci_flags; - struct crypto_skcipher *ci_ctfm; + /* The key in a form prepared for actual encryption/decryption */ + struct fscrypt_prepared_key ci_key; /* True if the key should be freed when this fscrypt_info is freed */ bool ci_owns_key; +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + /* + * True if this inode will use inline encryption (blk-crypto) instead of + * the traditional filesystem-layer encryption. + */ + bool ci_inlinecrypt; +#endif + /* * Encryption mode used for this inode. It corresponds to either the * contents or filenames encryption mode, depending on the inode type. @@ -192,7 +213,7 @@ struct fscrypt_info { /* * If non-NULL, then encryption is done using the master key directly - * and ci_ctfm will equal ci_direct_key->dk_ctfm. + * and ci_key will equal ci_direct_key->dk_key. 
*/ struct fscrypt_direct_key *ci_direct_key; @@ -257,6 +278,7 @@ union fscrypt_iv { u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; }; u8 raw[FSCRYPT_MAX_IV_SIZE]; + __le64 dun[FSCRYPT_MAX_IV_SIZE / sizeof(__le64)]; }; void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, @@ -296,6 +318,94 @@ extern int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context, extern void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf); +/* inline_crypt.c */ +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT +extern void fscrypt_select_encryption_impl(struct fscrypt_info *ci); + +static inline bool +fscrypt_using_inline_encryption(const struct fscrypt_info *ci) +{ + return ci->ci_inlinecrypt; +} + +extern int fscrypt_prepare_inline_crypt_key( + struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, + unsigned int raw_key_size, + const struct fscrypt_info *ci); + +extern void fscrypt_destroy_inline_crypt_key( + struct fscrypt_prepared_key *prep_key); + +extern int fscrypt_derive_raw_secret(struct super_block *sb, + const u8 *wrapped_key, + unsigned int wrapped_key_size, + u8 *raw_secret, + unsigned int raw_secret_size); + +/* + * Check whether the crypto transform or blk-crypto key has been allocated in + * @prep_key, depending on which encryption implementation the file will use. + */ +static inline bool +fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, + const struct fscrypt_info *ci) +{ + /* + * The READ_ONCE() here pairs with the smp_store_release() in + * fscrypt_prepare_key(). (This only matters for the per-mode keys, + * which are shared by multiple inodes.) 
+ */ + if (fscrypt_using_inline_encryption(ci)) + return READ_ONCE(prep_key->blk_key) != NULL; + return READ_ONCE(prep_key->tfm) != NULL; +} + +#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + +static inline void fscrypt_select_encryption_impl(struct fscrypt_info *ci) +{ +} + +static inline bool fscrypt_using_inline_encryption( + const struct fscrypt_info *ci) +{ + return false; +} + +static inline int +fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, unsigned int raw_key_size, + const struct fscrypt_info *ci) +{ + WARN_ON(1); + return -EOPNOTSUPP; +} + +static inline void +fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key) +{ +} + +static inline int fscrypt_derive_raw_secret(struct super_block *sb, + const u8 *wrapped_key, + unsigned int wrapped_key_size, + u8 *raw_secret, + unsigned int raw_secret_size) +{ + fscrypt_warn(NULL, + "kernel built without support for hardware-wrapped keys"); + return -EOPNOTSUPP; +} + +static inline bool +fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, + const struct fscrypt_info *ci) +{ + return READ_ONCE(prep_key->tfm) != NULL; +} +#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + /* keyring.c */ /* @@ -312,8 +422,15 @@ struct fscrypt_master_key_secret { /* Size of the raw key in bytes. Set even if ->raw isn't set. */ u32 size; - /* For v1 policy keys: the raw key. Wiped for v2 policy keys. */ - u8 raw[FSCRYPT_MAX_KEY_SIZE]; + /* True if the key in ->raw is a hardware-wrapped key. */ + bool is_hw_wrapped; + + /* + * For v1 policy keys: the raw key. Wiped for v2 policy keys, unless + * ->is_hw_wrapped is true, in which case this contains the wrapped key + * rather than the key with which 'hkdf' was keyed. 
+ */ + u8 raw[FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE]; } __randomize_layout; @@ -385,14 +502,11 @@ struct fscrypt_master_key { struct list_head mk_decrypted_inodes; spinlock_t mk_decrypted_inodes_lock; - /* Crypto API transforms for DIRECT_KEY policies, allocated on-demand */ - struct crypto_skcipher *mk_direct_tfms[__FSCRYPT_MODE_MAX + 1]; + /* Per-mode keys for DIRECT_KEY policies, allocated on-demand */ + struct fscrypt_prepared_key mk_direct_keys[__FSCRYPT_MODE_MAX + 1]; - /* - * Crypto API transforms for filesystem-layer implementation of - * IV_INO_LBLK_64 policies, allocated on-demand. - */ - struct crypto_skcipher *mk_iv_ino_lblk_64_tfms[__FSCRYPT_MODE_MAX + 1]; + /* Per-mode keys for IV_INO_LBLK_64 policies, allocated on-demand */ + struct fscrypt_prepared_key mk_iv_ino_lblk_64_keys[__FSCRYPT_MODE_MAX + 1]; } __randomize_layout; @@ -449,17 +563,22 @@ struct fscrypt_mode { int keysize; int ivsize; int logged_impl_name; + enum blk_crypto_mode_num blk_crypto_mode; }; +extern struct fscrypt_mode fscrypt_modes[]; + static inline bool fscrypt_mode_supports_direct_key(const struct fscrypt_mode *mode) { return mode->ivsize >= offsetofend(union fscrypt_iv, nonce); } -extern struct crypto_skcipher * -fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key, - const struct inode *inode); +extern int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, unsigned int raw_key_size, + const struct fscrypt_info *ci); + +extern void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key); extern int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key); diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c new file mode 100644 index 000000000000..92c471d3db73 --- /dev/null +++ b/fs/crypto/inline_crypt.c @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Inline encryption support for fscrypt + * + * Copyright 2019 Google LLC + */ + +/* + * With "inline encryption", the block layer handles 
the decryption/encryption + * as part of the bio, instead of the filesystem doing the crypto itself via + * crypto API. See Documentation/block/inline-encryption.rst. fscrypt still + * provides the key and IV to use. + */ + +#include +#include +#include +#include +#include + +#include "fscrypt_private.h" + +struct fscrypt_blk_crypto_key { + struct blk_crypto_key base; + int num_devs; + struct request_queue *devs[]; +}; + +/* Enable inline encryption for this file if supported. */ +void fscrypt_select_encryption_impl(struct fscrypt_info *ci) +{ + const struct inode *inode = ci->ci_inode; + struct super_block *sb = inode->i_sb; + + /* The file must need contents encryption, not filenames encryption */ + if (!S_ISREG(inode->i_mode)) + return; + + /* blk-crypto must implement the needed encryption algorithm */ + if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID) + return; + + /* The filesystem must be mounted with -o inlinecrypt */ + if (!sb->s_cop->inline_crypt_enabled || + !sb->s_cop->inline_crypt_enabled(sb)) + return; + + ci->ci_inlinecrypt = true; +} + +int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, + unsigned int raw_key_size, + const struct fscrypt_info *ci) +{ + const struct inode *inode = ci->ci_inode; + struct super_block *sb = inode->i_sb; + enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; + int num_devs = 1; + int queue_refs = 0; + struct fscrypt_blk_crypto_key *blk_key; + int err; + int i; + + if (sb->s_cop->get_num_devices) + num_devs = sb->s_cop->get_num_devices(sb); + if (WARN_ON(num_devs < 1)) + return -EINVAL; + + blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_NOFS); + if (!blk_key) + return -ENOMEM; + + blk_key->num_devs = num_devs; + if (num_devs == 1) + blk_key->devs[0] = bdev_get_queue(sb->s_bdev); + else + sb->s_cop->get_devices(sb, blk_key->devs); + + BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE > + BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE); + + err = 
blk_crypto_init_key(&blk_key->base, raw_key, raw_key_size, + crypto_mode, sb->s_blocksize); + if (err) { + fscrypt_err(inode, "error %d initializing blk-crypto key", err); + goto fail; + } + + /* + * We have to start using blk-crypto on all the filesystem's devices. + * We also have to save all the request_queue's for later so that the + * key can be evicted from them. This is needed because some keys + * aren't destroyed until after the filesystem was already unmounted + * (namely, the per-mode keys in struct fscrypt_master_key). + */ + for (i = 0; i < num_devs; i++) { + if (!blk_get_queue(blk_key->devs[i])) { + fscrypt_err(inode, "couldn't get request_queue"); + err = -EAGAIN; + goto fail; + } + queue_refs++; + + err = blk_crypto_start_using_mode(crypto_mode, sb->s_blocksize, + blk_key->devs[i]); + if (err) { + fscrypt_err(inode, + "error %d starting to use blk-crypto", err); + goto fail; + } + } + /* + * Pairs with READ_ONCE() in fscrypt_is_key_prepared(). (Only matters + * for the per-mode keys, which are shared by multiple inodes.) 
+ */ + smp_store_release(&prep_key->blk_key, blk_key); + return 0; + +fail: + for (i = 0; i < queue_refs; i++) + blk_put_queue(blk_key->devs[i]); + kzfree(blk_key); + return err; +} + +void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key) +{ + struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key; + int i; + + if (blk_key) { + for (i = 0; i < blk_key->num_devs; i++) { + blk_crypto_evict_key(blk_key->devs[i], &blk_key->base); + blk_put_queue(blk_key->devs[i]); + } + kzfree(blk_key); + } +} + +int fscrypt_derive_raw_secret(struct super_block *sb, + const u8 *wrapped_key, + unsigned int wrapped_key_size, + u8 *raw_secret, unsigned int raw_secret_size) +{ + struct request_queue *q; + + q = sb->s_bdev->bd_queue; + if (!q->ksm) + return -EOPNOTSUPP; + + return keyslot_manager_derive_raw_secret(q->ksm, + wrapped_key, wrapped_key_size, + raw_secret, raw_secret_size); +} + +/** + * fscrypt_inode_uses_inline_crypto - test whether an inode uses inline + * encryption + * @inode: an inode + * + * Return: true if the inode requires file contents encryption and if the + * encryption should be done in the block layer via blk-crypto rather + * than in the filesystem layer. + */ +bool fscrypt_inode_uses_inline_crypto(const struct inode *inode) +{ + return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && + inode->i_crypt_info->ci_inlinecrypt; +} +EXPORT_SYMBOL_GPL(fscrypt_inode_uses_inline_crypto); + +/** + * fscrypt_inode_uses_fs_layer_crypto - test whether an inode uses fs-layer + * encryption + * @inode: an inode + * + * Return: true if the inode requires file contents encryption and if the + * encryption should be done in the filesystem layer rather than in the + * block layer via blk-crypto. 
+ */ +bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode) +{ + return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && + !inode->i_crypt_info->ci_inlinecrypt; +} +EXPORT_SYMBOL_GPL(fscrypt_inode_uses_fs_layer_crypto); + +static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num, + u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]) +{ + union fscrypt_iv iv; + int i; + + fscrypt_generate_iv(&iv, lblk_num, ci); + + BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE); + memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE); + for (i = 0; i < ci->ci_mode->ivsize/sizeof(dun[0]); i++) + dun[i] = le64_to_cpu(iv.dun[i]); +} + +/** + * fscrypt_set_bio_crypt_ctx - prepare a file contents bio for inline encryption + * @bio: a bio which will eventually be submitted to the file + * @inode: the file's inode + * @first_lblk: the first file logical block number in the I/O + * @gfp_mask: memory allocation flags - these must be a waiting mask so that + * bio_crypt_set_ctx can't fail. + * + * If the contents of the file should be encrypted (or decrypted) with inline + * encryption, then assign the appropriate encryption context to the bio. + * + * Normally the bio should be newly allocated (i.e. no pages added yet), as + * otherwise fscrypt_mergeable_bio() won't work as intended. + * + * The encryption context will be freed automatically when the bio is freed. + * + * This function also handles setting bi_skip_dm_default_key when needed. 
+ */ +void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, + u64 first_lblk, gfp_t gfp_mask) +{ + const struct fscrypt_info *ci = inode->i_crypt_info; + u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; + + if (fscrypt_inode_should_skip_dm_default_key(inode)) + bio_set_skip_dm_default_key(bio); + + if (!fscrypt_inode_uses_inline_crypto(inode)) + return; + + fscrypt_generate_dun(ci, first_lblk, dun); + bio_crypt_set_ctx(bio, &ci->ci_key.blk_key->base, dun, gfp_mask); +} +EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx); + +/* Extract the inode and logical block number from a buffer_head. */ +static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh, + const struct inode **inode_ret, + u64 *lblk_num_ret) +{ + struct page *page = bh->b_page; + const struct address_space *mapping; + const struct inode *inode; + + /* + * The ext4 journal (jbd2) can submit a buffer_head it directly created + * for a non-pagecache page. fscrypt doesn't care about these. + */ + mapping = page_mapping(page); + if (!mapping) + return false; + inode = mapping->host; + + *inode_ret = inode; + *lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) + + (bh_offset(bh) >> inode->i_blkbits); + return true; +} + +/** + * fscrypt_set_bio_crypt_ctx_bh - prepare a file contents bio for inline + * encryption + * @bio: a bio which will eventually be submitted to the file + * @first_bh: the first buffer_head for which I/O will be submitted + * @gfp_mask: memory allocation flags + * + * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead + * of an inode and block number directly. 
+ */ +void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio, + const struct buffer_head *first_bh, + gfp_t gfp_mask) +{ + const struct inode *inode; + u64 first_lblk; + + if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk)) + fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask); +} +EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh); + +/** + * fscrypt_mergeable_bio - test whether data can be added to a bio + * @bio: the bio being built up + * @inode: the inode for the next part of the I/O + * @next_lblk: the next file logical block number in the I/O + * + * When building a bio which may contain data which should undergo inline + * encryption (or decryption) via fscrypt, filesystems should call this function + * to ensure that the resulting bio contains only logically contiguous data. + * This will return false if the next part of the I/O cannot be merged with the + * bio because either the encryption key would be different or the encryption + * data unit numbers would be discontiguous. + * + * fscrypt_set_bio_crypt_ctx() must have already been called on the bio. + * + * This function also returns false if the next part of the I/O would need to + * have a different value for the bi_skip_dm_default_key flag. + * + * Return: true iff the I/O is mergeable + */ +bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, + u64 next_lblk) +{ + const struct bio_crypt_ctx *bc = bio->bi_crypt_context; + u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; + + if (!!bc != fscrypt_inode_uses_inline_crypto(inode)) + return false; + if (bio_should_skip_dm_default_key(bio) != + fscrypt_inode_should_skip_dm_default_key(inode)) + return false; + if (!bc) + return true; + + /* + * Comparing the key pointers is good enough, as all I/O for each key + * uses the same pointer. I.e., there's currently no need to support + * merging requests where the keys are the same but the pointers differ. 
+ */ + if (bc->bc_key != &inode->i_crypt_info->ci_key.blk_key->base) + return false; + + fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun); + return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun); +} +EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio); + +/** + * fscrypt_mergeable_bio_bh - test whether data can be added to a bio + * @bio: the bio being built up + * @next_bh: the next buffer_head for which I/O will be submitted + * + * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of + * an inode and block number directly. + * + * Return: true iff the I/O is mergeable + */ +bool fscrypt_mergeable_bio_bh(struct bio *bio, + const struct buffer_head *next_bh) +{ + const struct inode *inode; + u64 next_lblk; + + if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk)) + return !bio->bi_crypt_context && + !bio_should_skip_dm_default_key(bio); + + return fscrypt_mergeable_bio(bio, inode, next_lblk); +} +EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh); diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index 687f76590761..40ea4bc1059d 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -44,8 +44,8 @@ static void free_master_key(struct fscrypt_master_key *mk) wipe_master_key_secret(&mk->mk_secret); for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) { - crypto_free_skcipher(mk->mk_direct_tfms[i]); - crypto_free_skcipher(mk->mk_iv_ino_lblk_64_tfms[i]); + fscrypt_destroy_prepared_key(&mk->mk_direct_keys[i]); + fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_64_keys[i]); } key_put(mk->mk_users); @@ -469,8 +469,10 @@ static int fscrypt_provisioning_key_preparse(struct key_preparsed_payload *prep) { const struct fscrypt_provisioning_key_payload *payload = prep->data; + BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE < FSCRYPT_MAX_KEY_SIZE); + if (prep->datalen < sizeof(*payload) + FSCRYPT_MIN_KEY_SIZE || - prep->datalen > sizeof(*payload) + FSCRYPT_MAX_KEY_SIZE) + prep->datalen > sizeof(*payload) + FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE) 
return -EINVAL; if (payload->type != FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && @@ -567,6 +569,8 @@ out_put: key_ref_put(ref); return err; } +/* Size of software "secret" derived from hardware-wrapped key */ +#define RAW_SECRET_SIZE 32 /* * Add a master encryption key to the filesystem, causing all files which were @@ -598,6 +602,9 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) struct fscrypt_add_key_arg __user *uarg = _uarg; struct fscrypt_add_key_arg arg; struct fscrypt_master_key_secret secret; + u8 _kdf_key[RAW_SECRET_SIZE]; + u8 *kdf_key; + unsigned int kdf_key_size; int err; if (copy_from_user(&arg, uarg, sizeof(arg))) @@ -609,6 +616,9 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; + BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE < + FSCRYPT_MAX_KEY_SIZE); + memset(&secret, 0, sizeof(secret)); if (arg.key_id) { @@ -617,16 +627,20 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) err = get_keyring_key(arg.key_id, arg.key_spec.type, &secret); if (err) goto out_wipe_secret; + err = -EINVAL; + if (!(arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) && + secret.size > FSCRYPT_MAX_KEY_SIZE) + goto out_wipe_secret; } else { if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE || - arg.raw_size > FSCRYPT_MAX_KEY_SIZE) + arg.raw_size > + ((arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) ? 
+ FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE : FSCRYPT_MAX_KEY_SIZE)) return -EINVAL; - secret.size = arg.raw_size; err = -EFAULT; - if (copy_from_user(secret.raw, uarg->raw, secret.size)) { + if (copy_from_user(secret.raw, uarg->raw, secret.size)) goto out_wipe_secret; - } } switch (arg.key_spec.type) { @@ -639,18 +653,37 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) err = -EACCES; if (!capable(CAP_SYS_ADMIN)) goto out_wipe_secret; + + err = -EINVAL; + if (arg.__flags) + goto out_wipe_secret; break; case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: - err = fscrypt_init_hkdf(&secret.hkdf, secret.raw, secret.size); + err = -EINVAL; + if (arg.__flags & ~__FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) + goto out_wipe_secret; + if (arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) { + kdf_key = _kdf_key; + kdf_key_size = RAW_SECRET_SIZE; + err = fscrypt_derive_raw_secret(sb, secret.raw, + secret.size, + kdf_key, kdf_key_size); + if (err) + goto out_wipe_secret; + secret.is_hw_wrapped = true; + } else { + kdf_key = secret.raw; + kdf_key_size = secret.size; + } + err = fscrypt_init_hkdf(&secret.hkdf, kdf_key, kdf_key_size); + /* + * Now that the HKDF context is initialized, the raw HKDF + * key is no longer needed. + */ + memzero_explicit(kdf_key, kdf_key_size); if (err) goto out_wipe_secret; - /* - * Now that the HKDF context is initialized, the raw key is no - * longer needed. - */ - memzero_explicit(secret.raw, secret.size); - /* Calculate the key identifier and return it to userspace. 
*/ err = fscrypt_hkdf_expand(&secret.hkdf, HKDF_CONTEXT_KEY_IDENTIFIER, diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index 0380ae882441..b51fc41395e0 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -13,12 +13,13 @@ #include "fscrypt_private.h" -static struct fscrypt_mode available_modes[] = { +struct fscrypt_mode fscrypt_modes[] = { [FSCRYPT_MODE_AES_256_XTS] = { .friendly_name = "AES-256-XTS", .cipher_str = "xts(aes)", .keysize = 64, .ivsize = 16, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS, }, [FSCRYPT_MODE_AES_256_CTS] = { .friendly_name = "AES-256-CTS-CBC", @@ -31,6 +32,7 @@ static struct fscrypt_mode available_modes[] = { .cipher_str = "essiv(cbc(aes),sha256)", .keysize = 16, .ivsize = 16, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV, }, [FSCRYPT_MODE_AES_128_CTS] = { .friendly_name = "AES-128-CTS-CBC", @@ -43,6 +45,7 @@ static struct fscrypt_mode available_modes[] = { .cipher_str = "adiantum(xchacha12,aes)", .keysize = 32, .ivsize = 32, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_ADIANTUM, }, [FSCRYPT_MODE_PRIVATE] = { .friendly_name = "ICE", @@ -56,10 +59,10 @@ select_encryption_mode(const union fscrypt_policy *policy, const struct inode *inode) { if (S_ISREG(inode->i_mode)) - return &available_modes[fscrypt_policy_contents_mode(policy)]; + return &fscrypt_modes[fscrypt_policy_contents_mode(policy)]; if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) - return &available_modes[fscrypt_policy_fnames_mode(policy)]; + return &fscrypt_modes[fscrypt_policy_fnames_mode(policy)]; WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n", inode->i_ino, (inode->i_mode & S_IFMT)); @@ -67,9 +70,9 @@ select_encryption_mode(const union fscrypt_policy *policy, } /* Create a symmetric cipher object for the given encryption mode and key */ -struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode, - const u8 *raw_key, - const struct inode 
*inode) +static struct crypto_skcipher * +fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key, + const struct inode *inode) { struct crypto_skcipher *tfm; int err; @@ -109,30 +112,61 @@ err_free_tfm: return ERR_PTR(err); } -/* Given the per-file key, set up the file's crypto transform object */ -int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key) +/* + * Prepare the crypto transform object or blk-crypto key in @prep_key, given the + * raw key, encryption mode, and flag indicating which encryption implementation + * (fs-layer or blk-crypto) will be used. + */ +int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, unsigned int raw_key_size, + const struct fscrypt_info *ci) { struct crypto_skcipher *tfm; - tfm = fscrypt_allocate_skcipher(ci->ci_mode, derived_key, ci->ci_inode); + if (fscrypt_using_inline_encryption(ci)) + return fscrypt_prepare_inline_crypt_key(prep_key, + raw_key, raw_key_size, ci); + + if (WARN_ON(raw_key_size != ci->ci_mode->keysize)) + return -EINVAL; + + tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode); if (IS_ERR(tfm)) return PTR_ERR(tfm); - - ci->ci_ctfm = tfm; - ci->ci_owns_key = true; + /* + * Pairs with READ_ONCE() in fscrypt_is_key_prepared(). (Only matters + * for the per-mode keys, which are shared by multiple inodes.) + */ + smp_store_release(&prep_key->tfm, tfm); return 0; } +/* Destroy a crypto transform object and/or blk-crypto key. 
*/ +void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key) +{ + crypto_free_skcipher(prep_key->tfm); + fscrypt_destroy_inline_crypt_key(prep_key); +} + +/* Given the per-file key, set up the file's crypto transform object */ +int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key) +{ + ci->ci_owns_key = true; + return fscrypt_prepare_key(&ci->ci_key, derived_key, + ci->ci_mode->keysize, ci); +} + static int setup_per_mode_key(struct fscrypt_info *ci, struct fscrypt_master_key *mk, - struct crypto_skcipher **tfms, + struct fscrypt_prepared_key *keys, u8 hkdf_context, bool include_fs_uuid) { + static DEFINE_MUTEX(mode_key_setup_mutex); const struct inode *inode = ci->ci_inode; const struct super_block *sb = inode->i_sb; struct fscrypt_mode *mode = ci->ci_mode; - u8 mode_num = mode - available_modes; - struct crypto_skcipher *tfm, *prev_tfm; + const u8 mode_num = mode - fscrypt_modes; + struct fscrypt_prepared_key *prep_key; u8 mode_key[FSCRYPT_MAX_KEY_SIZE]; u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)]; unsigned int hkdf_infolen = 0; @@ -141,39 +175,65 @@ static int setup_per_mode_key(struct fscrypt_info *ci, if (WARN_ON(mode_num > __FSCRYPT_MODE_MAX)) return -EINVAL; - /* pairs with cmpxchg() below */ - tfm = READ_ONCE(tfms[mode_num]); - if (likely(tfm != NULL)) - goto done; - - BUILD_BUG_ON(sizeof(mode_num) != 1); - BUILD_BUG_ON(sizeof(sb->s_uuid) != 16); - BUILD_BUG_ON(sizeof(hkdf_info) != 17); - hkdf_info[hkdf_infolen++] = mode_num; - if (include_fs_uuid) { - memcpy(&hkdf_info[hkdf_infolen], &sb->s_uuid, - sizeof(sb->s_uuid)); - hkdf_infolen += sizeof(sb->s_uuid); + prep_key = &keys[mode_num]; + if (fscrypt_is_key_prepared(prep_key, ci)) { + ci->ci_key = *prep_key; + return 0; } - err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, - hkdf_context, hkdf_info, hkdf_infolen, - mode_key, mode->keysize); - if (err) - return err; - tfm = fscrypt_allocate_skcipher(mode, mode_key, inode); - memzero_explicit(mode_key, 
mode->keysize); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - /* pairs with READ_ONCE() above */ - prev_tfm = cmpxchg(&tfms[mode_num], NULL, tfm); - if (prev_tfm != NULL) { - crypto_free_skcipher(tfm); - tfm = prev_tfm; + mutex_lock(&mode_key_setup_mutex); + + if (fscrypt_is_key_prepared(prep_key, ci)) + goto done_unlock; + + if (mk->mk_secret.is_hw_wrapped && S_ISREG(inode->i_mode)) { + int i; + + if (!fscrypt_using_inline_encryption(ci)) { + fscrypt_warn(ci->ci_inode, + "Hardware-wrapped keys require inline encryption (-o inlinecrypt)"); + err = -EINVAL; + goto out_unlock; + } + for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) { + if (fscrypt_is_key_prepared(&keys[i], ci)) { + fscrypt_warn(ci->ci_inode, + "Each hardware-wrapped key can only be used with one encryption mode"); + err = -EINVAL; + goto out_unlock; + } + } + err = fscrypt_prepare_key(prep_key, mk->mk_secret.raw, + mk->mk_secret.size, ci); + if (err) + goto out_unlock; + } else { + BUILD_BUG_ON(sizeof(mode_num) != 1); + BUILD_BUG_ON(sizeof(sb->s_uuid) != 16); + BUILD_BUG_ON(sizeof(hkdf_info) != 17); + hkdf_info[hkdf_infolen++] = mode_num; + if (include_fs_uuid) { + memcpy(&hkdf_info[hkdf_infolen], &sb->s_uuid, + sizeof(sb->s_uuid)); + hkdf_infolen += sizeof(sb->s_uuid); + } + err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, + hkdf_context, hkdf_info, hkdf_infolen, + mode_key, mode->keysize); + if (err) + goto out_unlock; + err = fscrypt_prepare_key(prep_key, mode_key, mode->keysize, + ci); + memzero_explicit(mode_key, mode->keysize); + if (err) + goto out_unlock; } -done: - ci->ci_ctfm = tfm; - return 0; +done_unlock: + ci->ci_key = *prep_key; + err = 0; +out_unlock: + mutex_unlock(&mode_key_setup_mutex); + return err; } static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, @@ -182,6 +242,13 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, u8 derived_key[FSCRYPT_MAX_KEY_SIZE]; int err; + if (mk->mk_secret.is_hw_wrapped && + !(ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)) { 
+ fscrypt_warn(ci->ci_inode, + "Hardware-wrapped keys are only supported with IV_INO_LBLK_64 policies"); + return -EINVAL; + } + if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { /* * DIRECT_KEY: instead of deriving per-file keys, the per-file @@ -197,7 +264,7 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, ci->ci_mode->friendly_name); return -EINVAL; } - return setup_per_mode_key(ci, mk, mk->mk_direct_tfms, + return setup_per_mode_key(ci, mk, mk->mk_direct_keys, HKDF_CONTEXT_DIRECT_KEY, false); } else if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) { @@ -207,7 +274,7 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, * the IVs. This format is optimized for use with inline * encryption hardware compliant with the UFS or eMMC standards. */ - return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_tfms, + return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_keys, HKDF_CONTEXT_IV_INO_LBLK_64_KEY, true); } @@ -242,6 +309,8 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, struct fscrypt_key_specifier mk_spec; int err; + fscrypt_select_encryption_impl(ci); + switch (ci->ci_policy.version) { case FSCRYPT_POLICY_V1: mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR; @@ -334,7 +403,7 @@ static void put_crypt_info(struct fscrypt_info *ci) if (ci->ci_direct_key) fscrypt_put_direct_key(ci->ci_direct_key); else if (ci->ci_owns_key) - crypto_free_skcipher(ci->ci_ctfm); + fscrypt_destroy_prepared_key(&ci->ci_key); key = ci->ci_master_key; if (key) { diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index 454fb03fc30e..47591c54dc3d 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -146,7 +146,7 @@ struct fscrypt_direct_key { struct hlist_node dk_node; refcount_t dk_refcount; const struct fscrypt_mode *dk_mode; - struct crypto_skcipher *dk_ctfm; + struct fscrypt_prepared_key dk_key; u8 dk_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; u8 dk_raw[FSCRYPT_MAX_KEY_SIZE]; }; @@ 
-154,7 +154,7 @@ struct fscrypt_direct_key { static void free_direct_key(struct fscrypt_direct_key *dk) { if (dk) { - crypto_free_skcipher(dk->dk_ctfm); + fscrypt_destroy_prepared_key(&dk->dk_key); kzfree(dk); } } @@ -199,6 +199,8 @@ find_or_insert_direct_key(struct fscrypt_direct_key *to_insert, continue; if (ci->ci_mode != dk->dk_mode) continue; + if (!fscrypt_is_key_prepared(&dk->dk_key, ci)) + continue; if (crypto_memneq(raw_key, dk->dk_raw, ci->ci_mode->keysize)) continue; /* using existing tfm with same (descriptor, mode, raw_key) */ @@ -231,13 +233,10 @@ fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key) return ERR_PTR(-ENOMEM); refcount_set(&dk->dk_refcount, 1); dk->dk_mode = ci->ci_mode; - dk->dk_ctfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, - ci->ci_inode); - if (IS_ERR(dk->dk_ctfm)) { - err = PTR_ERR(dk->dk_ctfm); - dk->dk_ctfm = NULL; + err = fscrypt_prepare_key(&dk->dk_key, raw_key, ci->ci_mode->keysize, + ci); + if (err) goto err_free_dk; - } memcpy(dk->dk_descriptor, ci->ci_policy.v1.master_key_descriptor, FSCRYPT_KEY_DESCRIPTOR_SIZE); memcpy(dk->dk_raw, raw_key, ci->ci_mode->keysize); @@ -274,7 +273,7 @@ static int setup_v1_file_key_direct(struct fscrypt_info *ci, if (IS_ERR(dk)) return PTR_ERR(dk); ci->ci_direct_key = dk; - ci->ci_ctfm = dk->dk_ctfm; + ci->ci_key = dk->dk_key; return 0; } diff --git a/fs/direct-io.c b/fs/direct-io.c index 30bf22c989de..729c59213d2e 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -431,6 +432,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, sector_t first_sector, int nr_vecs) { struct bio *bio; + struct inode *inode = dio->inode; /* * bio_alloc() is guaranteed to return a bio when called with @@ -438,6 +440,9 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, */ bio = bio_alloc(GFP_KERNEL, nr_vecs); + fscrypt_set_bio_crypt_ctx(bio, inode, + sdio->cur_page_fs_offset >> 
inode->i_blkbits, + GFP_KERNEL); bio_set_dev(bio, bdev); bio->bi_iter.bi_sector = first_sector; bio_set_op_attrs(bio, dio->op, dio->op_flags); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 6c129067c07e..a2d6e8f0eb97 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1155,6 +1155,7 @@ struct ext4_inode_info { #define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable support for dio read nolocking */ #define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ +#define EXT4_MOUNT_INLINECRYPT 0x4000000 /* Inline encryption support */ #define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */ #define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */ #define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */ diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index c134c701a034..096e4cc053dc 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1236,8 +1236,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, (block_start < from || block_end > to)) { ll_rw_block(REQ_OP_READ, 0, 1, &bh); *wait_bh++ = bh; - decrypt = IS_ENCRYPTED(inode) && - S_ISREG(inode->i_mode); + decrypt = fscrypt_inode_uses_fs_layer_crypto(inode); } } /* @@ -3856,10 +3855,12 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter) ssize_t ret; int rw = iov_iter_rw(iter); -#ifdef CONFIG_FS_ENCRYPTION - if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) - return 0; -#endif + if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode)) { + if (!fscrypt_inode_uses_inline_crypto(inode) || + !IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), + i_blocksize(inode))) + return 0; + } if (fsverity_active(inode)) return 0; @@ -4067,8 +4068,7 @@ static int __ext4_block_zero_page_range(handle_t *handle, /* Uhhuh. Read error. Complain and punt. 
*/ if (!buffer_uptodate(bh)) goto unlock; - if (S_ISREG(inode->i_mode) && - IS_ENCRYPTED(inode)) { + if (fscrypt_inode_uses_fs_layer_crypto(inode)) { /* We expect the key to be set. */ BUG_ON(!fscrypt_has_encryption_key(inode)); BUG_ON(blocksize != PAGE_SIZE); diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index a6ec98d494b8..6481742d666d 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -366,6 +366,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io, bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); if (!bio) return -ENOMEM; + fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); wbc_init_bio(io->io_wbc, bio); bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio_set_dev(bio, bh->b_bdev); @@ -383,7 +384,8 @@ static int io_submit_add_bh(struct ext4_io_submit *io, { int ret; - if (io->io_bio && bh->b_blocknr != io->io_next_block) { + if (io->io_bio && (bh->b_blocknr != io->io_next_block || + !fscrypt_mergeable_bio_bh(io->io_bio, bh))) { submit_and_retry: ext4_io_submit(io); } @@ -469,7 +471,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, bh = head = page_buffers(page); - if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && nr_to_submit) { + if (fscrypt_inode_uses_fs_layer_crypto(inode) && nr_to_submit) { gfp_t gfp_flags = GFP_NOFS; /* diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index aefcd712df85..e4f684a71002 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -198,7 +198,7 @@ static struct bio_post_read_ctx *get_bio_post_read_ctx(struct inode *inode, unsigned int post_read_steps = 0; struct bio_post_read_ctx *ctx = NULL; - if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) + if (fscrypt_inode_uses_fs_layer_crypto(inode)) post_read_steps |= 1 << STEP_DECRYPT; if (ext4_need_verity(inode, first_idx)) @@ -259,6 +259,7 @@ int ext4_mpage_readpages(struct address_space *mapping, const unsigned blkbits = inode->i_blkbits; const unsigned blocks_per_page = PAGE_SIZE >> blkbits; const unsigned blocksize = 1 << blkbits; + sector_t 
next_block; sector_t block_in_file; sector_t last_block; sector_t last_block_in_file; @@ -290,7 +291,8 @@ int ext4_mpage_readpages(struct address_space *mapping, if (page_has_buffers(page)) goto confused; - block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); + block_in_file = next_block = + (sector_t)page->index << (PAGE_SHIFT - blkbits); last_block = block_in_file + nr_pages * blocks_per_page; last_block_in_file = (ext4_readpage_limit(inode) + blocksize - 1) >> blkbits; @@ -390,7 +392,8 @@ int ext4_mpage_readpages(struct address_space *mapping, * This page will go to BIO. Do we need to send this * BIO off first? */ - if (bio && (last_block_in_bio != blocks[0] - 1)) { + if (bio && (last_block_in_bio != blocks[0] - 1 || + !fscrypt_mergeable_bio(bio, inode, next_block))) { submit_and_realloc: ext4_submit_bio_read(bio); bio = NULL; @@ -402,6 +405,8 @@ int ext4_mpage_readpages(struct address_space *mapping, min_t(int, nr_pages, BIO_MAX_PAGES)); if (!bio) goto set_error_page; + fscrypt_set_bio_crypt_ctx(bio, inode, next_block, + GFP_KERNEL); ctx = get_bio_post_read_ctx(inode, bio, page->index); if (IS_ERR(ctx)) { bio_put(bio); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index c0e405bdab46..8f6ee92e51db 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1300,6 +1300,11 @@ static void ext4_get_ino_and_lblk_bits(struct super_block *sb, *lblk_bits_ret = 8 * sizeof(ext4_lblk_t); } +static bool ext4_inline_crypt_enabled(struct super_block *sb) +{ + return test_opt(sb, INLINECRYPT); +} + static const struct fscrypt_operations ext4_cryptops = { .key_prefix = "ext4:", .get_context = ext4_get_context, @@ -1309,6 +1314,7 @@ static const struct fscrypt_operations ext4_cryptops = { .max_namelen = EXT4_NAME_LEN, .has_stable_inodes = ext4_has_stable_inodes, .get_ino_and_lblk_bits = ext4_get_ino_and_lblk_bits, + .inline_crypt_enabled = ext4_inline_crypt_enabled, }; #endif @@ -1404,6 +1410,7 @@ enum { Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit, 
Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption, + Opt_inlinecrypt, Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, @@ -1497,6 +1504,7 @@ static const match_table_t tokens = { {Opt_noinit_itable, "noinit_itable"}, {Opt_max_dir_size_kb, "max_dir_size_kb=%u"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, + {Opt_inlinecrypt, "inlinecrypt"}, {Opt_nombcache, "nombcache"}, {Opt_nombcache, "no_mbcache"}, /* for backward compatibility */ {Opt_removed, "check=none"}, /* mount option from ext2/3 */ @@ -1706,6 +1714,11 @@ static const struct mount_opts { {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT}, {Opt_max_dir_size_kb, 0, MOPT_GTE0}, {Opt_test_dummy_encryption, 0, MOPT_GTE0}, +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + {Opt_inlinecrypt, EXT4_MOUNT_INLINECRYPT, MOPT_SET}, +#else + {Opt_inlinecrypt, EXT4_MOUNT_INLINECRYPT, MOPT_NOSUPPORT}, +#endif {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET}, {Opt_err, 0, 0} }; diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 2d4c259624b3..19abef392da3 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -467,6 +467,37 @@ static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages) return bio; } +static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, + pgoff_t first_idx, + const struct f2fs_io_info *fio, + gfp_t gfp_mask) +{ + /* + * The f2fs garbage collector sets ->encrypted_page when it wants to + * read/write raw data without encryption. 
+ */ + if (!fio || !fio->encrypted_page) + fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask); + else if (fscrypt_inode_should_skip_dm_default_key(inode)) + bio_set_skip_dm_default_key(bio); +} + +static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode, + pgoff_t next_idx, + const struct f2fs_io_info *fio) +{ + /* + * The f2fs garbage collector sets ->encrypted_page when it wants to + * read/write raw data without encryption. + */ + if (fio && fio->encrypted_page) + return !bio_has_crypt_ctx(bio) && + (bio_should_skip_dm_default_key(bio) == + fscrypt_inode_should_skip_dm_default_key(inode)); + + return fscrypt_mergeable_bio(bio, inode, next_idx); +} + static inline void __submit_bio(struct f2fs_sb_info *sbi, struct bio *bio, enum page_type type) { @@ -712,6 +743,9 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) /* Allocate a new bio */ bio = __bio_alloc(fio, 1); + f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host, + fio->page->index, fio, GFP_NOIO); + if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { bio_put(bio); return -EFAULT; @@ -895,7 +929,6 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio) struct bio *bio = *fio->bio; struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page; - struct inode *inode; if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, __is_meta_io(fio) ? 
META_GENERIC : DATA_GENERIC)) @@ -904,14 +937,17 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio) trace_f2fs_submit_page_bio(page, fio); f2fs_trace_ios(fio, 0); - inode = fio->page->mapping->host; - - if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block, - fio->new_blkaddr)) + if (bio && (!page_is_mergeable(fio->sbi, bio, *fio->last_block, + fio->new_blkaddr) || + !f2fs_crypt_mergeable_bio(bio, fio->page->mapping->host, + fio->page->index, fio))) f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL); alloc_new: if (!bio) { bio = __bio_alloc(fio, BIO_MAX_PAGES); + f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host, + fio->page->index, fio, + GFP_NOIO); bio_set_op_attrs(bio, fio->op, fio->op_flags); add_bio_entry(fio->sbi, bio, page, fio->temp); } else { @@ -967,8 +1003,11 @@ next: inc_page_count(sbi, WB_DATA_TYPE(bio_page)); - if (io->bio && !io_is_mergeable(sbi, io->bio, io, fio, - io->last_block_in_bio, fio->new_blkaddr)) + if (io->bio && + (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio, + fio->new_blkaddr) || + !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host, + fio->page->index, fio))) __submit_merged_bio(io); alloc_new: if (io->bio == NULL) { @@ -980,6 +1019,9 @@ alloc_new: goto skip; } io->bio = __bio_alloc(fio, BIO_MAX_PAGES); + f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host, + fio->page->index, fio, + GFP_NOIO); io->fio = *fio; } @@ -1024,11 +1066,14 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, for_write); if (!bio) return ERR_PTR(-ENOMEM); + + f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS); + f2fs_target_device(sbi, blkaddr, bio); bio->bi_end_io = f2fs_read_end_io; bio_set_op_attrs(bio, REQ_OP_READ, op_flag); - if (f2fs_encrypted_file(inode)) + if (fscrypt_inode_uses_fs_layer_crypto(inode)) post_read_steps |= 1 << STEP_DECRYPT; if (f2fs_compressed_file(inode)) post_read_steps |= 1 << STEP_DECOMPRESS; @@ -2054,8 +2099,9 @@ zero_out: * This page will go to BIO. 
Do we need to send this * BIO off first? */ - if (bio && !page_is_mergeable(F2FS_I_SB(inode), bio, - *last_block_in_bio, block_nr)) { + if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio, + *last_block_in_bio, block_nr) || + !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) { submit_and_realloc: __f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA); bio = NULL; @@ -2406,6 +2452,9 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio) /* wait for GCed page writeback via META_MAPPING */ f2fs_wait_on_block_writeback(inode, fio->old_blkaddr); + if (fscrypt_inode_uses_inline_crypto(inode)) + return 0; + retry_encrypt: fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0, gfp_flags); @@ -2579,7 +2628,7 @@ got_it: f2fs_unlock_op(fio->sbi); err = f2fs_inplace_write_data(fio); if (err) { - if (f2fs_encrypted_file(inode)) + if (fscrypt_inode_uses_fs_layer_crypto(inode)) fscrypt_finalize_bounce_page(&fio->encrypted_page); if (PageWriteback(page)) end_page_writeback(page); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 1e2c9a59393a..4a365cf7f068 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -139,6 +139,9 @@ struct f2fs_mount_info { int fs_mode; /* fs mode: LFS or ADAPTIVE */ int bggc_mode; /* bggc mode: off, on or sync */ bool test_dummy_encryption; /* test dummy encryption */ +#ifdef CONFIG_FS_ENCRYPTION + bool inlinecrypt; /* inline encryption enabled */ +#endif block_t unusable_cap; /* Amount of space allowed to be * unusable when disabling checkpoint */ @@ -4035,7 +4038,13 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int rw = iov_iter_rw(iter); - if (f2fs_encrypted_file(inode)) + if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && f2fs_encrypted_file(inode)) { + if (!fscrypt_inode_uses_inline_crypto(inode) || + !IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), + F2FS_BLKSIZE)) + return true; + } + if (fsverity_active(inode)) return true; if (f2fs_is_multi_device(sbi)) return 
true; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index ecef87cee77d..de737389ba94 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -137,6 +137,7 @@ enum { Opt_alloc, Opt_fsync, Opt_test_dummy_encryption, + Opt_inlinecrypt, Opt_checkpoint_disable, Opt_checkpoint_disable_cap, Opt_checkpoint_disable_cap_perc, @@ -202,6 +203,7 @@ static match_table_t f2fs_tokens = { {Opt_alloc, "alloc_mode=%s"}, {Opt_fsync, "fsync_mode=%s"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, + {Opt_inlinecrypt, "inlinecrypt"}, {Opt_checkpoint_disable, "checkpoint=disable"}, {Opt_checkpoint_disable_cap, "checkpoint=disable:%u"}, {Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"}, @@ -787,6 +789,13 @@ static int parse_options(struct super_block *sb, char *options) f2fs_info(sbi, "Test dummy encryption mode enabled"); #else f2fs_info(sbi, "Test dummy encryption mount option ignored"); +#endif + break; + case Opt_inlinecrypt: +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + F2FS_OPTION(sbi).inlinecrypt = true; +#else + f2fs_info(sbi, "inline encryption not supported"); #endif break; case Opt_checkpoint_disable_cap_perc: @@ -1574,6 +1583,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) #ifdef CONFIG_FS_ENCRYPTION if (F2FS_OPTION(sbi).test_dummy_encryption) seq_puts(seq, ",test_dummy_encryption"); + if (F2FS_OPTION(sbi).inlinecrypt) + seq_puts(seq, ",inlinecrypt"); #endif if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT) @@ -1604,6 +1615,9 @@ static void default_options(struct f2fs_sb_info *sbi) F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; F2FS_OPTION(sbi).test_dummy_encryption = false; +#ifdef CONFIG_FS_ENCRYPTION + F2FS_OPTION(sbi).inlinecrypt = false; +#endif F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID); F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID); F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4; @@ -2456,6 +2470,30 @@ static void 
f2fs_get_ino_and_lblk_bits(struct super_block *sb, *lblk_bits_ret = 8 * sizeof(block_t); } +static bool f2fs_inline_crypt_enabled(struct super_block *sb) +{ + return F2FS_OPTION(F2FS_SB(sb)).inlinecrypt; +} + +static int f2fs_get_num_devices(struct super_block *sb) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); + + if (f2fs_is_multi_device(sbi)) + return sbi->s_ndevs; + return 1; +} + +static void f2fs_get_devices(struct super_block *sb, + struct request_queue **devs) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); + int i; + + for (i = 0; i < sbi->s_ndevs; i++) + devs[i] = bdev_get_queue(FDEV(i).bdev); +} + static const struct fscrypt_operations f2fs_cryptops = { .key_prefix = "f2fs:", .get_context = f2fs_get_context, @@ -2465,6 +2503,9 @@ static const struct fscrypt_operations f2fs_cryptops = { .max_namelen = F2FS_NAME_LEN, .has_stable_inodes = f2fs_has_stable_inodes, .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits, + .inline_crypt_enabled = f2fs_inline_crypt_enabled, + .get_num_devices = f2fs_get_num_devices, + .get_devices = f2fs_get_devices, }; #endif diff --git a/fs/iomap.c b/fs/iomap.c index 3f5b1655cfce..1e573a59ea71 100644 --- a/fs/iomap.c +++ b/fs/iomap.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -825,10 +826,13 @@ static blk_qc_t iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos, unsigned len) { + struct inode *inode = file_inode(dio->iocb->ki_filp); struct page *page = ZERO_PAGE(0); struct bio *bio; bio = bio_alloc(GFP_KERNEL, 1); + fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits, + GFP_KERNEL); bio_set_dev(bio, iomap->bdev); bio->bi_iter.bi_sector = iomap->blkno + ((pos - iomap->offset) >> 9); @@ -908,6 +912,8 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length, return 0; bio = bio_alloc(GFP_KERNEL, nr_pages); + fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits, + GFP_KERNEL); bio_set_dev(bio, iomap->bdev); bio->bi_iter.bi_sector = iomap->blkno + ((pos 
- iomap->offset) >> 9); diff --git a/include/linux/bio-crypt-ctx.h b/include/linux/bio-crypt-ctx.h new file mode 100644 index 000000000000..12b46ece9c55 --- /dev/null +++ b/include/linux/bio-crypt-ctx.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2019 Google LLC + */ +#ifndef __LINUX_BIO_CRYPT_CTX_H +#define __LINUX_BIO_CRYPT_CTX_H + +#include + +enum blk_crypto_mode_num { + BLK_ENCRYPTION_MODE_INVALID, + BLK_ENCRYPTION_MODE_AES_256_XTS, + BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV, + BLK_ENCRYPTION_MODE_ADIANTUM, + BLK_ENCRYPTION_MODE_MAX, +}; + +#ifdef CONFIG_BLOCK +#include + +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + +#define BLK_CRYPTO_MAX_KEY_SIZE 64 +#define BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE 128 + +/** + * struct blk_crypto_key - an inline encryption key + * @crypto_mode: encryption algorithm this key is for + * @data_unit_size: the data unit size for all encryption/decryptions with this + * key. This is the size in bytes of each individual plaintext and + * ciphertext. This is always a power of 2. It might be e.g. the + * filesystem block size or the disk sector size. + * @data_unit_size_bits: log2 of data_unit_size + * @size: size of this key in bytes (determined by @crypto_mode) + * @hash: hash of this key, for keyslot manager use only + * @raw: the raw bytes of this key. Only the first @size bytes are used. + * + * A blk_crypto_key is immutable once created, and many bios can reference it at + * the same time. It must not be freed until all bios using it have completed. 
+ */ +struct blk_crypto_key { + enum blk_crypto_mode_num crypto_mode; + unsigned int data_unit_size; + unsigned int data_unit_size_bits; + unsigned int size; + unsigned int hash; + u8 raw[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE]; +}; + +#define BLK_CRYPTO_MAX_IV_SIZE 32 +#define BLK_CRYPTO_DUN_ARRAY_SIZE (BLK_CRYPTO_MAX_IV_SIZE/sizeof(u64)) + +/** + * struct bio_crypt_ctx - an inline encryption context + * @bc_key: the key, algorithm, and data unit size to use + * @bc_keyslot: the keyslot that has been assigned for this key in @bc_ksm, + * or -1 if no keyslot has been assigned yet. + * @bc_dun: the data unit number (starting IV) to use + * @bc_ksm: the keyslot manager into which the key has been programmed with + * @bc_keyslot, or NULL if this key hasn't yet been programmed. + * + * A bio_crypt_ctx specifies that the contents of the bio will be encrypted (for + * write requests) or decrypted (for read requests) inline by the storage device + * or controller, or by the crypto API fallback. + */ +struct bio_crypt_ctx { + const struct blk_crypto_key *bc_key; + int bc_keyslot; + + /* Data unit number */ + u64 bc_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; + + /* + * The keyslot manager where the key has been programmed + * with keyslot. 
+ */ + struct keyslot_manager *bc_ksm; +}; + +int bio_crypt_ctx_init(void); + +struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask); + +void bio_crypt_free_ctx(struct bio *bio); + +static inline bool bio_has_crypt_ctx(struct bio *bio) +{ + return bio->bi_crypt_context; +} + +void bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask); + +static inline void bio_crypt_set_ctx(struct bio *bio, + const struct blk_crypto_key *key, + u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], + gfp_t gfp_mask) +{ + struct bio_crypt_ctx *bc = bio_crypt_alloc_ctx(gfp_mask); + + bc->bc_key = key; + memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun)); + bc->bc_ksm = NULL; + bc->bc_keyslot = -1; + + bio->bi_crypt_context = bc; +} + +void bio_crypt_ctx_release_keyslot(struct bio_crypt_ctx *bc); + +int bio_crypt_ctx_acquire_keyslot(struct bio_crypt_ctx *bc, + struct keyslot_manager *ksm); + +struct request; +bool bio_crypt_should_process(struct request *rq); + +static inline bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc, + unsigned int bytes, + u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]) +{ + int i = 0; + unsigned int inc = bytes >> bc->bc_key->data_unit_size_bits; + + while (i < BLK_CRYPTO_DUN_ARRAY_SIZE) { + if (bc->bc_dun[i] + inc != next_dun[i]) + return false; + inc = ((bc->bc_dun[i] + inc) < inc); + i++; + } + + return true; +} + + +static inline void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], + unsigned int inc) +{ + int i = 0; + + while (inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE) { + dun[i] += inc; + inc = (dun[i] < inc); + i++; + } +} + +static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes) +{ + struct bio_crypt_ctx *bc = bio->bi_crypt_context; + + if (!bc) + return; + + bio_crypt_dun_increment(bc->bc_dun, + bytes >> bc->bc_key->data_unit_size_bits); +} + +bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2); + +bool bio_crypt_ctx_mergeable(struct bio *b_1, unsigned int b1_bytes, + struct bio *b_2); + +#else /* 
CONFIG_BLK_INLINE_ENCRYPTION */ +static inline int bio_crypt_ctx_init(void) +{ + return 0; +} + +static inline bool bio_has_crypt_ctx(struct bio *bio) +{ + return false; +} + +static inline void bio_crypt_clone(struct bio *dst, struct bio *src, + gfp_t gfp_mask) { } + +static inline void bio_crypt_free_ctx(struct bio *bio) { } + +static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes) { } + +static inline bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2) +{ + return true; +} + +static inline bool bio_crypt_ctx_mergeable(struct bio *b_1, + unsigned int b1_bytes, + struct bio *b_2) +{ + return true; +} + +#endif /* CONFIG_BLK_INLINE_ENCRYPTION */ + +#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY) +static inline void bio_set_skip_dm_default_key(struct bio *bio) +{ + bio->bi_skip_dm_default_key = true; +} + +static inline bool bio_should_skip_dm_default_key(const struct bio *bio) +{ + return bio->bi_skip_dm_default_key; +} + +static inline void bio_clone_skip_dm_default_key(struct bio *dst, + const struct bio *src) +{ + dst->bi_skip_dm_default_key = src->bi_skip_dm_default_key; +} +#else /* CONFIG_DM_DEFAULT_KEY */ +static inline void bio_set_skip_dm_default_key(struct bio *bio) +{ +} + +static inline bool bio_should_skip_dm_default_key(const struct bio *bio) +{ + return false; +} + +static inline void bio_clone_skip_dm_default_key(struct bio *dst, + const struct bio *src) +{ +} +#endif /* !CONFIG_DM_DEFAULT_KEY */ + +#endif /* CONFIG_BLOCK */ + +#endif /* __LINUX_BIO_CRYPT_CTX_H */ diff --git a/include/linux/bio.h b/include/linux/bio.h index e260f000b9ac..2e08e3731376 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -22,6 +22,7 @@ #include #include #include +#include #ifdef CONFIG_BLOCK diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h new file mode 100644 index 000000000000..485cee0b92dd --- /dev/null +++ b/include/linux/blk-crypto.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2019 
Google LLC + */ + +#ifndef __LINUX_BLK_CRYPTO_H +#define __LINUX_BLK_CRYPTO_H + +#include + +#define SECTOR_SHIFT 9 + +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + +int blk_crypto_submit_bio(struct bio **bio_ptr); + +bool blk_crypto_endio(struct bio *bio); + +int blk_crypto_init_key(struct blk_crypto_key *blk_key, + const u8 *raw_key, unsigned int raw_key_size, + enum blk_crypto_mode_num crypto_mode, + unsigned int data_unit_size); + +int blk_crypto_evict_key(struct request_queue *q, + const struct blk_crypto_key *key); + +#else /* CONFIG_BLK_INLINE_ENCRYPTION */ + +static inline int blk_crypto_submit_bio(struct bio **bio_ptr) +{ + return 0; +} + +static inline bool blk_crypto_endio(struct bio *bio) +{ + return true; +} + +#endif /* CONFIG_BLK_INLINE_ENCRYPTION */ + +#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK + +int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num, + unsigned int data_unit_size, + struct request_queue *q); + +int blk_crypto_fallback_init(void); + +#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */ + +static inline int +blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num, + unsigned int data_unit_size, + struct request_queue *q) +{ + return 0; +} + +static inline int blk_crypto_fallback_init(void) +{ + return 0; +} + +#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */ + +#endif /* __LINUX_BLK_CRYPTO_H */ diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 415811f0b24a..41b2e8a10fdb 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -17,6 +17,7 @@ struct block_device; struct io_context; struct cgroup_subsys_state; typedef void (bio_end_io_t) (struct bio *); +struct bio_crypt_ctx; /* * Block error status values. See block/blk-core:blk_errors for the details. 
@@ -95,6 +96,14 @@ struct bio { struct blk_issue_stat bi_issue_stat; #endif #endif + +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + struct bio_crypt_ctx *bi_crypt_context; +#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY) + bool bi_skip_dm_default_key; +#endif +#endif + union { #if defined(CONFIG_BLK_DEV_INTEGRITY) struct bio_integrity_payload *bi_integrity; /* data integrity */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 835a3cf3b47b..19c98d619b87 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -43,6 +43,7 @@ struct pr_ops; struct rq_wb; struct blk_queue_stats; struct blk_stat_callback; +struct keyslot_manager; #define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ @@ -545,6 +546,11 @@ struct request_queue { */ unsigned int request_fn_active; +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + /* Inline crypto capabilities */ + struct keyslot_manager *ksm; +#endif + unsigned int rq_timeout; int poll_nsec; diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index be0eb0118992..52834cd273b4 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -321,6 +321,12 @@ struct dm_target { * on max_io_len boundary. */ bool split_discard_bios:1; + + /* + * Set if inline crypto capabilities from this target's underlying + * device(s) can be exposed via the device-mapper device. 
+ */ + bool may_passthrough_inline_crypto:1; }; /* Each target can link one of these into the table */ diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 5977a6ced502..3a2971075432 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -65,6 +65,10 @@ struct fscrypt_operations { bool (*has_stable_inodes)(struct super_block *sb); void (*get_ino_and_lblk_bits)(struct super_block *sb, int *ino_bits_ret, int *lblk_bits_ret); + bool (*inline_crypt_enabled)(struct super_block *sb); + int (*get_num_devices)(struct super_block *sb); + void (*get_devices)(struct super_block *sb, + struct request_queue **devs); }; static inline bool fscrypt_has_encryption_key(const struct inode *inode) @@ -533,6 +537,74 @@ static inline const char *fscrypt_get_symlink(struct inode *inode, } #endif /* !CONFIG_FS_ENCRYPTION */ +/* inline_crypt.c */ +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT +extern bool fscrypt_inode_uses_inline_crypto(const struct inode *inode); + +extern bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode); + +extern void fscrypt_set_bio_crypt_ctx(struct bio *bio, + const struct inode *inode, + u64 first_lblk, gfp_t gfp_mask); + +extern void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio, + const struct buffer_head *first_bh, + gfp_t gfp_mask); + +extern bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, + u64 next_lblk); + +extern bool fscrypt_mergeable_bio_bh(struct bio *bio, + const struct buffer_head *next_bh); + +#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ +static inline bool fscrypt_inode_uses_inline_crypto(const struct inode *inode) +{ + return false; +} + +static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode) +{ + return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); +} + +static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio, + const struct inode *inode, + u64 first_lblk, gfp_t gfp_mask) { } + +static inline void fscrypt_set_bio_crypt_ctx_bh( + struct bio *bio, + 
const struct buffer_head *first_bh, + gfp_t gfp_mask) { } + +static inline bool fscrypt_mergeable_bio(struct bio *bio, + const struct inode *inode, + u64 next_lblk) +{ + return true; +} + +static inline bool fscrypt_mergeable_bio_bh(struct bio *bio, + const struct buffer_head *next_bh) +{ + return true; +} +#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_DM_DEFAULT_KEY) +static inline bool +fscrypt_inode_should_skip_dm_default_key(const struct inode *inode) +{ + return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); +} +#else +static inline bool +fscrypt_inode_should_skip_dm_default_key(const struct inode *inode) +{ + return false; +} +#endif + /** * fscrypt_require_key - require an inode's encryption key * @inode: the inode we need the key for diff --git a/include/linux/keyslot-manager.h b/include/linux/keyslot-manager.h new file mode 100644 index 000000000000..6d32a031218e --- /dev/null +++ b/include/linux/keyslot-manager.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2019 Google LLC + */ + +#ifndef __LINUX_KEYSLOT_MANAGER_H +#define __LINUX_KEYSLOT_MANAGER_H + +#include + +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + +struct keyslot_manager; + +/** + * struct keyslot_mgmt_ll_ops - functions to manage keyslots in hardware + * @keyslot_program: Program the specified key into the specified slot in the + * inline encryption hardware. + * @keyslot_evict: Evict key from the specified keyslot in the hardware. + * The key is provided so that e.g. dm layers can evict + * keys from the devices that they map over. + * Returns 0 on success, -errno otherwise. + * @derive_raw_secret: (Optional) Derive a software secret from a + * hardware-wrapped key. Returns 0 on success, -EOPNOTSUPP + * if unsupported on the hardware, or another -errno code. 
+ * + * This structure should be provided by storage device drivers when they set up + * a keyslot manager - this structure holds the function ptrs that the keyslot + * manager will use to manipulate keyslots in the hardware. + */ +struct keyslot_mgmt_ll_ops { + int (*keyslot_program)(struct keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot); + int (*keyslot_evict)(struct keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot); + int (*derive_raw_secret)(struct keyslot_manager *ksm, + const u8 *wrapped_key, + unsigned int wrapped_key_size, + u8 *secret, unsigned int secret_size); +}; + +struct keyslot_manager *keyslot_manager_create(unsigned int num_slots, + const struct keyslot_mgmt_ll_ops *ksm_ops, + const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], + void *ll_priv_data); + +int keyslot_manager_get_slot_for_key(struct keyslot_manager *ksm, + const struct blk_crypto_key *key); + +void keyslot_manager_get_slot(struct keyslot_manager *ksm, unsigned int slot); + +void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot); + +bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, + enum blk_crypto_mode_num crypto_mode, + unsigned int data_unit_size); + +int keyslot_manager_evict_key(struct keyslot_manager *ksm, + const struct blk_crypto_key *key); + +void keyslot_manager_reprogram_all_keys(struct keyslot_manager *ksm); + +void *keyslot_manager_private(struct keyslot_manager *ksm); + +void keyslot_manager_destroy(struct keyslot_manager *ksm); + +struct keyslot_manager *keyslot_manager_create_passthrough( + const struct keyslot_mgmt_ll_ops *ksm_ops, + const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], + void *ll_priv_data); + +void keyslot_manager_intersect_modes(struct keyslot_manager *parent, + const struct keyslot_manager *child); + +int keyslot_manager_derive_raw_secret(struct keyslot_manager *ksm, + const u8 *wrapped_key, + unsigned int 
wrapped_key_size, + u8 *secret, unsigned int secret_size); + +#endif /* CONFIG_BLK_INLINE_ENCRYPTION */ + +#endif /* __LINUX_KEYSLOT_MANAGER_H */ diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index 393b5ae4e7d4..1b9cdb7a5c8f 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -126,6 +126,8 @@ struct fscrypt_add_key_arg { __u32 raw_size; __u32 key_id; __u32 __reserved[7]; + /* N.B.: "temporary" flag, not reserved upstream */ +#define __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED 0x00000001 __u32 __flags; __u8 raw[]; }; From b8722ec54a0aac3351c940f723356627e31a8327 Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Wed, 5 Aug 2020 00:02:12 +0530 Subject: [PATCH 035/141] Revert "Reverting crypto patches" This reverts commit b73e822d12ecbea7cad3742c46fd1be17aa141c8. This is reverted to integrate new file encryption framework support changes to ensure all fixes are present to use new encryption policies. Change-Id: I455ec66664064069ac34e6fe410bd28dc3a53d07 Signed-off-by: Neeraj Soni --- Documentation/filesystems/fscrypt.rst | 86 +++++-- block/blk-crypto-fallback.c | 50 ++-- block/blk-crypto-internal.h | 9 + block/blk-crypto.c | 55 ++++- block/keyslot-manager.c | 30 ++- drivers/md/dm-default-key.c | 29 ++- drivers/md/dm.c | 80 ++++++- drivers/scsi/ufs/ufshcd-crypto.c | 12 +- drivers/scsi/ufs/ufshcd-crypto.h | 8 + drivers/scsi/ufs/ufshcd.c | 2 +- drivers/scsi/ufs/ufshcd.h | 1 + fs/crypto/Kconfig | 22 +- fs/crypto/bio.c | 183 +++++++++++---- fs/crypto/crypto.c | 57 +---- fs/crypto/fname.c | 314 ++++++++++++++++++++------ fs/crypto/fscrypt_private.h | 87 +++---- fs/crypto/hkdf.c | 2 +- fs/crypto/hooks.c | 48 +++- fs/crypto/inline_crypt.c | 74 ++++-- fs/crypto/keyring.c | 22 +- fs/crypto/keysetup.c | 141 +++++++----- fs/crypto/keysetup_v1.c | 21 +- fs/crypto/policy.c | 191 +++++++++++----- fs/ext4/Kconfig | 1 + fs/ext4/dir.c | 9 +- fs/ext4/ioctl.c | 6 + fs/ext4/namei.c | 1 + fs/ext4/super.c | 5 - fs/f2fs/Kconfig | 1 + fs/f2fs/dir.c | 
76 ++++--- fs/f2fs/f2fs.h | 14 +- fs/f2fs/file.c | 11 + fs/f2fs/hash.c | 25 +- fs/f2fs/inline.c | 9 +- fs/f2fs/namei.c | 1 + fs/f2fs/super.c | 7 - fs/inode.c | 3 +- fs/libfs.c | 50 ++++ fs/ubifs/Kconfig | 1 + fs/ubifs/dir.c | 20 +- include/linux/bio-crypt-ctx.h | 3 + include/linux/blk-crypto.h | 18 +- include/linux/fs.h | 2 + include/linux/fscrypt.h | 134 +++++------ include/linux/keyslot-manager.h | 14 +- include/uapi/linux/fscrypt.h | 2 + 46 files changed, 1324 insertions(+), 613 deletions(-) diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst index 471a511c7508..dc444b8d3704 100644 --- a/Documentation/filesystems/fscrypt.rst +++ b/Documentation/filesystems/fscrypt.rst @@ -234,8 +234,8 @@ HKDF is more flexible, is nonreversible, and evenly distributes entropy from the master key. HKDF is also standardized and widely used by other software, whereas the AES-128-ECB based KDF is ad-hoc. -Per-file keys -------------- +Per-file encryption keys +------------------------ Since each master key can protect many files, it is necessary to "tweak" the encryption of each file so that the same plaintext in two @@ -268,9 +268,9 @@ is greater than that of an AES-256-XTS key. Therefore, to improve performance and save memory, for Adiantum a "direct key" configuration is supported. When the user has enabled this by setting FSCRYPT_POLICY_FLAG_DIRECT_KEY in the fscrypt policy, -per-file keys are not used. Instead, whenever any data (contents or -filenames) is encrypted, the file's 16-byte nonce is included in the -IV. Moreover: +per-file encryption keys are not used. Instead, whenever any data +(contents or filenames) is encrypted, the file's 16-byte nonce is +included in the IV. Moreover: - For v1 encryption policies, the encryption is done directly with the master key. 
Because of this, users **must not** use the same master @@ -302,6 +302,16 @@ For master keys used for v2 encryption policies, a unique 16-byte "key identifier" is also derived using the KDF. This value is stored in the clear, since it is needed to reliably identify the key itself. +Dirhash keys +------------ + +For directories that are indexed using a secret-keyed dirhash over the +plaintext filenames, the KDF is also used to derive a 128-bit +SipHash-2-4 key per directory in order to hash filenames. This works +just like deriving a per-file encryption key, except that a different +KDF context is used. Currently, only casefolded ("case-insensitive") +encrypted directories use this style of hashing. + Encryption modes and usage ========================== @@ -325,11 +335,11 @@ used. Adiantum is a (primarily) stream cipher-based mode that is fast even on CPUs without dedicated crypto instructions. It's also a true wide-block mode, unlike XTS. It can also eliminate the need to derive -per-file keys. However, it depends on the security of two primitives, -XChaCha12 and AES-256, rather than just one. See the paper -"Adiantum: length-preserving encryption for entry-level processors" -(https://eprint.iacr.org/2018/720.pdf) for more details. To use -Adiantum, CONFIG_CRYPTO_ADIANTUM must be enabled. Also, fast +per-file encryption keys. However, it depends on the security of two +primitives, XChaCha12 and AES-256, rather than just one. See the +paper "Adiantum: length-preserving encryption for entry-level +processors" (https://eprint.iacr.org/2018/720.pdf) for more details. +To use Adiantum, CONFIG_CRYPTO_ADIANTUM must be enabled. Also, fast implementations of ChaCha and NHPoly1305 should be enabled, e.g. CONFIG_CRYPTO_CHACHA20_NEON and CONFIG_CRYPTO_NHPOLY1305_NEON for ARM. 
@@ -513,7 +523,9 @@ FS_IOC_SET_ENCRYPTION_POLICY can fail with the following errors: - ``EEXIST``: the file is already encrypted with an encryption policy different from the one specified - ``EINVAL``: an invalid encryption policy was specified (invalid - version, mode(s), or flags; or reserved bits were set) + version, mode(s), or flags; or reserved bits were set); or a v1 + encryption policy was specified but the directory has the casefold + flag enabled (casefolding is incompatible with v1 policies). - ``ENOKEY``: a v2 encryption policy was specified, but the key with the specified ``master_key_identifier`` has not been added, nor does the process have the CAP_FOWNER capability in the initial user @@ -621,6 +633,17 @@ from a passphrase or other low-entropy user credential. FS_IOC_GET_ENCRYPTION_PWSALT is deprecated. Instead, prefer to generate and manage any needed salt(s) in userspace. +Getting a file's encryption nonce +--------------------------------- + +Since Linux v5.7, the ioctl FS_IOC_GET_ENCRYPTION_NONCE is supported. +On encrypted files and directories it gets the inode's 16-byte nonce. +On unencrypted files and directories, it fails with ENODATA. + +This ioctl can be useful for automated tests which verify that the +encryption is being done correctly. It is not needed for normal use +of fscrypt. + Adding keys ----------- @@ -638,7 +661,8 @@ follows:: struct fscrypt_add_key_arg { struct fscrypt_key_specifier key_spec; __u32 raw_size; - __u32 __reserved[9]; + __u32 key_id; + __u32 __reserved[8]; __u8 raw[]; }; @@ -655,6 +679,12 @@ follows:: } u; }; + struct fscrypt_provisioning_key_payload { + __u32 type; + __u32 __reserved; + __u8 raw[]; + }; + :c:type:`struct fscrypt_add_key_arg` must be zeroed, then initialized as follows: @@ -677,9 +707,26 @@ as follows: ``Documentation/security/keys/core.rst``). - ``raw_size`` must be the size of the ``raw`` key provided, in bytes. 
+ Alternatively, if ``key_id`` is nonzero, this field must be 0, since + in that case the size is implied by the specified Linux keyring key. + +- ``key_id`` is 0 if the raw key is given directly in the ``raw`` + field. Otherwise ``key_id`` is the ID of a Linux keyring key of + type "fscrypt-provisioning" whose payload is a :c:type:`struct + fscrypt_provisioning_key_payload` whose ``raw`` field contains the + raw key and whose ``type`` field matches ``key_spec.type``. Since + ``raw`` is variable-length, the total size of this key's payload + must be ``sizeof(struct fscrypt_provisioning_key_payload)`` plus the + raw key size. The process must have Search permission on this key. + + Most users should leave this 0 and specify the raw key directly. + The support for specifying a Linux keyring key is intended mainly to + allow re-adding keys after a filesystem is unmounted and re-mounted, + without having to store the raw keys in userspace memory. - ``raw`` is a variable-length field which must contain the actual - key, ``raw_size`` bytes long. + key, ``raw_size`` bytes long. Alternatively, if ``key_id`` is + nonzero, then this field is unused. For v2 policy keys, the kernel keeps track of which user (identified by effective user ID) added the key, and only allows the key to be @@ -701,11 +748,16 @@ FS_IOC_ADD_ENCRYPTION_KEY can fail with the following errors: - ``EACCES``: FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR was specified, but the caller does not have the CAP_SYS_ADMIN capability in the initial - user namespace + user namespace; or the raw key was specified by Linux key ID but the + process lacks Search permission on the key. 
- ``EDQUOT``: the key quota for this user would be exceeded by adding the key - ``EINVAL``: invalid key size or key specifier type, or reserved bits were set +- ``EKEYREJECTED``: the raw key was specified by Linux key ID, but the + key has the wrong type +- ``ENOKEY``: the raw key was specified by Linux key ID, but no key + exists with that ID - ``ENOTTY``: this type of filesystem does not implement encryption - ``EOPNOTSUPP``: the kernel was not configured with encryption support for this filesystem, or the filesystem superblock has not @@ -1108,8 +1160,8 @@ The context structs contain the same information as the corresponding policy structs (see `Setting an encryption policy`_), except that the context structs also contain a nonce. The nonce is randomly generated by the kernel and is used as KDF input or as a tweak to cause -different files to be encrypted differently; see `Per-file keys`_ and -`DIRECT_KEY policies`_. +different files to be encrypted differently; see `Per-file encryption +keys`_ and `DIRECT_KEY policies`_. Data path changes ----------------- @@ -1161,7 +1213,7 @@ filesystem-specific hash(es) needed for directory lookups. This allows the filesystem to still, with a high degree of confidence, map the filename given in ->lookup() back to a particular directory entry that was previously listed by readdir(). See :c:type:`struct -fscrypt_digested_name` in the source for more details. +fscrypt_nokey_name` in the source for more details. Note that the precise way that filenames are presented to userspace without the key is subject to change in the future. It is only meant diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index cce3317cba80..ad83e1077ba3 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -487,21 +487,13 @@ out: return false; } -/** - * blk_crypto_start_using_mode() - Start using a crypto algorithm on a device - * @mode_num: the blk_crypto_mode we want to allocate ciphers for. 
- * @data_unit_size: the data unit size that will be used - * @q: the request queue for the device - * - * Upper layers must call this function to ensure that a the crypto API fallback - * has transforms for this algorithm, if they become necessary. - * - * Return: 0 on success and -err on error. +/* + * Prepare blk-crypto-fallback for the specified crypto mode. + * Returns -ENOPKG if the needed crypto API support is missing. */ -int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num, - unsigned int data_unit_size, - struct request_queue *q) +int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num) { + const char *cipher_str = blk_crypto_modes[mode_num].cipher_str; struct blk_crypto_keyslot *slotp; unsigned int i; int err = 0; @@ -514,25 +506,20 @@ int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num, if (likely(smp_load_acquire(&tfms_inited[mode_num]))) return 0; - /* - * If the keyslot manager of the request queue supports this - * crypto mode, then we don't need to allocate this mode. 
- */ - if (keyslot_manager_crypto_mode_supported(q->ksm, mode_num, - data_unit_size)) - return 0; - mutex_lock(&tfms_init_lock); if (likely(tfms_inited[mode_num])) goto out; for (i = 0; i < blk_crypto_num_keyslots; i++) { slotp = &blk_crypto_keyslots[i]; - slotp->tfms[mode_num] = crypto_alloc_skcipher( - blk_crypto_modes[mode_num].cipher_str, - 0, 0); + slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0); if (IS_ERR(slotp->tfms[mode_num])) { err = PTR_ERR(slotp->tfms[mode_num]); + if (err == -ENOENT) { + pr_warn_once("Missing crypto API support for \"%s\"\n", + cipher_str); + err = -ENOPKG; + } slotp->tfms[mode_num] = NULL; goto out_free_tfms; } @@ -558,7 +545,6 @@ out: mutex_unlock(&tfms_init_lock); return err; } -EXPORT_SYMBOL_GPL(blk_crypto_start_using_mode); int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key) { @@ -571,6 +557,12 @@ int blk_crypto_fallback_submit_bio(struct bio **bio_ptr) struct bio_crypt_ctx *bc = bio->bi_crypt_context; struct bio_fallback_crypt_ctx *f_ctx; + if (bc->bc_key->is_hw_wrapped) { + pr_warn_once("HW wrapped key cannot be used with fallback.\n"); + bio->bi_status = BLK_STS_NOTSUPP; + return -EOPNOTSUPP; + } + if (!tfms_inited[bc->bc_key->crypto_mode]) { bio->bi_status = BLK_STS_IOERR; return -EIO; @@ -608,9 +600,11 @@ int __init blk_crypto_fallback_init(void) crypto_mode_supported[i] = 0xFFFFFFFF; crypto_mode_supported[BLK_ENCRYPTION_MODE_INVALID] = 0; - blk_crypto_ksm = keyslot_manager_create(blk_crypto_num_keyslots, - &blk_crypto_ksm_ll_ops, - crypto_mode_supported, NULL); + blk_crypto_ksm = keyslot_manager_create( + NULL, blk_crypto_num_keyslots, + &blk_crypto_ksm_ll_ops, + BLK_CRYPTO_FEATURE_STANDARD_KEYS, + crypto_mode_supported, NULL); if (!blk_crypto_ksm) return -ENOMEM; diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h index 40d826b743da..4da998c803f2 100644 --- a/block/blk-crypto-internal.h +++ b/block/blk-crypto-internal.h @@ -19,6 +19,8 @@ extern const struct blk_crypto_mode 
blk_crypto_modes[]; #ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK +int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num); + int blk_crypto_fallback_submit_bio(struct bio **bio_ptr); bool blk_crypto_queue_decrypt_bio(struct bio *bio); @@ -29,6 +31,13 @@ bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc); #else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */ +static inline int +blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num) +{ + pr_warn_once("crypto API fallback is disabled\n"); + return -ENOPKG; +} + static inline bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc) { return false; diff --git a/block/blk-crypto.c b/block/blk-crypto.c index a8de0d9680e0..f56bbec1132f 100644 --- a/block/blk-crypto.c +++ b/block/blk-crypto.c @@ -109,7 +109,8 @@ int blk_crypto_submit_bio(struct bio **bio_ptr) /* Get device keyslot if supported */ if (keyslot_manager_crypto_mode_supported(q->ksm, bc->bc_key->crypto_mode, - bc->bc_key->data_unit_size)) { + bc->bc_key->data_unit_size, + bc->bc_key->is_hw_wrapped)) { err = bio_crypt_ctx_acquire_keyslot(bc, q->ksm); if (!err) return 0; @@ -175,7 +176,9 @@ bool blk_crypto_endio(struct bio *bio) * @raw_key_size: Size of raw key. Must be at least the required size for the * chosen @crypto_mode; see blk_crypto_modes[]. (It's allowed * to be longer than the mode's actual key size, in order to - * support inline encryption hardware that accepts wrapped keys.) + * support inline encryption hardware that accepts wrapped keys. + * @is_hw_wrapped has to be set for such keys) + * @is_hw_wrapped: Denotes @raw_key is wrapped. 
* @crypto_mode: identifier for the encryption algorithm to use * @data_unit_size: the data unit size to use for en/decryption * @@ -184,6 +187,7 @@ bool blk_crypto_endio(struct bio *bio) */ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key, unsigned int raw_key_size, + bool is_hw_wrapped, enum blk_crypto_mode_num crypto_mode, unsigned int data_unit_size) { @@ -198,9 +202,14 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE); mode = &blk_crypto_modes[crypto_mode]; - if (raw_key_size < mode->keysize || - raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE) - return -EINVAL; + if (is_hw_wrapped) { + if (raw_key_size < mode->keysize || + raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE) + return -EINVAL; + } else { + if (raw_key_size != mode->keysize) + return -EINVAL; + } if (!is_power_of_2(data_unit_size)) return -EINVAL; @@ -209,6 +218,7 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, blk_key->data_unit_size = data_unit_size; blk_key->data_unit_size_bits = ilog2(data_unit_size); blk_key->size = raw_key_size; + blk_key->is_hw_wrapped = is_hw_wrapped; memcpy(blk_key->raw, raw_key, raw_key_size); /* @@ -223,6 +233,38 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, } EXPORT_SYMBOL_GPL(blk_crypto_init_key); +/** + * blk_crypto_start_using_mode() - Start using blk-crypto on a device + * @crypto_mode: the crypto mode that will be used + * @data_unit_size: the data unit size that will be used + * @is_hw_wrapped_key: whether the key will be hardware-wrapped + * @q: the request queue for the device + * + * Upper layers must call this function to ensure that either the hardware + * supports the needed crypto settings, or the crypto API fallback has + * transforms for the needed mode allocated and ready to go. 
+ * + * Return: 0 on success; -ENOPKG if the hardware doesn't support the crypto + * settings and blk-crypto-fallback is either disabled or the needed + * algorithm is disabled in the crypto API; or another -errno code. + */ +int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode, + unsigned int data_unit_size, + bool is_hw_wrapped_key, + struct request_queue *q) +{ + if (keyslot_manager_crypto_mode_supported(q->ksm, crypto_mode, + data_unit_size, + is_hw_wrapped_key)) + return 0; + if (is_hw_wrapped_key) { + pr_warn_once("hardware doesn't support wrapped keys\n"); + return -EOPNOTSUPP; + } + return blk_crypto_fallback_start_using_mode(crypto_mode); +} +EXPORT_SYMBOL_GPL(blk_crypto_start_using_mode); + /** * blk_crypto_evict_key() - Evict a key from any inline encryption hardware * it may have been programmed into @@ -243,7 +285,8 @@ int blk_crypto_evict_key(struct request_queue *q, { if (q->ksm && keyslot_manager_crypto_mode_supported(q->ksm, key->crypto_mode, - key->data_unit_size)) + key->data_unit_size, + key->is_hw_wrapped)) return keyslot_manager_evict_key(q->ksm, key); return blk_crypto_fallback_evict_key(key); diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c index 7e42813c9de0..1999c503b954 100644 --- a/block/keyslot-manager.c +++ b/block/keyslot-manager.c @@ -43,6 +43,7 @@ struct keyslot { struct keyslot_manager { unsigned int num_slots; struct keyslot_mgmt_ll_ops ksm_ll_ops; + unsigned int features; unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX]; void *ll_priv_data; @@ -78,6 +79,8 @@ static inline bool keyslot_manager_is_passthrough(struct keyslot_manager *ksm) * @ksm_ll_ops: The struct keyslot_mgmt_ll_ops for the device that this keyslot * manager will use to perform operations like programming and * evicting keys. + * @features: The supported features as a bitmask of BLK_CRYPTO_FEATURE_* flags. + * Most drivers should set BLK_CRYPTO_FEATURE_STANDARD_KEYS here. 
* @crypto_mode_supported: Array of size BLK_ENCRYPTION_MODE_MAX of * bitmasks that represents whether a crypto mode * and data unit size are supported. The i'th bit @@ -95,6 +98,7 @@ static inline bool keyslot_manager_is_passthrough(struct keyslot_manager *ksm) */ struct keyslot_manager *keyslot_manager_create(unsigned int num_slots, const struct keyslot_mgmt_ll_ops *ksm_ll_ops, + unsigned int features, const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], void *ll_priv_data) { @@ -116,6 +120,7 @@ struct keyslot_manager *keyslot_manager_create(unsigned int num_slots, ksm->num_slots = num_slots; ksm->ksm_ll_ops = *ksm_ll_ops; + ksm->features = features; memcpy(ksm->crypto_mode_supported, crypto_mode_supported, sizeof(ksm->crypto_mode_supported)); ksm->ll_priv_data = ll_priv_data; @@ -321,23 +326,24 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot) } /** - * keyslot_manager_crypto_mode_supported() - Find out if a crypto_mode/data - * unit size combination is supported - * by a ksm. + * keyslot_manager_crypto_mode_supported() - Find out if a crypto_mode / + * data unit size / is_hw_wrapped_key + * combination is supported by a ksm. * @ksm: The keyslot manager to check * @crypto_mode: The crypto mode to check for. * @data_unit_size: The data_unit_size for the mode. + * @is_hw_wrapped_key: Whether a hardware-wrapped key will be used. * * Calls and returns the result of the crypto_mode_supported function specified * by the ksm. * * Context: Process context. - * Return: Whether or not this ksm supports the specified crypto_mode/ - * data_unit_size combo. + * Return: Whether or not this ksm supports the specified crypto settings. 
*/ bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, enum blk_crypto_mode_num crypto_mode, - unsigned int data_unit_size) + unsigned int data_unit_size, + bool is_hw_wrapped_key) { if (!ksm) return false; @@ -345,6 +351,13 @@ bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, return false; if (WARN_ON(!is_power_of_2(data_unit_size))) return false; + if (is_hw_wrapped_key) { + if (!(ksm->features & BLK_CRYPTO_FEATURE_WRAPPED_KEYS)) + return false; + } else { + if (!(ksm->features & BLK_CRYPTO_FEATURE_STANDARD_KEYS)) + return false; + } return ksm->crypto_mode_supported[crypto_mode] & data_unit_size; } @@ -457,6 +470,7 @@ EXPORT_SYMBOL_GPL(keyslot_manager_destroy); /** * keyslot_manager_create_passthrough() - Create a passthrough keyslot manager * @ksm_ll_ops: The struct keyslot_mgmt_ll_ops + * @features: Bitmask of BLK_CRYPTO_FEATURE_* flags * @crypto_mode_supported: Bitmasks for supported encryption modes * @ll_priv_data: Private data passed as is to the functions in ksm_ll_ops. 
* @@ -473,6 +487,7 @@ EXPORT_SYMBOL_GPL(keyslot_manager_destroy); */ struct keyslot_manager *keyslot_manager_create_passthrough( const struct keyslot_mgmt_ll_ops *ksm_ll_ops, + unsigned int features, const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], void *ll_priv_data) { @@ -483,6 +498,7 @@ struct keyslot_manager *keyslot_manager_create_passthrough( return NULL; ksm->ksm_ll_ops = *ksm_ll_ops; + ksm->features = features; memcpy(ksm->crypto_mode_supported, crypto_mode_supported, sizeof(ksm->crypto_mode_supported)); ksm->ll_priv_data = ll_priv_data; @@ -510,11 +526,13 @@ void keyslot_manager_intersect_modes(struct keyslot_manager *parent, if (child) { unsigned int i; + parent->features &= child->features; for (i = 0; i < ARRAY_SIZE(child->crypto_mode_supported); i++) { parent->crypto_mode_supported[i] &= child->crypto_mode_supported[i]; } } else { + parent->features = 0; memset(parent->crypto_mode_supported, 0, sizeof(parent->crypto_mode_supported)); } diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c index 43a30c076aa6..3d0bd0645f7a 100644 --- a/drivers/md/dm-default-key.c +++ b/drivers/md/dm-default-key.c @@ -9,7 +9,7 @@ #define DM_MSG_PREFIX "default-key" -#define DM_DEFAULT_KEY_MAX_KEY_SIZE 64 +#define DM_DEFAULT_KEY_MAX_WRAPPED_KEY_SIZE 128 #define SECTOR_SIZE (1 << SECTOR_SHIFT) @@ -49,6 +49,7 @@ struct default_key_c { unsigned int sector_size; unsigned int sector_bits; struct blk_crypto_key key; + bool is_hw_wrapped; }; static const struct dm_default_key_cipher * @@ -84,7 +85,7 @@ static int default_key_ctr_optional(struct dm_target *ti, struct default_key_c *dkc = ti->private; struct dm_arg_set as; static const struct dm_arg _args[] = { - {0, 3, "Invalid number of feature args"}, + {0, 4, "Invalid number of feature args"}, }; unsigned int opt_params; const char *opt_string; @@ -117,6 +118,8 @@ static int default_key_ctr_optional(struct dm_target *ti, } } else if (!strcmp(opt_string, "iv_large_sectors")) { iv_large_sectors 
= true; + } else if (!strcmp(opt_string, "wrappedkey_v0")) { + dkc->is_hw_wrapped = true; } else { ti->error = "Invalid feature arguments"; return -EINVAL; @@ -144,7 +147,8 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct default_key_c *dkc; const struct dm_default_key_cipher *cipher; - u8 raw_key[DM_DEFAULT_KEY_MAX_KEY_SIZE]; + u8 raw_key[DM_DEFAULT_KEY_MAX_WRAPPED_KEY_SIZE]; + unsigned int raw_key_size; unsigned long long tmpll; char dummy; int err; @@ -176,12 +180,15 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) } /* */ - if (strlen(argv[1]) != 2 * cipher->key_size) { - ti->error = "Incorrect key size for cipher"; + raw_key_size = strlen(argv[1]); + if (raw_key_size > 2 * DM_DEFAULT_KEY_MAX_WRAPPED_KEY_SIZE || + raw_key_size % 2) { + ti->error = "Invalid keysize"; err = -EINVAL; goto bad; } - if (hex2bin(raw_key, argv[1], cipher->key_size) != 0) { + raw_key_size /= 2; + if (hex2bin(raw_key, argv[1], raw_key_size) != 0) { ti->error = "Malformed key string"; err = -EINVAL; goto bad; @@ -226,13 +233,15 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) } err = blk_crypto_init_key(&dkc->key, raw_key, cipher->key_size, - cipher->mode_num, dkc->sector_size); + dkc->is_hw_wrapped, cipher->mode_num, + dkc->sector_size); if (err) { ti->error = "Error initializing blk-crypto key"; goto bad; } err = blk_crypto_start_using_mode(cipher->mode_num, dkc->sector_size, + dkc->is_hw_wrapped, dkc->dev->bdev->bd_queue); if (err) { ti->error = "Error starting to use blk-crypto"; @@ -319,6 +328,8 @@ static void default_key_status(struct dm_target *ti, status_type_t type, num_feature_args += !!ti->num_discard_bios; if (dkc->sector_size != SECTOR_SIZE) num_feature_args += 2; + if (dkc->is_hw_wrapped) + num_feature_args += 1; if (num_feature_args != 0) { DMEMIT(" %d", num_feature_args); if (ti->num_discard_bios) @@ -327,6 +338,8 @@ static void default_key_status(struct 
dm_target *ti, status_type_t type, DMEMIT(" sector_size:%u", dkc->sector_size); DMEMIT(" iv_large_sectors"); } + if (dkc->is_hw_wrapped) + DMEMIT(" wrappedkey_v0"); } break; } @@ -372,7 +385,7 @@ static void default_key_io_hints(struct dm_target *ti, static struct target_type default_key_target = { .name = "default-key", - .version = {2, 0, 0}, + .version = {2, 1, 0}, .module = THIS_MODULE, .ctr = default_key_ctr, .dtr = default_key_dtr, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 0189f70e87a0..bb78417a249b 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2088,21 +2088,97 @@ static int dm_keyslot_evict(struct keyslot_manager *ksm, return args.err; } +struct dm_derive_raw_secret_args { + const u8 *wrapped_key; + unsigned int wrapped_key_size; + u8 *secret; + unsigned int secret_size; + int err; +}; + +static int dm_derive_raw_secret_callback(struct dm_target *ti, + struct dm_dev *dev, sector_t start, + sector_t len, void *data) +{ + struct dm_derive_raw_secret_args *args = data; + struct request_queue *q = dev->bdev->bd_queue; + + if (!args->err) + return 0; + + if (!q->ksm) { + args->err = -EOPNOTSUPP; + return 0; + } + + args->err = keyslot_manager_derive_raw_secret(q->ksm, args->wrapped_key, + args->wrapped_key_size, + args->secret, + args->secret_size); + /* Try another device in case this fails. */ + return 0; +} + +/* + * Retrieve the raw_secret from the underlying device. 
Given that + * only only one raw_secret can exist for a particular wrappedkey, + * retrieve it only from the first device that supports derive_raw_secret() + */ +static int dm_derive_raw_secret(struct keyslot_manager *ksm, + const u8 *wrapped_key, + unsigned int wrapped_key_size, + u8 *secret, unsigned int secret_size) +{ + struct mapped_device *md = keyslot_manager_private(ksm); + struct dm_derive_raw_secret_args args = { + .wrapped_key = wrapped_key, + .wrapped_key_size = wrapped_key_size, + .secret = secret, + .secret_size = secret_size, + .err = -EOPNOTSUPP, + }; + struct dm_table *t; + int srcu_idx; + int i; + struct dm_target *ti; + + t = dm_get_live_table(md, &srcu_idx); + if (!t) + return -EOPNOTSUPP; + for (i = 0; i < dm_table_get_num_targets(t); i++) { + ti = dm_table_get_target(t, i); + if (!ti->type->iterate_devices) + continue; + ti->type->iterate_devices(ti, dm_derive_raw_secret_callback, + &args); + if (!args.err) + break; + } + dm_put_live_table(md, srcu_idx); + return args.err; +} + static struct keyslot_mgmt_ll_ops dm_ksm_ll_ops = { .keyslot_evict = dm_keyslot_evict, + .derive_raw_secret = dm_derive_raw_secret, }; static int dm_init_inline_encryption(struct mapped_device *md) { + unsigned int features; unsigned int mode_masks[BLK_ENCRYPTION_MODE_MAX]; /* - * Start out with all crypto mode support bits set. Any unsupported - * bits will be cleared later when calculating the device restrictions. + * Initially declare support for all crypto settings. Anything + * unsupported by a child device will be removed later when calculating + * the device restrictions. 
*/ + features = BLK_CRYPTO_FEATURE_STANDARD_KEYS | + BLK_CRYPTO_FEATURE_WRAPPED_KEYS; memset(mode_masks, 0xFF, sizeof(mode_masks)); md->queue->ksm = keyslot_manager_create_passthrough(&dm_ksm_ll_ops, + features, mode_masks, md); if (!md->queue->ksm) return -ENOMEM; diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c index 276b49ad13be..28abedfbf609 100644 --- a/drivers/scsi/ufs/ufshcd-crypto.c +++ b/drivers/scsi/ufs/ufshcd-crypto.c @@ -337,7 +337,9 @@ int ufshcd_hba_init_crypto_spec(struct ufs_hba *hba, ufshcd_clear_all_keyslots(hba); - hba->ksm = keyslot_manager_create(ufshcd_num_keyslots(hba), ksm_ops, + hba->ksm = keyslot_manager_create(ufshcd_num_keyslots(hba), + ksm_ops, + BLK_CRYPTO_FEATURE_STANDARD_KEYS, crypto_modes_supported, hba); if (!hba->ksm) { @@ -458,6 +460,14 @@ int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba, return ufshcd_prepare_lrbp_crypto_spec(hba, cmd, lrbp); } +int ufshcd_map_sg_crypto(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + if (hba->crypto_vops && hba->crypto_vops->map_sg_crypto) + return hba->crypto_vops->map_sg_crypto(hba, lrbp); + + return 0; +} + int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba, struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp) diff --git a/drivers/scsi/ufs/ufshcd-crypto.h b/drivers/scsi/ufs/ufshcd-crypto.h index 95f37c9f7672..f223a06fbf93 100644 --- a/drivers/scsi/ufs/ufshcd-crypto.h +++ b/drivers/scsi/ufs/ufshcd-crypto.h @@ -80,6 +80,8 @@ int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba, struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp); +int ufshcd_map_sg_crypto(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); + int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba, struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp); @@ -133,6 +135,12 @@ static inline int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba, return 0; } +static inline int ufshcd_map_sg_crypto(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp) +{ + return 0; +} + static inline bool ufshcd_lrbp_crypto_enabled(struct 
ufshcd_lrb *lrbp) { return false; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 55f2f1645c1f..abedcca0e793 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3321,7 +3321,7 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) lrbp->utr_descriptor_ptr->prd_table_length = 0; } - return 0; + return ufshcd_map_sg_crypto(hba, lrbp); } /** diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 790e2be33995..7621eaf51404 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -417,6 +417,7 @@ struct ufs_hba_crypto_variant_ops { int (*prepare_lrbp_crypto)(struct ufs_hba *hba, struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp); + int (*map_sg_crypto)(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); int (*complete_lrbp_crypto)(struct ufs_hba *hba, struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp); diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig index 0701bb90f99c..97c0a113f4cc 100644 --- a/fs/crypto/Kconfig +++ b/fs/crypto/Kconfig @@ -1,13 +1,8 @@ config FS_ENCRYPTION bool "FS Encryption (Per-file encryption)" select CRYPTO - select CRYPTO_AES - select CRYPTO_CBC - select CRYPTO_ECB - select CRYPTO_XTS - select CRYPTO_CTS - select CRYPTO_SHA512 - select CRYPTO_HMAC + select CRYPTO_HASH + select CRYPTO_BLKCIPHER select KEYS help Enable encryption of files and directories. This @@ -16,6 +11,19 @@ config FS_ENCRYPTION decrypted pages in the page cache. Currently Ext4, F2FS and UBIFS make use of this feature. +# Filesystems supporting encryption must select this if FS_ENCRYPTION. This +# allows the algorithms to be built as modules when all the filesystems are. 
+config FS_ENCRYPTION_ALGS + tristate + select CRYPTO_AES + select CRYPTO_CBC + select CRYPTO_CTS + select CRYPTO_ECB + select CRYPTO_HMAC + select CRYPTO_SHA256 + select CRYPTO_SHA512 + select CRYPTO_XTS + config FS_ENCRYPTION_INLINE_CRYPT bool "Enable fscrypt to use inline crypto" depends on FS_ENCRYPTION && BLK_INLINE_ENCRYPTION diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index 9601e4bfc004..aa36d245f548 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c @@ -41,63 +41,154 @@ void fscrypt_decrypt_bio(struct bio *bio) } EXPORT_SYMBOL(fscrypt_decrypt_bio); -int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, - sector_t pblk, unsigned int len) +static int fscrypt_zeroout_range_inlinecrypt(const struct inode *inode, + pgoff_t lblk, + sector_t pblk, unsigned int len) { const unsigned int blockbits = inode->i_blkbits; - const unsigned int blocksize = 1 << blockbits; - const bool inlinecrypt = fscrypt_inode_uses_inline_crypto(inode); - struct page *ciphertext_page; + const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits; + const unsigned int blocks_per_page = 1 << blocks_per_page_bits; + unsigned int i; struct bio *bio; - int ret, err = 0; + int ret, err; - if (inlinecrypt) { - ciphertext_page = ZERO_PAGE(0); - } else { - ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT); - if (!ciphertext_page) - return -ENOMEM; - } - - while (len--) { - if (!inlinecrypt) { - err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk, - ZERO_PAGE(0), ciphertext_page, - blocksize, 0, GFP_NOFS); - if (err) - goto errout; - } - - bio = bio_alloc(GFP_NOWAIT, 1); - if (!bio) { - err = -ENOMEM; - goto errout; - } - fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOIO); + /* This always succeeds since __GFP_DIRECT_RECLAIM is set. 
*/ + bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES); + do { bio_set_dev(bio, inode->i_sb->s_bdev); bio->bi_iter.bi_sector = pblk << (blockbits - 9); bio_set_op_attrs(bio, REQ_OP_WRITE, 0); - ret = bio_add_page(bio, ciphertext_page, blocksize, 0); - if (WARN_ON(ret != blocksize)) { - /* should never happen! */ - bio_put(bio); - err = -EIO; - goto errout; - } + fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS); + + i = 0; + do { + unsigned int blocks_this_page = + min(len, blocks_per_page); + unsigned int bytes_this_page = + blocks_this_page << blockbits; + + ret = bio_add_page(bio, ZERO_PAGE(0), + bytes_this_page, 0); + if (WARN_ON(ret != bytes_this_page)) { + err = -EIO; + goto out; + } + lblk += blocks_this_page; + pblk += blocks_this_page; + len -= blocks_this_page; + } while (++i != BIO_MAX_PAGES && len != 0); + err = submit_bio_wait(bio); - if (err == 0 && bio->bi_status) - err = -EIO; - bio_put(bio); if (err) - goto errout; - lblk++; - pblk++; - } + goto out; + bio_reset(bio); + } while (len != 0); err = 0; -errout: - if (!inlinecrypt) - fscrypt_free_bounce_page(ciphertext_page); +out: + bio_put(bio); + return err; +} + +/** + * fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file + * @inode: the file's inode + * @lblk: the first file logical block to zero out + * @pblk: the first filesystem physical block to zero out + * @len: number of blocks to zero out + * + * Zero out filesystem blocks in an encrypted regular file on-disk, i.e. write + * ciphertext blocks which decrypt to the all-zeroes block. The blocks must be + * both logically and physically contiguous. It's also assumed that the + * filesystem only uses a single block device, ->s_bdev. + * + * Note that since each block uses a different IV, this involves writing a + * different ciphertext to each block; we can't simply reuse the same one. + * + * Return: 0 on success; -errno on failure. 
+ */ +int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, + sector_t pblk, unsigned int len) +{ + const unsigned int blockbits = inode->i_blkbits; + const unsigned int blocksize = 1 << blockbits; + const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits; + const unsigned int blocks_per_page = 1 << blocks_per_page_bits; + struct page *pages[16]; /* write up to 16 pages at a time */ + unsigned int nr_pages; + unsigned int i; + unsigned int offset; + struct bio *bio; + int ret, err; + + if (len == 0) + return 0; + + if (fscrypt_inode_uses_inline_crypto(inode)) + return fscrypt_zeroout_range_inlinecrypt(inode, lblk, pblk, + len); + + BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_PAGES); + nr_pages = min_t(unsigned int, ARRAY_SIZE(pages), + (len + blocks_per_page - 1) >> blocks_per_page_bits); + + /* + * We need at least one page for ciphertext. Allocate the first one + * from a mempool, with __GFP_DIRECT_RECLAIM set so that it can't fail. + * + * Any additional page allocations are allowed to fail, as they only + * help performance, and waiting on the mempool for them could deadlock. + */ + for (i = 0; i < nr_pages; i++) { + pages[i] = fscrypt_alloc_bounce_page(i == 0 ? GFP_NOFS : + GFP_NOWAIT | __GFP_NOWARN); + if (!pages[i]) + break; + } + nr_pages = i; + if (WARN_ON(nr_pages <= 0)) + return -EINVAL; + + /* This always succeeds since __GFP_DIRECT_RECLAIM is set. 
*/ + bio = bio_alloc(GFP_NOFS, nr_pages); + + do { + bio_set_dev(bio, inode->i_sb->s_bdev); + bio->bi_iter.bi_sector = pblk << (blockbits - 9); + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); + + i = 0; + offset = 0; + do { + err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk, + ZERO_PAGE(0), pages[i], + blocksize, offset, GFP_NOFS); + if (err) + goto out; + lblk++; + pblk++; + len--; + offset += blocksize; + if (offset == PAGE_SIZE || len == 0) { + ret = bio_add_page(bio, pages[i++], offset, 0); + if (WARN_ON(ret != offset)) { + err = -EIO; + goto out; + } + offset = 0; + } + } while (i != nr_pages && len != 0); + + err = submit_bio_wait(bio); + if (err) + goto out; + bio_reset(bio); + } while (len != 0); + err = 0; +out: + bio_put(bio); + for (i = 0; i < nr_pages; i++) + fscrypt_free_bounce_page(pages[i]); return err; } EXPORT_SYMBOL(fscrypt_zeroout_range); diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 41b4fe15b4b6..ed6ea28dbdad 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -24,8 +24,6 @@ #include #include #include -#include -#include #include #include "fscrypt_private.h" @@ -139,7 +137,7 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw, * multiple of the filesystem's block size. * @offs: Byte offset within @page of the first block to encrypt. Must be * a multiple of the filesystem's block size. - * @gfp_flags: Memory allocation flags + * @gfp_flags: Memory allocation flags. See details below. * * A new bounce page is allocated, and the specified block(s) are encrypted into * it. In the bounce page, the ciphertext block(s) will be located at the same @@ -149,6 +147,11 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw, * * This is for use by the filesystem's ->writepages() method. * + * The bounce page allocation is mempool-backed, so it will always succeed when + * @gfp_flags includes __GFP_DIRECT_RECLAIM, e.g. when it's GFP_NOFS. 
However, + * only the first page of each bio can be allocated this way. To prevent + * deadlocks, for any additional pages a mask like GFP_NOWAIT must be used. + * * Return: the new encrypted bounce page on success; an ERR_PTR() on failure */ struct page *fscrypt_encrypt_pagecache_blocks(struct page *page, @@ -285,54 +288,6 @@ int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page, } EXPORT_SYMBOL(fscrypt_decrypt_block_inplace); -/* - * Validate dentries in encrypted directories to make sure we aren't potentially - * caching stale dentries after a key has been added. - */ -static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) -{ - struct dentry *dir; - int err; - int valid; - - /* - * Plaintext names are always valid, since fscrypt doesn't support - * reverting to ciphertext names without evicting the directory's inode - * -- which implies eviction of the dentries in the directory. - */ - if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME)) - return 1; - - /* - * Ciphertext name; valid if the directory's key is still unavailable. - * - * Although fscrypt forbids rename() on ciphertext names, we still must - * use dget_parent() here rather than use ->d_parent directly. That's - * because a corrupted fs image may contain directory hard links, which - * the VFS handles by moving the directory's dentry tree in the dcache - * each time ->lookup() finds the directory and it already has a dentry - * elsewhere. Thus ->d_parent can be changing, and we must safely grab - * a reference to some ->d_parent to prevent it from being freed. 
- */ - - if (flags & LOOKUP_RCU) - return -ECHILD; - - dir = dget_parent(dentry); - err = fscrypt_get_encryption_info(d_inode(dir)); - valid = !fscrypt_has_encryption_key(d_inode(dir)); - dput(dir); - - if (err < 0) - return err; - - return valid; -} - -const struct dentry_operations fscrypt_d_ops = { - .d_revalidate = fscrypt_d_revalidate, -}; - /** * fscrypt_initialize() - allocate major buffers for fs encryption. * @cop_flags: fscrypt operations flags diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 3aafddaab703..63bfe5e8accd 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -11,10 +11,88 @@ * This has not yet undergone a rigorous security audit. */ +#include #include +#include +#include #include #include "fscrypt_private.h" +/** + * struct fscrypt_nokey_name - identifier for directory entry when key is absent + * + * When userspace lists an encrypted directory without access to the key, the + * filesystem must present a unique "no-key name" for each filename that allows + * it to find the directory entry again if requested. Naively, that would just + * mean using the ciphertext filenames. However, since the ciphertext filenames + * can contain illegal characters ('\0' and '/'), they must be encoded in some + * way. We use base64. But that can cause names to exceed NAME_MAX (255 + * bytes), so we also need to use a strong hash to abbreviate long names. + * + * The filesystem may also need another kind of hash, the "dirhash", to quickly + * find the directory entry. Since filesystems normally compute the dirhash + * over the on-disk filename (i.e. the ciphertext), it's not computable from + * no-key names that abbreviate the ciphertext using the strong hash to fit in + * NAME_MAX. It's also not computable if it's a keyed hash taken over the + * plaintext (but it may still be available in the on-disk directory entry); + * casefolded directories use this type of dirhash. 
At least in these cases, + * each no-key name must include the name's dirhash too. + * + * To meet all these requirements, we base64-encode the following + * variable-length structure. It contains the dirhash, or 0's if the filesystem + * didn't provide one; up to 149 bytes of the ciphertext name; and for + * ciphertexts longer than 149 bytes, also the SHA-256 of the remaining bytes. + * + * This ensures that each no-key name contains everything needed to find the + * directory entry again, contains only legal characters, doesn't exceed + * NAME_MAX, is unambiguous unless there's a SHA-256 collision, and that we only + * take the performance hit of SHA-256 on very long filenames (which are rare). + */ +struct fscrypt_nokey_name { + u32 dirhash[2]; + u8 bytes[149]; + u8 sha256[SHA256_DIGEST_SIZE]; +}; /* 189 bytes => 252 bytes base64-encoded, which is <= NAME_MAX (255) */ + +/* + * Decoded size of max-size nokey name, i.e. a name that was abbreviated using + * the strong hash and thus includes the 'sha256' field. This isn't simply + * sizeof(struct fscrypt_nokey_name), as the padding at the end isn't included. 
+ */ +#define FSCRYPT_NOKEY_NAME_MAX offsetofend(struct fscrypt_nokey_name, sha256) + +static struct crypto_shash *sha256_hash_tfm; + +static int fscrypt_do_sha256(const u8 *data, unsigned int data_len, u8 *result) +{ + struct crypto_shash *tfm = READ_ONCE(sha256_hash_tfm); + + if (unlikely(!tfm)) { + struct crypto_shash *prev_tfm; + + tfm = crypto_alloc_shash("sha256", 0, 0); + if (IS_ERR(tfm)) { + fscrypt_err(NULL, + "Error allocating SHA-256 transform: %ld", + PTR_ERR(tfm)); + return PTR_ERR(tfm); + } + prev_tfm = cmpxchg(&sha256_hash_tfm, NULL, tfm); + if (prev_tfm) { + crypto_free_shash(tfm); + tfm = prev_tfm; + } + } + { + SHASH_DESC_ON_STACK(desc, tfm); + + desc->tfm = tfm; + desc->flags = 0; + + return crypto_shash_digest(desc, data, data_len, result); + } +} + static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) { if (str->len == 1 && str->name[0] == '.') @@ -27,19 +105,19 @@ static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) } /** - * fname_encrypt() - encrypt a filename + * fscrypt_fname_encrypt() - encrypt a filename * * The output buffer must be at least as large as the input buffer. * Any extra space is filled with NUL padding before encryption. 
* * Return: 0 on success, -errno on failure */ -int fname_encrypt(struct inode *inode, const struct qstr *iname, - u8 *out, unsigned int olen) +int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, + u8 *out, unsigned int olen) { struct skcipher_request *req = NULL; DECLARE_CRYPTO_WAIT(wait); - struct fscrypt_info *ci = inode->i_crypt_info; + const struct fscrypt_info *ci = inode->i_crypt_info; struct crypto_skcipher *tfm = ci->ci_key.tfm; union fscrypt_iv iv; struct scatterlist sg; @@ -85,14 +163,14 @@ int fname_encrypt(struct inode *inode, const struct qstr *iname, * * Return: 0 on success, -errno on failure */ -static int fname_decrypt(struct inode *inode, - const struct fscrypt_str *iname, - struct fscrypt_str *oname) +static int fname_decrypt(const struct inode *inode, + const struct fscrypt_str *iname, + struct fscrypt_str *oname) { struct skcipher_request *req = NULL; DECLARE_CRYPTO_WAIT(wait); struct scatterlist src_sg, dst_sg; - struct fscrypt_info *ci = inode->i_crypt_info; + const struct fscrypt_info *ci = inode->i_crypt_info; struct crypto_skcipher *tfm = ci->ci_key.tfm; union fscrypt_iv iv; int res; @@ -206,9 +284,7 @@ int fscrypt_fname_alloc_buffer(const struct inode *inode, u32 max_encrypted_len, struct fscrypt_str *crypto_str) { - const u32 max_encoded_len = - max_t(u32, BASE64_CHARS(FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE), - 1 + BASE64_CHARS(sizeof(struct fscrypt_digested_name))); + const u32 max_encoded_len = BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX); u32 max_presented_len; max_presented_len = max(max_encoded_len, max_encrypted_len); @@ -241,19 +317,21 @@ EXPORT_SYMBOL(fscrypt_fname_free_buffer); * * The caller must have allocated sufficient memory for the @oname string. * - * If the key is available, we'll decrypt the disk name; otherwise, we'll encode - * it for presentation. Short names are directly base64-encoded, while long - * names are encoded in fscrypt_digested_name format. 
+ * If the key is available, we'll decrypt the disk name. Otherwise, we'll + * encode it for presentation in fscrypt_nokey_name format. + * See struct fscrypt_nokey_name for details. * * Return: 0 on success, -errno on failure */ -int fscrypt_fname_disk_to_usr(struct inode *inode, - u32 hash, u32 minor_hash, - const struct fscrypt_str *iname, - struct fscrypt_str *oname) +int fscrypt_fname_disk_to_usr(const struct inode *inode, + u32 hash, u32 minor_hash, + const struct fscrypt_str *iname, + struct fscrypt_str *oname) { const struct qstr qname = FSTR_TO_QSTR(iname); - struct fscrypt_digested_name digested_name; + struct fscrypt_nokey_name nokey_name; + u32 size; /* size of the unencoded no-key name */ + int err; if (fscrypt_is_dot_dotdot(&qname)) { oname->name[0] = '.'; @@ -268,24 +346,37 @@ int fscrypt_fname_disk_to_usr(struct inode *inode, if (fscrypt_has_encryption_key(inode)) return fname_decrypt(inode, iname, oname); - if (iname->len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) { - oname->len = base64_encode(iname->name, iname->len, - oname->name); - return 0; - } + /* + * Sanity check that struct fscrypt_nokey_name doesn't have padding + * between fields and that its encoded size never exceeds NAME_MAX. 
+ */ + BUILD_BUG_ON(offsetofend(struct fscrypt_nokey_name, dirhash) != + offsetof(struct fscrypt_nokey_name, bytes)); + BUILD_BUG_ON(offsetofend(struct fscrypt_nokey_name, bytes) != + offsetof(struct fscrypt_nokey_name, sha256)); + BUILD_BUG_ON(BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX) > NAME_MAX); + if (hash) { - digested_name.hash = hash; - digested_name.minor_hash = minor_hash; + nokey_name.dirhash[0] = hash; + nokey_name.dirhash[1] = minor_hash; } else { - digested_name.hash = 0; - digested_name.minor_hash = 0; + nokey_name.dirhash[0] = 0; + nokey_name.dirhash[1] = 0; } - memcpy(digested_name.digest, - FSCRYPT_FNAME_DIGEST(iname->name, iname->len), - FSCRYPT_FNAME_DIGEST_SIZE); - oname->name[0] = '_'; - oname->len = 1 + base64_encode((const u8 *)&digested_name, - sizeof(digested_name), oname->name + 1); + if (iname->len <= sizeof(nokey_name.bytes)) { + memcpy(nokey_name.bytes, iname->name, iname->len); + size = offsetof(struct fscrypt_nokey_name, bytes[iname->len]); + } else { + memcpy(nokey_name.bytes, iname->name, sizeof(nokey_name.bytes)); + /* Compute strong hash of remaining part of name. */ + err = fscrypt_do_sha256(&iname->name[sizeof(nokey_name.bytes)], + iname->len - sizeof(nokey_name.bytes), + nokey_name.sha256); + if (err) + return err; + size = FSCRYPT_NOKEY_NAME_MAX; + } + oname->len = base64_encode((const u8 *)&nokey_name, size, oname->name); return 0; } EXPORT_SYMBOL(fscrypt_fname_disk_to_usr); @@ -306,8 +397,7 @@ EXPORT_SYMBOL(fscrypt_fname_disk_to_usr); * get the disk_name. * * Else, for keyless @lookup operations, @iname is the presented ciphertext, so - * we decode it to get either the ciphertext disk_name (for short names) or the - * fscrypt_digested_name (for long names). Non-@lookup operations will be + * we decode it to get the fscrypt_nokey_name. Non-@lookup operations will be * impossible in this case, so we fail them with ENOKEY. * * If successful, fscrypt_free_filename() must be called later to clean up. 
@@ -317,8 +407,8 @@ EXPORT_SYMBOL(fscrypt_fname_disk_to_usr); int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, int lookup, struct fscrypt_name *fname) { + struct fscrypt_nokey_name *nokey_name; int ret; - int digested; memset(fname, 0, sizeof(struct fscrypt_name)); fname->usr_fname = iname; @@ -342,8 +432,8 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, if (!fname->crypto_buf.name) return -ENOMEM; - ret = fname_encrypt(dir, iname, fname->crypto_buf.name, - fname->crypto_buf.len); + ret = fscrypt_fname_encrypt(dir, iname, fname->crypto_buf.name, + fname->crypto_buf.len); if (ret) goto errout; fname->disk_name.name = fname->crypto_buf.name; @@ -358,40 +448,31 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, * We don't have the key and we are doing a lookup; decode the * user-supplied name */ - if (iname->name[0] == '_') { - if (iname->len != - 1 + BASE64_CHARS(sizeof(struct fscrypt_digested_name))) - return -ENOENT; - digested = 1; - } else { - if (iname->len > - BASE64_CHARS(FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)) - return -ENOENT; - digested = 0; - } - fname->crypto_buf.name = - kmalloc(max_t(size_t, FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE, - sizeof(struct fscrypt_digested_name)), - GFP_KERNEL); + if (iname->len > BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX)) + return -ENOENT; + + fname->crypto_buf.name = kmalloc(FSCRYPT_NOKEY_NAME_MAX, GFP_KERNEL); if (fname->crypto_buf.name == NULL) return -ENOMEM; - ret = base64_decode(iname->name + digested, iname->len - digested, - fname->crypto_buf.name); - if (ret < 0) { + ret = base64_decode(iname->name, iname->len, fname->crypto_buf.name); + if (ret < (int)offsetof(struct fscrypt_nokey_name, bytes[1]) || + (ret > offsetof(struct fscrypt_nokey_name, sha256) && + ret != FSCRYPT_NOKEY_NAME_MAX)) { ret = -ENOENT; goto errout; } fname->crypto_buf.len = ret; - if (digested) { - const struct fscrypt_digested_name *n = - (const void *)fname->crypto_buf.name; - fname->hash 
= n->hash; - fname->minor_hash = n->minor_hash; - } else { - fname->disk_name.name = fname->crypto_buf.name; - fname->disk_name.len = fname->crypto_buf.len; + + nokey_name = (void *)fname->crypto_buf.name; + fname->hash = nokey_name->dirhash[0]; + fname->minor_hash = nokey_name->dirhash[1]; + if (ret != FSCRYPT_NOKEY_NAME_MAX) { + /* The full ciphertext filename is available. */ + fname->disk_name.name = nokey_name->bytes; + fname->disk_name.len = + ret - offsetof(struct fscrypt_nokey_name, bytes); } return 0; @@ -400,3 +481,106 @@ errout: return ret; } EXPORT_SYMBOL(fscrypt_setup_filename); + +/** + * fscrypt_match_name() - test whether the given name matches a directory entry + * @fname: the name being searched for + * @de_name: the name from the directory entry + * @de_name_len: the length of @de_name in bytes + * + * Normally @fname->disk_name will be set, and in that case we simply compare + * that to the name stored in the directory entry. The only exception is that + * if we don't have the key for an encrypted directory and the name we're + * looking for is very long, then we won't have the full disk_name and instead + * we'll need to match against a fscrypt_nokey_name that includes a strong hash. + * + * Return: %true if the name matches, otherwise %false. 
+ */ +bool fscrypt_match_name(const struct fscrypt_name *fname, + const u8 *de_name, u32 de_name_len) +{ + const struct fscrypt_nokey_name *nokey_name = + (const void *)fname->crypto_buf.name; + u8 sha256[SHA256_DIGEST_SIZE]; + + if (likely(fname->disk_name.name)) { + if (de_name_len != fname->disk_name.len) + return false; + return !memcmp(de_name, fname->disk_name.name, de_name_len); + } + if (de_name_len <= sizeof(nokey_name->bytes)) + return false; + if (memcmp(de_name, nokey_name->bytes, sizeof(nokey_name->bytes))) + return false; + if (fscrypt_do_sha256(&de_name[sizeof(nokey_name->bytes)], + de_name_len - sizeof(nokey_name->bytes), sha256)) + return false; + return !memcmp(sha256, nokey_name->sha256, sizeof(sha256)); +} +EXPORT_SYMBOL_GPL(fscrypt_match_name); + +/** + * fscrypt_fname_siphash() - calculate the SipHash of a filename + * @dir: the parent directory + * @name: the filename to calculate the SipHash of + * + * Given a plaintext filename @name and a directory @dir which uses SipHash as + * its dirhash method and has had its fscrypt key set up, this function + * calculates the SipHash of that name using the directory's secret dirhash key. + * + * Return: the SipHash of @name using the hash key of @dir + */ +u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name) +{ + const struct fscrypt_info *ci = dir->i_crypt_info; + + WARN_ON(!ci->ci_dirhash_key_initialized); + + return siphash(name->name, name->len, &ci->ci_dirhash_key); +} +EXPORT_SYMBOL_GPL(fscrypt_fname_siphash); + +/* + * Validate dentries in encrypted directories to make sure we aren't potentially + * caching stale dentries after a key has been added. 
+ */ +int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) +{ + struct dentry *dir; + int err; + int valid; + + /* + * Plaintext names are always valid, since fscrypt doesn't support + * reverting to ciphertext names without evicting the directory's inode + * -- which implies eviction of the dentries in the directory. + */ + if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME)) + return 1; + + /* + * Ciphertext name; valid if the directory's key is still unavailable. + * + * Although fscrypt forbids rename() on ciphertext names, we still must + * use dget_parent() here rather than use ->d_parent directly. That's + * because a corrupted fs image may contain directory hard links, which + * the VFS handles by moving the directory's dentry tree in the dcache + * each time ->lookup() finds the directory and it already has a dentry + * elsewhere. Thus ->d_parent can be changing, and we must safely grab + * a reference to some ->d_parent to prevent it from being freed. + */ + + if (flags & LOOKUP_RCU) + return -ECHILD; + + dir = dget_parent(dentry); + err = fscrypt_get_encryption_info(d_inode(dir)); + valid = !fscrypt_has_encryption_key(d_inode(dir)); + dput(dir); + + if (err < 0) + return err; + + return valid; +} +EXPORT_SYMBOL(fscrypt_d_revalidate); diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 739d8a9d24f5..ae03c7fc7e52 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -12,6 +12,7 @@ #define _FSCRYPT_PRIVATE_H #include +#include #include #include @@ -77,6 +78,26 @@ static inline int fscrypt_context_size(const union fscrypt_context *ctx) return 0; } +/* Check whether an fscrypt_context has a recognized version number and size */ +static inline bool fscrypt_context_is_valid(const union fscrypt_context *ctx, + int ctx_size) +{ + return ctx_size >= 1 && ctx_size == fscrypt_context_size(ctx); +} + +/* Retrieve the context's nonce, assuming the context was already validated */ +static inline const u8 
*fscrypt_context_nonce(const union fscrypt_context *ctx) +{ + switch (ctx->version) { + case FSCRYPT_CONTEXT_V1: + return ctx->v1.nonce; + case FSCRYPT_CONTEXT_V2: + return ctx->v2.nonce; + } + WARN_ON(1); + return NULL; +} + #undef fscrypt_policy union fscrypt_policy { u8 version; @@ -138,12 +159,6 @@ fscrypt_policy_flags(const union fscrypt_policy *policy) BUG(); } -static inline bool -fscrypt_is_direct_key_policy(const union fscrypt_policy *policy) -{ - return fscrypt_policy_flags(policy) & FSCRYPT_POLICY_FLAG_DIRECT_KEY; -} - /** * For encrypted symlinks, the ciphertext length is stored at the beginning * of the string in little-endian format. @@ -217,6 +232,14 @@ struct fscrypt_info { */ struct fscrypt_direct_key *ci_direct_key; + /* + * This inode's hash key for filenames. This is a 128-bit SipHash-2-4 + * key. This is only set for directories that use a keyed dirhash over + * the plaintext filenames -- currently just casefolded directories. + */ + siphash_key_t ci_dirhash_key; + bool ci_dirhash_key_initialized; + /* The encryption policy used by this inode */ union fscrypt_policy ci_policy; @@ -230,24 +253,6 @@ typedef enum { FS_ENCRYPT, } fscrypt_direction_t; -static inline bool fscrypt_valid_enc_modes(u32 contents_mode, - u32 filenames_mode) -{ - if (contents_mode == FSCRYPT_MODE_AES_128_CBC && - filenames_mode == FSCRYPT_MODE_AES_128_CTS) - return true; - - if (contents_mode == FSCRYPT_MODE_AES_256_XTS && - filenames_mode == FSCRYPT_MODE_AES_256_CTS) - return true; - - if (contents_mode == FSCRYPT_MODE_ADIANTUM && - filenames_mode == FSCRYPT_MODE_ADIANTUM) - return true; - - return false; -} - /* crypto.c */ extern struct kmem_cache *fscrypt_info_cachep; extern int fscrypt_initialize(unsigned int cop_flags); @@ -257,7 +262,6 @@ extern int fscrypt_crypt_block(const struct inode *inode, unsigned int len, unsigned int offs, gfp_t gfp_flags); extern struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags); -extern const struct dentry_operations fscrypt_d_ops; 
extern void __printf(3, 4) __cold fscrypt_msg(const struct inode *inode, const char *level, const char *fmt, ...); @@ -285,8 +289,9 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, const struct fscrypt_info *ci); /* fname.c */ -extern int fname_encrypt(struct inode *inode, const struct qstr *iname, - u8 *out, unsigned int olen); +extern int fscrypt_fname_encrypt(const struct inode *inode, + const struct qstr *iname, + u8 *out, unsigned int olen); extern bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len, u32 max_len, u32 *encrypted_len_ret); @@ -308,11 +313,12 @@ extern int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, * output doesn't reveal another. */ #define HKDF_CONTEXT_KEY_IDENTIFIER 1 -#define HKDF_CONTEXT_PER_FILE_KEY 2 +#define HKDF_CONTEXT_PER_FILE_ENC_KEY 2 #define HKDF_CONTEXT_DIRECT_KEY 3 #define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4 +#define HKDF_CONTEXT_DIRHASH_KEY 5 -extern int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context, +extern int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, const u8 *info, unsigned int infolen, u8 *okm, unsigned int okmlen); @@ -320,7 +326,8 @@ extern void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf); /* inline_crypt.c */ #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT -extern void fscrypt_select_encryption_impl(struct fscrypt_info *ci); +extern int fscrypt_select_encryption_impl(struct fscrypt_info *ci, + bool is_hw_wrapped_key); static inline bool fscrypt_using_inline_encryption(const struct fscrypt_info *ci) @@ -332,6 +339,7 @@ extern int fscrypt_prepare_inline_crypt_key( struct fscrypt_prepared_key *prep_key, const u8 *raw_key, unsigned int raw_key_size, + bool is_hw_wrapped, const struct fscrypt_info *ci); extern void fscrypt_destroy_inline_crypt_key( @@ -363,8 +371,10 @@ fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, #else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ -static inline void fscrypt_select_encryption_impl(struct 
fscrypt_info *ci) +static inline int fscrypt_select_encryption_impl(struct fscrypt_info *ci, + bool is_hw_wrapped_key) { + return 0; } static inline bool fscrypt_using_inline_encryption( @@ -376,6 +386,7 @@ static inline bool fscrypt_using_inline_encryption( static inline int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, const u8 *raw_key, unsigned int raw_key_size, + bool is_hw_wrapped, const struct fscrypt_info *ci) { WARN_ON(1); @@ -568,20 +579,18 @@ struct fscrypt_mode { extern struct fscrypt_mode fscrypt_modes[]; -static inline bool -fscrypt_mode_supports_direct_key(const struct fscrypt_mode *mode) -{ - return mode->ivsize >= offsetofend(union fscrypt_iv, nonce); -} - extern int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, const u8 *raw_key, unsigned int raw_key_size, + bool is_hw_wrapped, const struct fscrypt_info *ci); extern void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key); -extern int fscrypt_set_derived_key(struct fscrypt_info *ci, - const u8 *derived_key); +extern int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, + const u8 *raw_key); + +extern int fscrypt_derive_dirhash_key(struct fscrypt_info *ci, + const struct fscrypt_master_key *mk); /* keysetup_v1.c */ diff --git a/fs/crypto/hkdf.c b/fs/crypto/hkdf.c index 2c026009c6e7..fd7f67628561 100644 --- a/fs/crypto/hkdf.c +++ b/fs/crypto/hkdf.c @@ -113,7 +113,7 @@ out: * adds to its application-specific info strings to guarantee that it doesn't * accidentally repeat an info string when using HKDF for different purposes.) */ -int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context, +int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, const u8 *info, unsigned int infolen, u8 *okm, unsigned int okmlen) { diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c index 30b1ca661249..a6396bf721ac 100644 --- a/fs/crypto/hooks.c +++ b/fs/crypto/hooks.c @@ -4,6 +4,8 @@ * Encryption hooks for higher-level filesystem operations. 
*/ +#include + #include "fscrypt_private.h" /** @@ -115,12 +117,53 @@ int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, spin_lock(&dentry->d_lock); dentry->d_flags |= DCACHE_ENCRYPTED_NAME; spin_unlock(&dentry->d_lock); - d_set_d_op(dentry, &fscrypt_d_ops); } return err; } EXPORT_SYMBOL_GPL(__fscrypt_prepare_lookup); +/** + * fscrypt_prepare_setflags() - prepare to change flags with FS_IOC_SETFLAGS + * @inode: the inode on which flags are being changed + * @oldflags: the old flags + * @flags: the new flags + * + * The caller should be holding i_rwsem for write. + * + * Return: 0 on success; -errno if the flags change isn't allowed or if + * another error occurs. + */ +int fscrypt_prepare_setflags(struct inode *inode, + unsigned int oldflags, unsigned int flags) +{ + struct fscrypt_info *ci; + struct fscrypt_master_key *mk; + int err; + + /* + * When the CASEFOLD flag is set on an encrypted directory, we must + * derive the secret key needed for the dirhash. This is only possible + * if the directory uses a v2 encryption policy. 
+ */ + if (IS_ENCRYPTED(inode) && (flags & ~oldflags & FS_CASEFOLD_FL)) { + err = fscrypt_require_key(inode); + if (err) + return err; + ci = inode->i_crypt_info; + if (ci->ci_policy.version != FSCRYPT_POLICY_V2) + return -EINVAL; + mk = ci->ci_master_key->payload.data[0]; + down_read(&mk->mk_secret_sem); + if (is_master_key_secret_present(&mk->mk_secret)) + err = fscrypt_derive_dirhash_key(ci, mk); + else + err = -ENOKEY; + up_read(&mk->mk_secret_sem); + return err; + } + return 0; +} + int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, unsigned int max_len, struct fscrypt_str *disk_link) @@ -187,7 +230,8 @@ int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, ciphertext_len = disk_link->len - sizeof(*sd); sd->len = cpu_to_le16(ciphertext_len); - err = fname_encrypt(inode, &iname, sd->encrypted_path, ciphertext_len); + err = fscrypt_fname_encrypt(inode, &iname, sd->encrypted_path, + ciphertext_len); if (err) goto err_free_sd; diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c index 92c471d3db73..e1bbaeff1c43 100644 --- a/fs/crypto/inline_crypt.c +++ b/fs/crypto/inline_crypt.c @@ -26,44 +26,94 @@ struct fscrypt_blk_crypto_key { struct request_queue *devs[]; }; +static int fscrypt_get_num_devices(struct super_block *sb) +{ + if (sb->s_cop->get_num_devices) + return sb->s_cop->get_num_devices(sb); + return 1; +} + +static void fscrypt_get_devices(struct super_block *sb, int num_devs, + struct request_queue **devs) +{ + if (num_devs == 1) + devs[0] = bdev_get_queue(sb->s_bdev); + else + sb->s_cop->get_devices(sb, devs); +} + /* Enable inline encryption for this file if supported. 
*/ -void fscrypt_select_encryption_impl(struct fscrypt_info *ci) +int fscrypt_select_encryption_impl(struct fscrypt_info *ci, + bool is_hw_wrapped_key) { const struct inode *inode = ci->ci_inode; struct super_block *sb = inode->i_sb; + enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; + struct request_queue **devs; + int num_devs; + int i; /* The file must need contents encryption, not filenames encryption */ if (!S_ISREG(inode->i_mode)) - return; + return 0; /* blk-crypto must implement the needed encryption algorithm */ - if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID) - return; + if (crypto_mode == BLK_ENCRYPTION_MODE_INVALID) + return 0; /* The filesystem must be mounted with -o inlinecrypt */ if (!sb->s_cop->inline_crypt_enabled || !sb->s_cop->inline_crypt_enabled(sb)) - return; + return 0; + + /* + * The needed encryption settings must be supported either by + * blk-crypto-fallback, or by hardware on all the filesystem's devices. + */ + + if (IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) && + !is_hw_wrapped_key) { + ci->ci_inlinecrypt = true; + return 0; + } + + num_devs = fscrypt_get_num_devices(sb); + devs = kmalloc_array(num_devs, sizeof(*devs), GFP_NOFS); + if (!devs) + return -ENOMEM; + + fscrypt_get_devices(sb, num_devs, devs); + + for (i = 0; i < num_devs; i++) { + if (!keyslot_manager_crypto_mode_supported(devs[i]->ksm, + crypto_mode, + sb->s_blocksize, + is_hw_wrapped_key)) + goto out_free_devs; + } ci->ci_inlinecrypt = true; +out_free_devs: + kfree(devs); + return 0; } int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, const u8 *raw_key, unsigned int raw_key_size, + bool is_hw_wrapped, const struct fscrypt_info *ci) { const struct inode *inode = ci->ci_inode; struct super_block *sb = inode->i_sb; enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; - int num_devs = 1; + int num_devs; int queue_refs = 0; struct fscrypt_blk_crypto_key *blk_key; int err; int i; - if 
(sb->s_cop->get_num_devices) - num_devs = sb->s_cop->get_num_devices(sb); + num_devs = fscrypt_get_num_devices(sb); if (WARN_ON(num_devs < 1)) return -EINVAL; @@ -72,16 +122,13 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, return -ENOMEM; blk_key->num_devs = num_devs; - if (num_devs == 1) - blk_key->devs[0] = bdev_get_queue(sb->s_bdev); - else - sb->s_cop->get_devices(sb, blk_key->devs); + fscrypt_get_devices(sb, num_devs, blk_key->devs); BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE); err = blk_crypto_init_key(&blk_key->base, raw_key, raw_key_size, - crypto_mode, sb->s_blocksize); + is_hw_wrapped, crypto_mode, sb->s_blocksize); if (err) { fscrypt_err(inode, "error %d initializing blk-crypto key", err); goto fail; @@ -103,6 +150,7 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, queue_refs++; err = blk_crypto_start_using_mode(crypto_mode, sb->s_blocksize, + is_hw_wrapped, blk_key->devs[i]); if (err) { fscrypt_err(inode, diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index 40ea4bc1059d..0081fd48e96f 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -569,6 +569,7 @@ out_put: key_ref_put(ref); return err; } + /* Size of software "secret" derived from hardware-wrapped key */ #define RAW_SECRET_SIZE 32 @@ -616,11 +617,7 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; - BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE < - FSCRYPT_MAX_KEY_SIZE); - memset(&secret, 0, sizeof(secret)); - if (arg.key_id) { if (arg.raw_size != 0) return -EINVAL; @@ -629,7 +626,7 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) goto out_wipe_secret; err = -EINVAL; if (!(arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) && - secret.size > FSCRYPT_MAX_KEY_SIZE) + secret.size > FSCRYPT_MAX_KEY_SIZE) goto out_wipe_secret; } else { if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE || @@ 
-812,9 +809,6 @@ static int check_for_busy_inodes(struct super_block *sb, struct list_head *pos; size_t busy_count = 0; unsigned long ino; - struct dentry *dentry; - char _path[256]; - char *path = NULL; spin_lock(&mk->mk_decrypted_inodes_lock); @@ -833,22 +827,14 @@ static int check_for_busy_inodes(struct super_block *sb, struct fscrypt_info, ci_master_key_link)->ci_inode; ino = inode->i_ino; - dentry = d_find_alias(inode); } spin_unlock(&mk->mk_decrypted_inodes_lock); - if (dentry) { - path = dentry_path(dentry, _path, sizeof(_path)); - dput(dentry); - } - if (IS_ERR_OR_NULL(path)) - path = "(unknown)"; - fscrypt_warn(NULL, - "%s: %zu inode(s) still busy after removing key with %s %*phN, including ino %lu (%s)", + "%s: %zu inode(s) still busy after removing key with %s %*phN, including ino %lu", sb->s_id, busy_count, master_key_spec_type(&mk->mk_spec), master_key_spec_len(&mk->mk_spec), (u8 *)&mk->mk_spec.u, - ino, path); + ino); return -EBUSY; } diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index b51fc41395e0..c6ce78afbf8f 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -97,8 +97,11 @@ fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key, * first time a mode is used. 
*/ pr_info("fscrypt: %s using implementation \"%s\"\n", - mode->friendly_name, - crypto_skcipher_alg(tfm)->base.cra_driver_name); + mode->friendly_name, crypto_skcipher_driver_name(tfm)); + } + if (WARN_ON(crypto_skcipher_ivsize(tfm) != mode->ivsize)) { + err = -EINVAL; + goto err_free_tfm; } crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); err = crypto_skcipher_setkey(tfm, raw_key, mode->keysize); @@ -119,15 +122,15 @@ err_free_tfm: */ int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, const u8 *raw_key, unsigned int raw_key_size, - const struct fscrypt_info *ci) + bool is_hw_wrapped, const struct fscrypt_info *ci) { struct crypto_skcipher *tfm; if (fscrypt_using_inline_encryption(ci)) return fscrypt_prepare_inline_crypt_key(prep_key, - raw_key, raw_key_size, ci); + raw_key, raw_key_size, is_hw_wrapped, ci); - if (WARN_ON(raw_key_size != ci->ci_mode->keysize)) + if (WARN_ON(is_hw_wrapped || raw_key_size != ci->ci_mode->keysize)) return -EINVAL; tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode); @@ -148,18 +151,18 @@ void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key) fscrypt_destroy_inline_crypt_key(prep_key); } -/* Given the per-file key, set up the file's crypto transform object */ -int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key) +/* Given a per-file encryption key, set up the file's crypto transform object */ +int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key) { ci->ci_owns_key = true; - return fscrypt_prepare_key(&ci->ci_key, derived_key, - ci->ci_mode->keysize, ci); + return fscrypt_prepare_key(&ci->ci_key, raw_key, ci->ci_mode->keysize, + false /*is_hw_wrapped*/, ci); } -static int setup_per_mode_key(struct fscrypt_info *ci, - struct fscrypt_master_key *mk, - struct fscrypt_prepared_key *keys, - u8 hkdf_context, bool include_fs_uuid) +static int setup_per_mode_enc_key(struct fscrypt_info *ci, + struct fscrypt_master_key *mk, + struct 
fscrypt_prepared_key *keys, + u8 hkdf_context, bool include_fs_uuid) { static DEFINE_MUTEX(mode_key_setup_mutex); const struct inode *inode = ci->ci_inode; @@ -204,7 +207,7 @@ static int setup_per_mode_key(struct fscrypt_info *ci, } } err = fscrypt_prepare_key(prep_key, mk->mk_secret.raw, - mk->mk_secret.size, ci); + mk->mk_secret.size, true, ci); if (err) goto out_unlock; } else { @@ -223,7 +226,7 @@ static int setup_per_mode_key(struct fscrypt_info *ci, if (err) goto out_unlock; err = fscrypt_prepare_key(prep_key, mode_key, mode->keysize, - ci); + false /*is_hw_wrapped*/, ci); memzero_explicit(mode_key, mode->keysize); if (err) goto out_unlock; @@ -236,10 +239,24 @@ out_unlock: return err; } +int fscrypt_derive_dirhash_key(struct fscrypt_info *ci, + const struct fscrypt_master_key *mk) +{ + int err; + + err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, HKDF_CONTEXT_DIRHASH_KEY, + ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE, + (u8 *)&ci->ci_dirhash_key, + sizeof(ci->ci_dirhash_key)); + if (err) + return err; + ci->ci_dirhash_key_initialized = true; + return 0; +} + static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, struct fscrypt_master_key *mk) { - u8 derived_key[FSCRYPT_MAX_KEY_SIZE]; int err; if (mk->mk_secret.is_hw_wrapped && @@ -251,21 +268,15 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { /* - * DIRECT_KEY: instead of deriving per-file keys, the per-file - * nonce will be included in all the IVs. But unlike v1 - * policies, for v2 policies in this case we don't encrypt with - * the master key directly but rather derive a per-mode key. - * This ensures that the master key is consistently used only - * for HKDF, avoiding key reuse issues. + * DIRECT_KEY: instead of deriving per-file encryption keys, the + * per-file nonce will be included in all the IVs. 
But unlike + * v1 policies, for v2 policies in this case we don't encrypt + * with the master key directly but rather derive a per-mode + * encryption key. This ensures that the master key is + * consistently used only for HKDF, avoiding key reuse issues. */ - if (!fscrypt_mode_supports_direct_key(ci->ci_mode)) { - fscrypt_warn(ci->ci_inode, - "Direct key flag not allowed with %s", - ci->ci_mode->friendly_name); - return -EINVAL; - } - return setup_per_mode_key(ci, mk, mk->mk_direct_keys, - HKDF_CONTEXT_DIRECT_KEY, false); + err = setup_per_mode_enc_key(ci, mk, mk->mk_direct_keys, + HKDF_CONTEXT_DIRECT_KEY, false); } else if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) { /* @@ -274,21 +285,34 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, * the IVs. This format is optimized for use with inline * encryption hardware compliant with the UFS or eMMC standards. */ - return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_keys, - HKDF_CONTEXT_IV_INO_LBLK_64_KEY, - true); - } + err = setup_per_mode_enc_key(ci, mk, mk->mk_iv_ino_lblk_64_keys, + HKDF_CONTEXT_IV_INO_LBLK_64_KEY, + true); + } else { + u8 derived_key[FSCRYPT_MAX_KEY_SIZE]; - err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, - HKDF_CONTEXT_PER_FILE_KEY, - ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE, - derived_key, ci->ci_mode->keysize); + err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, + HKDF_CONTEXT_PER_FILE_ENC_KEY, + ci->ci_nonce, + FS_KEY_DERIVATION_NONCE_SIZE, + derived_key, ci->ci_mode->keysize); + if (err) + return err; + + err = fscrypt_set_per_file_enc_key(ci, derived_key); + memzero_explicit(derived_key, ci->ci_mode->keysize); + } if (err) return err; - err = fscrypt_set_derived_key(ci, derived_key); - memzero_explicit(derived_key, ci->ci_mode->keysize); - return err; + /* Derive a secret dirhash key for directories that need it. 
*/ + if (S_ISDIR(ci->ci_inode->i_mode) && IS_CASEFOLDED(ci->ci_inode)) { + err = fscrypt_derive_dirhash_key(ci, mk); + if (err) + return err; + } + + return 0; } /* @@ -309,8 +333,6 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, struct fscrypt_key_specifier mk_spec; int err; - fscrypt_select_encryption_impl(ci); - switch (ci->ci_policy.version) { case FSCRYPT_POLICY_V1: mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR; @@ -335,6 +357,10 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, ci->ci_policy.version != FSCRYPT_POLICY_V1) return PTR_ERR(key); + err = fscrypt_select_encryption_impl(ci, false); + if (err) + return err; + /* * As a legacy fallback for v1 policies, search for the key in * the current task's subscribed keyrings too. Don't move this @@ -369,6 +395,10 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, goto out_release_key; } + err = fscrypt_select_encryption_impl(ci, mk->mk_secret.is_hw_wrapped); + if (err) + goto out_release_key; + switch (ci->ci_policy.version) { case FSCRYPT_POLICY_V1: err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.raw); @@ -475,20 +505,8 @@ int fscrypt_get_encryption_info(struct inode *inode) goto out; } - switch (ctx.version) { - case FSCRYPT_CONTEXT_V1: - memcpy(crypt_info->ci_nonce, ctx.v1.nonce, - FS_KEY_DERIVATION_NONCE_SIZE); - break; - case FSCRYPT_CONTEXT_V2: - memcpy(crypt_info->ci_nonce, ctx.v2.nonce, - FS_KEY_DERIVATION_NONCE_SIZE); - break; - default: - WARN_ON(1); - res = -EINVAL; - goto out; - } + memcpy(crypt_info->ci_nonce, fscrypt_context_nonce(&ctx), + FS_KEY_DERIVATION_NONCE_SIZE); if (!fscrypt_supported_policy(&crypt_info->ci_policy, inode)) { res = -EINVAL; @@ -588,6 +606,15 @@ int fscrypt_drop_inode(struct inode *inode) return 0; mk = ci->ci_master_key->payload.data[0]; + /* + * With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all inodes + * protected by the key were cleaned by sync_filesystem(). 
But if + * userspace is still using the files, inodes can be dirtied between + * then and now. We mustn't lose any writes, so skip dirty inodes here. + */ + if (inode->i_state & I_DIRTY_ALL) + return 0; + /* * Note: since we aren't holding ->mk_secret_sem, the result here can * immediately become outdated. But there's no correctness problem with diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index 47591c54dc3d..3f7bb48f7317 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -9,7 +9,7 @@ * This file implements compatibility functions for the original encryption * policy version ("v1"), including: * - * - Deriving per-file keys using the AES-128-ECB based KDF + * - Deriving per-file encryption keys using the AES-128-ECB based KDF * (rather than the new method of using HKDF-SHA512) * * - Retrieving fscrypt master keys from process-subscribed keyrings @@ -234,7 +234,7 @@ fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key) refcount_set(&dk->dk_refcount, 1); dk->dk_mode = ci->ci_mode; err = fscrypt_prepare_key(&dk->dk_key, raw_key, ci->ci_mode->keysize, - ci); + false /*is_hw_wrapped*/, ci); if (err) goto err_free_dk; memcpy(dk->dk_descriptor, ci->ci_policy.v1.master_key_descriptor, @@ -252,23 +252,8 @@ err_free_dk: static int setup_v1_file_key_direct(struct fscrypt_info *ci, const u8 *raw_master_key) { - const struct fscrypt_mode *mode = ci->ci_mode; struct fscrypt_direct_key *dk; - if (!fscrypt_mode_supports_direct_key(mode)) { - fscrypt_warn(ci->ci_inode, - "Direct key mode not allowed with %s", - mode->friendly_name); - return -EINVAL; - } - - if (ci->ci_policy.v1.contents_encryption_mode != - ci->ci_policy.v1.filenames_encryption_mode) { - fscrypt_warn(ci->ci_inode, - "Direct key mode not allowed with different contents and filenames modes"); - return -EINVAL; - } - dk = fscrypt_get_direct_key(ci, raw_master_key); if (IS_ERR(dk)) return PTR_ERR(dk); @@ -297,7 +282,7 @@ static int 
setup_v1_file_key_derived(struct fscrypt_info *ci, if (err) goto out; - err = fscrypt_set_derived_key(ci, derived_key); + err = fscrypt_set_per_file_enc_key(ci, derived_key); out: kzfree(derived_key); return err; diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 96f528071bed..10ccf945020c 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -29,6 +29,43 @@ bool fscrypt_policies_equal(const union fscrypt_policy *policy1, return !memcmp(policy1, policy2, fscrypt_policy_size(policy1)); } +static bool fscrypt_valid_enc_modes(u32 contents_mode, u32 filenames_mode) +{ + if (contents_mode == FSCRYPT_MODE_AES_256_XTS && + filenames_mode == FSCRYPT_MODE_AES_256_CTS) + return true; + + if (contents_mode == FSCRYPT_MODE_AES_128_CBC && + filenames_mode == FSCRYPT_MODE_AES_128_CTS) + return true; + + if (contents_mode == FSCRYPT_MODE_ADIANTUM && + filenames_mode == FSCRYPT_MODE_ADIANTUM) + return true; + + return false; +} + +static bool supported_direct_key_modes(const struct inode *inode, + u32 contents_mode, u32 filenames_mode) +{ + const struct fscrypt_mode *mode; + + if (contents_mode != filenames_mode) { + fscrypt_warn(inode, + "Direct key flag not allowed with different contents and filenames modes"); + return false; + } + mode = &fscrypt_modes[contents_mode]; + + if (mode->ivsize < offsetofend(union fscrypt_iv, nonce)) { + fscrypt_warn(inode, "Direct key flag not allowed with %s", + mode->friendly_name); + return false; + } + return true; +} + static bool supported_iv_ino_lblk_64_policy( const struct fscrypt_policy_v2 *policy, const struct inode *inode) @@ -63,13 +100,82 @@ static bool supported_iv_ino_lblk_64_policy( return true; } +static bool fscrypt_supported_v1_policy(const struct fscrypt_policy_v1 *policy, + const struct inode *inode) +{ + if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, + policy->filenames_encryption_mode)) { + fscrypt_warn(inode, + "Unsupported encryption modes (contents %d, filenames %d)", + 
policy->contents_encryption_mode, + policy->filenames_encryption_mode); + return false; + } + + if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK | + FSCRYPT_POLICY_FLAG_DIRECT_KEY)) { + fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)", + policy->flags); + return false; + } + + if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) && + !supported_direct_key_modes(inode, policy->contents_encryption_mode, + policy->filenames_encryption_mode)) + return false; + + if (IS_CASEFOLDED(inode)) { + /* With v1, there's no way to derive dirhash keys. */ + fscrypt_warn(inode, + "v1 policies can't be used on casefolded directories"); + return false; + } + + return true; +} + +static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy, + const struct inode *inode) +{ + if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, + policy->filenames_encryption_mode)) { + fscrypt_warn(inode, + "Unsupported encryption modes (contents %d, filenames %d)", + policy->contents_encryption_mode, + policy->filenames_encryption_mode); + return false; + } + + if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) { + fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)", + policy->flags); + return false; + } + + if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) && + !supported_direct_key_modes(inode, policy->contents_encryption_mode, + policy->filenames_encryption_mode)) + return false; + + if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) && + !supported_iv_ino_lblk_64_policy(policy, inode)) + return false; + + if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) { + fscrypt_warn(inode, "Reserved bits set in encryption policy"); + return false; + } + + return true; +} + /** * fscrypt_supported_policy - check whether an encryption policy is supported * * Given an encryption policy, check whether all its encryption modes and other - * settings are supported by this kernel. 
(But we don't currently don't check - * for crypto API support here, so attempting to use an algorithm not configured - * into the crypto API will still fail later.) + * settings are supported by this kernel on the given inode. (But we don't + * currently don't check for crypto API support here, so attempting to use an + * algorithm not configured into the crypto API will still fail later.) * * Return: %true if supported, else %false */ @@ -77,60 +183,10 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u, const struct inode *inode) { switch (policy_u->version) { - case FSCRYPT_POLICY_V1: { - const struct fscrypt_policy_v1 *policy = &policy_u->v1; - - if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, - policy->filenames_encryption_mode)) { - fscrypt_warn(inode, - "Unsupported encryption modes (contents %d, filenames %d)", - policy->contents_encryption_mode, - policy->filenames_encryption_mode); - return false; - } - - if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK | - FSCRYPT_POLICY_FLAG_DIRECT_KEY)) { - fscrypt_warn(inode, - "Unsupported encryption flags (0x%02x)", - policy->flags); - return false; - } - - return true; - } - case FSCRYPT_POLICY_V2: { - const struct fscrypt_policy_v2 *policy = &policy_u->v2; - - if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, - policy->filenames_encryption_mode)) { - fscrypt_warn(inode, - "Unsupported encryption modes (contents %d, filenames %d)", - policy->contents_encryption_mode, - policy->filenames_encryption_mode); - return false; - } - - if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) { - fscrypt_warn(inode, - "Unsupported encryption flags (0x%02x)", - policy->flags); - return false; - } - - if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) && - !supported_iv_ino_lblk_64_policy(policy, inode)) - return false; - - if (memchr_inv(policy->__reserved, 0, - sizeof(policy->__reserved))) { - fscrypt_warn(inode, - "Reserved bits set in encryption policy"); - return false; - } 
- - return true; - } + case FSCRYPT_POLICY_V1: + return fscrypt_supported_v1_policy(&policy_u->v1, inode); + case FSCRYPT_POLICY_V2: + return fscrypt_supported_v2_policy(&policy_u->v2, inode); } return false; } @@ -202,7 +258,7 @@ int fscrypt_policy_from_context(union fscrypt_policy *policy_u, { memset(policy_u, 0, sizeof(*policy_u)); - if (ctx_size <= 0 || ctx_size != fscrypt_context_size(ctx_u)) + if (!fscrypt_context_is_valid(ctx_u, ctx_size)) return -EINVAL; switch (ctx_u->version) { @@ -425,6 +481,25 @@ int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *uarg) } EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_policy_ex); +/* FS_IOC_GET_ENCRYPTION_NONCE: retrieve file's encryption nonce for testing */ +int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg) +{ + struct inode *inode = file_inode(filp); + union fscrypt_context ctx; + int ret; + + ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); + if (ret < 0) + return ret; + if (!fscrypt_context_is_valid(&ctx, ret)) + return -EINVAL; + if (copy_to_user(arg, fscrypt_context_nonce(&ctx), + FS_KEY_DERIVATION_NONCE_SIZE)) + return -EFAULT; + return 0; +} +EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_nonce); + /** * fscrypt_has_permitted_context() - is a file's encryption policy permitted * within its directory? diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index ac2a73c00bfa..332ebb130fee 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -37,6 +37,7 @@ config EXT4_FS select CRC16 select CRYPTO select CRYPTO_CRC32C + select FS_ENCRYPTION_ALGS if FS_ENCRYPTION help This is the next generation of the ext3 filesystem. 
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index e4d13c6ac931..ee766e3bed8b 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -117,7 +117,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) if (IS_ENCRYPTED(inode)) { err = fscrypt_get_encryption_info(inode); - if (err && err != -ENOKEY) + if (err) return err; } @@ -664,10 +664,3 @@ const struct file_operations ext4_dir_operations = { .open = ext4_dir_open, .release = ext4_release_dir, }; - -#ifdef CONFIG_UNICODE -const struct dentry_operations ext4_dentry_ops = { - .d_hash = generic_ci_d_hash, - .d_compare = generic_ci_d_compare, -}; -#endif diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index e7c7a6737a46..3e5ca2107998 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -1100,6 +1100,11 @@ resizefs_out: return -EOPNOTSUPP; return fscrypt_ioctl_get_key_status(filp, (void __user *)arg); + case FS_IOC_GET_ENCRYPTION_NONCE: + if (!ext4_has_feature_encrypt(sb)) + return -EOPNOTSUPP; + return fscrypt_ioctl_get_nonce(filp, (void __user *)arg); + case EXT4_IOC_FSGETXATTR: { struct fsxattr fa; @@ -1243,6 +1248,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case FS_IOC_REMOVE_ENCRYPTION_KEY: case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: case FS_IOC_GET_ENCRYPTION_KEY_STATUS: + case FS_IOC_GET_ENCRYPTION_NONCE: case EXT4_IOC_SHUTDOWN: case FS_IOC_GETFSMAP: case FS_IOC_ENABLE_VERITY: diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 0e6a7cb9e9cf..b134add5a5ad 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1608,6 +1608,7 @@ static struct buffer_head *ext4_lookup_entry(struct inode *dir, struct buffer_head *bh; err = ext4_fname_prepare_lookup(dir, dentry, &fname); + generic_set_encrypted_ci_d_ops(dir, dentry); if (err == -ENOENT) return NULL; if (err) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 8f6ee92e51db..8731f6935136 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4492,11 +4492,6 @@ no_journal: goto failed_mount4; } -#ifdef CONFIG_UNICODE 
- if (sb->s_encoding) - sb->s_d_op = &ext4_dentry_ops; -#endif - sb->s_root = d_make_root(root); if (!sb->s_root) { ext4_msg(sb, KERN_ERR, "get root dentry failed"); diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig index 675af7cd29d3..1940a6574b66 100644 --- a/fs/f2fs/Kconfig +++ b/fs/f2fs/Kconfig @@ -5,6 +5,7 @@ config F2FS_FS select CRYPTO select CRYPTO_CRC32 select F2FS_FS_XATTR if FS_ENCRYPTION + select FS_ENCRYPTION_ALGS if FS_ENCRYPTION help F2FS is based on Log-structured File System (LFS), which supports versatile "flash-friendly" features. The design has been focused on diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 0898fff69259..7fd0b08d7518 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -108,34 +108,52 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir, * Test whether a case-insensitive directory entry matches the filename * being searched for. * + * Only called for encrypted names if the key is available. + * * Returns: 0 if the directory entry matches, more than 0 if it * doesn't match or less than zero on error. 
*/ -int f2fs_ci_compare(const struct inode *parent, const struct qstr *name, - const struct qstr *entry, bool quick) +static int f2fs_ci_compare(const struct inode *parent, const struct qstr *name, + u8 *de_name, size_t de_name_len, bool quick) { const struct super_block *sb = parent->i_sb; const struct unicode_map *um = sb->s_encoding; + struct fscrypt_str decrypted_name = FSTR_INIT(NULL, de_name_len); + struct qstr entry = QSTR_INIT(de_name, de_name_len); int ret; - if (quick) - ret = utf8_strncasecmp_folded(um, name, entry); - else - ret = utf8_strncasecmp(um, name, entry); + if (IS_ENCRYPTED(parent)) { + const struct fscrypt_str encrypted_name = + FSTR_INIT(de_name, de_name_len); + decrypted_name.name = kmalloc(de_name_len, GFP_KERNEL); + if (!decrypted_name.name) + return -ENOMEM; + ret = fscrypt_fname_disk_to_usr(parent, 0, 0, &encrypted_name, + &decrypted_name); + if (ret < 0) + goto out; + entry.name = decrypted_name.name; + entry.len = decrypted_name.len; + } + + if (quick) + ret = utf8_strncasecmp_folded(um, name, &entry); + else + ret = utf8_strncasecmp(um, name, &entry); if (ret < 0) { /* Handle invalid character sequence as either an error * or as an opaque byte sequence. 
*/ if (sb_has_enc_strict_mode(sb)) - return -EINVAL; - - if (name->len != entry->len) - return 1; - - return !!memcmp(name->name, entry->name, name->len); + ret = -EINVAL; + else if (name->len != entry.len) + ret = 1; + else + ret = !!memcmp(name->name, entry.name, entry.len); } - +out: + kfree(decrypted_name.name); return ret; } @@ -173,24 +191,24 @@ static inline bool f2fs_match_name(struct f2fs_dentry_ptr *d, { #ifdef CONFIG_UNICODE struct inode *parent = d->inode; - struct super_block *sb = parent->i_sb; - struct qstr entry; + u8 *name; + int len; #endif if (de->hash_code != namehash) return false; #ifdef CONFIG_UNICODE - entry.name = d->filename[bit_pos]; - entry.len = de->name_len; + name = d->filename[bit_pos]; + len = le16_to_cpu(de->name_len); - if (sb->s_encoding && IS_CASEFOLDED(parent)) { + if (needs_casefold(parent)) { if (cf_str->name) { struct qstr cf = {.name = cf_str->name, .len = cf_str->len}; - return !f2fs_ci_compare(parent, &cf, &entry, true); + return !f2fs_ci_compare(parent, &cf, name, len, true); } - return !f2fs_ci_compare(parent, fname->usr_fname, &entry, + return !f2fs_ci_compare(parent, fname->usr_fname, name, len, false); } #endif @@ -614,13 +632,13 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, const struct qstr *orig_name, + f2fs_hash_t dentry_hash, struct inode *inode, nid_t ino, umode_t mode) { unsigned int bit_pos; unsigned int level; unsigned int current_depth; unsigned long bidx, block; - f2fs_hash_t dentry_hash; unsigned int nbucket, nblock; struct page *dentry_page = NULL; struct f2fs_dentry_block *dentry_blk = NULL; @@ -630,7 +648,6 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, level = 0; slots = GET_DENTRY_SLOTS(new_name->len); - dentry_hash = f2fs_dentry_hash(dir, new_name, NULL); current_depth = F2FS_I(dir)->i_current_depth; if (F2FS_I(dir)->chash == dentry_hash) { @@ -716,17 +733,19 @@ 
int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname, struct inode *inode, nid_t ino, umode_t mode) { struct qstr new_name; + f2fs_hash_t dentry_hash; int err = -EAGAIN; new_name.name = fname_name(fname); new_name.len = fname_len(fname); if (f2fs_has_inline_dentry(dir)) - err = f2fs_add_inline_entry(dir, &new_name, fname->usr_fname, + err = f2fs_add_inline_entry(dir, &new_name, fname, inode, ino, mode); + dentry_hash = f2fs_dentry_hash(dir, &new_name, fname); if (err == -EAGAIN) err = f2fs_add_regular_entry(dir, &new_name, fname->usr_fname, - inode, ino, mode); + dentry_hash, inode, ino, mode); f2fs_update_time(F2FS_I_SB(dir), REQ_TIME); return err; @@ -999,7 +1018,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) if (IS_ENCRYPTED(inode)) { err = fscrypt_get_encryption_info(inode); - if (err && err != -ENOKEY) + if (err) goto out; err = fscrypt_fname_alloc_buffer(inode, F2FS_NAME_LEN, &fstr); @@ -1075,10 +1094,3 @@ const struct file_operations f2fs_dir_operations = { .compat_ioctl = f2fs_compat_ioctl, #endif }; - -#ifdef CONFIG_UNICODE -const struct dentry_operations f2fs_dentry_ops = { - .d_hash = generic_ci_d_hash, - .d_compare = generic_ci_d_compare, -}; -#endif diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 4a365cf7f068..a3529e3e7286 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -3137,11 +3137,6 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, bool hot, bool set); struct dentry *f2fs_get_parent(struct dentry *child); -extern int f2fs_ci_compare(const struct inode *parent, - const struct qstr *name, - const struct qstr *entry, - bool quick); - /* * dir.c */ @@ -3175,7 +3170,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, const struct qstr *name, f2fs_hash_t name_hash, unsigned int bit_pos); int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, - const struct qstr *orig_name, + const struct qstr *orig_name, f2fs_hash_t dentry_hash, 
struct inode *inode, nid_t ino, umode_t mode); int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname, struct inode *inode, nid_t ino, umode_t mode); @@ -3208,7 +3203,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); * hash.c */ f2fs_hash_t f2fs_dentry_hash(const struct inode *dir, - const struct qstr *name_info, struct fscrypt_name *fname); + const struct qstr *name_info, const struct fscrypt_name *fname); /* * node.c @@ -3688,9 +3683,6 @@ static inline void update_sit_info(struct f2fs_sb_info *sbi) {} #endif extern const struct file_operations f2fs_dir_operations; -#ifdef CONFIG_UNICODE -extern const struct dentry_operations f2fs_dentry_ops; -#endif extern const struct file_operations f2fs_file_operations; extern const struct inode_operations f2fs_file_inode_operations; extern const struct address_space_operations f2fs_dblock_aops; @@ -3721,7 +3713,7 @@ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, struct page *ipage); int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, - const struct qstr *orig_name, + const struct fscrypt_name *fname, struct inode *inode, nid_t ino, umode_t mode); void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page, struct inode *dir, diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index b0c432c0fbb2..311a36cba330 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -2444,6 +2444,14 @@ static int f2fs_ioc_get_encryption_key_status(struct file *filp, return fscrypt_ioctl_get_key_status(filp, (void __user *)arg); } +static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg) +{ + if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) + return -EOPNOTSUPP; + + return fscrypt_ioctl_get_nonce(filp, (void __user *)arg); +} + static int f2fs_ioc_gc(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); @@ -3411,6 +3419,8 @@ long f2fs_ioctl(struct file 
*filp, unsigned int cmd, unsigned long arg) return f2fs_ioc_remove_encryption_key_all_users(filp, arg); case FS_IOC_GET_ENCRYPTION_KEY_STATUS: return f2fs_ioc_get_encryption_key_status(filp, arg); + case FS_IOC_GET_ENCRYPTION_NONCE: + return f2fs_ioc_get_encryption_nonce(filp, arg); case F2FS_IOC_GARBAGE_COLLECT: return f2fs_ioc_gc(filp, arg); case F2FS_IOC_GARBAGE_COLLECT_RANGE: @@ -3590,6 +3600,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case FS_IOC_REMOVE_ENCRYPTION_KEY: case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: case FS_IOC_GET_ENCRYPTION_KEY_STATUS: + case FS_IOC_GET_ENCRYPTION_NONCE: case F2FS_IOC_GARBAGE_COLLECT: case F2FS_IOC_GARBAGE_COLLECT_RANGE: case F2FS_IOC_WRITE_CHECKPOINT: diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c index 28acb24e7a7a..8f7ee4362312 100644 --- a/fs/f2fs/hash.c +++ b/fs/f2fs/hash.c @@ -68,8 +68,9 @@ static void str2hashbuf(const unsigned char *msg, size_t len, *buf++ = pad; } -static f2fs_hash_t __f2fs_dentry_hash(const struct qstr *name_info, - struct fscrypt_name *fname) +static f2fs_hash_t __f2fs_dentry_hash(const struct inode *dir, + const struct qstr *name_info, + const struct fscrypt_name *fname) { __u32 hash; f2fs_hash_t f2fs_hash; @@ -79,12 +80,17 @@ static f2fs_hash_t __f2fs_dentry_hash(const struct qstr *name_info, size_t len = name_info->len; /* encrypted bigname case */ - if (fname && !fname->disk_name.name) + if (fname && fname->is_ciphertext_name) return cpu_to_le32(fname->hash); if (is_dot_dotdot(name_info)) return 0; + if (IS_CASEFOLDED(dir) && IS_ENCRYPTED(dir)) { + f2fs_hash = cpu_to_le32(fscrypt_fname_siphash(dir, name_info)); + return f2fs_hash; + } + /* Initialize the default seed for the hash checksum functions */ buf[0] = 0x67452301; buf[1] = 0xefcdab89; @@ -106,7 +112,7 @@ static f2fs_hash_t __f2fs_dentry_hash(const struct qstr *name_info, } f2fs_hash_t f2fs_dentry_hash(const struct inode *dir, - const struct qstr *name_info, struct fscrypt_name *fname) + const struct 
qstr *name_info, const struct fscrypt_name *fname) { #ifdef CONFIG_UNICODE struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); @@ -114,27 +120,30 @@ f2fs_hash_t f2fs_dentry_hash(const struct inode *dir, int r, dlen; unsigned char *buff; struct qstr folded; + const struct qstr *name = fname ? fname->usr_fname : name_info; if (!name_info->len || !IS_CASEFOLDED(dir)) goto opaque_seq; + if (IS_ENCRYPTED(dir) && !fscrypt_has_encryption_key(dir)) + goto opaque_seq; + buff = f2fs_kzalloc(sbi, sizeof(char) * PATH_MAX, GFP_KERNEL); if (!buff) return -ENOMEM; - - dlen = utf8_casefold(um, name_info, buff, PATH_MAX); + dlen = utf8_casefold(um, name, buff, PATH_MAX); if (dlen < 0) { kvfree(buff); goto opaque_seq; } folded.name = buff; folded.len = dlen; - r = __f2fs_dentry_hash(&folded, fname); + r = __f2fs_dentry_hash(dir, &folded, fname); kvfree(buff); return r; opaque_seq: #endif - return __f2fs_dentry_hash(name_info, fname); + return __f2fs_dentry_hash(dir, name_info, fname); } diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index f8c0f6eec6ae..b01e0ac34f8c 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -482,8 +482,8 @@ static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry) ino = le32_to_cpu(de->ino); fake_mode = f2fs_get_de_type(de) << S_SHIFT; - err = f2fs_add_regular_entry(dir, &new_name, NULL, NULL, - ino, fake_mode); + err = f2fs_add_regular_entry(dir, &new_name, NULL, + de->hash_code, NULL, ino, fake_mode); if (err) goto punch_dentry_pages; @@ -595,7 +595,7 @@ out: } int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, - const struct qstr *orig_name, + const struct fscrypt_name *fname, struct inode *inode, nid_t ino, umode_t mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); @@ -606,6 +606,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, struct f2fs_dentry_ptr d; int slots = GET_DENTRY_SLOTS(new_name->len); struct page *page = NULL; + const struct qstr *orig_name = fname->usr_fname; int err = 
0; ipage = f2fs_get_node_page(sbi, dir->i_ino); @@ -636,7 +637,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, f2fs_wait_on_page_writeback(ipage, NODE, true, true); - name_hash = f2fs_dentry_hash(dir, new_name, NULL); + name_hash = f2fs_dentry_hash(dir, new_name, fname); f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos); set_page_dirty(ipage); diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 23d6cccdb4c1..a8959c64bf3a 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -492,6 +492,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, } err = fscrypt_prepare_lookup(dir, dentry, &fname); + generic_set_encrypted_ci_d_ops(dir, dentry); if (err == -ENOENT) goto out_splice; if (err) diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index de737389ba94..fa0a4ae4cf96 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -3331,12 +3331,6 @@ static int f2fs_setup_casefold(struct f2fs_sb_info *sbi) struct unicode_map *encoding; __u16 encoding_flags; - if (f2fs_sb_has_encrypt(sbi)) { - f2fs_err(sbi, - "Can't mount with encoding and encryption"); - return -EINVAL; - } - if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info, &encoding_flags)) { f2fs_err(sbi, @@ -3359,7 +3353,6 @@ static int f2fs_setup_casefold(struct f2fs_sb_info *sbi) sbi->sb->s_encoding = encoding; sbi->sb->s_encoding_flags = encoding_flags; - sbi->sb->s_d_op = &f2fs_dentry_ops; } #else if (f2fs_sb_has_casefold(sbi)) { diff --git a/fs/inode.c b/fs/inode.c index 4e30a37ef712..8c25e0df7a0e 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -2166,7 +2167,7 @@ int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags, !capable(CAP_LINUX_IMMUTABLE)) return -EPERM; - return 0; + return fscrypt_prepare_setflags(inode, oldflags, flags); } EXPORT_SYMBOL(vfs_ioc_setflags_prepare); diff --git a/fs/libfs.c b/fs/libfs.c index f66eb521d4f8..4f2ac9ac0c9a 100644 
--- a/fs/libfs.c +++ b/fs/libfs.c @@ -1281,4 +1281,54 @@ err: return ret; } EXPORT_SYMBOL(generic_ci_d_hash); + +static const struct dentry_operations generic_ci_dentry_ops = { + .d_hash = generic_ci_d_hash, + .d_compare = generic_ci_d_compare, +}; #endif + +#ifdef CONFIG_FS_ENCRYPTION +static const struct dentry_operations generic_encrypted_dentry_ops = { + .d_revalidate = fscrypt_d_revalidate, +}; +#endif + +#if IS_ENABLED(CONFIG_UNICODE) && IS_ENABLED(CONFIG_FS_ENCRYPTION) +static const struct dentry_operations generic_encrypted_ci_dentry_ops = { + .d_hash = generic_ci_d_hash, + .d_compare = generic_ci_d_compare, + .d_revalidate = fscrypt_d_revalidate, +}; +#endif + +/** + * generic_set_encrypted_ci_d_ops - helper for setting d_ops for given dentry + * @dir: parent of dentry whose ops to set + * @dentry: detnry to set ops on + * + * This function sets the dentry ops for the given dentry to handle both + * casefolding and encryption of the dentry name. + */ +void generic_set_encrypted_ci_d_ops(struct inode *dir, struct dentry *dentry) +{ +#ifdef CONFIG_FS_ENCRYPTION + if (dentry->d_flags & DCACHE_ENCRYPTED_NAME) { +#ifdef CONFIG_UNICODE + if (dir->i_sb->s_encoding) { + d_set_d_op(dentry, &generic_encrypted_ci_dentry_ops); + return; + } +#endif + d_set_d_op(dentry, &generic_encrypted_dentry_ops); + return; + } +#endif +#ifdef CONFIG_UNICODE + if (dir->i_sb->s_encoding) { + d_set_d_op(dentry, &generic_ci_dentry_ops); + return; + } +#endif +} +EXPORT_SYMBOL(generic_set_encrypted_ci_d_ops); diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig index dfc6fdf019d7..fe221d7d99d6 100644 --- a/fs/ubifs/Kconfig +++ b/fs/ubifs/Kconfig @@ -7,6 +7,7 @@ config UBIFS_FS select CRYPTO if UBIFS_FS_ZLIB select CRYPTO_LZO if UBIFS_FS_LZO select CRYPTO_DEFLATE if UBIFS_FS_ZLIB + select FS_ENCRYPTION_ALGS if FS_ENCRYPTION depends on MTD_UBI help UBIFS is a file system for flash devices which works on top of UBI. 
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 26ac11d0eb4b..7d5c2cf95353 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -208,6 +208,7 @@ static int dbg_check_name(const struct ubifs_info *c, return 0; } +static void ubifs_set_d_ops(struct inode *dir, struct dentry *dentry); static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { @@ -221,6 +222,7 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry, dbg_gen("'%pd' in dir ino %lu", dentry, dir->i_ino); err = fscrypt_prepare_lookup(dir, dentry, &nm); + ubifs_set_d_ops(dir, dentry); if (err == -ENOENT) return d_splice_alias(NULL, dentry); if (err) @@ -537,7 +539,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx) if (encrypted) { err = fscrypt_get_encryption_info(dir); - if (err && err != -ENOKEY) + if (err) return err; err = fscrypt_fname_alloc_buffer(dir, UBIFS_MAX_NLEN, &fstr); @@ -1684,3 +1686,19 @@ const struct file_operations ubifs_dir_operations = { .compat_ioctl = ubifs_compat_ioctl, #endif }; + +#ifdef CONFIG_FS_ENCRYPTION +static const struct dentry_operations ubifs_encrypted_dentry_ops = { + .d_revalidate = fscrypt_d_revalidate, +}; +#endif + +static void ubifs_set_d_ops(struct inode *dir, struct dentry *dentry) +{ +#ifdef CONFIG_FS_ENCRYPTION + if (dentry->d_flags & DCACHE_ENCRYPTED_NAME) { + d_set_d_op(dentry, &ubifs_encrypted_dentry_ops); + return; + } +#endif +} diff --git a/include/linux/bio-crypt-ctx.h b/include/linux/bio-crypt-ctx.h index 12b46ece9c55..d10c5ad5e07e 100644 --- a/include/linux/bio-crypt-ctx.h +++ b/include/linux/bio-crypt-ctx.h @@ -33,6 +33,8 @@ enum blk_crypto_mode_num { * @data_unit_size_bits: log2 of data_unit_size * @size: size of this key in bytes (determined by @crypto_mode) * @hash: hash of this key, for keyslot manager use only + * @is_hw_wrapped: @raw points to a wrapped key to be used by an inline + * encryption hardware that accepts wrapped keys. 
* @raw: the raw bytes of this key. Only the first @size bytes are used. * * A blk_crypto_key is immutable once created, and many bios can reference it at @@ -44,6 +46,7 @@ struct blk_crypto_key { unsigned int data_unit_size_bits; unsigned int size; unsigned int hash; + bool is_hw_wrapped; u8 raw[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE]; }; diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h index 485cee0b92dd..7dc478a8c3ed 100644 --- a/include/linux/blk-crypto.h +++ b/include/linux/blk-crypto.h @@ -18,9 +18,15 @@ bool blk_crypto_endio(struct bio *bio); int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key, unsigned int raw_key_size, + bool is_hw_wrapped, enum blk_crypto_mode_num crypto_mode, unsigned int data_unit_size); +int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode, + unsigned int data_unit_size, + bool is_hw_wrapped_key, + struct request_queue *q); + int blk_crypto_evict_key(struct request_queue *q, const struct blk_crypto_key *key); @@ -40,22 +46,10 @@ static inline bool blk_crypto_endio(struct bio *bio) #ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK -int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num, - unsigned int data_unit_size, - struct request_queue *q); - int blk_crypto_fallback_init(void); #else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */ -static inline int -blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num, - unsigned int data_unit_size, - struct request_queue *q) -{ - return 0; -} - static inline int blk_crypto_fallback_init(void) { return 0; diff --git a/include/linux/fs.h b/include/linux/fs.h index 1d8a53a6211a..ae71a1faca40 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3217,6 +3217,8 @@ static inline bool needs_casefold(const struct inode *dir) return 0; } #endif +extern void generic_set_encrypted_ci_d_ops(struct inode *dir, + struct dentry *dentry); #ifdef CONFIG_MIGRATION extern int buffer_migrate_page(struct address_space *, diff --git 
a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 3a2971075432..9f791a4b4ad3 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -77,6 +77,21 @@ static inline bool fscrypt_has_encryption_key(const struct inode *inode) return READ_ONCE(inode->i_crypt_info) != NULL; } +/** + * fscrypt_needs_contents_encryption() - check whether an inode needs + * contents encryption + * + * Return: %true iff the inode is an encrypted regular file and the kernel was + * built with fscrypt support. + * + * If you need to know whether the encrypt bit is set even when the kernel was + * built without fscrypt support, you must use IS_ENCRYPTED() directly instead. + */ +static inline bool fscrypt_needs_contents_encryption(const struct inode *inode) +{ + return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); +} + static inline bool fscrypt_dummy_context_enabled(struct inode *inode) { return inode->i_sb->s_cop->dummy_context && @@ -124,11 +139,13 @@ static inline struct page *fscrypt_pagecache_page(struct page *bounce_page) } extern void fscrypt_free_bounce_page(struct page *bounce_page); +extern int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags); /* policy.c */ extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); extern int fscrypt_ioctl_get_policy(struct file *, void __user *); extern int fscrypt_ioctl_get_policy_ex(struct file *, void __user *); +extern int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg); extern int fscrypt_has_permitted_context(struct inode *, struct inode *); extern int fscrypt_inherit_context(struct inode *, struct inode *, void *, bool); @@ -160,82 +177,14 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname) extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, struct fscrypt_str *); extern void fscrypt_fname_free_buffer(struct fscrypt_str *); -extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, - const struct fscrypt_str *, struct fscrypt_str *); - 
-#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32 - -/* Extracts the second-to-last ciphertext block; see explanation below */ -#define FSCRYPT_FNAME_DIGEST(name, len) \ - ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \ - FS_CRYPTO_BLOCK_SIZE)) - -#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE - -/** - * fscrypt_digested_name - alternate identifier for an on-disk filename - * - * When userspace lists an encrypted directory without access to the key, - * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE - * bytes are shown in this abbreviated form (base64-encoded) rather than as the - * full ciphertext (base64-encoded). This is necessary to allow supporting - * filenames up to NAME_MAX bytes, since base64 encoding expands the length. - * - * To make it possible for filesystems to still find the correct directory entry - * despite not knowing the full on-disk name, we encode any filesystem-specific - * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups, - * followed by the second-to-last ciphertext block of the filename. Due to the - * use of the CBC-CTS encryption mode, the second-to-last ciphertext block - * depends on the full plaintext. (Note that ciphertext stealing causes the - * last two blocks to appear "flipped".) This makes accidental collisions very - * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they - * share the same filesystem-specific hashes. - * - * However, this scheme isn't immune to intentional collisions, which can be - * created by anyone able to create arbitrary plaintext filenames and view them - * without the key. Making the "digest" be a real cryptographic hash like - * SHA-256 over the full ciphertext would prevent this, although it would be - * less efficient and harder to implement, especially since the filesystem would - * need to calculate it for each directory entry examined during a search. 
- */ -struct fscrypt_digested_name { - u32 hash; - u32 minor_hash; - u8 digest[FSCRYPT_FNAME_DIGEST_SIZE]; -}; - -/** - * fscrypt_match_name() - test whether the given name matches a directory entry - * @fname: the name being searched for - * @de_name: the name from the directory entry - * @de_name_len: the length of @de_name in bytes - * - * Normally @fname->disk_name will be set, and in that case we simply compare - * that to the name stored in the directory entry. The only exception is that - * if we don't have the key for an encrypted directory and a filename in it is - * very long, then we won't have the full disk_name and we'll instead need to - * match against the fscrypt_digested_name. - * - * Return: %true if the name matches, otherwise %false. - */ -static inline bool fscrypt_match_name(const struct fscrypt_name *fname, - const u8 *de_name, u32 de_name_len) -{ - if (unlikely(!fname->disk_name.name)) { - const struct fscrypt_digested_name *n = - (const void *)fname->crypto_buf.name; - if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_')) - return false; - if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) - return false; - return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len), - n->digest, FSCRYPT_FNAME_DIGEST_SIZE); - } - - if (de_name_len != fname->disk_name.len) - return false; - return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); -} +extern int fscrypt_fname_disk_to_usr(const struct inode *inode, + u32 hash, u32 minor_hash, + const struct fscrypt_str *iname, + struct fscrypt_str *oname); +extern bool fscrypt_match_name(const struct fscrypt_name *fname, + const u8 *de_name, u32 de_name_len); +extern u64 fscrypt_fname_siphash(const struct inode *dir, + const struct qstr *name); /* bio.c */ extern void fscrypt_decrypt_bio(struct bio *); @@ -253,6 +202,8 @@ extern int __fscrypt_prepare_rename(struct inode *old_dir, unsigned int flags); extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, struct fscrypt_name 
*fname); +extern int fscrypt_prepare_setflags(struct inode *inode, + unsigned int oldflags, unsigned int flags); extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, unsigned int max_len, struct fscrypt_str *disk_link); @@ -269,6 +220,11 @@ static inline bool fscrypt_has_encryption_key(const struct inode *inode) return false; } +static inline bool fscrypt_needs_contents_encryption(const struct inode *inode) +{ + return false; +} + static inline bool fscrypt_dummy_context_enabled(struct inode *inode) { return false; @@ -348,6 +304,11 @@ static inline int fscrypt_ioctl_get_policy_ex(struct file *filp, return -EOPNOTSUPP; } +static inline int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg) +{ + return -EOPNOTSUPP; +} + static inline int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) { @@ -452,7 +413,7 @@ static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str) return; } -static inline int fscrypt_fname_disk_to_usr(struct inode *inode, +static inline int fscrypt_fname_disk_to_usr(const struct inode *inode, u32 hash, u32 minor_hash, const struct fscrypt_str *iname, struct fscrypt_str *oname) @@ -469,6 +430,13 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname, return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); } +static inline u64 fscrypt_fname_siphash(const struct inode *dir, + const struct qstr *name) +{ + WARN_ON_ONCE(1); + return 0; +} + /* bio.c */ static inline void fscrypt_decrypt_bio(struct bio *bio) { @@ -511,6 +479,13 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir, return -EOPNOTSUPP; } +static inline int fscrypt_prepare_setflags(struct inode *inode, + unsigned int oldflags, + unsigned int flags) +{ + return 0; +} + static inline int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, unsigned int max_len, @@ -703,8 +678,9 @@ static inline int fscrypt_prepare_rename(struct inode *old_dir, * filenames are 
presented in encrypted form. Therefore, we'll try to set up * the directory's encryption key, but even without it the lookup can continue. * - * This also installs a custom ->d_revalidate() method which will invalidate the - * dentry if it was created without the key and the key is later added. + * After calling this function, a filesystem should ensure that it's dentry + * operations contain fscrypt_d_revalidate if DCACHE_ENCRYPTED_NAME was set, + * so that the dentry can be invalidated if the key is later added. * * Return: 0 on success; -ENOENT if key is unavailable but the filename isn't a * correctly formed encoded ciphertext name, so a negative dentry should be diff --git a/include/linux/keyslot-manager.h b/include/linux/keyslot-manager.h index 6d32a031218e..f022bd6d2497 100644 --- a/include/linux/keyslot-manager.h +++ b/include/linux/keyslot-manager.h @@ -8,6 +8,15 @@ #include +/* Inline crypto feature bits. Must set at least one. */ +enum { + /* Support for standard software-specified keys */ + BLK_CRYPTO_FEATURE_STANDARD_KEYS = BIT(0), + + /* Support for hardware-wrapped keys */ + BLK_CRYPTO_FEATURE_WRAPPED_KEYS = BIT(1), +}; + #ifdef CONFIG_BLK_INLINE_ENCRYPTION struct keyslot_manager; @@ -43,6 +52,7 @@ struct keyslot_mgmt_ll_ops { struct keyslot_manager *keyslot_manager_create(unsigned int num_slots, const struct keyslot_mgmt_ll_ops *ksm_ops, + unsigned int features, const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], void *ll_priv_data); @@ -55,7 +65,8 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot); bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, enum blk_crypto_mode_num crypto_mode, - unsigned int data_unit_size); + unsigned int data_unit_size, + bool is_hw_wrapped_key); int keyslot_manager_evict_key(struct keyslot_manager *ksm, const struct blk_crypto_key *key); @@ -68,6 +79,7 @@ void keyslot_manager_destroy(struct keyslot_manager *ksm); struct keyslot_manager 
*keyslot_manager_create_passthrough( const struct keyslot_mgmt_ll_ops *ksm_ops, + unsigned int features, const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], void *ll_priv_data); diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index 1b9cdb7a5c8f..1b580ac60f98 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -8,6 +8,7 @@ #ifndef _UAPI_LINUX_FSCRYPT_H #define _UAPI_LINUX_FSCRYPT_H +#include #include /* Encryption policy flags */ @@ -166,6 +167,7 @@ struct fscrypt_get_key_status_arg { #define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg) #define FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS _IOWR('f', 25, struct fscrypt_remove_key_arg) #define FS_IOC_GET_ENCRYPTION_KEY_STATUS _IOWR('f', 26, struct fscrypt_get_key_status_arg) +#define FS_IOC_GET_ENCRYPTION_NONCE _IOR('f', 27, __u8[16]) /**********************************************************************/ From e62b481e0fdfb96cc198ae6af0ce8b350bf693ef Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Thu, 12 Mar 2020 13:11:55 -0700 Subject: [PATCH 036/141] Variant ops for UFS crypto and new crypto lib Add QTI implementation for variant ops required for inline encryption with wrapped key support. These include UFS crypto ops and KSM ops. Also add crypto common library to cater to different key programing mechanisms. 
Change-Id: Ica930a8a806a78d4c2d074639cbed355b895a459 Signed-off-by: Gaurav Kashyap Signed-off-by: Neeraj Soni --- drivers/scsi/ufs/Kconfig | 8 + drivers/scsi/ufs/Makefile | 1 + drivers/scsi/ufs/ufs-qcom.c | 7 + drivers/scsi/ufs/ufshcd-crypto-qti.c | 304 ++++++++++++++++ drivers/scsi/ufs/ufshcd-crypto-qti.h | 50 +++ drivers/scsi/ufs/ufshcd-crypto.c | 8 +- drivers/soc/qcom/Kconfig | 17 + drivers/soc/qcom/Makefile | 2 + drivers/soc/qcom/crypto-qti-common.c | 467 +++++++++++++++++++++++++ drivers/soc/qcom/crypto-qti-ice-regs.h | 163 +++++++++ drivers/soc/qcom/crypto-qti-platform.h | 47 +++ drivers/soc/qcom/crypto-qti-tz.c | 101 ++++++ drivers/soc/qcom/crypto-qti-tz.h | 71 ++++ include/linux/crypto-qti-common.h | 95 +++++ 14 files changed, 1337 insertions(+), 4 deletions(-) create mode 100644 drivers/scsi/ufs/ufshcd-crypto-qti.c create mode 100644 drivers/scsi/ufs/ufshcd-crypto-qti.h create mode 100644 drivers/soc/qcom/crypto-qti-common.c create mode 100644 drivers/soc/qcom/crypto-qti-ice-regs.h create mode 100644 drivers/soc/qcom/crypto-qti-platform.h create mode 100644 drivers/soc/qcom/crypto-qti-tz.c create mode 100644 drivers/soc/qcom/crypto-qti-tz.h create mode 100644 include/linux/crypto-qti-common.h diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index e63ed53620d7..8fa2313508ea 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -131,3 +131,11 @@ config SCSI_UFS_CRYPTO Enabling this makes it possible for the kernel to use the crypto capabilities of the UFS device (if present) to perform crypto operations on data being transferred to/from the device. + +config SCSI_UFS_CRYPTO_QTI + tristate "Vendor specific UFS Crypto Engine Support" + depends on SCSI_UFS_CRYPTO + help + Enable Vendor Crypto Engine Support in UFS + Enabling this allows kernel to use UFS crypto operations defined + and implemented by QTI. 
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 93a2e1a10335..fe4c092c006b 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o obj-$(CONFIG_SCSI_UFS_TEST) += ufs_test.o obj-$(CONFIG_DEBUG_FS) += ufs-debugfs.o ufs-qcom-debugfs.o ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o +ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO_QTI) += ufshcd-crypto-qti.o diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index c93a6f5048d4..4b76913104f7 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -30,6 +30,7 @@ #include "ufshci.h" #include "ufs-qcom-debugfs.h" #include "ufs_quirks.h" +#include "ufshcd-crypto-qti.h" #define MAX_PROP_SIZE 32 #define VDDP_REF_CLK_MIN_UV 1200000 @@ -2103,6 +2104,12 @@ static int ufs_qcom_init(struct ufs_hba *hba) /* restore the secure configuration */ ufs_qcom_update_sec_cfg(hba, true); + /* + * Set the vendor specific ops needed for ICE. + * Default implementation if the ops are not set. + */ + ufshcd_crypto_qti_set_vops(hba); + err = ufs_qcom_bus_register(host); if (err) goto out_variant_clear; diff --git a/drivers/scsi/ufs/ufshcd-crypto-qti.c b/drivers/scsi/ufs/ufshcd-crypto-qti.c new file mode 100644 index 000000000000..f06f2899dcac --- /dev/null +++ b/drivers/scsi/ufs/ufshcd-crypto-qti.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020, Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include + +#include "ufshcd-crypto-qti.h" + +#define MINIMUM_DUN_SIZE 512 +#define MAXIMUM_DUN_SIZE 65536 + +#define NUM_KEYSLOTS(hba) (hba->crypto_capabilities.config_count + 1) + +static struct ufs_hba_crypto_variant_ops ufshcd_crypto_qti_variant_ops = { + .hba_init_crypto = ufshcd_crypto_qti_init_crypto, + .enable = ufshcd_crypto_qti_enable, + .disable = ufshcd_crypto_qti_disable, + .resume = ufshcd_crypto_qti_resume, + .debug = ufshcd_crypto_qti_debug, +}; + +static uint8_t get_data_unit_size_mask(unsigned int data_unit_size) +{ + if (data_unit_size < MINIMUM_DUN_SIZE || + data_unit_size > MAXIMUM_DUN_SIZE || + !is_power_of_2(data_unit_size)) + return 0; + + return data_unit_size / MINIMUM_DUN_SIZE; +} + +static bool ice_cap_idx_valid(struct ufs_hba *hba, + unsigned int cap_idx) +{ + return cap_idx < hba->crypto_capabilities.num_crypto_cap; +} + +void ufshcd_crypto_qti_enable(struct ufs_hba *hba) +{ + int err = 0; + + if (!ufshcd_hba_is_crypto_supported(hba)) + return; + + err = crypto_qti_enable(hba->crypto_vops->priv); + if (err) { + pr_err("%s: Error enabling crypto, err %d\n", + __func__, err); + ufshcd_crypto_qti_disable(hba); + } + + ufshcd_crypto_enable_spec(hba); + +} + +void ufshcd_crypto_qti_disable(struct ufs_hba *hba) +{ + ufshcd_crypto_disable_spec(hba); + crypto_qti_disable(hba->crypto_vops->priv); +} + + +static int ufshcd_crypto_qti_keyslot_program(struct keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot) +{ + struct ufs_hba *hba = keyslot_manager_private(ksm); + int err = 0; + u8 data_unit_mask; + int crypto_alg_id; + + crypto_alg_id = ufshcd_crypto_cap_find(hba, key->crypto_mode, + key->data_unit_size); + + if (!ufshcd_is_crypto_enabled(hba) || + !ufshcd_keyslot_valid(hba, slot) || + !ice_cap_idx_valid(hba, crypto_alg_id)) + return -EINVAL; + + data_unit_mask = get_data_unit_size_mask(key->data_unit_size); + + if (!(data_unit_mask & + hba->crypto_cap_array[crypto_alg_id].sdus_mask)) 
+ return -EINVAL; + + pm_runtime_get_sync(hba->dev); + err = ufshcd_hold(hba, false); + if (err) { + pr_err("%s: failed to enable clocks, err %d\n", __func__, err); + return err; + } + + err = crypto_qti_keyslot_program(hba->crypto_vops->priv, key, slot, + data_unit_mask, crypto_alg_id); + if (err) { + pr_err("%s: failed with error %d\n", __func__, err); + ufshcd_release(hba, false); + pm_runtime_put_sync(hba->dev); + return err; + } + + ufshcd_release(hba, false); + pm_runtime_put_sync(hba->dev); + + return 0; +} + +static int ufshcd_crypto_qti_keyslot_evict(struct keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot) +{ + int err = 0; + struct ufs_hba *hba = keyslot_manager_private(ksm); + + if (!ufshcd_is_crypto_enabled(hba) || + !ufshcd_keyslot_valid(hba, slot)) + return -EINVAL; + + pm_runtime_get_sync(hba->dev); + err = ufshcd_hold(hba, false); + if (err) { + pr_err("%s: failed to enable clocks, err %d\n", __func__, err); + return err; + } + + err = crypto_qti_keyslot_evict(hba->crypto_vops->priv, slot); + if (err) { + pr_err("%s: failed with error %d\n", + __func__, err); + ufshcd_release(hba, false); + pm_runtime_put_sync(hba->dev); + return err; + } + + ufshcd_release(hba, false); + pm_runtime_put_sync(hba->dev); + + return err; +} + +static int ufshcd_crypto_qti_derive_raw_secret(struct keyslot_manager *ksm, + const u8 *wrapped_key, + unsigned int wrapped_key_size, + u8 *secret, + unsigned int secret_size) +{ + return crypto_qti_derive_raw_secret(wrapped_key, wrapped_key_size, + secret, secret_size); +} + +static const struct keyslot_mgmt_ll_ops ufshcd_crypto_qti_ksm_ops = { + .keyslot_program = ufshcd_crypto_qti_keyslot_program, + .keyslot_evict = ufshcd_crypto_qti_keyslot_evict, + .derive_raw_secret = ufshcd_crypto_qti_derive_raw_secret, +}; + +static enum blk_crypto_mode_num ufshcd_blk_crypto_qti_mode_num_for_alg_dusize( + enum ufs_crypto_alg ufs_crypto_alg, + enum ufs_crypto_key_size key_size) +{ + /* + * This is currently the 
only mode that UFS and blk-crypto both support. + */ + if (ufs_crypto_alg == UFS_CRYPTO_ALG_AES_XTS && + key_size == UFS_CRYPTO_KEY_SIZE_256) + return BLK_ENCRYPTION_MODE_AES_256_XTS; + + return BLK_ENCRYPTION_MODE_INVALID; +} + +static int ufshcd_hba_init_crypto_qti_spec(struct ufs_hba *hba, + const struct keyslot_mgmt_ll_ops *ksm_ops) +{ + int cap_idx = 0; + int err = 0; + unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX]; + enum blk_crypto_mode_num blk_mode_num; + + /* Default to disabling crypto */ + hba->caps &= ~UFSHCD_CAP_CRYPTO; + + if (!(hba->capabilities & MASK_CRYPTO_SUPPORT)) { + err = -ENODEV; + goto out; + } + + /* + * Crypto Capabilities should never be 0, because the + * config_array_ptr > 04h. So we use a 0 value to indicate that + * crypto init failed, and can't be enabled. + */ + hba->crypto_capabilities.reg_val = + cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP)); + hba->crypto_cfg_register = + (u32)hba->crypto_capabilities.config_array_ptr * 0x100; + hba->crypto_cap_array = + devm_kcalloc(hba->dev, + hba->crypto_capabilities.num_crypto_cap, + sizeof(hba->crypto_cap_array[0]), + GFP_KERNEL); + if (!hba->crypto_cap_array) { + err = -ENOMEM; + goto out; + } + + memset(crypto_modes_supported, 0, sizeof(crypto_modes_supported)); + /* + * Store all the capabilities now so that we don't need to repeatedly + * access the device each time we want to know its capabilities + */ + for (cap_idx = 0; cap_idx < hba->crypto_capabilities.num_crypto_cap; + cap_idx++) { + hba->crypto_cap_array[cap_idx].reg_val = + cpu_to_le32(ufshcd_readl(hba, + REG_UFS_CRYPTOCAP + + cap_idx * sizeof(__le32))); + blk_mode_num = ufshcd_blk_crypto_qti_mode_num_for_alg_dusize( + hba->crypto_cap_array[cap_idx].algorithm_id, + hba->crypto_cap_array[cap_idx].key_size); + if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID) + continue; + crypto_modes_supported[blk_mode_num] |= + hba->crypto_cap_array[cap_idx].sdus_mask * 512; + } + + hba->ksm = 
keyslot_manager_create(ufshcd_num_keyslots(hba), ksm_ops, + BLK_CRYPTO_FEATURE_STANDARD_KEYS | + BLK_CRYPTO_FEATURE_WRAPPED_KEYS, + crypto_modes_supported, hba); + + if (!hba->ksm) { + err = -ENOMEM; + goto out; + } + pr_debug("%s: keyslot manager created\n", __func__); + + return 0; + +out: + /* Indicate that init failed by setting crypto_capabilities to 0 */ + hba->crypto_capabilities.reg_val = 0; + return err; +} + +int ufshcd_crypto_qti_init_crypto(struct ufs_hba *hba, + const struct keyslot_mgmt_ll_ops *ksm_ops) +{ + int err = 0; + struct platform_device *pdev = to_platform_device(hba->dev); + void __iomem *mmio_base; + struct resource *mem_res; + + mem_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "ufs_ice"); + mmio_base = devm_ioremap_resource(hba->dev, mem_res); + if (IS_ERR(mmio_base)) { + pr_err("%s: Unable to get ufs_crypto mmio base\n", __func__); + return PTR_ERR(mmio_base); + } + + err = ufshcd_hba_init_crypto_qti_spec(hba, &ufshcd_crypto_qti_ksm_ops); + if (err) { + pr_err("%s: Error initiating crypto capabilities, err %d\n", + __func__, err); + return err; + } + + err = crypto_qti_init_crypto(hba->dev, + mmio_base, (void **)&hba->crypto_vops->priv); + if (err) { + pr_err("%s: Error initiating crypto, err %d\n", + __func__, err); + } + return err; +} + +int ufshcd_crypto_qti_debug(struct ufs_hba *hba) +{ + return crypto_qti_debug(hba->crypto_vops->priv); +} + +void ufshcd_crypto_qti_set_vops(struct ufs_hba *hba) +{ + return ufshcd_crypto_set_vops(hba, &ufshcd_crypto_qti_variant_ops); +} + +int ufshcd_crypto_qti_resume(struct ufs_hba *hba, + enum ufs_pm_op pm_op) +{ + return crypto_qti_resume(hba->crypto_vops->priv); +} diff --git a/drivers/scsi/ufs/ufshcd-crypto-qti.h b/drivers/scsi/ufs/ufshcd-crypto-qti.h new file mode 100644 index 000000000000..1e75ce0a5c92 --- /dev/null +++ b/drivers/scsi/ufs/ufshcd-crypto-qti.h @@ -0,0 +1,50 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _UFSHCD_CRYPTO_QTI_H +#define _UFSHCD_CRYPTO_QTI_H + +#include "ufshcd.h" +#include "ufshcd-crypto.h" + +void ufshcd_crypto_qti_enable(struct ufs_hba *hba); + +void ufshcd_crypto_qti_disable(struct ufs_hba *hba); + +int ufshcd_crypto_qti_init_crypto(struct ufs_hba *hba, + const struct keyslot_mgmt_ll_ops *ksm_ops); + +void ufshcd_crypto_qti_setup_rq_keyslot_manager(struct ufs_hba *hba, + struct request_queue *q); + +void ufshcd_crypto_qti_destroy_rq_keyslot_manager(struct ufs_hba *hba, + struct request_queue *q); + +int ufshcd_crypto_qti_prepare_lrbp_crypto(struct ufs_hba *hba, + struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp); + +int ufshcd_crypto_qti_complete_lrbp_crypto(struct ufs_hba *hba, + struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp); + +int ufshcd_crypto_qti_debug(struct ufs_hba *hba); + +int ufshcd_crypto_qti_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op); + +int ufshcd_crypto_qti_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op); + +#ifdef CONFIG_SCSI_UFS_CRYPTO_QTI +void ufshcd_crypto_qti_set_vops(struct ufs_hba *hba); +#else +static inline void ufshcd_crypto_qti_set_vops(struct ufs_hba *hba) +{} +#endif /* CONFIG_SCSI_UFS_CRYPTO_QTI */ +#endif /* _UFSHCD_CRYPTO_QTI_H */ diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c index 28abedfbf609..4fb86fbf097e 100644 --- a/drivers/scsi/ufs/ufshcd-crypto.c +++ b/drivers/scsi/ufs/ufshcd-crypto.c @@ -128,8 +128,8 @@ static int ufshcd_program_key(struct ufs_hba *hba, pm_runtime_get_sync(hba->dev); 
ufshcd_hold(hba, false); - if (hba->vops->program_key) { - err = hba->vops->program_key(hba, cfg, slot); + if (hba->var->vops->program_key) { + err = hba->var->vops->program_key(hba, cfg, slot); goto out; } @@ -154,14 +154,14 @@ static int ufshcd_program_key(struct ufs_hba *hba, wmb(); err = 0; out: - ufshcd_release(hba); + ufshcd_release(hba, false); pm_runtime_put_sync(hba->dev); return err; } static void ufshcd_clear_keyslot(struct ufs_hba *hba, int slot) { - union ufs_crypto_cfg_entry cfg = { 0 }; + union ufs_crypto_cfg_entry cfg = { {0} }; int err; err = ufshcd_program_key(hba, &cfg, slot); diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index fa4e8e735872..a1b71feaec1d 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -1050,6 +1050,23 @@ config QCOM_SOC_INFO based on the chip ID and querying the SoC revision. This information is loaded by the bootloader into SMEM during the boot up process. +config QTI_CRYPTO_COMMON + tristate "Enable common crypto functionality used for FBE" + depends on BLK_INLINE_ENCRYPTION + help + Say 'Y' to enable the common crypto implementation to be used by + different storage layers such as UFS and EMMC for file based hardware + encryption. This library implements API to program and evict + keys using Trustzone or Hardware Key Manager. + +config QTI_CRYPTO_TZ + tristate "Enable Trustzone to be used for FBE" + depends on QTI_CRYPTO_COMMON + help + Say 'Y' to enable routing crypto requests to Trustzone while + performing hardware based file encryption. This means keys are + programmed and managed through SCM calls to TZ where ICE driver + will configure keys. 
endmenu config QCOM_HYP_CORE_CTL diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 1de8bfeefda0..c017c8f3afb4 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -119,3 +119,5 @@ obj-$(CONFIG_CPU_V7) += idle-v7.o obj-$(CONFIG_MSM_BAM_DMUX) += bam_dmux.o obj-$(CONFIG_WCNSS_CORE) += wcnss/ obj-$(CONFIG_RENAME_BLOCK_DEVICE) += rename_block_device.o +obj-$(CONFIG_QTI_CRYPTO_COMMON) += crypto-qti-common.o +obj-$(CONFIG_QTI_CRYPTO_TZ) += crypto-qti-tz.o diff --git a/drivers/soc/qcom/crypto-qti-common.c b/drivers/soc/qcom/crypto-qti-common.c new file mode 100644 index 000000000000..cd2eaef78a10 --- /dev/null +++ b/drivers/soc/qcom/crypto-qti-common.c @@ -0,0 +1,467 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020, Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include "crypto-qti-ice-regs.h" +#include "crypto-qti-platform.h" + +static int ice_check_fuse_setting(struct crypto_vops_qti_entry *ice_entry) +{ + uint32_t regval; + uint32_t major, minor; + + major = (ice_entry->ice_hw_version & ICE_CORE_MAJOR_REV_MASK) >> + ICE_CORE_MAJOR_REV; + minor = (ice_entry->ice_hw_version & ICE_CORE_MINOR_REV_MASK) >> + ICE_CORE_MINOR_REV; + + /* Check fuse setting is not supported on ICE 3.2 onwards */ + if ((major == 0x03) && (minor >= 0x02)) + return 0; + regval = ice_readl(ice_entry, ICE_REGS_FUSE_SETTING); + regval &= (ICE_FUSE_SETTING_MASK | + ICE_FORCE_HW_KEY0_SETTING_MASK | + ICE_FORCE_HW_KEY1_SETTING_MASK); + + if (regval) { + pr_err("%s: error: ICE_ERROR_HW_DISABLE_FUSE_BLOWN\n", + __func__); + return -EPERM; + } + return 0; +} + +static int ice_check_version(struct crypto_vops_qti_entry *ice_entry) +{ + uint32_t version, major, minor, step; + + version = ice_readl(ice_entry, ICE_REGS_VERSION); + major = (version & ICE_CORE_MAJOR_REV_MASK) >> ICE_CORE_MAJOR_REV; + minor = (version & ICE_CORE_MINOR_REV_MASK) >> ICE_CORE_MINOR_REV; + step = (version & ICE_CORE_STEP_REV_MASK) >> ICE_CORE_STEP_REV; + + if (major < ICE_CORE_CURRENT_MAJOR_VERSION) { + pr_err("%s: Unknown ICE device at %lu, rev %d.%d.%d\n", + __func__, (unsigned long)ice_entry->icemmio_base, + major, minor, step); + return -ENODEV; + } + + ice_entry->ice_hw_version = version; + + return 0; +} + +int crypto_qti_init_crypto(struct device *dev, void __iomem *mmio_base, + void **priv_data) +{ + int err = 0; + struct crypto_vops_qti_entry *ice_entry; + + ice_entry = devm_kzalloc(dev, + sizeof(struct crypto_vops_qti_entry), + GFP_KERNEL); + if (!ice_entry) + return -ENOMEM; + + ice_entry->icemmio_base = mmio_base; + ice_entry->flags = 0; + + err = ice_check_version(ice_entry); + if (err) { + pr_err("%s: check version failed, err %d\n", __func__, err); + return err; + } + + err = ice_check_fuse_setting(ice_entry); + if (err) + return err; + + 
*priv_data = (void *)ice_entry; + + return err; +} + +static void ice_low_power_and_optimization_enable( + struct crypto_vops_qti_entry *ice_entry) +{ + uint32_t regval; + + regval = ice_readl(ice_entry, ICE_REGS_ADVANCED_CONTROL); + /* Enable low power mode sequence + * [0]-0,[1]-0,[2]-0,[3]-7,[4]-0,[5]-0,[6]-0,[7]-0, + * Enable CONFIG_CLK_GATING, STREAM2_CLK_GATING and STREAM1_CLK_GATING + */ + regval |= 0x7000; + /* Optimization enable sequence + */ + regval |= 0xD807100; + ice_writel(ice_entry, regval, ICE_REGS_ADVANCED_CONTROL); + /* + * Memory barrier - to ensure write completion before next transaction + */ + wmb(); +} + +static int ice_wait_bist_status(struct crypto_vops_qti_entry *ice_entry) +{ + int count; + uint32_t regval; + + for (count = 0; count < QTI_ICE_MAX_BIST_CHECK_COUNT; count++) { + regval = ice_readl(ice_entry, ICE_REGS_BIST_STATUS); + if (!(regval & ICE_BIST_STATUS_MASK)) + break; + udelay(50); + } + + if (regval) { + pr_err("%s: wait bist status failed, reg %d\n", + __func__, regval); + return -ETIMEDOUT; + } + + return 0; +} + +static void ice_enable_intr(struct crypto_vops_qti_entry *ice_entry) +{ + uint32_t regval; + + regval = ice_readl(ice_entry, ICE_REGS_NON_SEC_IRQ_MASK); + regval &= ~ICE_REGS_NON_SEC_IRQ_MASK; + ice_writel(ice_entry, regval, ICE_REGS_NON_SEC_IRQ_MASK); + /* + * Memory barrier - to ensure write completion before next transaction + */ + wmb(); +} + +static void ice_disable_intr(struct crypto_vops_qti_entry *ice_entry) +{ + uint32_t regval; + + regval = ice_readl(ice_entry, ICE_REGS_NON_SEC_IRQ_MASK); + regval |= ICE_REGS_NON_SEC_IRQ_MASK; + ice_writel(ice_entry, regval, ICE_REGS_NON_SEC_IRQ_MASK); + /* + * Memory barrier - to ensure write completion before next transaction + */ + wmb(); +} + +int crypto_qti_enable(void *priv_data) +{ + int err = 0; + struct crypto_vops_qti_entry *ice_entry; + + ice_entry = (struct crypto_vops_qti_entry *) priv_data; + if (!ice_entry) { + pr_err("%s: vops ice data is invalid\n", 
__func__); + return -EINVAL; + } + + ice_low_power_and_optimization_enable(ice_entry); + err = ice_wait_bist_status(ice_entry); + if (err) + return err; + ice_enable_intr(ice_entry); + + return err; +} + +void crypto_qti_disable(void *priv_data) +{ + struct crypto_vops_qti_entry *ice_entry; + + ice_entry = (struct crypto_vops_qti_entry *) priv_data; + if (!ice_entry) { + pr_err("%s: vops ice data is invalid\n", __func__); + return; + } + + crypto_qti_disable_platform(ice_entry); + ice_disable_intr(ice_entry); +} + +int crypto_qti_resume(void *priv_data) +{ + int err = 0; + struct crypto_vops_qti_entry *ice_entry; + + ice_entry = (struct crypto_vops_qti_entry *) priv_data; + if (!ice_entry) { + pr_err("%s: vops ice data is invalid\n", __func__); + return -EINVAL; + } + + err = ice_wait_bist_status(ice_entry); + + return err; +} + +static void ice_dump_test_bus(struct crypto_vops_qti_entry *ice_entry) +{ + uint32_t regval = 0x1; + uint32_t val; + uint8_t bus_selector; + uint8_t stream_selector; + + pr_err("ICE TEST BUS DUMP:\n"); + + for (bus_selector = 0; bus_selector <= 0xF; bus_selector++) { + regval = 0x1; /* enable test bus */ + regval |= bus_selector << 28; + if (bus_selector == 0xD) + continue; + ice_writel(ice_entry, regval, ICE_REGS_TEST_BUS_CONTROL); + /* + * make sure test bus selector is written before reading + * the test bus register + */ + wmb(); + val = ice_readl(ice_entry, ICE_REGS_TEST_BUS_REG); + pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n", + regval, val); + } + + pr_err("ICE TEST BUS DUMP (ICE_STREAM1_DATAPATH_TEST_BUS):\n"); + for (stream_selector = 0; stream_selector <= 0xF; stream_selector++) { + regval = 0xD0000001; /* enable stream test bus */ + regval |= stream_selector << 16; + ice_writel(ice_entry, regval, ICE_REGS_TEST_BUS_CONTROL); + /* + * make sure test bus selector is written before reading + * the test bus register + */ + wmb(); + val = ice_readl(ice_entry, ICE_REGS_TEST_BUS_REG); + 
pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n", + regval, val); + } +} + + +int crypto_qti_debug(void *priv_data) +{ + struct crypto_vops_qti_entry *ice_entry; + + ice_entry = (struct crypto_vops_qti_entry *) priv_data; + if (!ice_entry) { + pr_err("%s: vops ice data is invalid\n", __func__); + return -EINVAL; + } + + pr_err("%s: ICE Control: 0x%08x | ICE Reset: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_CONTROL), + ice_readl(ice_entry, ICE_REGS_RESET)); + + pr_err("%s: ICE Version: 0x%08x | ICE FUSE: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_VERSION), + ice_readl(ice_entry, ICE_REGS_FUSE_SETTING)); + + pr_err("%s: ICE Param1: 0x%08x | ICE Param2: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_PARAMETERS_1), + ice_readl(ice_entry, ICE_REGS_PARAMETERS_2)); + + pr_err("%s: ICE Param3: 0x%08x | ICE Param4: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_PARAMETERS_3), + ice_readl(ice_entry, ICE_REGS_PARAMETERS_4)); + + pr_err("%s: ICE Param5: 0x%08x | ICE IRQ STTS: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_PARAMETERS_5), + ice_readl(ice_entry, ICE_REGS_NON_SEC_IRQ_STTS)); + + pr_err("%s: ICE IRQ MASK: 0x%08x | ICE IRQ CLR: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_NON_SEC_IRQ_MASK), + ice_readl(ice_entry, ICE_REGS_NON_SEC_IRQ_CLR)); + + pr_err("%s: ICE INVALID CCFG ERR STTS: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_INVALID_CCFG_ERR_STTS)); + + pr_err("%s: ICE BIST Sts: 0x%08x | ICE Bypass Sts: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_BIST_STATUS), + ice_readl(ice_entry, ICE_REGS_BYPASS_STATUS)); + + pr_err("%s: ICE ADV CTRL: 0x%08x | ICE ENDIAN SWAP: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_ADVANCED_CONTROL), + ice_readl(ice_entry, ICE_REGS_ENDIAN_SWAP)); + + pr_err("%s: ICE_STM1_ERR_SYND1: 0x%08x | 
ICE_STM1_ERR_SYND2: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_STREAM1_ERROR_SYNDROME1), + ice_readl(ice_entry, ICE_REGS_STREAM1_ERROR_SYNDROME2)); + + pr_err("%s: ICE_STM2_ERR_SYND1: 0x%08x | ICE_STM2_ERR_SYND2: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_STREAM2_ERROR_SYNDROME1), + ice_readl(ice_entry, ICE_REGS_STREAM2_ERROR_SYNDROME2)); + + pr_err("%s: ICE_STM1_COUNTER1: 0x%08x | ICE_STM1_COUNTER2: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS1), + ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS2)); + + pr_err("%s: ICE_STM1_COUNTER3: 0x%08x | ICE_STM1_COUNTER4: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS3), + ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS4)); + + pr_err("%s: ICE_STM2_COUNTER1: 0x%08x | ICE_STM2_COUNTER2: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS1), + ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS2)); + + pr_err("%s: ICE_STM2_COUNTER3: 0x%08x | ICE_STM2_COUNTER4: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS3), + ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS4)); + + pr_err("%s: ICE_STM1_CTR5_MSB: 0x%08x | ICE_STM1_CTR5_LSB: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS5_MSB), + ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS5_LSB)); + + pr_err("%s: ICE_STM1_CTR6_MSB: 0x%08x | ICE_STM1_CTR6_LSB: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS6_MSB), + ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS6_LSB)); + + pr_err("%s: ICE_STM1_CTR7_MSB: 0x%08x | ICE_STM1_CTR7_LSB: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS7_MSB), + ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS7_LSB)); + + pr_err("%s: ICE_STM1_CTR8_MSB: 0x%08x | ICE_STM1_CTR8_LSB: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, 
ICE_REGS_STREAM1_COUNTERS8_MSB), + ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS8_LSB)); + + pr_err("%s: ICE_STM1_CTR9_MSB: 0x%08x | ICE_STM1_CTR9_LSB: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS9_MSB), + ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS9_LSB)); + + pr_err("%s: ICE_STM2_CTR5_MSB: 0x%08x | ICE_STM2_CTR5_LSB: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS5_MSB), + ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS5_LSB)); + + pr_err("%s: ICE_STM2_CTR6_MSB: 0x%08x | ICE_STM2_CTR6_LSB: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS6_MSB), + ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS6_LSB)); + + pr_err("%s: ICE_STM2_CTR7_MSB: 0x%08x | ICE_STM2_CTR7_LSB: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS7_MSB), + ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS7_LSB)); + + pr_err("%s: ICE_STM2_CTR8_MSB: 0x%08x | ICE_STM2_CTR8_LSB: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS8_MSB), + ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS8_LSB)); + + pr_err("%s: ICE_STM2_CTR9_MSB: 0x%08x | ICE_STM2_CTR9_LSB: 0x%08x\n", + ice_entry->ice_dev_type, + ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS9_MSB), + ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS9_LSB)); + + ice_dump_test_bus(ice_entry); + + return 0; +} + +int crypto_qti_keyslot_program(void *priv_data, + const struct blk_crypto_key *key, + unsigned int slot, + u8 data_unit_mask, int capid) +{ + int err = 0; + struct crypto_vops_qti_entry *ice_entry; + + ice_entry = (struct crypto_vops_qti_entry *) priv_data; + if (!ice_entry) { + pr_err("%s: vops ice data is invalid\n", __func__); + return -EINVAL; + } + + err = crypto_qti_program_key(ice_entry, key, slot, + data_unit_mask, capid); + if (err) { + pr_err("%s: program key failed with error %d\n", __func__, err); + err = crypto_qti_invalidate_key(ice_entry, slot); + if 
(err) { + pr_err("%s: invalidate key failed with error %d\n", + __func__, err); + return err; + } + } + + return err; +} + +int crypto_qti_keyslot_evict(void *priv_data, unsigned int slot) +{ + int err = 0; + struct crypto_vops_qti_entry *ice_entry; + + ice_entry = (struct crypto_vops_qti_entry *) priv_data; + if (!ice_entry) { + pr_err("%s: vops ice data is invalid\n", __func__); + return -EINVAL; + } + + err = crypto_qti_invalidate_key(ice_entry, slot); + if (err) { + pr_err("%s: invalidate key failed with error %d\n", + __func__, err); + return err; + } + + return err; +} + +int crypto_qti_derive_raw_secret(const u8 *wrapped_key, + unsigned int wrapped_key_size, u8 *secret, + unsigned int secret_size) +{ + int err = 0; + + if (wrapped_key_size <= RAW_SECRET_SIZE) { + pr_err("%s: Invalid wrapped_key_size: %u\n", + __func__, wrapped_key_size); + err = -EINVAL; + return err; + } + if (secret_size != RAW_SECRET_SIZE) { + pr_err("%s: Invalid secret size: %u\n", __func__, secret_size); + err = -EINVAL; + return err; + } + + memcpy(secret, wrapped_key, secret_size); + + return err; +} diff --git a/drivers/soc/qcom/crypto-qti-ice-regs.h b/drivers/soc/qcom/crypto-qti-ice-regs.h new file mode 100644 index 000000000000..d9e4cf2ad75f --- /dev/null +++ b/drivers/soc/qcom/crypto-qti-ice-regs.h @@ -0,0 +1,163 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _CRYPTO_INLINE_CRYPTO_ENGINE_REGS_H_ +#define _CRYPTO_INLINE_CRYPTO_ENGINE_REGS_H_ + +#include + +/* Register bits for ICE version */ +#define ICE_CORE_CURRENT_MAJOR_VERSION 0x03 + +#define ICE_CORE_STEP_REV_MASK 0xFFFF +#define ICE_CORE_STEP_REV 0 /* bit 15-0 */ +#define ICE_CORE_MAJOR_REV_MASK 0xFF000000 +#define ICE_CORE_MAJOR_REV 24 /* bit 31-24 */ +#define ICE_CORE_MINOR_REV_MASK 0xFF0000 +#define ICE_CORE_MINOR_REV 16 /* bit 23-16 */ + +#define ICE_BIST_STATUS_MASK (0xF0000000) /* bits 28-31 */ + +#define ICE_FUSE_SETTING_MASK 0x1 +#define ICE_FORCE_HW_KEY0_SETTING_MASK 0x2 +#define ICE_FORCE_HW_KEY1_SETTING_MASK 0x4 + +/* QTI ICE Registers from SWI */ +#define ICE_REGS_CONTROL 0x0000 +#define ICE_REGS_RESET 0x0004 +#define ICE_REGS_VERSION 0x0008 +#define ICE_REGS_FUSE_SETTING 0x0010 +#define ICE_REGS_PARAMETERS_1 0x0014 +#define ICE_REGS_PARAMETERS_2 0x0018 +#define ICE_REGS_PARAMETERS_3 0x001C +#define ICE_REGS_PARAMETERS_4 0x0020 +#define ICE_REGS_PARAMETERS_5 0x0024 + + +/* QTI ICE v3.X only */ +#define ICE_GENERAL_ERR_STTS 0x0040 +#define ICE_INVALID_CCFG_ERR_STTS 0x0030 +#define ICE_GENERAL_ERR_MASK 0x0044 + + +/* QTI ICE v2.X only */ +#define ICE_REGS_NON_SEC_IRQ_STTS 0x0040 +#define ICE_REGS_NON_SEC_IRQ_MASK 0x0044 + + +#define ICE_REGS_NON_SEC_IRQ_CLR 0x0048 +#define ICE_REGS_STREAM1_ERROR_SYNDROME1 0x0050 +#define ICE_REGS_STREAM1_ERROR_SYNDROME2 0x0054 +#define ICE_REGS_STREAM2_ERROR_SYNDROME1 0x0058 +#define ICE_REGS_STREAM2_ERROR_SYNDROME2 0x005C +#define ICE_REGS_STREAM1_BIST_ERROR_VEC 0x0060 +#define ICE_REGS_STREAM2_BIST_ERROR_VEC 0x0064 +#define ICE_REGS_STREAM1_BIST_FINISH_VEC 0x0068 +#define ICE_REGS_STREAM2_BIST_FINISH_VEC 0x006C +#define ICE_REGS_BIST_STATUS 0x0070 +#define ICE_REGS_BYPASS_STATUS 0x0074 +#define ICE_REGS_ADVANCED_CONTROL 0x1000 +#define ICE_REGS_ENDIAN_SWAP 0x1004 +#define ICE_REGS_TEST_BUS_CONTROL 0x1010 +#define ICE_REGS_TEST_BUS_REG 0x1014 +#define ICE_REGS_STREAM1_COUNTERS1 0x1100 +#define 
ICE_REGS_STREAM1_COUNTERS2 0x1104 +#define ICE_REGS_STREAM1_COUNTERS3 0x1108 +#define ICE_REGS_STREAM1_COUNTERS4 0x110C +#define ICE_REGS_STREAM1_COUNTERS5_MSB 0x1110 +#define ICE_REGS_STREAM1_COUNTERS5_LSB 0x1114 +#define ICE_REGS_STREAM1_COUNTERS6_MSB 0x1118 +#define ICE_REGS_STREAM1_COUNTERS6_LSB 0x111C +#define ICE_REGS_STREAM1_COUNTERS7_MSB 0x1120 +#define ICE_REGS_STREAM1_COUNTERS7_LSB 0x1124 +#define ICE_REGS_STREAM1_COUNTERS8_MSB 0x1128 +#define ICE_REGS_STREAM1_COUNTERS8_LSB 0x112C +#define ICE_REGS_STREAM1_COUNTERS9_MSB 0x1130 +#define ICE_REGS_STREAM1_COUNTERS9_LSB 0x1134 +#define ICE_REGS_STREAM2_COUNTERS1 0x1200 +#define ICE_REGS_STREAM2_COUNTERS2 0x1204 +#define ICE_REGS_STREAM2_COUNTERS3 0x1208 +#define ICE_REGS_STREAM2_COUNTERS4 0x120C +#define ICE_REGS_STREAM2_COUNTERS5_MSB 0x1210 +#define ICE_REGS_STREAM2_COUNTERS5_LSB 0x1214 +#define ICE_REGS_STREAM2_COUNTERS6_MSB 0x1218 +#define ICE_REGS_STREAM2_COUNTERS6_LSB 0x121C +#define ICE_REGS_STREAM2_COUNTERS7_MSB 0x1220 +#define ICE_REGS_STREAM2_COUNTERS7_LSB 0x1224 +#define ICE_REGS_STREAM2_COUNTERS8_MSB 0x1228 +#define ICE_REGS_STREAM2_COUNTERS8_LSB 0x122C +#define ICE_REGS_STREAM2_COUNTERS9_MSB 0x1230 +#define ICE_REGS_STREAM2_COUNTERS9_LSB 0x1234 + +#define ICE_STREAM1_PREMATURE_LBA_CHANGE (1L << 0) +#define ICE_STREAM2_PREMATURE_LBA_CHANGE (1L << 1) +#define ICE_STREAM1_NOT_EXPECTED_LBO (1L << 2) +#define ICE_STREAM2_NOT_EXPECTED_LBO (1L << 3) +#define ICE_STREAM1_NOT_EXPECTED_DUN (1L << 4) +#define ICE_STREAM2_NOT_EXPECTED_DUN (1L << 5) +#define ICE_STREAM1_NOT_EXPECTED_DUS (1L << 6) +#define ICE_STREAM2_NOT_EXPECTED_DUS (1L << 7) +#define ICE_STREAM1_NOT_EXPECTED_DBO (1L << 8) +#define ICE_STREAM2_NOT_EXPECTED_DBO (1L << 9) +#define ICE_STREAM1_NOT_EXPECTED_ENC_SEL (1L << 10) +#define ICE_STREAM2_NOT_EXPECTED_ENC_SEL (1L << 11) +#define ICE_STREAM1_NOT_EXPECTED_CONF_IDX (1L << 12) +#define ICE_STREAM2_NOT_EXPECTED_CONF_IDX (1L << 13) +#define ICE_STREAM1_NOT_EXPECTED_NEW_TRNS (1L << 14) +#define 
ICE_STREAM2_NOT_EXPECTED_NEW_TRNS (1L << 15) + +#define ICE_NON_SEC_IRQ_MASK \ + (ICE_STREAM1_PREMATURE_LBA_CHANGE |\ + ICE_STREAM2_PREMATURE_LBA_CHANGE |\ + ICE_STREAM1_NOT_EXPECTED_LBO |\ + ICE_STREAM2_NOT_EXPECTED_LBO |\ + ICE_STREAM1_NOT_EXPECTED_DUN |\ + ICE_STREAM2_NOT_EXPECTED_DUN |\ + ICE_STREAM2_NOT_EXPECTED_DUS |\ + ICE_STREAM1_NOT_EXPECTED_DBO |\ + ICE_STREAM2_NOT_EXPECTED_DBO |\ + ICE_STREAM1_NOT_EXPECTED_ENC_SEL |\ + ICE_STREAM2_NOT_EXPECTED_ENC_SEL |\ + ICE_STREAM1_NOT_EXPECTED_CONF_IDX |\ + ICE_STREAM1_NOT_EXPECTED_NEW_TRNS |\ + ICE_STREAM2_NOT_EXPECTED_NEW_TRNS) + +/* QTI ICE registers from secure side */ +#define ICE_TEST_BUS_REG_SECURE_INTR (1L << 28) +#define ICE_TEST_BUS_REG_NON_SECURE_INTR (1L << 2) + +#define ICE_LUT_KEYS_CRYPTOCFG_R_16 0x4040 +#define ICE_LUT_KEYS_CRYPTOCFG_R_17 0x4044 +#define ICE_LUT_KEYS_CRYPTOCFG_OFFSET 0x80 + + +#define ICE_LUT_KEYS_ICE_SEC_IRQ_STTS 0x6200 +#define ICE_LUT_KEYS_ICE_SEC_IRQ_MASK 0x6204 +#define ICE_LUT_KEYS_ICE_SEC_IRQ_CLR 0x6208 + +#define ICE_STREAM1_PARTIALLY_SET_KEY_USED (1L << 0) +#define ICE_STREAM2_PARTIALLY_SET_KEY_USED (1L << 1) +#define ICE_QTIC_DBG_OPEN_EVENT (1L << 30) +#define ICE_KEYS_RAM_RESET_COMPLETED (1L << 31) + +#define ICE_SEC_IRQ_MASK \ + (ICE_STREAM1_PARTIALLY_SET_KEY_USED |\ + ICE_STREAM2_PARTIALLY_SET_KEY_USED |\ + ICE_QTIC_DBG_OPEN_EVENT | \ + ICE_KEYS_RAM_RESET_COMPLETED) + +#define ice_writel(ice_entry, val, reg) \ + writel_relaxed((val), (ice_entry)->icemmio_base + (reg)) +#define ice_readl(ice_entry, reg) \ + readl_relaxed((ice_entry)->icemmio_base + (reg)) + +#endif /* _CRYPTO_INLINE_CRYPTO_ENGINE_REGS_H_ */ diff --git a/drivers/soc/qcom/crypto-qti-platform.h b/drivers/soc/qcom/crypto-qti-platform.h new file mode 100644 index 000000000000..a37e34895ee7 --- /dev/null +++ b/drivers/soc/qcom/crypto-qti-platform.h @@ -0,0 +1,47 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CRYPTO_QTI_PLATFORM_H +#define _CRYPTO_QTI_PLATFORM_H + +#include +#include +#include +#include + +#if IS_ENABLED(CONFIG_QTI_CRYPTO_TZ) +int crypto_qti_program_key(struct crypto_vops_qti_entry *ice_entry, + const struct blk_crypto_key *key, unsigned int slot, + unsigned int data_unit_mask, int capid); +int crypto_qti_invalidate_key(struct crypto_vops_qti_entry *ice_entry, + unsigned int slot); +#else +static inline int crypto_qti_program_key( + struct crypto_vops_qti_entry *ice_entry, + const struct blk_crypto_key *key, + unsigned int slot, unsigned int data_unit_mask, + int capid) +{ + return 0; +} +static inline int crypto_qti_invalidate_key( + struct crypto_vops_qti_entry *ice_entry, unsigned int slot) +{ + return 0; +} +#endif /* CONFIG_QTI_CRYPTO_TZ */ + +static inline void crypto_qti_disable_platform( + struct crypto_vops_qti_entry *ice_entry) +{} + +#endif /* _CRYPTO_QTI_PLATFORM_H */ diff --git a/drivers/soc/qcom/crypto-qti-tz.c b/drivers/soc/qcom/crypto-qti-tz.c new file mode 100644 index 000000000000..154a08389274 --- /dev/null +++ b/drivers/soc/qcom/crypto-qti-tz.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020, Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include "crypto-qti-platform.h" +#include "crypto-qti-tz.h" + +unsigned int storage_type = SDCC_CE; + +#define ICE_BUFFER_SIZE 128 + +static uint8_t ice_buffer[ICE_BUFFER_SIZE]; + +int crypto_qti_program_key(struct crypto_vops_qti_entry *ice_entry, + const struct blk_crypto_key *key, + unsigned int slot, unsigned int data_unit_mask, + int capid) +{ + int err = 0; + uint32_t smc_id = 0; + char *tzbuf = NULL; + struct scm_desc desc = {0}; + + tzbuf = ice_buffer; + + memcpy(tzbuf, key->raw, key->size); + dmac_flush_range(tzbuf, tzbuf + key->size); + + smc_id = TZ_ES_CONFIG_SET_ICE_KEY_ID; + desc.arginfo = TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID; + desc.args[0] = slot; + desc.args[1] = virt_to_phys(tzbuf); + desc.args[2] = ICE_BUFFER_SIZE; + desc.args[3] = ICE_CIPHER_MODE_XTS_256; + desc.args[4] = data_unit_mask; + + + err = scm_call2_noretry(smc_id, &desc); + if (err) + pr_err("%s:SCM call Error: 0x%x slot %d\n", + __func__, err, slot); + + return err; +} + +int crypto_qti_invalidate_key( + struct crypto_vops_qti_entry *ice_entry, unsigned int slot) +{ + int err = 0; + uint32_t smc_id = 0; + struct scm_desc desc = {0}; + + smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID; + + desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID; + desc.args[0] = slot; + + err = scm_call2_noretry(smc_id, &desc); + if (err) + pr_err("%s:SCM call Error: 0x%x\n", __func__, err); + return err; +} + +static int crypto_qti_storage_type(unsigned int *s_type) +{ + char boot[20] = {'\0'}; + char *match = (char *)strnstr(saved_command_line, + "androidboot.bootdevice=", + strlen(saved_command_line)); + if (match) { + memcpy(boot, (match + strlen("androidboot.bootdevice=")), + sizeof(boot) - 1); + if (strnstr(boot, 
"ufs", strlen(boot))) + *s_type = UFS_CE; + + return 0; + } + return -EINVAL; +} + +static int __init crypto_qti_init(void) +{ + return crypto_qti_storage_type(&storage_type); +} + +module_init(crypto_qti_init); diff --git a/drivers/soc/qcom/crypto-qti-tz.h b/drivers/soc/qcom/crypto-qti-tz.h new file mode 100644 index 000000000000..bcb946096072 --- /dev/null +++ b/drivers/soc/qcom/crypto-qti-tz.h @@ -0,0 +1,71 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +#ifndef _CRYPTO_QTI_TZ_H +#define _CRYPTO_QTI_TZ_H + +#define TZ_ES_INVALIDATE_ICE_KEY 0x3 +#define TZ_ES_CONFIG_SET_ICE_KEY 0x4 +#define TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE 0x5 +#define TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE 0x6 + +#define TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE_ID \ + TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, \ + TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE) + +#define TZ_ES_CONFIG_SET_ICE_KEY_ID \ + TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, \ + TZ_ES_CONFIG_SET_ICE_KEY) + +#define TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE_ID \ + TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \ + TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE) + +#define TZ_ES_INVALIDATE_ICE_KEY_ID \ + TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \ + TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY) + +#define TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID \ + TZ_SYSCALL_CREATE_PARAM_ID_1( \ + TZ_SYSCALL_PARAM_TYPE_VAL) + +#define TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID \ + TZ_SYSCALL_CREATE_PARAM_ID_5( \ + TZ_SYSCALL_PARAM_TYPE_VAL, \ + TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \ + 
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL) + +#define TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE_PARAM_ID \ + TZ_SYSCALL_CREATE_PARAM_ID_2( \ + TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL) + +#define TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE_PARAM_ID \ + TZ_SYSCALL_CREATE_PARAM_ID_6( \ + TZ_SYSCALL_PARAM_TYPE_VAL, \ + TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \ + TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \ + TZ_SYSCALL_PARAM_TYPE_VAL) + +enum { + ICE_CIPHER_MODE_XTS_128 = 0, + ICE_CIPHER_MODE_CBC_128 = 1, + ICE_CIPHER_MODE_XTS_256 = 3, + ICE_CIPHER_MODE_CBC_256 = 4 +}; + +#define UFS_CE 10 +#define SDCC_CE 20 +#define UFS_CARD_CE 30 + +#endif /* _CRYPTO_QTI_TZ_H */ diff --git a/include/linux/crypto-qti-common.h b/include/linux/crypto-qti-common.h new file mode 100644 index 000000000000..dd4122f4d9a8 --- /dev/null +++ b/include/linux/crypto-qti-common.h @@ -0,0 +1,95 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _CRYPTO_QTI_COMMON_H +#define _CRYPTO_QTI_COMMON_H + +#include +#include +#include +#include +#include + +#define RAW_SECRET_SIZE 32 +#define QTI_ICE_MAX_BIST_CHECK_COUNT 100 +#define QTI_ICE_TYPE_NAME_LEN 8 + +struct crypto_vops_qti_entry { + void __iomem *icemmio_base; + uint32_t ice_hw_version; + uint8_t ice_dev_type[QTI_ICE_TYPE_NAME_LEN]; + uint32_t flags; +}; + +#if IS_ENABLED(CONFIG_QTI_CRYPTO_COMMON) +// crypto-qti-common.c +int crypto_qti_init_crypto(struct device *dev, void __iomem *mmio_base, + void **priv_data); +int crypto_qti_enable(void *priv_data); +void crypto_qti_disable(void *priv_data); +int crypto_qti_resume(void *priv_data); +int crypto_qti_debug(void *priv_data); +int crypto_qti_keyslot_program(void *priv_data, + const struct blk_crypto_key *key, + unsigned int slot, u8 data_unit_mask, + int capid); +int crypto_qti_keyslot_evict(void *priv_data, unsigned int slot); +int crypto_qti_derive_raw_secret(const u8 *wrapped_key, + unsigned int wrapped_key_size, u8 *secret, + unsigned int secret_size); + +#else +static inline int crypto_qti_init_crypto(struct device *dev, + void __iomem *mmio_base, + void **priv_data) +{ + return 0; +} +static inline int crypto_qti_enable(void *priv_data) +{ + return 0; +} +static inline void crypto_qti_disable(void *priv_data) +{ + return 0; +} +static inline int crypto_qti_resume(void *priv_data) +{ + return 0; +} +static inline int crypto_qti_debug(void *priv_data) +{ + return 0; +} +static inline int crypto_qti_keyslot_program(void *priv_data, + const struct blk_crypto_key *key, + unsigned int slot, + u8 data_unit_mask, + int capid) +{ + return 0; +} +static inline int crypto_qti_keyslot_evict(void *priv_data, unsigned int slot) +{ + return 0; +} +static inline int crypto_qti_derive_raw_secret(const u8 *wrapped_key, + unsigned int wrapped_key_size, + u8 *secret, + unsigned int secret_size) +{ + return 0; +} + +#endif /* CONFIG_QTI_CRYPTO_COMMON */ + +#endif /* _CRYPTO_QTI_COMMON_H */ From 
ffc41530ad3b923d41a64cd890060607de770df0 Mon Sep 17 00:00:00 2001 From: Pradeep P V K Date: Tue, 4 Aug 2020 12:05:28 +0530 Subject: [PATCH 037/141] mmc: host: Use request queue pointer for mmc crypto To use block crypto based inline encryption mechanism storage device driver should create a keyslot manager and register it with device request queue. To achieve this pass request queue pointer during host controller initialization where request queue can be updated with keyslot manager. Change-Id: I71f0005a1ad8867b6210e92878b8c112d436688e Signed-off-by: Pradeep P V K --- drivers/mmc/core/queue.c | 3 +++ drivers/mmc/host/cmdq_hci.c | 7 +++++++ include/linux/mmc/host.h | 7 +++++++ 3 files changed, 17 insertions(+) diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index ecc794323729..ba338d2a1c00 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -437,6 +437,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, /* hook for pm qos cmdq init */ if (card->host->cmdq_ops->init) card->host->cmdq_ops->init(card->host); + if (host->cmdq_ops->cqe_crypto_update_queue) + host->cmdq_ops->cqe_crypto_update_queue(host, + mq->queue); mq->thread = kthread_run(mmc_cmdq_thread, mq, "mmc-cmdqd/%d%s", host->index, diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c index 87c1cb7abf39..f1e4ba86f5d4 100644 --- a/drivers/mmc/host/cmdq_hci.c +++ b/drivers/mmc/host/cmdq_hci.c @@ -1277,6 +1277,12 @@ static int cmdq_late_init(struct mmc_host *mmc) return 0; } +static void cqhci_crypto_update_queue(struct mmc_host *mmc, + struct request_queue *queue) +{ + //struct cqhci_host *cq_host = mmc->cqe_private; +} + static const struct mmc_cmdq_host_ops cmdq_host_ops = { .init = cmdq_late_init, .enable = cmdq_enable, @@ -1286,6 +1292,7 @@ static const struct mmc_cmdq_host_ops cmdq_host_ops = { .halt = cmdq_halt, .reset = cmdq_reset, .dumpstate = cmdq_dumpstate, + .cqe_crypto_update_queue = cqhci_crypto_update_queue, }; struct cmdq_host 
*cmdq_pltfm_init(struct platform_device *pdev) diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 3c2b261b9c79..ab2c6af4dca1 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -122,6 +122,13 @@ struct mmc_cmdq_host_ops { int (*halt)(struct mmc_host *host, bool halt); void (*reset)(struct mmc_host *host, bool soft); void (*dumpstate)(struct mmc_host *host); + /* + * Update the request queue with keyslot manager details. This keyslot + * manager will be used by block crypto to configure the crypto Engine + * for data encryption. + */ + void (*cqe_crypto_update_queue)(struct mmc_host *host, + struct request_queue *queue); }; struct mmc_host_ops { From 98c06766bef41b5c080338d65124d87438b7676e Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Tue, 21 Jul 2020 15:29:02 +0530 Subject: [PATCH 038/141] mmc: cqhci: eMMC JEDEC v5.2 crypto spec addition Add crypto capability registers and structs defined in v5.2 of JEDEC eMMC specification in prepration to add support for inline encryption to eMMC controllers. 
Change-Id: I4cd9d73a291033b885a66a209d097c768fbff921 Signed-off-by: Neeraj Soni --- drivers/mmc/host/cmdq_hci.h | 60 +++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h index 0b7c38710c34..56932b7f42f0 100644 --- a/drivers/mmc/host/cmdq_hci.h +++ b/drivers/mmc/host/cmdq_hci.h @@ -12,12 +12,16 @@ #ifndef LINUX_MMC_CQ_HCI_H #define LINUX_MMC_CQ_HCI_H #include +#include /* registers */ /* version */ #define CQVER 0x00 /* capabilities */ #define CQCAP 0x04 +#define CQ_CAP_CS (1 << 28) +#define CQ_CCAP 0x100 +#define CQ_CRYPTOCAP 0x104 /* configuration */ #define CQCFG 0x08 #define CQ_DCMD 0x00001000 @@ -151,6 +155,62 @@ #define CQ_VENDOR_CFG 0x100 #define CMDQ_SEND_STATUS_TRIGGER (1 << 31) +/* CCAP - Crypto Capability 100h */ +union cmdq_crypto_capabilities { + __le32 reg_val; + struct { + u8 num_crypto_cap; + u8 config_count; + u8 reserved; + u8 config_array_ptr; + }; +}; + +enum cmdq_crypto_key_size { + CMDQ_CRYPTO_KEY_SIZE_INVALID = 0x0, + CMDQ_CRYPTO_KEY_SIZE_128 = 0x1, + CMDQ_CRYPTO_KEY_SIZE_192 = 0x2, + CMDQ_CRYPTO_KEY_SIZE_256 = 0x3, + CMDQ_CRYPTO_KEY_SIZE_512 = 0x4, +}; + +enum cmdq_crypto_alg { + CMDQ_CRYPTO_ALG_AES_XTS = 0x0, + CMDQ_CRYPTO_ALG_BITLOCKER_AES_CBC = 0x1, + CMDQ_CRYPTO_ALG_AES_ECB = 0x2, + CMDQ_CRYPTO_ALG_ESSIV_AES_CBC = 0x3, +}; + +/* x-CRYPTOCAP - Crypto Capability X */ +union cmdq_crypto_cap_entry { + __le32 reg_val; + struct { + u8 algorithm_id; + u8 sdus_mask; /* Supported data unit size mask */ + u8 key_size; + u8 reserved; + }; +}; + +#define CMDQ_CRYPTO_CONFIGURATION_ENABLE (1 << 7) +#define CMDQ_CRYPTO_KEY_MAX_SIZE 64 + +/* x-CRYPTOCFG - Crypto Configuration X */ +union cmdq_crypto_cfg_entry { + __le32 reg_val[32]; + struct { + u8 crypto_key[CMDQ_CRYPTO_KEY_MAX_SIZE]; + u8 data_unit_size; + u8 crypto_cap_idx; + u8 reserved_1; + u8 config_enable; + u8 reserved_multi_host; + u8 reserved_2; + u8 vsb[2]; + u8 reserved_3[56]; + }; +}; + struct 
task_history { u64 task; bool is_dcmd; From e718389e5ebf677f3cac233b9243ad408165c221 Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Thu, 6 Feb 2020 16:48:04 +0530 Subject: [PATCH 039/141] mmc: cqhci: Add eMMC crypto APIs Add functions to use eMMC inline encryption hardware capability inline with JEDEC eMMC v5.2 specification and to work with block keyslot manager. Also add crypto variant vops to handle quirks in individual inline encryption hardware. The vops fallback to default implementation which is JEDEC eMMC v5.2 compliant. Change-Id: I72b85d572d7c76b966e34b80e7e8eca83a2bb35f Signed-off-by: Neeraj Soni --- drivers/mmc/host/Kconfig | 9 + drivers/mmc/host/Makefile | 2 +- drivers/mmc/host/cmdq_hci-crypto.c | 536 +++++++++++++++++++++++++++++ drivers/mmc/host/cmdq_hci-crypto.h | 188 ++++++++++ drivers/mmc/host/cmdq_hci.h | 28 ++ 5 files changed, 762 insertions(+), 1 deletion(-) create mode 100644 drivers/mmc/host/cmdq_hci-crypto.c create mode 100644 drivers/mmc/host/cmdq_hci-crypto.h diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 979b909704df..1919fbaffc8d 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -913,3 +913,12 @@ config MMC_SDHCI_XENON This selects Marvell Xenon eMMC/SD/SDIO SDHCI. If you have a controller with this interface, say Y or M here. If unsure, say N. + +config MMC_CQ_HCI_CRYPTO + bool "CQHCI Crypto Engine Support" + depends on MMC_CQ_HCI && BLK_INLINE_ENCRYPTION + help + Enable Crypto Engine Support in CQHCI. + Enabling this makes it possible for the kernel to use the crypto + capabilities of the CQHCI device (if present) to perform crypto + operations on data being transferred to/from the device. 
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 6389e8125299..7ab3a706bd38 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -85,13 +85,13 @@ obj-$(CONFIG_MMC_SDHCI_OF_AT91) += sdhci-of-at91.o obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o -obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o obj-$(CONFIG_MMC_CQ_HCI) += cmdq_hci.o obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o +obj-$(CONFIG_MMC_CQ_HCI_CRYPTO) += cmdq_hci-crypto.o ifeq ($(CONFIG_CB710_DEBUG),y) CFLAGS-cb710-mmc += -DDEBUG diff --git a/drivers/mmc/host/cmdq_hci-crypto.c b/drivers/mmc/host/cmdq_hci-crypto.c new file mode 100644 index 000000000000..26f84001f064 --- /dev/null +++ b/drivers/mmc/host/cmdq_hci-crypto.c @@ -0,0 +1,536 @@ +/* + * Copyright 2020 Google LLC + * + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * drivers/mmc/host/cmdq-crypto.c - Qualcomm Technologies, Inc. + * + * Original source is taken from: + * https://android.googlesource.com/kernel/common/+/4bac1109a10c55d49c0aa4f7ebdc4bc53cc368e8 + * The driver caters to crypto engine support for UFS controllers. 
+ * The crypto engine programming sequence, HW functionality and register + * offset is almost same in UFS and eMMC controllers. + */ + +#include +#include "cmdq_hci-crypto.h" +#include "../core/queue.h" + +static bool cmdq_cap_idx_valid(struct cmdq_host *host, unsigned int cap_idx) +{ + return cap_idx < host->crypto_capabilities.num_crypto_cap; +} + +static u8 get_data_unit_size_mask(unsigned int data_unit_size) +{ + if (data_unit_size < 512 || data_unit_size > 65536 || + !is_power_of_2(data_unit_size)) + return 0; + + return data_unit_size / 512; +} + +static size_t get_keysize_bytes(enum cmdq_crypto_key_size size) +{ + switch (size) { + case CMDQ_CRYPTO_KEY_SIZE_128: + return 16; + case CMDQ_CRYPTO_KEY_SIZE_192: + return 24; + case CMDQ_CRYPTO_KEY_SIZE_256: + return 32; + case CMDQ_CRYPTO_KEY_SIZE_512: + return 64; + default: + return 0; + } +} + +int cmdq_crypto_cap_find(void *host_p, enum blk_crypto_mode_num crypto_mode, + unsigned int data_unit_size) +{ + struct cmdq_host *host = host_p; + enum cmdq_crypto_alg cmdq_alg; + u8 data_unit_mask; + int cap_idx; + enum cmdq_crypto_key_size cmdq_key_size; + union cmdq_crypto_cap_entry *ccap_array = host->crypto_cap_array; + + if (!cmdq_host_is_crypto_supported(host)) + return -EINVAL; + + switch (crypto_mode) { + case BLK_ENCRYPTION_MODE_AES_256_XTS: + cmdq_alg = CMDQ_CRYPTO_ALG_AES_XTS; + cmdq_key_size = CMDQ_CRYPTO_KEY_SIZE_256; + break; + default: + return -EINVAL; + } + + data_unit_mask = get_data_unit_size_mask(data_unit_size); + + for (cap_idx = 0; cap_idx < host->crypto_capabilities.num_crypto_cap; + cap_idx++) { + if (ccap_array[cap_idx].algorithm_id == cmdq_alg && + (ccap_array[cap_idx].sdus_mask & data_unit_mask) && + ccap_array[cap_idx].key_size == cmdq_key_size) + return cap_idx; + } + + return -EINVAL; +} +EXPORT_SYMBOL(cmdq_crypto_cap_find); + +/** + * cmdq_crypto_cfg_entry_write_key - Write a key into a crypto_cfg_entry + * + * Writes the key with the appropriate format - for AES_XTS, + * the first 
half of the key is copied as is, the second half is + * copied with an offset halfway into the cfg->crypto_key array. + * For the other supported crypto algs, the key is just copied. + * + * @cfg: The crypto config to write to + * @key: The key to write + * @cap: The crypto capability (which specifies the crypto alg and key size) + * + * Returns 0 on success, or -EINVAL + */ +static int cmdq_crypto_cfg_entry_write_key(union cmdq_crypto_cfg_entry *cfg, + const u8 *key, + union cmdq_crypto_cap_entry cap) +{ + size_t key_size_bytes = get_keysize_bytes(cap.key_size); + + if (key_size_bytes == 0) + return -EINVAL; + + switch (cap.algorithm_id) { + case CMDQ_CRYPTO_ALG_AES_XTS: + key_size_bytes *= 2; + if (key_size_bytes > CMDQ_CRYPTO_KEY_MAX_SIZE) + return -EINVAL; + + memcpy(cfg->crypto_key, key, key_size_bytes/2); + memcpy(cfg->crypto_key + CMDQ_CRYPTO_KEY_MAX_SIZE/2, + key + key_size_bytes/2, key_size_bytes/2); + return 0; + case CMDQ_CRYPTO_ALG_BITLOCKER_AES_CBC: + /* fall through */ + case CMDQ_CRYPTO_ALG_AES_ECB: + /* fall through */ + case CMDQ_CRYPTO_ALG_ESSIV_AES_CBC: + memcpy(cfg->crypto_key, key, key_size_bytes); + return 0; + } + + return -EINVAL; +} + +static void cmdq_program_key(struct cmdq_host *host, + const union cmdq_crypto_cfg_entry *cfg, + int slot) +{ + int i; + u32 slot_offset = host->crypto_cfg_register + slot * sizeof(*cfg); + + if (host->crypto_vops && host->crypto_vops->program_key) + host->crypto_vops->program_key(host, cfg, slot); + + /* Clear the dword 16 */ + cmdq_writel(host, 0, slot_offset + 16 * sizeof(cfg->reg_val[0])); + /* Ensure that CFGE is cleared before programming the key */ + wmb(); + for (i = 0; i < 16; i++) { + cmdq_writel(host, le32_to_cpu(cfg->reg_val[i]), + slot_offset + i * sizeof(cfg->reg_val[0])); + /* Spec says each dword in key must be written sequentially */ + wmb(); + } + /* Write dword 17 */ + cmdq_writel(host, le32_to_cpu(cfg->reg_val[17]), + slot_offset + 17 * sizeof(cfg->reg_val[0])); + /* Dword 16 must be 
written last */ + wmb(); + /* Write dword 16 */ + cmdq_writel(host, le32_to_cpu(cfg->reg_val[16]), + slot_offset + 16 * sizeof(cfg->reg_val[0])); + /*Ensure that dword 16 is written */ + wmb(); +} + +static void cmdq_crypto_clear_keyslot(struct cmdq_host *host, int slot) +{ + union cmdq_crypto_cfg_entry cfg = { {0} }; + + cmdq_program_key(host, &cfg, slot); +} + +static void cmdq_crypto_clear_all_keyslots(struct cmdq_host *host) +{ + int slot; + + for (slot = 0; slot < cmdq_num_keyslots(host); slot++) + cmdq_crypto_clear_keyslot(host, slot); +} + +static int cmdq_crypto_keyslot_program(struct keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot) +{ + struct cmdq_host *host = keyslot_manager_private(ksm); + int err = 0; + u8 data_unit_mask; + union cmdq_crypto_cfg_entry cfg; + int cap_idx; + + cap_idx = cmdq_crypto_cap_find(host, key->crypto_mode, + key->data_unit_size); + + if (!cmdq_is_crypto_enabled(host) || + !cmdq_keyslot_valid(host, slot) || + !cmdq_cap_idx_valid(host, cap_idx)) + return -EINVAL; + + data_unit_mask = get_data_unit_size_mask(key->data_unit_size); + + if (!(data_unit_mask & host->crypto_cap_array[cap_idx].sdus_mask)) + return -EINVAL; + + memset(&cfg, 0, sizeof(cfg)); + cfg.data_unit_size = data_unit_mask; + cfg.crypto_cap_idx = cap_idx; + cfg.config_enable |= CMDQ_CRYPTO_CONFIGURATION_ENABLE; + + err = cmdq_crypto_cfg_entry_write_key(&cfg, key->raw, + host->crypto_cap_array[cap_idx]); + if (err) + return err; + + cmdq_program_key(host, &cfg, slot); + + memzero_explicit(&cfg, sizeof(cfg)); + + return 0; +} + +static int cmdq_crypto_keyslot_evict(struct keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot) +{ + struct cmdq_host *host = keyslot_manager_private(ksm); + + if (!cmdq_is_crypto_enabled(host) || + !cmdq_keyslot_valid(host, slot)) + return -EINVAL; + + /* + * Clear the crypto cfg on the device. Clearing CFGE + * might not be sufficient, so just clear the entire cfg. 
+ */ + cmdq_crypto_clear_keyslot(host, slot); + + return 0; +} + +/* Functions implementing eMMC v5.2 specification behaviour */ +void cmdq_crypto_enable_spec(struct cmdq_host *host) +{ + if (!cmdq_host_is_crypto_supported(host)) + return; + + host->caps |= CMDQ_CAP_CRYPTO_SUPPORT; +} +EXPORT_SYMBOL(cmdq_crypto_enable_spec); + +void cmdq_crypto_disable_spec(struct cmdq_host *host) +{ + host->caps &= ~CMDQ_CAP_CRYPTO_SUPPORT; +} +EXPORT_SYMBOL(cmdq_crypto_disable_spec); + +static const struct keyslot_mgmt_ll_ops cmdq_ksm_ops = { + .keyslot_program = cmdq_crypto_keyslot_program, + .keyslot_evict = cmdq_crypto_keyslot_evict, +}; + +enum blk_crypto_mode_num cmdq_crypto_blk_crypto_mode_num_for_alg_dusize( + enum cmdq_crypto_alg cmdq_crypto_alg, + enum cmdq_crypto_key_size key_size) +{ + /* + * Currently the only mode that eMMC and blk-crypto both support. + */ + if (cmdq_crypto_alg == CMDQ_CRYPTO_ALG_AES_XTS && + key_size == CMDQ_CRYPTO_KEY_SIZE_256) + return BLK_ENCRYPTION_MODE_AES_256_XTS; + + return BLK_ENCRYPTION_MODE_INVALID; +} + +/** + * cmdq_host_init_crypto - Read crypto capabilities, init crypto fields in host + * @host: Per adapter instance + * + * Returns 0 on success. Returns -ENODEV if such capabilities don't exist, and + * -ENOMEM upon OOM. + */ +int cmdq_host_init_crypto_spec(struct cmdq_host *host, + const struct keyslot_mgmt_ll_ops *ksm_ops) +{ + int cap_idx = 0; + int err = 0; + unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX]; + enum blk_crypto_mode_num blk_mode_num; + + /* Default to disabling crypto */ + host->caps &= ~CMDQ_CAP_CRYPTO_SUPPORT; + + if (!(cmdq_readl(host, CQCAP) & CQ_CAP_CS)) { + pr_err("%s no crypto capability\n", __func__); + err = -ENODEV; + goto out; + } + + /* + * Crypto Capabilities should never be 0, because the + * config_array_ptr > 04h. So we use a 0 value to indicate that + * crypto init failed, and can't be enabled. 
+ */ + host->crypto_capabilities.reg_val = cmdq_readl(host, CQ_CCAP); + host->crypto_cfg_register = + (u32)host->crypto_capabilities.config_array_ptr * 0x100; + host->crypto_cap_array = + devm_kcalloc(mmc_dev(host->mmc), + host->crypto_capabilities.num_crypto_cap, + sizeof(host->crypto_cap_array[0]), GFP_KERNEL); + if (!host->crypto_cap_array) { + err = -ENOMEM; + pr_err("%s no memory cap\n", __func__); + goto out; + } + + memset(crypto_modes_supported, 0, sizeof(crypto_modes_supported)); + + /* + * Store all the capabilities now so that we don't need to repeatedly + * access the device each time we want to know its capabilities + */ + for (cap_idx = 0; cap_idx < host->crypto_capabilities.num_crypto_cap; + cap_idx++) { + host->crypto_cap_array[cap_idx].reg_val = + cpu_to_le32(cmdq_readl(host, + CQ_CRYPTOCAP + + cap_idx * sizeof(__le32))); + blk_mode_num = cmdq_crypto_blk_crypto_mode_num_for_alg_dusize( + host->crypto_cap_array[cap_idx].algorithm_id, + host->crypto_cap_array[cap_idx].key_size); + if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID) + continue; + crypto_modes_supported[blk_mode_num] |= + host->crypto_cap_array[cap_idx].sdus_mask * 512; + } + + cmdq_crypto_clear_all_keyslots(host); + + host->ksm = keyslot_manager_create(cmdq_num_keyslots(host), ksm_ops, + BLK_CRYPTO_FEATURE_STANDARD_KEYS, + crypto_modes_supported, host); + + if (!host->ksm) { + err = -ENOMEM; + goto out_free_caps; + } + /* + * In case host controller supports cryptographic operations + * then, it uses 128bit task descriptor. Upper 64 bits of task + * descriptor would be used to pass crypto specific informaton. + */ + host->caps |= CMDQ_TASK_DESC_SZ_128; + + return 0; +out_free_caps: + devm_kfree(mmc_dev(host->mmc), host->crypto_cap_array); +out: + // TODO: print error? 
+ /* Indicate that init failed by setting crypto_capabilities to 0 */ + host->crypto_capabilities.reg_val = 0; + return err; +} +EXPORT_SYMBOL(cmdq_host_init_crypto_spec); + +void cmdq_crypto_setup_rq_keyslot_manager_spec(struct cmdq_host *host, + struct request_queue *q) +{ + if (!cmdq_host_is_crypto_supported(host) || !q) + return; + + q->ksm = host->ksm; +} +EXPORT_SYMBOL(cmdq_crypto_setup_rq_keyslot_manager_spec); + +void cmdq_crypto_destroy_rq_keyslot_manager_spec(struct cmdq_host *host, + struct request_queue *q) +{ + keyslot_manager_destroy(host->ksm); +} +EXPORT_SYMBOL(cmdq_crypto_destroy_rq_keyslot_manager_spec); + +int cmdq_prepare_crypto_desc_spec(struct cmdq_host *host, + struct mmc_request *mrq, + u64 *ice_ctx) +{ + struct bio_crypt_ctx *bc; + struct request *req = mrq->req; + + if (!req->bio || + !bio_crypt_should_process(req)) { + *ice_ctx = 0; + return 0; + } + if (WARN_ON(!cmdq_is_crypto_enabled(host))) { + /* + * Upper layer asked us to do inline encryption + * but that isn't enabled, so we fail this request. 
+ */ + return -EINVAL; + } + + bc = req->bio->bi_crypt_context; + + if (!cmdq_keyslot_valid(host, bc->bc_keyslot)) + return -EINVAL; + + if (ice_ctx) { + *ice_ctx = DATA_UNIT_NUM(bc->bc_dun[0]) | + CRYPTO_CONFIG_INDEX(bc->bc_keyslot) | + CRYPTO_ENABLE(true); + } + + return 0; +} +EXPORT_SYMBOL(cmdq_prepare_crypto_desc_spec); + +/* Crypto Variant Ops Support */ + +void cmdq_crypto_enable(struct cmdq_host *host) +{ + if (host->crypto_vops && host->crypto_vops->enable) + return host->crypto_vops->enable(host); + + return cmdq_crypto_enable_spec(host); +} + +void cmdq_crypto_disable(struct cmdq_host *host) +{ + if (host->crypto_vops && host->crypto_vops->disable) + return host->crypto_vops->disable(host); + + return cmdq_crypto_disable_spec(host); +} + +int cmdq_host_init_crypto(struct cmdq_host *host) +{ + if (host->crypto_vops && host->crypto_vops->host_init_crypto) + return host->crypto_vops->host_init_crypto(host, + &cmdq_ksm_ops); + + return cmdq_host_init_crypto_spec(host, &cmdq_ksm_ops); +} + +void cmdq_crypto_setup_rq_keyslot_manager(struct cmdq_host *host, + struct request_queue *q) +{ + if (host->crypto_vops && host->crypto_vops->setup_rq_keyslot_manager) + return host->crypto_vops->setup_rq_keyslot_manager(host, q); + + return cmdq_crypto_setup_rq_keyslot_manager_spec(host, q); +} + +void cmdq_crypto_destroy_rq_keyslot_manager(struct cmdq_host *host, + struct request_queue *q) +{ + if (host->crypto_vops && host->crypto_vops->destroy_rq_keyslot_manager) + return host->crypto_vops->destroy_rq_keyslot_manager(host, q); + + return cmdq_crypto_destroy_rq_keyslot_manager_spec(host, q); +} + +int cmdq_crypto_get_ctx(struct cmdq_host *host, + struct mmc_request *mrq, + u64 *ice_ctx) +{ + if (host->crypto_vops && host->crypto_vops->prepare_crypto_desc) + return host->crypto_vops->prepare_crypto_desc(host, mrq, + ice_ctx); + + return cmdq_prepare_crypto_desc_spec(host, mrq, ice_ctx); +} + +int cmdq_complete_crypto_desc(struct cmdq_host *host, + struct mmc_request 
*mrq, + u64 *ice_ctx) +{ + if (host->crypto_vops && host->crypto_vops->complete_crypto_desc) + return host->crypto_vops->complete_crypto_desc(host, mrq, + ice_ctx); + + return 0; +} + +void cmdq_crypto_debug(struct cmdq_host *host) +{ + if (host->crypto_vops && host->crypto_vops->debug) + host->crypto_vops->debug(host); +} + +void cmdq_crypto_set_vops(struct cmdq_host *host, + struct cmdq_host_crypto_variant_ops *crypto_vops) +{ + if (host) + host->crypto_vops = crypto_vops; +} + +int cmdq_crypto_suspend(struct cmdq_host *host) +{ + if (host->crypto_vops && host->crypto_vops->suspend) + return host->crypto_vops->suspend(host); + + return 0; +} + +int cmdq_crypto_resume(struct cmdq_host *host) +{ + if (host->crypto_vops && host->crypto_vops->resume) + return host->crypto_vops->resume(host); + + return 0; +} + +int cmdq_crypto_reset(struct cmdq_host *host) +{ + if (host->crypto_vops && host->crypto_vops->reset) + return host->crypto_vops->reset(host); + + return 0; +} + +int cmdq_crypto_recovery_finish(struct cmdq_host *host) +{ + if (host->crypto_vops && host->crypto_vops->recovery_finish) + return host->crypto_vops->recovery_finish(host); + + /* Reset/Recovery might clear all keys, so reprogram all the keys. */ + keyslot_manager_reprogram_all_keys(host->ksm); + + return 0; +} diff --git a/drivers/mmc/host/cmdq_hci-crypto.h b/drivers/mmc/host/cmdq_hci-crypto.h new file mode 100644 index 000000000000..8fb44d1eff8b --- /dev/null +++ b/drivers/mmc/host/cmdq_hci-crypto.h @@ -0,0 +1,188 @@ +/* Copyright 2019 Google LLC + * + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CMDQ_CRYPTO_H +#define _CMDQ_CRYPTO_H + +#ifdef CONFIG_MMC_CQ_HCI_CRYPTO +#include +#include "cmdq_hci.h" + +static inline int cmdq_num_keyslots(struct cmdq_host *host) +{ + return host->crypto_capabilities.config_count + 1; +} + +static inline bool cmdq_keyslot_valid(struct cmdq_host *host, + unsigned int slot) +{ + /* + * The actual number of configurations supported is (CFGC+1), so slot + * numbers range from 0 to config_count inclusive. + */ + return slot < cmdq_num_keyslots(host); +} + +static inline bool cmdq_host_is_crypto_supported(struct cmdq_host *host) +{ + return host->crypto_capabilities.reg_val != 0; +} + +static inline bool cmdq_is_crypto_enabled(struct cmdq_host *host) +{ + return host->caps & CMDQ_CAP_CRYPTO_SUPPORT; +} + +/* Functions implementing eMMC v5.2 specification behaviour */ +int cmdq_prepare_crypto_desc_spec(struct cmdq_host *host, + struct mmc_request *mrq, + u64 *ice_ctx); + +void cmdq_crypto_enable_spec(struct cmdq_host *host); + +void cmdq_crypto_disable_spec(struct cmdq_host *host); + +int cmdq_host_init_crypto_spec(struct cmdq_host *host, + const struct keyslot_mgmt_ll_ops *ksm_ops); + +void cmdq_crypto_setup_rq_keyslot_manager_spec(struct cmdq_host *host, + struct request_queue *q); + +void cmdq_crypto_destroy_rq_keyslot_manager_spec(struct cmdq_host *host, + struct request_queue *q); + +void cmdq_crypto_set_vops(struct cmdq_host *host, + struct cmdq_host_crypto_variant_ops *crypto_vops); + +/* Crypto Variant Ops Support */ + +void cmdq_crypto_enable(struct cmdq_host *host); + +void cmdq_crypto_disable(struct cmdq_host *host); + +int cmdq_host_init_crypto(struct cmdq_host *host); + +void cmdq_crypto_setup_rq_keyslot_manager(struct cmdq_host *host, + struct 
request_queue *q); + +void cmdq_crypto_destroy_rq_keyslot_manager(struct cmdq_host *host, + struct request_queue *q); + +int cmdq_crypto_get_ctx(struct cmdq_host *host, + struct mmc_request *mrq, + u64 *ice_ctx); + +int cmdq_complete_crypto_desc(struct cmdq_host *host, + struct mmc_request *mrq, + u64 *ice_ctx); + +void cmdq_crypto_debug(struct cmdq_host *host); + +int cmdq_crypto_suspend(struct cmdq_host *host); + +int cmdq_crypto_resume(struct cmdq_host *host); + +int cmdq_crypto_reset(struct cmdq_host *host); + +int cmdq_crypto_recovery_finish(struct cmdq_host *host); + +int cmdq_crypto_cap_find(void *host_p, enum blk_crypto_mode_num crypto_mode, + unsigned int data_unit_size); + +#else /* CONFIG_MMC_CQ_HCI_CRYPTO */ + +static inline bool cmdq_keyslot_valid(struct cmdq_host *host, + unsigned int slot) +{ + return false; +} + +static inline bool cmdq_host_is_crypto_supported(struct cmdq_host *host) +{ + return false; +} + +static inline bool cmdq_is_crypto_enabled(struct cmdq_host *host) +{ + return false; +} + +static inline void cmdq_crypto_enable(struct cmdq_host *host) { } + +static inline int cmdq_crypto_cap_find(void *host_p, + enum blk_crypto_mode_num crypto_mode, + unsigned int data_unit_size) +{ + return 0; +} + +static inline void cmdq_crypto_disable(struct cmdq_host *host) { } + +static inline int cmdq_host_init_crypto(struct cmdq_host *host) +{ + return 0; +} + +static inline void cmdq_crypto_setup_rq_keyslot_manager( + struct cmdq_host *host, + struct request_queue *q) { } + +static inline void +cmdq_crypto_destroy_rq_keyslot_manager(struct cmdq_host *host, + struct request_queue *q) { } + +static inline int cmdq_crypto_get_ctx(struct cmdq_host *host, + struct mmc_request *mrq, + u64 *ice_ctx) +{ + *ice_ctx = 0; + return 0; +} + +static inline int cmdq_complete_crypto_desc(struct cmdq_host *host, + struct mmc_request *mrq, + u64 *ice_ctx) +{ + return 0; +} + +static inline void cmdq_crypto_debug(struct cmdq_host *host) { } + +static inline void 
cmdq_crypto_set_vops(struct cmdq_host *host, + struct cmdq_host_crypto_variant_ops *crypto_vops) { } + +static inline int cmdq_crypto_suspend(struct cmdq_host *host) +{ + return 0; +} + +static inline int cmdq_crypto_resume(struct cmdq_host *host) +{ + return 0; +} + +static inline int cmdq_crypto_reset(struct cmdq_host *host) +{ + return 0; +} + +static inline int cmdq_crypto_recovery_finish(struct cmdq_host *host) +{ + return 0; +} + +#endif /* CONFIG_MMC_CMDQ_CRYPTO */ +#endif /* _CMDQ_CRYPTO_H */ + + diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h index 56932b7f42f0..65828d8026dc 100644 --- a/drivers/mmc/host/cmdq_hci.h +++ b/drivers/mmc/host/cmdq_hci.h @@ -13,6 +13,7 @@ #define LINUX_MMC_CQ_HCI_H #include #include +#include /* registers */ /* version */ @@ -155,6 +156,8 @@ #define CQ_VENDOR_CFG 0x100 #define CMDQ_SEND_STATUS_TRIGGER (1 << 31) +struct cmdq_host; + /* CCAP - Crypto Capability 100h */ union cmdq_crypto_capabilities { __le32 reg_val; @@ -211,6 +214,31 @@ union cmdq_crypto_cfg_entry { }; }; +struct cmdq_host_crypto_variant_ops { + void (*setup_rq_keyslot_manager)(struct cmdq_host *host, + struct request_queue *q); + void (*destroy_rq_keyslot_manager)(struct cmdq_host *host, + struct request_queue *q); +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + int (*host_init_crypto)(struct cmdq_host *host, + const struct keyslot_mgmt_ll_ops *ksm_ops); +#endif + void (*enable)(struct cmdq_host *host); + void (*disable)(struct cmdq_host *host); + int (*suspend)(struct cmdq_host *host); + int (*resume)(struct cmdq_host *host); + int (*debug)(struct cmdq_host *host); + int (*prepare_crypto_desc)(struct cmdq_host *host, + struct mmc_request *mrq, u64 *ice_ctx); + int (*complete_crypto_desc)(struct cmdq_host *host, + struct mmc_request *mrq, u64 *ice_ctx); + int (*reset)(struct cmdq_host *host); + int (*recovery_finish)(struct cmdq_host *host); + int (*program_key)(struct cmdq_host *host, + const union cmdq_crypto_cfg_entry *cfg, int slot); + void 
*priv; +}; + struct task_history { u64 task; bool is_dcmd; From a058c82783f224cb476b32a0be6460740d047ddc Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Thu, 23 Jul 2020 11:34:06 +0530 Subject: [PATCH 040/141] mmc: cqhci: Add inline crypto support to cqhci Wire up cqhci.c with the eMMC Crypto API and support for block layer inline encryption additions and the keyslot manager. Change-Id: I0fb63a3d7601a8979386ecf1c65bc5c44369429e Signed-off-by: Neeraj Soni --- drivers/mmc/host/cmdq_hci.c | 68 ++++++++++++++++++++++++++++++++++++- drivers/mmc/host/cmdq_hci.h | 20 +++++++++++ 2 files changed, 87 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c index f1e4ba86f5d4..a7b7597d96a3 100644 --- a/drivers/mmc/host/cmdq_hci.c +++ b/drivers/mmc/host/cmdq_hci.c @@ -29,8 +29,10 @@ #include #include "cmdq_hci.h" +#include "cmdq_hci-crypto.h" #include "sdhci.h" #include "sdhci-msm.h" +#include "../core/queue.h" #define DCMD_SLOT 31 #define NUM_SLOTS 32 @@ -277,6 +279,8 @@ static void cmdq_dumpregs(struct cmdq_host *cq_host) cmdq_readl(cq_host, CQ_VENDOR_CFG + offset)); pr_err(DRV_NAME ": ===========================================\n"); + cmdq_crypto_debug(cq_host); + cmdq_dump_task_history(cq_host); if (cq_host->ops->dump_vendor_regs) cq_host->ops->dump_vendor_regs(mmc); @@ -404,6 +408,11 @@ static int cmdq_enable(struct mmc_host *mmc) cqcfg = ((cq_host->caps & CMDQ_TASK_DESC_SZ_128 ? CQ_TASK_DESC_SZ : 0) | (dcmd_enable ? 
CQ_DCMD : 0)); + if (cmdq_host_is_crypto_supported(cq_host)) { + cmdq_crypto_enable(cq_host); + cqcfg |= CQ_ICE_ENABLE; + } + cmdq_writel(cq_host, cqcfg, CQCFG); /* enable CQ_HOST */ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCFG) | CQ_ENABLE, @@ -473,6 +482,9 @@ static void cmdq_disable_nosync(struct mmc_host *mmc, bool soft) { struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc); + if (cmdq_host_is_crypto_supported(cq_host)) + cmdq_crypto_disable(cq_host); + if (soft) { cmdq_writel(cq_host, cmdq_readl( cq_host, CQCFG) & ~(CQ_ENABLE), @@ -512,6 +524,8 @@ static void cmdq_reset(struct mmc_host *mmc, bool soft) cmdq_disable(mmc, true); + cmdq_crypto_reset(cq_host); + if (cq_host->ops->reset) { ret = cq_host->ops->reset(mmc); if (ret) { @@ -541,6 +555,29 @@ static void cmdq_reset(struct mmc_host *mmc, bool soft) mmc_host_clr_cq_disable(mmc); } +static inline void cmdq_prep_crypto_desc(struct cmdq_host *cq_host, + u64 *task_desc, u64 ice_ctx) +{ + u64 *ice_desc = NULL; + + if (cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) { + /* + * Get the address of ice context for the given task descriptor. 
+ * ice context is present in the upper 64bits of task descriptor + * ice_conext_base_address = task_desc + 8-bytes + */ + ice_desc = (u64 *)((u8 *)task_desc + + CQ_TASK_DESC_ICE_PARAM_OFFSET); + memset(ice_desc, 0, CQ_TASK_DESC_ICE_PARAMS_SIZE); + + /* + * Assign upper 64bits data of task descritor with ice context + */ + if (ice_ctx) + *ice_desc = ice_ctx; + } +} + static void cmdq_prep_task_desc(struct mmc_request *mrq, u64 *data, bool intr, bool qbr) { @@ -742,6 +779,7 @@ static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq) u32 tag = mrq->cmdq_req->tag; struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc); struct sdhci_host *host = mmc_priv(mmc); + u64 ice_ctx = 0; if (!cq_host->enabled) { pr_err("%s: CMDQ host not enabled yet !!!\n", @@ -760,12 +798,22 @@ static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq) goto ring_doorbell; } + err = cmdq_crypto_get_ctx(cq_host, mrq, &ice_ctx); + if (err) { + mmc->err_stats[MMC_ERR_ICE_CFG]++; + pr_err("%s: failed to retrieve crypto ctx for tag %d\n", + mmc_hostname(mmc), tag); + goto ice_err; + } + task_desc = (__le64 __force *)get_desc(cq_host, tag); cmdq_prep_task_desc(mrq, &data, 1, (mrq->cmdq_req->cmdq_req_flags & QBR)); *task_desc = cpu_to_le64(data); + cmdq_prep_crypto_desc(cq_host, task_desc, ice_ctx); + cmdq_log_task_desc_history(cq_host, *task_desc, false); err = cmdq_prep_tran_desc(mrq, cq_host, tag); @@ -792,8 +840,12 @@ ring_doorbell: /* Commit the doorbell write immediately */ wmb(); + return err; + +ice_err: if (err) cmdq_runtime_pm_put(cq_host); + out: return err; } @@ -810,6 +862,8 @@ static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag) if (tag == cq_host->dcmd_slot) mrq->cmd->resp[0] = cmdq_readl(cq_host, CQCRDCT); + cmdq_complete_crypto_desc(cq_host, mrq, NULL); + if (mrq->cmdq_req->cmdq_req_flags & DCMD) cmdq_writel(cq_host, cmdq_readl(cq_host, CQ_VENDOR_CFG + offset) | @@ -1280,7 +1334,15 @@ static int cmdq_late_init(struct mmc_host 
*mmc) static void cqhci_crypto_update_queue(struct mmc_host *mmc, struct request_queue *queue) { - //struct cqhci_host *cq_host = mmc->cqe_private; + struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc); + + if (cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) { + if (queue) + cmdq_crypto_setup_rq_keyslot_manager(cq_host, queue); + else + pr_err("%s can not register keyslot manager\n", + __func__); + } } static const struct mmc_cmdq_host_ops cmdq_host_ops = { @@ -1347,6 +1409,10 @@ int cmdq_init(struct cmdq_host *cq_host, struct mmc_host *mmc, if (!cq_host->mrq_slot) return -ENOMEM; + err = cmdq_host_init_crypto(cq_host); + if (err) + pr_err("%s: CMDQ Crypto init failed err %d\n", err); + init_completion(&cq_host->halt_comp); return err; } diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h index 65828d8026dc..79e7acc69e6a 100644 --- a/drivers/mmc/host/cmdq_hci.h +++ b/drivers/mmc/host/cmdq_hci.h @@ -28,6 +28,7 @@ #define CQ_DCMD 0x00001000 #define CQ_TASK_DESC_SZ 0x00000100 #define CQ_ENABLE 0x00000001 +#define CQ_ICE_ENABLE 0x00000002 /* control */ #define CQCTL 0x0C @@ -147,6 +148,14 @@ #define DAT_LENGTH(x) ((x & 0xFFFF) << 16) #define DAT_ADDR_LO(x) ((x & 0xFFFFFFFF) << 32) #define DAT_ADDR_HI(x) ((x & 0xFFFFFFFF) << 0) +#define DATA_UNIT_NUM(x) (((u64)(x) & 0xFFFFFFFF) << 0) +#define CRYPTO_CONFIG_INDEX(x) (((u64)(x) & 0xFF) << 32) +#define CRYPTO_ENABLE(x) (((u64)(x) & 0x1) << 47) + +/* ICE context is present in the upper 64bits of task descriptor */ +#define CQ_TASK_DESC_ICE_PARAM_OFFSET 8 +/* ICE descriptor size */ +#define CQ_TASK_DESC_ICE_PARAMS_SIZE 8 /* * Add new macro for updated CQ vendor specific @@ -247,6 +256,7 @@ struct task_history { struct cmdq_host { const struct cmdq_host_ops *ops; void __iomem *mmio; + void __iomem *icemmio; struct mmc_host *mmc; /* 64 bit DMA */ @@ -256,6 +266,7 @@ struct cmdq_host { u32 dcmd_slot; u32 caps; #define CMDQ_TASK_DESC_SZ_128 0x1 +#define CMDQ_CAP_CRYPTO_SUPPORT 0x2 u32 quirks; 
#define CMDQ_QUIRK_SHORT_TXFR_DESC_SZ 0x1 @@ -290,6 +301,15 @@ struct cmdq_host { struct completion halt_comp; struct mmc_request **mrq_slot; void *private; + const struct cmdq_host_crypto_variant_ops *crypto_vops; +#ifdef CONFIG_MMC_CQ_HCI_CRYPTO + union cmdq_crypto_capabilities crypto_capabilities; + union cmdq_crypto_cap_entry *crypto_cap_array; + u32 crypto_cfg_register; +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + struct keyslot_manager *ksm; +#endif /* CONFIG_BLK_INLINE_ENCRYPTION */ +#endif /* CONFIG_SCSI_CQHCI_CRYPTO */ }; struct cmdq_host_ops { From 8e301b525543c67c6e4710e7334e33acc649ef22 Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Thu, 9 Apr 2020 17:14:37 +0530 Subject: [PATCH 041/141] mmc: host: Add variant ops for cqhci crypto QTI implementation for block keyslot manager and crypto vops for crypto support in CQHCI. Change-Id: I9b64f85ca97c269a6ecd6fde2bb693745d4c43d4 Signed-off-by: Neeraj Soni --- drivers/mmc/host/Kconfig | 8 + drivers/mmc/host/Makefile | 1 + drivers/mmc/host/cmdq_hci-crypto-qti.c | 304 +++++++++++++++++++++++++ drivers/mmc/host/cmdq_hci-crypto-qti.h | 33 +++ drivers/mmc/host/sdhci-msm.c | 8 + 5 files changed, 354 insertions(+) create mode 100644 drivers/mmc/host/cmdq_hci-crypto-qti.c create mode 100644 drivers/mmc/host/cmdq_hci-crypto-qti.h diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 1919fbaffc8d..f361eed35180 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -922,3 +922,11 @@ config MMC_CQ_HCI_CRYPTO Enabling this makes it possible for the kernel to use the crypto capabilities of the CQHCI device (if present) to perform crypto operations on data being transferred to/from the device. + +config MMC_CQ_HCI_CRYPTO_QTI + bool "Vendor specific CQHCI Crypto Engine Support" + depends on MMC_CQ_HCI_CRYPTO + help + Enable Vendor Crypto Engine Support in CQHCI + Enabling this allows kernel to use CQHCI crypto operations defined + and implemented by QTI. 
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 7ab3a706bd38..3b2f1dd243c3 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -92,6 +92,7 @@ obj-$(CONFIG_MMC_CQ_HCI) += cmdq_hci.o obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o obj-$(CONFIG_MMC_CQ_HCI_CRYPTO) += cmdq_hci-crypto.o +obj-$(CONFIG_MMC_CQ_HCI_CRYPTO_QTI) += cmdq_hci-crypto-qti.o ifeq ($(CONFIG_CB710_DEBUG),y) CFLAGS-cb710-mmc += -DDEBUG diff --git a/drivers/mmc/host/cmdq_hci-crypto-qti.c b/drivers/mmc/host/cmdq_hci-crypto-qti.c new file mode 100644 index 000000000000..9921a14c9cef --- /dev/null +++ b/drivers/mmc/host/cmdq_hci-crypto-qti.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020, Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include "sdhci.h" +#include "sdhci-pltfm.h" +#include "sdhci-msm.h" +#include "cmdq_hci-crypto-qti.h" +#include + +#define RAW_SECRET_SIZE 32 +#define MINIMUM_DUN_SIZE 512 +#define MAXIMUM_DUN_SIZE 65536 + +static struct cmdq_host_crypto_variant_ops cmdq_crypto_qti_variant_ops = { + .host_init_crypto = cmdq_crypto_qti_init_crypto, + .enable = cmdq_crypto_qti_enable, + .disable = cmdq_crypto_qti_disable, + .resume = cmdq_crypto_qti_resume, + .debug = cmdq_crypto_qti_debug, +}; + +static bool ice_cap_idx_valid(struct cmdq_host *host, + unsigned int cap_idx) +{ + return cap_idx < host->crypto_capabilities.num_crypto_cap; +} + +static uint8_t get_data_unit_size_mask(unsigned int data_unit_size) +{ + if (data_unit_size < MINIMUM_DUN_SIZE || + data_unit_size > MAXIMUM_DUN_SIZE || + !is_power_of_2(data_unit_size)) + return 0; + + return data_unit_size / MINIMUM_DUN_SIZE; +} + + +void cmdq_crypto_qti_enable(struct cmdq_host *host) +{ + int err = 0; + + if (!cmdq_host_is_crypto_supported(host)) + return; + + host->caps |= CMDQ_CAP_CRYPTO_SUPPORT; + + err = crypto_qti_enable(host->crypto_vops->priv); + if (err) { + pr_err("%s: Error enabling crypto, err %d\n", + __func__, err); + cmdq_crypto_qti_disable(host); + } +} + +void cmdq_crypto_qti_disable(struct cmdq_host *host) +{ + /* cmdq_crypto_disable_spec(host) and + * crypto_qti_disable(host->crypto_vops->priv) + * are needed here? 
+ */ +} + +static int cmdq_crypto_qti_keyslot_program(struct keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot) +{ + struct cmdq_host *host = keyslot_manager_private(ksm); + int err = 0; + u8 data_unit_mask; + int crypto_alg_id; + + crypto_alg_id = cmdq_crypto_cap_find(host, key->crypto_mode, + key->data_unit_size); + + if (!cmdq_is_crypto_enabled(host) || + !cmdq_keyslot_valid(host, slot) || + !ice_cap_idx_valid(host, crypto_alg_id)) { + return -EINVAL; + } + + data_unit_mask = get_data_unit_size_mask(key->data_unit_size); + + if (!(data_unit_mask & + host->crypto_cap_array[crypto_alg_id].sdus_mask)) { + return -EINVAL; + } + + err = crypto_qti_keyslot_program(host->crypto_vops->priv, key, + slot, data_unit_mask, crypto_alg_id); + if (err) + pr_err("%s: failed with error %d\n", __func__, err); + + return err; +} + +static int cmdq_crypto_qti_keyslot_evict(struct keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot) +{ + int err = 0; + struct cmdq_host *host = keyslot_manager_private(ksm); + + if (!cmdq_is_crypto_enabled(host) || + !cmdq_keyslot_valid(host, slot)) + return -EINVAL; + + err = crypto_qti_keyslot_evict(host->crypto_vops->priv, slot); + if (err) + pr_err("%s: failed with error %d\n", __func__, err); + + return err; +} + +static int cmdq_crypto_qti_derive_raw_secret(struct keyslot_manager *ksm, + const u8 *wrapped_key, unsigned int wrapped_key_size, + u8 *secret, unsigned int secret_size) +{ + int err = 0; + + if (wrapped_key_size <= RAW_SECRET_SIZE) { + pr_err("%s: Invalid wrapped_key_size: %u\n", __func__, + wrapped_key_size); + err = -EINVAL; + return err; + } + if (secret_size != RAW_SECRET_SIZE) { + pr_err("%s: Invalid secret size: %u\n", __func__, secret_size); + err = -EINVAL; + return err; + } + memcpy(secret, wrapped_key, secret_size); + return 0; +} + +static const struct keyslot_mgmt_ll_ops cmdq_crypto_qti_ksm_ops = { + .keyslot_program = cmdq_crypto_qti_keyslot_program, + .keyslot_evict = 
cmdq_crypto_qti_keyslot_evict, + .derive_raw_secret = cmdq_crypto_qti_derive_raw_secret +}; + +enum blk_crypto_mode_num cmdq_blk_crypto_qti_mode_num_for_alg_dusize( + enum cmdq_crypto_alg cmdq_crypto_alg, + enum cmdq_crypto_key_size key_size) +{ + /* + * Currently the only mode that eMMC and blk-crypto both support. + */ + if (cmdq_crypto_alg == CMDQ_CRYPTO_ALG_AES_XTS && + key_size == CMDQ_CRYPTO_KEY_SIZE_256) + return BLK_ENCRYPTION_MODE_AES_256_XTS; + + return BLK_ENCRYPTION_MODE_INVALID; +} + +int cmdq_host_init_crypto_qti_spec(struct cmdq_host *host, + const struct keyslot_mgmt_ll_ops *ksm_ops) +{ + int cap_idx = 0; + int err = 0; + unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX]; + enum blk_crypto_mode_num blk_mode_num; + + /* Default to disabling crypto */ + host->caps &= ~CMDQ_CAP_CRYPTO_SUPPORT; + + if (!(cmdq_readl(host, CQCAP) & CQ_CAP_CS)) { + pr_debug("%s no crypto capability\n", __func__); + err = -ENODEV; + goto out; + } + + /* + * Crypto Capabilities should never be 0, because the + * config_array_ptr > 04h. So we use a 0 value to indicate that + * crypto init failed, and can't be enabled. 
+ */ + host->crypto_capabilities.reg_val = cmdq_readl(host, CQ_CCAP); + host->crypto_cfg_register = + (u32)host->crypto_capabilities.config_array_ptr * 0x100; + host->crypto_cap_array = + devm_kcalloc(mmc_dev(host->mmc), + host->crypto_capabilities.num_crypto_cap, + sizeof(host->crypto_cap_array[0]), GFP_KERNEL); + if (!host->crypto_cap_array) { + err = -ENOMEM; + pr_err("%s failed to allocate memory\n", __func__); + goto out; + } + + memset(crypto_modes_supported, 0, sizeof(crypto_modes_supported)); + + /* + * Store all the capabilities now so that we don't need to repeatedly + * access the device each time we want to know its capabilities + */ + for (cap_idx = 0; cap_idx < host->crypto_capabilities.num_crypto_cap; + cap_idx++) { + host->crypto_cap_array[cap_idx].reg_val = + cpu_to_le32(cmdq_readl(host, + CQ_CRYPTOCAP + + cap_idx * sizeof(__le32))); + blk_mode_num = cmdq_blk_crypto_qti_mode_num_for_alg_dusize( + host->crypto_cap_array[cap_idx].algorithm_id, + host->crypto_cap_array[cap_idx].key_size); + if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID) + continue; + crypto_modes_supported[blk_mode_num] |= + host->crypto_cap_array[cap_idx].sdus_mask * 512; + } + + host->ksm = keyslot_manager_create(cmdq_num_keyslots(host), ksm_ops, + BLK_CRYPTO_FEATURE_STANDARD_KEYS | + BLK_CRYPTO_FEATURE_WRAPPED_KEYS, + crypto_modes_supported, host); + + if (!host->ksm) { + err = -ENOMEM; + goto out; + } + /* + * In case host controller supports cryptographic operations + * then, it uses 128bit task descriptor. Upper 64 bits of task + * descriptor would be used to pass crypto specific informaton. 
+ */ + host->caps |= CMDQ_TASK_DESC_SZ_128; + + return 0; + +out: + /* Indicate that init failed by setting crypto_capabilities to 0 */ + host->crypto_capabilities.reg_val = 0; + return err; +} + +int cmdq_crypto_qti_init_crypto(struct cmdq_host *host, + const struct keyslot_mgmt_ll_ops *ksm_ops) +{ + int err = 0; + struct sdhci_host *sdhci = mmc_priv(host->mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + struct resource *cmdq_ice_memres = NULL; + + cmdq_ice_memres = platform_get_resource_byname(msm_host->pdev, + IORESOURCE_MEM, + "cmdq_ice"); + if (!cmdq_ice_memres) { + pr_debug("%s ICE not supported\n", __func__); + host->icemmio = NULL; + return PTR_ERR(cmdq_ice_memres); + } + + host->icemmio = devm_ioremap(&msm_host->pdev->dev, + cmdq_ice_memres->start, + resource_size(cmdq_ice_memres)); + if (!host->icemmio) { + pr_err("%s failed to remap ice regs\n", __func__); + return PTR_ERR(host->icemmio); + } + + err = cmdq_host_init_crypto_qti_spec(host, &cmdq_crypto_qti_ksm_ops); + if (err) { + pr_err("%s: Error initiating crypto capabilities, err %d\n", + __func__, err); + return err; + } + + err = crypto_qti_init_crypto(&msm_host->pdev->dev, + host->icemmio, (void **)&host->crypto_vops->priv); + if (err) { + pr_err("%s: Error initiating crypto, err %d\n", + __func__, err); + } + return err; +} + +int cmdq_crypto_qti_debug(struct cmdq_host *host) +{ + return crypto_qti_debug(host->crypto_vops->priv); +} + +void cmdq_crypto_qti_set_vops(struct cmdq_host *host) +{ + return cmdq_crypto_set_vops(host, &cmdq_crypto_qti_variant_ops); +} + +int cmdq_crypto_qti_resume(struct cmdq_host *host) +{ + return crypto_qti_resume(host->crypto_vops->priv); +} diff --git a/drivers/mmc/host/cmdq_hci-crypto-qti.h b/drivers/mmc/host/cmdq_hci-crypto-qti.h new file mode 100644 index 000000000000..e63465bca3e2 --- /dev/null +++ b/drivers/mmc/host/cmdq_hci-crypto-qti.h @@ -0,0 +1,33 @@ +/* Copyright (c) 2020, The Linux 
Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CMDQ_HCI_CRYPTO_QTI_H +#define _CMDQ_HCI_CRYPTO_QTI_H + +#include "cmdq_hci-crypto.h" + +void cmdq_crypto_qti_enable(struct cmdq_host *host); + +void cmdq_crypto_qti_disable(struct cmdq_host *host); + +#ifdef CONFIG_BLK_INLINE_ENCRYPTION +int cmdq_crypto_qti_init_crypto(struct cmdq_host *host, + const struct keyslot_mgmt_ll_ops *ksm_ops); +#endif + +int cmdq_crypto_qti_debug(struct cmdq_host *host); + +void cmdq_crypto_qti_set_vops(struct cmdq_host *host); + +int cmdq_crypto_qti_resume(struct cmdq_host *host); + +#endif /* _CMDQ_HCI_CRYPTO_QTI_H */ diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index ed15b5bc8018..9e9fb3afb0ec 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -43,6 +43,7 @@ #include "sdhci-msm.h" #include "cmdq_hci.h" +#include "cmdq_hci-crypto-qti.h" #define QOS_REMOVE_DELAY_MS 10 #define CORE_POWER 0x0 @@ -4644,6 +4645,13 @@ static void sdhci_msm_cmdq_init(struct sdhci_host *host, } else { msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE; } + /* + * Set the vendor specific ops needed for ICE. + * Default implementation if the ops are not set. 
+ */ +#ifdef CONFIG_MMC_CQ_HCI_CRYPTO_QTI + cmdq_crypto_qti_set_vops(host->cq_host); +#endif } #else static void sdhci_msm_cmdq_init(struct sdhci_host *host, From 2f70573ca48451e34f5a7336c25b8628c402e590 Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Thu, 23 Jul 2020 13:25:57 +0530 Subject: [PATCH 042/141] mmc: host: Fix the offset for ICE address ICE address space was added to command queue address space in eMMC JEDEC v5.2 spec so adapt the offset of crypto registers accordingly. Change-Id: I409b031edcf1055289f2868aaaa00adc20eabfec Signed-off-by: Neeraj Soni --- drivers/mmc/host/cmdq_hci.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c index a7b7597d96a3..2d182bcb401f 100644 --- a/drivers/mmc/host/cmdq_hci.c +++ b/drivers/mmc/host/cmdq_hci.c @@ -411,6 +411,11 @@ static int cmdq_enable(struct mmc_host *mmc) if (cmdq_host_is_crypto_supported(cq_host)) { cmdq_crypto_enable(cq_host); cqcfg |= CQ_ICE_ENABLE; + /* For SDHC v5.0 onwards, ICE 3.0 specific registers are added + * in CQ register space, due to which few CQ registers are + * shifted. Set offset_changed boolean to use updated address. + */ + cq_host->offset_changed = true; } cmdq_writel(cq_host, cqcfg, CQCFG); From da172629102626cf266fd37b9f434d34adb2ec56 Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Mon, 3 Feb 2020 23:25:19 +0530 Subject: [PATCH 043/141] fscrypt: support legacy inline crypto mode Add support for legacy inline crypto mode in new v2 FBE framework to make on disk data format compatible to new v2 framework. 
Change-Id: I3c1384604ee8e022db151299850b0dc330b6a17d Signed-off-by: Neeraj Soni --- fs/crypto/crypto.c | 9 ++++++++- fs/crypto/keyring.c | 2 +- fs/crypto/keysetup.c | 6 ++++-- fs/crypto/keysetup_v1.c | 12 ++++++++++++ include/uapi/linux/fscrypt.h | 3 +-- 5 files changed, 26 insertions(+), 6 deletions(-) diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index ed6ea28dbdad..cc8e334165f5 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -72,9 +72,16 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, { u8 flags = fscrypt_policy_flags(&ci->ci_policy); + bool inlinecrypt = false; + +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + inlinecrypt = ci->ci_inlinecrypt; +#endif memset(iv, 0, ci->ci_mode->ivsize); - if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) { + if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 || + ((fscrypt_policy_contents_mode(&ci->ci_policy) == + FSCRYPT_MODE_PRIVATE) && inlinecrypt)) { WARN_ON_ONCE((u32)lblk_num != lblk_num); lblk_num |= (u64)ci->ci_inode->i_ino << 32; } else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index 0081fd48e96f..9257ea1102b1 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -652,7 +652,7 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) goto out_wipe_secret; err = -EINVAL; - if (arg.__flags) + if (arg.__flags & ~__FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) goto out_wipe_secret; break; case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index c6ce78afbf8f..a3626425d633 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -48,9 +48,11 @@ struct fscrypt_mode fscrypt_modes[] = { .blk_crypto_mode = BLK_ENCRYPTION_MODE_ADIANTUM, }, [FSCRYPT_MODE_PRIVATE] = { - .friendly_name = "ICE", - .cipher_str = "bugon", + .friendly_name = "ice", + .cipher_str = "xts(aes)", .keysize = 64, + .ivsize = 16, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS, }, }; diff --git 
a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index 3f7bb48f7317..ac549eeb1444 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -269,6 +269,18 @@ static int setup_v1_file_key_derived(struct fscrypt_info *ci, u8 *derived_key; int err; + /*Support legacy ice based content encryption mode*/ + if ((fscrypt_policy_contents_mode(&ci->ci_policy) == + FSCRYPT_MODE_PRIVATE) && + fscrypt_using_inline_encryption(ci)) { + + err = fscrypt_prepare_inline_crypt_key(&ci->ci_key, + raw_master_key, + ci->ci_mode->keysize, + false, + ci); + return err; + } /* * This cannot be a stack buffer because it will be passed to the * scatterlist crypto API during derive_key_aes(). diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index 1b580ac60f98..b134bfc90912 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -27,9 +27,8 @@ #define FSCRYPT_MODE_AES_128_CBC 5 #define FSCRYPT_MODE_AES_128_CTS 6 #define FSCRYPT_MODE_ADIANTUM 9 -#define __FSCRYPT_MODE_MAX 9 #define FSCRYPT_MODE_PRIVATE 127 - +#define __FSCRYPT_MODE_MAX 127 /* * Legacy policy version; ad-hoc KDF and no key verification. * For new encrypted directories, use fscrypt_policy_v2 instead. From ba3bee4b2734aeb6ec3fcdf72a90fc6f8c95f74e Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Fri, 27 Mar 2020 21:39:02 +0530 Subject: [PATCH 044/141] dm: Support legacy on disk format in dm-default-key Version 1.0.0 did not add iv_offset to dun and did not mandate sector size. This resulted in different on disk data format compared to what version 2.1.0 will support. To support OTA upgrades with legacy data format, adapt the sector size and iv_offset if legacy encryption algorithm is used. Fix compilation issue for block crypto fallback using keyslot manager API. 
Change-Id: I3b7a0279bcb98c3cba9dec3f572c12d618fdc816 Signed-off-by: Neeraj Soni --- block/blk-crypto-fallback.c | 3 +-- drivers/md/dm-default-key.c | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index ad83e1077ba3..18b6851d8301 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -600,8 +600,7 @@ int __init blk_crypto_fallback_init(void) crypto_mode_supported[i] = 0xFFFFFFFF; crypto_mode_supported[BLK_ENCRYPTION_MODE_INVALID] = 0; - blk_crypto_ksm = keyslot_manager_create( - NULL, blk_crypto_num_keyslots, + blk_crypto_ksm = keyslot_manager_create(blk_crypto_num_keyslots, &blk_crypto_ksm_ll_ops, BLK_CRYPTO_FEATURE_STANDARD_KEYS, crypto_mode_supported, NULL); diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c index 3d0bd0645f7a..4b47f25a257e 100644 --- a/drivers/md/dm-default-key.c +++ b/drivers/md/dm-default-key.c @@ -135,6 +135,22 @@ static int default_key_ctr_optional(struct dm_target *ti, return 0; } +void default_key_adjust_sector_size_and_iv(char **argv, struct dm_target *ti, + struct default_key_c **dkc) +{ + struct dm_dev *dev; + + dev = (*dkc)->dev; + + if (!strcmp(argv[0], "AES-256-XTS")) { + if (ti->len & (((*dkc)->sector_size >> SECTOR_SHIFT) - 1)) + (*dkc)->sector_size = SECTOR_SIZE; + + if (dev->bdev->bd_part) + (*dkc)->iv_offset += dev->bdev->bd_part->start_sect; + } +} + /* * Construct a default-key mapping: * @@ -225,6 +241,9 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (err) goto bad; } + + default_key_adjust_sector_size_and_iv(argv, ti, &dkc); + dkc->sector_bits = ilog2(dkc->sector_size); if (ti->len & ((dkc->sector_size >> SECTOR_SHIFT) - 1)) { ti->error = "Device size is not a multiple of sector_size"; From 162c3e7e6bdfa29163ab15fb32e02bdb2ffd71e4 Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Mon, 10 Aug 2020 22:10:35 +0530 Subject: [PATCH 045/141] defconfig: 
Enable new file encryption flags New file encryption architecture and hardware support for it are enabled with these flags. Change-Id: I9be98badf3d6a1d1d41cf13fa984bbe1afce6cab Signed-off-by: Neeraj Soni --- arch/arm64/configs/vendor/atoll-perf_defconfig | 10 ++++++++++ arch/arm64/configs/vendor/atoll_defconfig | 10 ++++++++++ arch/arm64/configs/vendor/sdmsteppe-perf_defconfig | 10 ++++++++++ arch/arm64/configs/vendor/sdmsteppe_defconfig | 10 ++++++++++ arch/arm64/configs/vendor/sm8150-perf_defconfig | 8 ++++++++ arch/arm64/configs/vendor/sm8150_defconfig | 8 ++++++++ 6 files changed, 56 insertions(+) diff --git a/arch/arm64/configs/vendor/atoll-perf_defconfig b/arch/arm64/configs/vendor/atoll-perf_defconfig index 8703fffe9ba7..1298d7b34fba 100644 --- a/arch/arm64/configs/vendor/atoll-perf_defconfig +++ b/arch/arm64/configs/vendor/atoll-perf_defconfig @@ -52,6 +52,8 @@ CONFIG_MODVERSIONS=y CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_ARCH_QCOM=y @@ -280,9 +282,12 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_CRYPTO=y +CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -508,6 +513,8 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y CONFIG_MMC_CQ_HCI=y +CONFIG_MMC_CQ_HCI_CRYPTO=y +CONFIG_MMC_CQ_HCI_CRYPTO_QTI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_QPNP_FLASH_V2=y @@ -636,6 +643,8 @@ CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_QCOM_CDSP_RM=y CONFIG_QCOM_CX_IPEAK=y +CONFIG_QTI_CRYPTO_COMMON=y +CONFIG_QTI_CRYPTO_TZ=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y @@ -677,6 +686,7 @@ CONFIG_EXT4_ENCRYPTION=y CONFIG_F2FS_FS=y 
CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_FS_VERITY=y CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y CONFIG_QUOTA=y diff --git a/arch/arm64/configs/vendor/atoll_defconfig b/arch/arm64/configs/vendor/atoll_defconfig index 35b0bb68edfe..f60a0c9906ed 100644 --- a/arch/arm64/configs/vendor/atoll_defconfig +++ b/arch/arm64/configs/vendor/atoll_defconfig @@ -55,6 +55,8 @@ CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y # CONFIG_BLK_DEV_BSG is not set +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_DEADLINE is not set CONFIG_CFQ_GROUP_IOSCHED=y @@ -291,9 +293,12 @@ CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y +CONFIG_SCSI_UFS_CRYPTO=y +CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -523,6 +528,8 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y CONFIG_MMC_CQ_HCI=y +CONFIG_MMC_CQ_HCI_CRYPTO=y +CONFIG_MMC_CQ_HCI_CRYPTO_QTI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_QPNP_FLASH_V2=y @@ -663,6 +670,8 @@ CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_QCOM_CDSP_RM=y CONFIG_QCOM_CX_IPEAK=y +CONFIG_QTI_CRYPTO_COMMON=y +CONFIG_QTI_CRYPTO_TZ=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y @@ -707,6 +716,7 @@ CONFIG_EXT4_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_FS_VERITY=y CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y CONFIG_QUOTA=y diff --git a/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig b/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig index f06dced29a56..825622c8c55a 100644 --- a/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig @@ -51,6 
+51,8 @@ CONFIG_MODVERSIONS=y CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_ARCH_QCOM=y @@ -274,9 +276,12 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_CRYPTO=y +CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -494,6 +499,8 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y CONFIG_MMC_CQ_HCI=y +CONFIG_MMC_CQ_HCI_CRYPTO=y +CONFIG_MMC_CQ_HCI_CRYPTO_QTI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_QPNP_FLASH_V2=y @@ -615,6 +622,8 @@ CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_QCOM_CDSP_RM=y CONFIG_QCOM_CX_IPEAK=y +CONFIG_QTI_CRYPTO_COMMON=y +CONFIG_QTI_CRYPTO_TZ=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y @@ -655,6 +664,7 @@ CONFIG_EXT4_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_FS_VERITY=y CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y CONFIG_QUOTA=y diff --git a/arch/arm64/configs/vendor/sdmsteppe_defconfig b/arch/arm64/configs/vendor/sdmsteppe_defconfig index e23a67e5fa74..f7c6eefdc4cd 100644 --- a/arch/arm64/configs/vendor/sdmsteppe_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe_defconfig @@ -53,6 +53,8 @@ CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y # CONFIG_BLK_DEV_BSG is not set +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_DEADLINE is not set CONFIG_CFQ_GROUP_IOSCHED=y @@ -285,9 +287,12 @@ CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y +CONFIG_SCSI_UFS_CRYPTO=y +CONFIG_SCSI_UFS_CRYPTO_QTI=y 
CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -518,6 +523,8 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y CONFIG_MMC_CQ_HCI=y +CONFIG_MMC_CQ_HCI_CRYPTO=y +CONFIG_MMC_CQ_HCI_CRYPTO_QTI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_QPNP_FLASH_V2=y @@ -650,6 +657,8 @@ CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_QCOM_CDSP_RM=y CONFIG_QCOM_CX_IPEAK=y +CONFIG_QTI_CRYPTO_COMMON=y +CONFIG_QTI_CRYPTO_TZ=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y @@ -692,6 +701,7 @@ CONFIG_EXT4_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_FS_VERITY=y CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y CONFIG_QUOTA=y diff --git a/arch/arm64/configs/vendor/sm8150-perf_defconfig b/arch/arm64/configs/vendor/sm8150-perf_defconfig index c5444dbd97af..16fcd3c04a7f 100644 --- a/arch/arm64/configs/vendor/sm8150-perf_defconfig +++ b/arch/arm64/configs/vendor/sm8150-perf_defconfig @@ -53,6 +53,8 @@ CONFIG_MODVERSIONS=y CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_ARCH_QCOM=y @@ -285,9 +287,12 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_CRYPTO=y +CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -624,6 +629,8 @@ CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_QCOM_CDSP_RM=y CONFIG_QCOM_AOP_DDR_MESSAGING=y CONFIG_QCOM_AOP_DDRSS_COMMANDS=y +CONFIG_QTI_CRYPTO_COMMON=y +CONFIG_QTI_CRYPTO_TZ=y CONFIG_QCOM_HYP_CORE_CTL=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y @@ -664,6 +671,7 @@ CONFIG_EXT4_ENCRYPTION=y 
CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_FS_VERITY=y CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y CONFIG_QUOTA=y diff --git a/arch/arm64/configs/vendor/sm8150_defconfig b/arch/arm64/configs/vendor/sm8150_defconfig index 53fd6b98411a..3a528fdee801 100644 --- a/arch/arm64/configs/vendor/sm8150_defconfig +++ b/arch/arm64/configs/vendor/sm8150_defconfig @@ -56,6 +56,8 @@ CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y # CONFIG_BLK_DEV_BSG is not set +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_DEADLINE is not set CONFIG_CFQ_GROUP_IOSCHED=y @@ -298,9 +300,12 @@ CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y +CONFIG_SCSI_UFS_CRYPTO=y +CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -652,6 +657,8 @@ CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_QCOM_CDSP_RM=y CONFIG_QCOM_AOP_DDR_MESSAGING=y CONFIG_QCOM_AOP_DDRSS_COMMANDS=y +CONFIG_QTI_CRYPTO_COMMON=y +CONFIG_QTI_CRYPTO_TZ=y CONFIG_QCOM_HYP_CORE_CTL=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y @@ -694,6 +701,7 @@ CONFIG_EXT4_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_FS_VERITY=y CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y CONFIG_QUOTA=y From 4abe03d70b5af292fb823b1f250989510aa99232 Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Fri, 7 Aug 2020 16:31:42 +0530 Subject: [PATCH 046/141] ARM: dts: Make crypto address part of host controller node New file encryption architecture parses crypto base address from host controller node. 
Change-Id: I73d67e82d11611c9d9f75da8fdc83ed10f1efeb2 Signed-off-by: Neeraj Soni --- arch/arm64/boot/dts/qcom/atoll.dtsi | 9 ++++----- arch/arm64/boot/dts/qcom/sdmmagpie.dtsi | 9 ++++----- arch/arm64/boot/dts/qcom/sm6150.dtsi | 9 ++++----- arch/arm64/boot/dts/qcom/sm8150.dtsi | 5 ++--- 4 files changed, 14 insertions(+), 18 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/atoll.dtsi b/arch/arm64/boot/dts/qcom/atoll.dtsi index 3c8b470205c1..4f2f8b1c7b09 100644 --- a/arch/arm64/boot/dts/qcom/atoll.dtsi +++ b/arch/arm64/boot/dts/qcom/atoll.dtsi @@ -2679,13 +2679,12 @@ sdhc_1: sdhci@7c4000 { compatible = "qcom,sdhci-msm-v5"; - reg = <0x7c4000 0x1000>, <0x7c5000 0x1000>; - reg-names = "hc_mem", "cmdq_mem"; + reg = <0x7c4000 0x1000>, <0x7c5000 0x1000>, <0x7c8000 0x8000>; + reg-names = "hc_mem", "cmdq_mem", "cmdq_ice"; interrupts = , ; interrupt-names = "hc_irq", "pwr_irq"; - sdhc-msm-crypto = <&sdcc1_ice>; qcom,bus-width = <8>; qcom,large-address-bus; @@ -2835,11 +2834,11 @@ ufshc_mem: ufshc@1d84000 { compatible = "qcom,ufshc"; - reg = <0x1d84000 0x3000>; + reg = <0x1d84000 0x3000>, <0x1d90000 0x8000>; + reg-names = "ufs_mem", "ufs_ice"; interrupts = <0 265 0>; phys = <&ufsphy_mem>; phy-names = "ufsphy"; - ufs-qcom-crypto = <&ufs_ice>; lanes-per-direction = <1>; dev-ref-clk-freq = <0>; /* 19.2 MHz */ diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi index 2e2abfe4b014..e129213936d6 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi @@ -2121,13 +2121,12 @@ sdhc_1: sdhci@7c4000 { compatible = "qcom,sdhci-msm-v5"; - reg = <0x7c4000 0x1000>, <0x7c5000 0x1000>; - reg-names = "hc_mem", "cmdq_mem"; + reg = <0x7c4000 0x1000>, <0x7c5000 0x1000>, <0x7C8000 0x8000>; + reg-names = "hc_mem", "cmdq_mem", "cmdq_ice"; interrupts = , ; interrupt-names = "hc_irq", "pwr_irq"; - sdhc-msm-crypto = <&sdcc1_ice>; qcom,bus-width = <8>; qcom,large-address-bus; @@ -2339,11 +2338,11 @@ ufshc_mem: 
ufshc@1d84000 { compatible = "qcom,ufshc"; - reg = <0x1d84000 0x3000>; + reg = <0x1d84000 0x3000>, <0x1d90000 0x8000>; + reg-names = "ufs_mem", "ufs_ice"; interrupts = <0 265 0>; phys = <&ufsphy_mem>; phy-names = "ufsphy"; - ufs-qcom-crypto = <&ufs_ice>; lanes-per-direction = <1>; dev-ref-clk-freq = <0>; /* 19.2 MHz */ diff --git a/arch/arm64/boot/dts/qcom/sm6150.dtsi b/arch/arm64/boot/dts/qcom/sm6150.dtsi index 5af542dee2a5..75bb674b5d46 100644 --- a/arch/arm64/boot/dts/qcom/sm6150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150.dtsi @@ -1409,12 +1409,11 @@ sdhc_1: sdhci@7c4000 { compatible = "qcom,sdhci-msm-v5"; - reg = <0x7c4000 0x1000>, <0x7c5000 0x1000>; - reg-names = "hc_mem", "cmdq_mem"; + reg = <0x7c4000 0x1000>, <0x7c5000 0x1000>, <0x7C8000 0x8000>; + reg-names = "hc_mem", "cmdq_mem", "cmdq_ice"; interrupts = <0 641 0>, <0 644 0>; interrupt-names = "hc_irq", "pwr_irq"; - sdhc-msm-crypto = <&sdcc1_ice>; qcom,bus-width = <8>; qcom,large-address-bus; @@ -1623,11 +1622,11 @@ ufshc_mem: ufshc@1d84000 { compatible = "qcom,ufshc"; - reg = <0x1d84000 0x3000>; + reg = <0x1d84000 0x3000>, <0x1d90000 0x8000>; + reg-names = "ufs_mem", "ufs_ice"; interrupts = <0 265 0>; phys = <&ufsphy_mem>; phy-names = "ufsphy"; - ufs-qcom-crypto = <&ufs_ice>; lanes-per-direction = <1>; dev-ref-clk-freq = <0>; /* 19.2 MHz */ diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index bcc15b5f454e..8428b869d768 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -2281,7 +2281,6 @@ reg = <0x1d87000 0xda8>; /* PHY regs */ reg-names = "phy_mem"; #phy-cells = <0>; - ufs-qcom-crypto = <&ufs_ice>; lanes-per-direction = <2>; @@ -2297,11 +2296,11 @@ ufshc_mem: ufshc@1d84000 { compatible = "qcom,ufshc"; - reg = <0x1d84000 0x2500>; + reg = <0x1d84000 0x2500>, <0x1d90000 0x8000>; + reg-names = "ufs_mem", "ufs_ice"; interrupts = <0 265 0>; phys = <&ufsphy_mem>; phy-names = "ufsphy"; - ufs-qcom-crypto = <&ufs_ice>; 
lanes-per-direction = <2>; dev-ref-clk-freq = <0>; /* 19.2 MHz */ From 54c597ad60fddf28431350aa33be495b290c03d7 Mon Sep 17 00:00:00 2001 From: Dundi Raviteja Date: Fri, 14 Aug 2020 18:45:03 +0530 Subject: [PATCH 047/141] defconfig: Disable wlan vendors to optimize memory Disable wlan vendors to optimize memory usage. Change-Id: I2b5895e23f2edefa249a30e5826e8711e93ce4dc CRs-Fixed: 2755793 Signed-off-by: Dundi Raviteja --- arch/arm/configs/vendor/sdm429-bg-perf_defconfig | 16 +++++++++++++++- arch/arm/configs/vendor/sdm429-bg_defconfig | 15 +++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/arch/arm/configs/vendor/sdm429-bg-perf_defconfig b/arch/arm/configs/vendor/sdm429-bg-perf_defconfig index d1fdcb58461c..fc123a17f87e 100644 --- a/arch/arm/configs/vendor/sdm429-bg-perf_defconfig +++ b/arch/arm/configs/vendor/sdm429-bg-perf_defconfig @@ -294,7 +294,21 @@ CONFIG_PPP_ASYNC=y CONFIG_PPP_SYNC_TTY=y CONFIG_USB_RTL8152=y CONFIG_USB_USBNET=y -CONFIG_WIL6210=m +# CONFIG_WLAN_VENDOR_ADMTEK is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +# CONFIG_WLAN_VENDOR_BROADCOM is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +# CONFIG_WLAN_VENDOR_INTEL is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +# CONFIG_WLAN_VENDOR_MARVELL is not set +# CONFIG_WLAN_VENDOR_MEDIATEK is not set +# CONFIG_WLAN_VENDOR_RALINK is not set +# CONFIG_WLAN_VENDOR_REALTEK is not set +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +# CONFIG_WLAN_VENDOR_QUANTENNA is not set CONFIG_WCNSS_MEM_PRE_ALLOC=y CONFIG_CLD_LL_CORE=y CONFIG_INPUT_EVDEV=y diff --git a/arch/arm/configs/vendor/sdm429-bg_defconfig b/arch/arm/configs/vendor/sdm429-bg_defconfig index 23b7a19357f8..cfd6f665b64f 100644 --- a/arch/arm/configs/vendor/sdm429-bg_defconfig +++ b/arch/arm/configs/vendor/sdm429-bg_defconfig @@ -301,6 +301,21 @@ CONFIG_PPP_ASYNC=y CONFIG_PPP_SYNC_TTY=y CONFIG_USB_RTL8152=y 
CONFIG_USB_USBNET=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +# CONFIG_WLAN_VENDOR_BROADCOM is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +# CONFIG_WLAN_VENDOR_INTEL is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +# CONFIG_WLAN_VENDOR_MARVELL is not set +# CONFIG_WLAN_VENDOR_MEDIATEK is not set +# CONFIG_WLAN_VENDOR_RALINK is not set +# CONFIG_WLAN_VENDOR_REALTEK is not set +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +# CONFIG_WLAN_VENDOR_QUANTENNA is not set CONFIG_WCNSS_MEM_PRE_ALLOC=y CONFIG_CLD_LL_CORE=y CONFIG_INPUT_EVDEV=y From 7bc85c41a81b4ac6f9eb13cfda40d1d2adb10d5c Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Fri, 14 Aug 2020 23:09:50 +0530 Subject: [PATCH 048/141] mmc: host: Fix the condition to parse crypto clocks If crypto clock details are present the call returns 0 which must be checked to parse the clock entry in host controller dtsi node. Test: 1. Clock scalability. 2. Performance number using Andorbench apk. 
Change-Id: I10ee3de1c1e02299de6ab390852d367cdb303224 Signed-off-by: Neeraj Soni --- drivers/mmc/host/sdhci-msm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 9e9fb3afb0ec..29a87b31b6de 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -2055,7 +2055,7 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev, } } - if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates", + if (!sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates", &ice_clk_table, &ice_clk_table_len, 0)) { if (ice_clk_table && ice_clk_table_len) { if (ice_clk_table_len != 2) { From 5d9e79304aff5b9bb89d69e1b1224c8362071494 Mon Sep 17 00:00:00 2001 From: Pradeep P V K Date: Mon, 10 Aug 2020 21:03:31 +0530 Subject: [PATCH 049/141] mtd: msm_qpic_nand: Use logical unit count in flash density In an ONFI compliant devices, the flash parameters are calculated by reading the onfi parameter page of the device. Existing flash density is calculated based on a single logical unit(LUN). This shows wrong density information on Multi LUN flashes. So, always use number of LUNS value in flash density calculations. 
Change-Id: Idba4d4e129e7fdcdab1e509bd8c3149f26fc11f2 Signed-off-by: Pradeep P V K --- drivers/mtd/devices/msm_qpic_nand.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/devices/msm_qpic_nand.c b/drivers/mtd/devices/msm_qpic_nand.c index 6e84a4b2c54b..6d80804d3310 100644 --- a/drivers/mtd/devices/msm_qpic_nand.c +++ b/drivers/mtd/devices/msm_qpic_nand.c @@ -1056,8 +1056,9 @@ static int msm_nand_flash_onfi_probe(struct msm_nand_info *info) flash->blksize = onfi_param_page_ptr->number_of_pages_per_block * flash->pagesize; flash->oobsize = onfi_param_page_ptr->number_of_spare_bytes_per_page; - flash->density = onfi_param_page_ptr->number_of_blocks_per_logical_unit - * flash->blksize; + flash->density = onfi_param_page_ptr->number_of_logical_units * + onfi_param_page_ptr->number_of_blocks_per_logical_unit * + flash->blksize; flash->ecc_correctability = onfi_param_page_ptr->number_of_bits_ecc_correctability; From c26572257d2ca7b6b298225476523048b24590c7 Mon Sep 17 00:00:00 2001 From: Protik Biswas Date: Thu, 30 Jul 2020 18:14:21 +0530 Subject: [PATCH 050/141] sdm429w: add bg-rsg driver changes bg-rsb driver is taken from msm-4.9 'commit <94afce5b> ("soc: qcom: bg-rsb: enable/disable events through gpio")'. bg-rsb driver maintains RSB state machine. bgrsb-rpmsg is registered as a rpmsg driver when remote processor opens RSB_CTRL channel it sends callback to rsb driver. 
Change-Id: I58e341ce81f33df38e900f822325be6f37cef8e6 Signed-off-by: Protik Biswas --- .../devicetree/bindings/soc/qcom/bg_rsb.txt | 19 + drivers/soc/qcom/Kconfig | 9 + drivers/soc/qcom/Makefile | 1 + drivers/soc/qcom/bg_rsb.c | 891 ++++++++++++++++++ drivers/soc/qcom/bgrsb.h | 72 +- 5 files changed, 982 insertions(+), 10 deletions(-) create mode 100644 Documentation/devicetree/bindings/soc/qcom/bg_rsb.txt create mode 100644 drivers/soc/qcom/bg_rsb.c diff --git a/Documentation/devicetree/bindings/soc/qcom/bg_rsb.txt b/Documentation/devicetree/bindings/soc/qcom/bg_rsb.txt new file mode 100644 index 000000000000..5e4c0ef147ed --- /dev/null +++ b/Documentation/devicetree/bindings/soc/qcom/bg_rsb.txt @@ -0,0 +1,19 @@ +Qualcomm technologies, Inc. bg-rsb + +BG-RSB : bg-rsb is used to communicate with Blackghost over +Glink to configure the RSB events. bg-rsb enable/disable +LDO11 and LDO15 before making any communication to BG +regarding RSB. It also provides an input device, which is +used to send the RSB/Button events to input framework. + +Required properties: +- compatible : should be "qcom,bg-rsb" +- vdd-ldo1-supply : for powering main supply +- vdd-ldo2-supply : for powering sensor + +Example: + qcom,bg-rsb { + compatible = "qcom,bg-rsb"; + vdd-ldo1-supply = <&pm660_l11>; + vdd-ldo2-supply = <&pm660_l15>; + }; diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 8d9c0e27508f..c6469580cf2d 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -1031,6 +1031,15 @@ config MSM_BAM_DMUX communication between G-Link/bg_com_dev and BG processor over SPI. This handle the interrupts raised by BG and notify the G-link with interrupt event and event data. + +config MSM_BGRSB + bool "Provide support for rsb events on Blackghost chipset" + help + BGRSB communicates to BG over rpmsg driver for RSB configuration and + enable/disable on device power state change. It enables/disables + the regulator specific to RSB. 
Sends the side band events generated + by BG to input framework. + config MSM_PIL_SSR_BG tristate "MSM Subsystem Blackghost(BG) Support" depends on MSM_PIL && MSM_SUBSYSTEM_RESTART diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 9c2a3fb9fd36..5c3226232b8c 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -71,6 +71,7 @@ obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o obj-$(CONFIG_SDX_EXT_IPC) += sdx_ext_ipc.o obj-$(CONFIG_MSM_PIL_SSR_BG) += subsys-pil-bg.o obj-$(CONFIG_QTI_NOTIFY_SIDEBAND) += sideband_notify.o +obj-$(CONFIG_MSM_BGRSB) += bg_rsb.o ifdef CONFIG_MSM_SUBSYSTEM_RESTART obj-y += subsystem_notif.o diff --git a/drivers/soc/qcom/bg_rsb.c b/drivers/soc/qcom/bg_rsb.c new file mode 100644 index 000000000000..cafe5af25af9 --- /dev/null +++ b/drivers/soc/qcom/bg_rsb.c @@ -0,0 +1,891 @@ +/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(msg) "bgrsb: %s: " msg, __func__ +#include "bgrsb.h" + +struct bgrsb_priv { + void *handle; + struct input_dev *input; + struct mutex glink_mutex; + struct mutex rsb_state_mutex; + enum bgrsb_state bgrsb_current_state; + void *lhndl; + struct work_struct bg_up_work; + struct work_struct bg_down_work; + struct work_struct rsb_up_work; + struct work_struct rsb_down_work; + struct work_struct rsb_calibration_work; + struct work_struct bttn_configr_work; + struct workqueue_struct *bgrsb_wq; + struct bgrsb_regulator rgltr; + enum ldo_task ldo_action; + void *bgwear_subsys_handle; + struct completion bg_resp_cmplt; + struct completion wrk_cmplt; + struct completion bg_lnikup_cmplt; + struct completion tx_done; + struct device *ldev; + struct wakeup_source bgrsb_ws; + wait_queue_head_t link_state_wait; + uint32_t calbrtion_intrvl; + uint32_t calbrtion_cpi; + uint8_t bttn_configs; + int msmrsb_gpio; + bool rsb_rpmsg; + bool rsb_use_msm_gpio; + bool is_in_twm; + bool calibration_needed; + bool is_calibrd; + bool is_cnfgrd; + bool blk_rsb_cmnds; + bool pending_enable; +}; + +static void *bgrsb_drv; +static int bgrsb_enable(struct bgrsb_priv *dev, bool enable); + +void bgrsb_send_input(struct event *evnt) +{ + uint8_t press_code; + uint8_t value; + struct bgrsb_priv *dev = + container_of(bgrsb_drv, struct bgrsb_priv, lhndl); + + pr_debug("%s: Called\n", __func__); + if (!evnt) { + pr_err("%s: No event received\n", __func__); + return; + } + if (evnt->sub_id == 1) { + input_report_rel(dev->input, REL_WHEEL, evnt->evnt_data); + input_sync(dev->input); + } else if (evnt->sub_id == 2) { + press_code = (uint8_t) evnt->evnt_data; + value = (uint8_t) (evnt->evnt_data >> 8); + + switch (press_code) { + case 0x1: + if (value == 0) { + input_report_key(dev->input, KEY_VOLUMEDOWN, 1); + input_sync(dev->input); + } else { + input_report_key(dev->input, KEY_VOLUMEDOWN, 0); + input_sync(dev->input); + } + break; + case 0x2: + if (value == 0) { + 
input_report_key(dev->input, KEY_VOLUMEUP, 1); + input_sync(dev->input); + } else { + input_report_key(dev->input, KEY_VOLUMEUP, 0); + input_sync(dev->input); + } + break; + case 0x3: + if (value == 0) { + input_report_key(dev->input, KEY_POWER, 1); + input_sync(dev->input); + } else { + input_report_key(dev->input, KEY_POWER, 0); + input_sync(dev->input); + } + break; + default: + pr_info("event: type[%d] , data: %d\n", + evnt->sub_id, evnt->evnt_data); + } + } + pr_debug("%s: Ended\n", __func__); +} +EXPORT_SYMBOL(bgrsb_send_input); + +static int bgrsb_init_regulators(struct device *pdev) +{ + struct regulator *reg11; + struct regulator *reg15; + struct bgrsb_priv *dev = dev_get_drvdata(pdev); + + reg11 = devm_regulator_get(pdev, "vdd-ldo1"); + if (IS_ERR_OR_NULL(reg11)) { + pr_err("Unable to get regulator for LDO-11\n"); + return PTR_ERR(reg11); + } + + reg15 = devm_regulator_get(pdev, "vdd-ldo2"); + if (IS_ERR_OR_NULL(reg15)) { + pr_err("Unable to get regulator for LDO-15\n"); + return PTR_ERR(reg15); + } + + dev->rgltr.regldo11 = reg11; + dev->rgltr.regldo15 = reg15; + return 0; +} + +static int bgrsb_set_ldo(struct bgrsb_priv *dev, enum ldo_task ldo_action) +{ + int ret = 0; + bool value; + + switch (ldo_action) { + case BGRSB_HW_TURN_ON: + ret = regulator_set_voltage(dev->rgltr.regldo11, + BGRSB_LDO11_VTG_MIN_UV, BGRSB_LDO11_VTG_MAX_UV); + if (ret) { + pr_err("Failed to request LDO-11 voltage %d\n", ret); + goto err_ret; + } + ret = regulator_enable(dev->rgltr.regldo11); + if (ret) { + pr_err("Failed to enable LDO-11 %d\n", ret); + goto err_ret; + } + break; + case BGRSB_ENABLE_WHEEL_EVENTS: + if (dev->rsb_use_msm_gpio == true) { + if (!gpio_is_valid(dev->msmrsb_gpio)) { + pr_err("gpio %d is not valid\n", + dev->msmrsb_gpio); + ret = -ENXIO; + goto err_ret; + } + + /* Sleep 50ms for h/w to detect signal */ + msleep(50); + + gpio_set_value(dev->msmrsb_gpio, 1); + value = gpio_get_value(dev->msmrsb_gpio); + if (value == true) { + pr_debug("gpio %d set 
properly\n", + dev->msmrsb_gpio); + } else { + pr_debug("gpio %d set failed\n", + dev->msmrsb_gpio); + ret = -ENXIO; + goto err_ret; + } + } else { + ret = regulator_set_voltage(dev->rgltr.regldo15, + BGRSB_LDO15_VTG_MIN_UV, BGRSB_LDO15_VTG_MAX_UV); + if (ret) { + pr_err("Request failed LDO-15 %d\n", + ret); + goto err_ret; + } + ret = regulator_enable(dev->rgltr.regldo15); + if (ret) { + pr_err("LDO-15 not enabled%d\n", + ret); + goto err_ret; + } + } + break; + case BGRSB_HW_TURN_OFF: + ret = regulator_disable(dev->rgltr.regldo11); + if (ret) { + pr_err("Failed to disable LDO-11 %d\n", ret); + goto err_ret; + } + break; + case BGRSB_DISABLE_WHEEL_EVENTS: + if (dev->rsb_use_msm_gpio == true) { + if (!gpio_is_valid(dev->msmrsb_gpio)) { + pr_err("Invalid gpio %d\n", + dev->msmrsb_gpio); + ret = -ENXIO; + goto err_ret; + } + /* Sleep 50ms for h/w to detect signal */ + msleep(50); + gpio_set_value(dev->msmrsb_gpio, 0); + } else { + ret = regulator_disable(dev->rgltr.regldo15); + if (ret) { + pr_err("Failed to disable LDO-15 %d\n", ret); + goto err_ret; + } + regulator_set_load(dev->rgltr.regldo15, 0); + } + break; + default: + ret = -EINVAL; + } + +err_ret: + return ret; +} + +static void bgrsb_bgdown_work(struct work_struct *work) +{ + int ret = 0; + struct bgrsb_priv *dev = container_of(work, struct bgrsb_priv, + bg_down_work); + + mutex_lock(&dev->rsb_state_mutex); + if (dev->bgrsb_current_state == BGRSB_STATE_RSB_ENABLED) { + ret = bgrsb_set_ldo(dev, BGRSB_DISABLE_WHEEL_EVENTS); + if (ret == 0) + dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED; + else + pr_err("Failed to unvote LDO-15 on BG down\n"); + } + + if (dev->bgrsb_current_state == BGRSB_STATE_RSB_CONFIGURED) { + ret = bgrsb_set_ldo(dev, BGRSB_HW_TURN_OFF); + if (ret == 0) + dev->bgrsb_current_state = BGRSB_STATE_INIT; + else + pr_err("Failed to unvote LDO-11 on BG down\n"); + } + + dev->is_cnfgrd = false; + dev->blk_rsb_cmnds = false; + pr_debug("RSB current state is : %d\n", 
dev->bgrsb_current_state); + + if (dev->bgrsb_current_state == BGRSB_STATE_INIT) { + if (dev->is_calibrd) + dev->calibration_needed = true; + } + mutex_unlock(&dev->rsb_state_mutex); +} + +static int bgrsb_tx_msg(struct bgrsb_priv *dev, void *msg, size_t len) +{ + int rc = 0; + + __pm_stay_awake(&dev->bgrsb_ws); + mutex_lock(&dev->glink_mutex); + if (!dev->rsb_rpmsg) { + pr_err("bgrsb-rpmsg is not probed yet, waiting for it to be probed\n"); + goto err_ret; + } + if (rc != 0) + pr_err("bgrsb_rpmsg_tx_msg failed %d\n", rc); + +err_ret: + mutex_unlock(&dev->glink_mutex); + __pm_relax(&dev->bgrsb_ws); + return rc; +} + +static int bgrsb_enable(struct bgrsb_priv *dev, bool enable) +{ + struct bgrsb_msg req = {0}; + + req.cmd_id = 0x02; + req.data = enable ? 0x01 : 0x00; + + return bgrsb_tx_msg(dev, &req, BGRSB_MSG_SIZE); +} + +static int bgrsb_configr_rsb(struct bgrsb_priv *dev, bool enable) +{ + struct bgrsb_msg req = {0}; + + req.cmd_id = 0x01; + req.data = enable ? 0x01 : 0x00; + + return bgrsb_tx_msg(dev, &req, BGRSB_MSG_SIZE); +} + +void bgrsb_notify_glink_channel_state(bool state) +{ + struct bgrsb_priv *dev = + container_of(bgrsb_drv, struct bgrsb_priv, lhndl); + + pr_debug("%s: RSB-CTRL channel state: %d\n", __func__, state); + dev->rsb_rpmsg = state; +} +EXPORT_SYMBOL(bgrsb_notify_glink_channel_state); + +static void bgrsb_bgup_work(struct work_struct *work) +{ + int ret = 0; + struct bgrsb_priv *dev = + container_of(work, struct bgrsb_priv, bg_up_work); + + mutex_lock(&dev->rsb_state_mutex); + ret = bgrsb_set_ldo(dev, BGRSB_HW_TURN_ON); + if (ret == 0) { + if (!dev->rsb_rpmsg) + pr_err("bgrsb-rpmsg is not probed yet\n"); + + ret = wait_event_timeout(dev->link_state_wait, + (dev->rsb_rpmsg == true), msecs_to_jiffies(TIMEOUT_MS)); + if (ret == 0) { + pr_err("channel connection time out %d\n", + ret); + goto unlock; + } + pr_debug("bgrsb-rpmsg is probed\n"); + ret = bgrsb_configr_rsb(dev, true); + if (ret != 0) { + pr_err("BG failed to configure RSB %d\n", ret); 
+ if (bgrsb_set_ldo(dev, BGRSB_HW_TURN_OFF) == 0) + dev->bgrsb_current_state = BGRSB_STATE_INIT; + goto unlock; + } + dev->is_cnfgrd = true; + dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED; + pr_debug("RSB Cofigured\n"); + if (dev->pending_enable) + queue_work(dev->bgrsb_wq, &dev->rsb_up_work); + } +unlock: + mutex_unlock(&dev->rsb_state_mutex); +} + +/** + * ssr_bg_cb(): callback function is called. + * @arg1: a notifier_block. + * @arg2: opcode that defines the event. + * @arg3: void pointer. + * + * by ssr framework when BG goes down, up and during + * ramdump collection. It handles BG shutdown and + * power up events. + * + * Return: NOTIFY_DONE. + */ +static int ssr_bgrsb_cb(struct notifier_block *this, + unsigned long opcode, void *data) +{ + struct bgrsb_priv *dev = container_of(bgrsb_drv, + struct bgrsb_priv, lhndl); + + switch (opcode) { + case SUBSYS_BEFORE_SHUTDOWN: + if (dev->bgrsb_current_state == BGRSB_STATE_RSB_ENABLED) + dev->pending_enable = true; + queue_work(dev->bgrsb_wq, &dev->bg_down_work); + break; + case SUBSYS_AFTER_POWERUP: + if (dev->bgrsb_current_state == BGRSB_STATE_INIT) + queue_work(dev->bgrsb_wq, &dev->bg_up_work); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block ssr_bg_nb = { + .notifier_call = ssr_bgrsb_cb, + .priority = 0, +}; + +/** + * bgrsb_ssr_register(): callback function is called. + * @arg1: pointer to bgrsb_priv structure. + * + * ssr_register checks that domain id should be in range + * and register SSR framework for value at domain id. + * + * Return: 0 for success and -ENODEV otherwise. 
+ */ +static int bgrsb_ssr_register(struct bgrsb_priv *dev) +{ + struct notifier_block *nb; + + if (!dev) + return -ENODEV; + + nb = &ssr_bg_nb; + dev->bgwear_subsys_handle = + subsys_notif_register_notifier(BGRSB_BGWEAR_SUBSYS, nb); + + if (!dev->bgwear_subsys_handle) { + dev->bgwear_subsys_handle = NULL; + return -ENODEV; + } + return 0; +} + +static void bgrsb_enable_rsb(struct work_struct *work) +{ + int rc = 0; + struct bgrsb_priv *dev = + container_of(work, struct bgrsb_priv, rsb_up_work); + + mutex_lock(&dev->rsb_state_mutex); + if (dev->bgrsb_current_state == BGRSB_STATE_RSB_ENABLED) { + pr_debug("RSB is already enabled\n"); + goto unlock; + } + if (dev->bgrsb_current_state != BGRSB_STATE_RSB_CONFIGURED) { + pr_err("BG is not yet configured for RSB\n"); + dev->pending_enable = true; + goto unlock; + } + rc = bgrsb_set_ldo(dev, BGRSB_ENABLE_WHEEL_EVENTS); + if (rc == 0) { + rc = bgrsb_enable(dev, true); + if (rc != 0) { + pr_err("Failed to send enable command to BG %d\n", rc); + bgrsb_set_ldo(dev, BGRSB_DISABLE_WHEEL_EVENTS); + dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED; + goto unlock; + } + } + dev->bgrsb_current_state = BGRSB_STATE_RSB_ENABLED; + dev->pending_enable = false; + pr_debug("RSB Enabled\n"); + + if (dev->calibration_needed) { + dev->calibration_needed = false; + queue_work(dev->bgrsb_wq, &dev->rsb_calibration_work); + } +unlock: + mutex_unlock(&dev->rsb_state_mutex); + +} + +static void bgrsb_disable_rsb(struct work_struct *work) +{ + int rc = 0; + struct bgrsb_priv *dev = container_of(work, struct bgrsb_priv, + rsb_down_work); + + mutex_lock(&dev->rsb_state_mutex); + dev->pending_enable = false; + if (dev->bgrsb_current_state == BGRSB_STATE_RSB_ENABLED) { + rc = bgrsb_enable(dev, false); + if (rc != 0) { + pr_err("Failed to send disable command to BG\n"); + goto unlock; + } + rc = bgrsb_set_ldo(dev, BGRSB_DISABLE_WHEEL_EVENTS); + if (rc != 0) + goto unlock; + + dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED; + pr_debug("RSB 
Disabled\n"); + } + +unlock: + mutex_unlock(&dev->rsb_state_mutex); +} + +static void bgrsb_calibration(struct work_struct *work) +{ + int rc = 0; + struct bgrsb_msg req = {0}; + struct bgrsb_priv *dev = + container_of(work, struct bgrsb_priv, + rsb_calibration_work); + + mutex_lock(&dev->rsb_state_mutex); + if (!dev->is_cnfgrd) { + pr_err("RSB is not configured\n"); + goto unlock; + } + + req.cmd_id = 0x03; + req.data = dev->calbrtion_cpi; + + rc = bgrsb_tx_msg(dev, &req, 5); + if (rc != 0) { + pr_err("Failed to send resolution value to BG %d\n", rc); + goto unlock; + } + + req.cmd_id = 0x04; + req.data = dev->calbrtion_intrvl; + + rc = bgrsb_tx_msg(dev, &req, 5); + if (rc != 0) { + pr_err("Failed to send interval value to BG %d\n", rc); + goto unlock; + } + dev->is_calibrd = true; + pr_debug("RSB Calibrated\n"); + +unlock: + mutex_unlock(&dev->rsb_state_mutex); +} + +static void bgrsb_buttn_configration(struct work_struct *work) +{ + int rc = 0; + struct bgrsb_msg req = {0}; + struct bgrsb_priv *dev = + container_of(work, struct bgrsb_priv, + bttn_configr_work); + + mutex_lock(&dev->rsb_state_mutex); + if (!dev->is_cnfgrd) { + pr_err("RSB is not configured\n"); + goto unlock; + } + + req.cmd_id = 0x05; + req.data = dev->bttn_configs; + + rc = bgrsb_tx_msg(dev, &req, 5); + if (rc != 0) { + pr_err("configuration cmnd failed %d\n", + rc); + goto unlock; + } + + dev->bttn_configs = 0; + pr_debug("RSB Button configured\n"); + +unlock: + mutex_unlock(&dev->rsb_state_mutex); +} + +static int bgrsb_handle_cmd_in_ssr(struct bgrsb_priv *dev, char *str) +{ + long val; + int ret; + char *tmp; + + tmp = strsep(&str, ":"); + if (!tmp) + return -EINVAL; + + ret = kstrtol(tmp, 10, &val); + if (ret < 0) + return ret; + + if (val == BGRSB_POWER_ENABLE) + dev->pending_enable = true; + else if (val == BGRSB_POWER_DISABLE) + dev->pending_enable = false; + + return 0; +} + +static int split_bg_work(struct bgrsb_priv *dev, char *str) +{ + long val; + int ret; + char *tmp; + + tmp = 
strsep(&str, ":"); + if (!tmp) + return -EINVAL; + + ret = kstrtol(tmp, 10, &val); + if (ret < 0) + return ret; + + switch (val) { + case BGRSB_POWER_DISABLE: + queue_work(dev->bgrsb_wq, &dev->rsb_down_work); + break; + case BGRSB_POWER_ENABLE: + queue_work(dev->bgrsb_wq, &dev->rsb_up_work); + break; + case BGRSB_POWER_CALIBRATION: + tmp = strsep(&str, ":"); + if (!tmp) + return -EINVAL; + + ret = kstrtol(tmp, 10, &val); + if (ret < 0) + return ret; + + dev->calbrtion_intrvl = (uint32_t)val; + + tmp = strsep(&str, ":"); + if (!tmp) + return -EINVAL; + + ret = kstrtol(tmp, 10, &val); + if (ret < 0) + return ret; + + dev->calbrtion_cpi = (uint32_t)val; + + queue_work(dev->bgrsb_wq, &dev->rsb_calibration_work); + break; + case BGRSB_BTTN_CONFIGURE: + tmp = strsep(&str, ":"); + if (!tmp) + return -EINVAL; + + ret = kstrtol(tmp, 10, &val); + if (ret < 0) + return ret; + + dev->bttn_configs = (uint8_t)val; + queue_work(dev->bgrsb_wq, &dev->bttn_configr_work); + break; + case BGRSB_IN_TWM: + dev->is_in_twm = true; + case BGRSB_GLINK_POWER_DISABLE: + break; + case BGRSB_OUT_TWM: + dev->is_in_twm = false; + case BGRSB_GLINK_POWER_ENABLE: + break; + } + return 0; +} + +static int store_enable(struct device *pdev, struct device_attribute *attr, + const char *buff, size_t count) +{ + int rc; + struct bgrsb_priv *dev = dev_get_drvdata(pdev); + char *arr; + + if (dev->blk_rsb_cmnds) { + pr_err("Device is in TWM state\n"); + return count; + } + arr = kstrdup(buff, GFP_KERNEL); + if (!arr) + return -ENOMEM; + + rc = split_bg_work(dev, arr); + if (!dev->is_cnfgrd) { + bgrsb_handle_cmd_in_ssr(dev, arr); + kfree(arr); + return -ENOMEDIUM; + } + + if (rc != 0) + pr_err("Not able to process request\n"); + + kfree(arr); + return count; +} + +static int show_enable(struct device *dev, struct device_attribute *attr, + char *buff) +{ + return 0; +} + +static struct device_attribute dev_attr_rsb = { + .attr = { + .name = "enable", + .mode = 00660, + }, + .show = show_enable, + .store = 
store_enable, +}; + +static int bgrsb_init(struct bgrsb_priv *dev) +{ + bgrsb_drv = &dev->lhndl; + mutex_init(&dev->glink_mutex); + mutex_init(&dev->rsb_state_mutex); + + dev->ldo_action = BGRSB_NO_ACTION; + + dev->bgrsb_wq = + create_singlethread_workqueue("bg-work-queue"); + if (!dev->bgrsb_wq) { + pr_err("Failed to init BG-RSB work-queue\n"); + return -ENOMEM; + } + + init_waitqueue_head(&dev->link_state_wait); + + /* set default bgrsb state */ + dev->bgrsb_current_state = BGRSB_STATE_INIT; + + /* Init all works */ + INIT_WORK(&dev->bg_up_work, bgrsb_bgup_work); + INIT_WORK(&dev->bg_down_work, bgrsb_bgdown_work); + INIT_WORK(&dev->rsb_up_work, bgrsb_enable_rsb); + INIT_WORK(&dev->rsb_down_work, bgrsb_disable_rsb); + INIT_WORK(&dev->rsb_calibration_work, bgrsb_calibration); + INIT_WORK(&dev->bttn_configr_work, bgrsb_buttn_configration); + + return 0; +} + +static int bg_rsb_probe(struct platform_device *pdev) +{ + struct bgrsb_priv *dev; + struct input_dev *input; + struct device_node *node; + int rc; + unsigned int rsb_gpio; + + node = pdev->dev.of_node; + + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + /* Add wake lock for PM suspend */ + wakeup_source_init(&dev->bgrsb_ws, "BGRSB_wake_lock"); + + dev->bgrsb_current_state = BGRSB_STATE_UNKNOWN; + rc = bgrsb_init(dev); + if (rc) + goto err_ret_dev; + /* Set up input device */ + input = devm_input_allocate_device(&pdev->dev); + if (!input) + goto err_ret_dev; + + input_set_capability(input, EV_REL, REL_WHEEL); + input_set_capability(input, EV_KEY, KEY_VOLUMEUP); + input_set_capability(input, EV_KEY, KEY_VOLUMEDOWN); + input->name = "bg-spi"; + + rc = input_register_device(input); + if (rc) { + pr_err("Input device registration failed\n"); + goto err_ret_inp; + } + dev->input = input; + + /* register device for bg-wear ssr */ + rc = bgrsb_ssr_register(dev); + if (rc) { + pr_err("Failed to register for bg ssr\n"); + goto err_ret_inp; + } + rc = 
device_create_file(&pdev->dev, &dev_attr_rsb); + if (rc) { + pr_err("Not able to create the file bg-rsb/enable\n"); + goto err_ret_inp; + } + + dev->rsb_use_msm_gpio = + of_property_read_bool(node, "qcom,rsb-use-msm-gpio"); + + if (dev->rsb_use_msm_gpio == true) { + rsb_gpio = of_get_named_gpio(node, "qcom,bg-rsb-gpio", 0); + pr_debug("gpio %d is configured\n", rsb_gpio); + + if (!gpio_is_valid(rsb_gpio)) { + pr_err("gpio %d found is not valid\n", rsb_gpio); + goto err_ret; + } + + if (gpio_request(rsb_gpio, "msm_rsb_gpio")) { + pr_err("gpio %d request failed\n", rsb_gpio); + goto err_ret; + } + + if (gpio_direction_output(rsb_gpio, 1)) { + pr_err("gpio %d direction not set\n", rsb_gpio); + goto err_ret; + } + pr_debug("rsb gpio successfully requested\n"); + dev->msmrsb_gpio = rsb_gpio; + } + dev_set_drvdata(&pdev->dev, dev); + rc = bgrsb_init_regulators(&pdev->dev); + if (rc) { + pr_err("Failed to set regulators\n"); + goto err_ret_inp; + } + + pr_debug("RSB probe successfully\n"); + return 0; +err_ret: + return 0; +err_ret_inp: + input_free_device(input); +err_ret_dev: + devm_kfree(&pdev->dev, dev); + return -ENODEV; +} + +static int bg_rsb_remove(struct platform_device *pdev) +{ + struct bgrsb_priv *dev = platform_get_drvdata(pdev); + + destroy_workqueue(dev->bgrsb_wq); + input_free_device(dev->input); + wakeup_source_trash(&dev->bgrsb_ws); + return 0; +} + +static int bg_rsb_resume(struct device *pldev) +{ + struct platform_device *pdev = to_platform_device(pldev); + struct bgrsb_priv *dev = platform_get_drvdata(pdev); + + mutex_lock(&dev->rsb_state_mutex); + if (dev->bgrsb_current_state == BGRSB_STATE_RSB_CONFIGURED) + goto ret_success; + + if (dev->bgrsb_current_state == BGRSB_STATE_INIT) { + if (dev->is_cnfgrd && + bgrsb_set_ldo(dev, BGRSB_HW_TURN_ON) == 0) { + dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED; + pr_debug("RSB Cofigured\n"); + goto ret_success; + } + pr_err("RSB failed to resume\n"); + } + mutex_unlock(&dev->rsb_state_mutex); + return 
-EINVAL; + +ret_success: + mutex_unlock(&dev->rsb_state_mutex); + return 0; +} + +static int bg_rsb_suspend(struct device *pldev) +{ + struct platform_device *pdev = to_platform_device(pldev); + struct bgrsb_priv *dev = platform_get_drvdata(pdev); + + mutex_lock(&dev->rsb_state_mutex); + if (dev->bgrsb_current_state == BGRSB_STATE_INIT) + goto ret_success; + + if (dev->bgrsb_current_state == BGRSB_STATE_RSB_ENABLED) { + if (bgrsb_set_ldo(dev, BGRSB_DISABLE_WHEEL_EVENTS) != 0) + goto ret_err; + } + + if (bgrsb_set_ldo(dev, BGRSB_HW_TURN_OFF) == 0) { + dev->bgrsb_current_state = BGRSB_STATE_INIT; + pr_debug("RSB Init\n"); + goto ret_success; + } + +ret_err: + pr_err("RSB failed to suspend\n"); + mutex_unlock(&dev->rsb_state_mutex); + return -EINVAL; + +ret_success: + mutex_unlock(&dev->rsb_state_mutex); + return 0; +} + +static const struct of_device_id bg_rsb_of_match[] = { + { .compatible = "qcom,bg-rsb", }, + { } +}; + +static const struct dev_pm_ops pm_rsb = { + .resume = bg_rsb_resume, + .suspend = bg_rsb_suspend, +}; + +static struct platform_driver bg_rsb_driver = { + .driver = { + .name = "bg-rsb", + .of_match_table = bg_rsb_of_match, + .pm = &pm_rsb, + }, + .probe = bg_rsb_probe, + .remove = bg_rsb_remove, +}; module_platform_driver(bg_rsb_driver); +MODULE_DESCRIPTION("SoC BG RSB driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/bgrsb.h b/drivers/soc/qcom/bgrsb.h index 93a58b34bd28..ba0b88492ded 100644 --- a/drivers/soc/qcom/bgrsb.h +++ b/drivers/soc/qcom/bgrsb.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2018,2020 The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -13,6 +13,23 @@ #ifndef BGRSB_H #define BGRSB_H +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include struct event { uint8_t sub_id; @@ -20,17 +37,52 @@ struct event { uint32_t evnt_tm; }; +#define BGRSB_GLINK_INTENT_SIZE 0x04 +#define BGRSB_MSG_SIZE 0x08 +#define TIMEOUT_MS 2000 +#define BGRSB_LDO15_VTG_MIN_UV 3000000 +#define BGRSB_LDO15_VTG_MAX_UV 3000000 +#define BGRSB_LDO11_VTG_MIN_UV 1800000 +#define BGRSB_LDO11_VTG_MAX_UV 1800000 -struct bg_glink_chnl { - char *chnl_name; - char *chnl_edge; - char *chnl_trnsprt; +#define BGRSB_BGWEAR_SUBSYS "bg-wear" + +#define BGRSB_POWER_DISABLE 0 +#define BGRSB_POWER_ENABLE 1 +#define BGRSB_POWER_CALIBRATION 2 +#define BGRSB_BTTN_CONFIGURE 5 +#define BGRSB_GLINK_POWER_ENABLE 6 +#define BGRSB_GLINK_POWER_DISABLE 7 +#define BGRSB_IN_TWM 8 +#define BGRSB_OUT_TWM 9 + + +struct bgrsb_regulator { + struct regulator *regldo11; + struct regulator *regldo15; }; -/** - * bgrsb_send_input() - send the recived input to input framework - * @evnt: pointer to the event structure - */ -int bgrsb_send_input(struct event *evnt); +enum ldo_task { + BGRSB_HW_TURN_ON, + BGRSB_ENABLE_WHEEL_EVENTS, + BGRSB_HW_TURN_OFF, + BGRSB_DISABLE_WHEEL_EVENTS, + BGRSB_NO_ACTION +}; +enum bgrsb_state { + BGRSB_STATE_UNKNOWN, + BGRSB_STATE_INIT, + BGRSB_STATE_LDO11_ENABLED, + BGRSB_STATE_RSB_CONFIGURED, + BGRSB_STATE_RSB_ENABLED +}; + +struct bgrsb_msg { + uint32_t cmd_id; + uint32_t data; +}; + +void bgrsb_send_input(struct event *evnt); +void bgrsb_notify_glink_channel_state(bool state); #endif /* BGCOM_H */ From 3865166cdd47bbe0b3d858464a0cdbc3dc5c9922 Mon Sep 17 00:00:00 2001 From: Venkata Rao Kakani Date: Sun, 16 Aug 2020 13:10:33 +0530 Subject: [PATCH 051/141] ARM: dts: msm: disable disk rename 
in LV GVM disable disk renaming while booting up as lv gvm does not support avb. Change-Id: Ida3739b5d4e0d9cf80e3ddcc7d57ce419f49510d Signed-off-by: Venkata Rao Kakani --- arch/arm64/boot/dts/qcom/sa8155-vm-lv-mt.dtsi | 1 + arch/arm64/boot/dts/qcom/sa8155-vm-lv.dtsi | 2 ++ arch/arm64/boot/dts/qcom/sa8195-vm-lv-mt.dtsi | 1 + arch/arm64/boot/dts/qcom/sa8195-vm-lv.dtsi | 1 + 4 files changed, 5 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sa8155-vm-lv-mt.dtsi b/arch/arm64/boot/dts/qcom/sa8155-vm-lv-mt.dtsi index c2ef8679006a..55c498669e61 100644 --- a/arch/arm64/boot/dts/qcom/sa8155-vm-lv-mt.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155-vm-lv-mt.dtsi @@ -21,6 +21,7 @@ }; /delete-node/ cpus; + /delete-node/ rename_blk; }; &hab { diff --git a/arch/arm64/boot/dts/qcom/sa8155-vm-lv.dtsi b/arch/arm64/boot/dts/qcom/sa8155-vm-lv.dtsi index 5ef00509798f..f03c5e50c1ec 100644 --- a/arch/arm64/boot/dts/qcom/sa8155-vm-lv.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155-vm-lv.dtsi @@ -19,6 +19,8 @@ label = "pmem_shared_mem"; }; }; + + /delete-node/ rename_blk; }; &hab { diff --git a/arch/arm64/boot/dts/qcom/sa8195-vm-lv-mt.dtsi b/arch/arm64/boot/dts/qcom/sa8195-vm-lv-mt.dtsi index 074d40913c28..a678e6c4afc7 100644 --- a/arch/arm64/boot/dts/qcom/sa8195-vm-lv-mt.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8195-vm-lv-mt.dtsi @@ -20,6 +20,7 @@ }; /delete-node/ cpus; + /delete-node/ rename_blk; }; &hab { diff --git a/arch/arm64/boot/dts/qcom/sa8195-vm-lv.dtsi b/arch/arm64/boot/dts/qcom/sa8195-vm-lv.dtsi index f55efa749ab1..097af7244c89 100644 --- a/arch/arm64/boot/dts/qcom/sa8195-vm-lv.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8195-vm-lv.dtsi @@ -19,6 +19,7 @@ }; }; + /delete-node/ rename_blk; }; &hab { From 1e1f40918479d0f28678d29539368687338d0e94 Mon Sep 17 00:00:00 2001 From: Rishi Gupta Date: Fri, 7 Aug 2020 23:00:15 +0530 Subject: [PATCH 052/141] ARM: dts: sa2150p: enable rgmii level shifter on nand vt som The NAND based sa2150p SOM has a level shifter which is enabled through GPIO 16. 
This commit configures GPIO 16 to low state by-default to enable the shifter. Change-Id: I770e6b49e9dca2e9d7f6076ef772a279a4c2a25e Signed-off-by: Rishi Gupta --- .../boot/dts/qcom/sa2145p-ccard-nand-dc.dts | 30 +++++++++++++++++++ .../boot/dts/qcom/sa2150p-ccard-nand-dc.dts | 30 +++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sa2145p-ccard-nand-dc.dts b/arch/arm64/boot/dts/qcom/sa2145p-ccard-nand-dc.dts index 19b33f9f06ba..4f84d3aa500d 100644 --- a/arch/arm64/boot/dts/qcom/sa2145p-ccard-nand-dc.dts +++ b/arch/arm64/boot/dts/qcom/sa2145p-ccard-nand-dc.dts @@ -81,3 +81,33 @@ rx-dll-bypass; }; }; + +&tlmm { + /delete-node/ mdss_hdmi_ddc_active; + /delete-node/ mdss_hdmi_ddc_suspend; + rgmii_level_shifter: rgmii_level_shifter { + mux { + pins = "gpio16"; + function = "gpio"; + }; + config { + pins = "gpio16"; + drive-strength = <2>; + bias-pull-down; + output-low; + }; + }; +}; + +ðqos_hw { + pinctrl-names = "dev-emac-mdc", "dev-emac-mdio", + "dev-emac-rgmii_txd0_state", "dev-emac-rgmii_txd1_state", + "dev-emac-rgmii_txd2_state", "dev-emac-rgmii_txd3_state", + "dev-emac-rgmii_txc_state", "dev-emac-rgmii_tx_ctl_state", + "dev-emac-rgmii_rxd0_state", "dev-emac-rgmii_rxd1_state", + "dev-emac-rgmii_rxd2_state", "dev-emac-rgmii_rxd3_state", + "dev-emac-rgmii_rxc_state", "dev-emac-rgmii_rx_ctl_state", + "dev-emac-phy_intr", "dev-emac-phy_reset_state", + "dev-emac-rgmii_lvl_shift_state"; + pinctrl-16 = <&rgmii_level_shifter>; +}; diff --git a/arch/arm64/boot/dts/qcom/sa2150p-ccard-nand-dc.dts b/arch/arm64/boot/dts/qcom/sa2150p-ccard-nand-dc.dts index 418d9bb685a5..3f9b8b2428e7 100644 --- a/arch/arm64/boot/dts/qcom/sa2150p-ccard-nand-dc.dts +++ b/arch/arm64/boot/dts/qcom/sa2150p-ccard-nand-dc.dts @@ -155,3 +155,33 @@ rx-dll-bypass; }; }; + +&tlmm { + /delete-node/ mdss_hdmi_ddc_active; + /delete-node/ mdss_hdmi_ddc_suspend; + rgmii_level_shifter: rgmii_level_shifter { + mux { + pins = "gpio16"; + function = "gpio"; + }; + config { + 
pins = "gpio16"; + drive-strength = <2>; + bias-pull-down; + output-low; + }; + }; +}; + +ðqos_hw { + pinctrl-names = "dev-emac-mdc", "dev-emac-mdio", + "dev-emac-rgmii_txd0_state", "dev-emac-rgmii_txd1_state", + "dev-emac-rgmii_txd2_state", "dev-emac-rgmii_txd3_state", + "dev-emac-rgmii_txc_state", "dev-emac-rgmii_tx_ctl_state", + "dev-emac-rgmii_rxd0_state", "dev-emac-rgmii_rxd1_state", + "dev-emac-rgmii_rxd2_state", "dev-emac-rgmii_rxd3_state", + "dev-emac-rgmii_rxc_state", "dev-emac-rgmii_rx_ctl_state", + "dev-emac-phy_intr", "dev-emac-phy_reset_state", + "dev-emac-rgmii_lvl_shift_state"; + pinctrl-16 = <&rgmii_level_shifter>; +}; From a78753197b7cfd67d4c929075d55504317fcf637 Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Sat, 15 Aug 2020 20:59:30 +0530 Subject: [PATCH 053/141] Use correct endianness for encryption keys From ICE 3.0 onwards the keys are used in little endian format but legacy ICE driver in trustzone reverses the endianness. So reverse the endianness of keys before passing it to trusted ICE driver. 
Change-Id: I189680c588d31fd2549d08094208e55dce45ffbf Signed-off-by: Neeraj Soni --- drivers/md/dm-default-key.c | 18 ++++++++++++++++-- drivers/soc/qcom/crypto-qti-tz.c | 13 ++++++++++++- fs/crypto/keysetup_v1.c | 12 +++++++++++- 3 files changed, 39 insertions(+), 4 deletions(-) diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c index 4b47f25a257e..c1fe775ef9d5 100644 --- a/drivers/md/dm-default-key.c +++ b/drivers/md/dm-default-key.c @@ -136,13 +136,26 @@ static int default_key_ctr_optional(struct dm_target *ti, } void default_key_adjust_sector_size_and_iv(char **argv, struct dm_target *ti, - struct default_key_c **dkc) + struct default_key_c **dkc, u8 *raw, + u32 size) { struct dm_dev *dev; + int i; + union { + u8 bytes[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE]; + u32 words[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE / sizeof(u32)]; + } key_new; dev = (*dkc)->dev; if (!strcmp(argv[0], "AES-256-XTS")) { + memcpy(key_new.bytes, raw, size); + + for (i = 0; i < ARRAY_SIZE(key_new.words); i++) + __cpu_to_be32s(&key_new.words[i]); + + memcpy(raw, key_new.bytes, size); + if (ti->len & (((*dkc)->sector_size >> SECTOR_SHIFT) - 1)) (*dkc)->sector_size = SECTOR_SIZE; @@ -242,7 +255,8 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } - default_key_adjust_sector_size_and_iv(argv, ti, &dkc); + default_key_adjust_sector_size_and_iv(argv, ti, &dkc, raw_key, + raw_key_size); dkc->sector_bits = ilog2(dkc->sector_size); if (ti->len & ((dkc->sector_size >> SECTOR_SHIFT) - 1)) { diff --git a/drivers/soc/qcom/crypto-qti-tz.c b/drivers/soc/qcom/crypto-qti-tz.c index 154a08389274..0ebdd1a1c9f8 100644 --- a/drivers/soc/qcom/crypto-qti-tz.c +++ b/drivers/soc/qcom/crypto-qti-tz.c @@ -35,10 +35,21 @@ int crypto_qti_program_key(struct crypto_vops_qti_entry *ice_entry, uint32_t smc_id = 0; char *tzbuf = NULL; struct scm_desc desc = {0}; + int i; + union { + u8 bytes[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE]; + u32 words[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE / 
sizeof(u32)]; + } key_new; tzbuf = ice_buffer; - memcpy(tzbuf, key->raw, key->size); + memcpy(key_new.bytes, key->raw, key->size); + if (!key->is_hw_wrapped) { + for (i = 0; i < ARRAY_SIZE(key_new.words); i++) + __cpu_to_be32s(&key_new.words[i]); + } + + memcpy(tzbuf, key_new.bytes, key->size); dmac_flush_range(tzbuf, tzbuf + key->size); smc_id = TZ_ES_CONFIG_SET_ICE_KEY_ID; diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index ac549eeb1444..38e54313653d 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "fscrypt_private.h" @@ -268,14 +269,23 @@ static int setup_v1_file_key_derived(struct fscrypt_info *ci, { u8 *derived_key; int err; + int i; + union { + u8 bytes[FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE]; + u32 words[FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE / sizeof(u32)]; + } key_new; /*Support legacy ice based content encryption mode*/ if ((fscrypt_policy_contents_mode(&ci->ci_policy) == FSCRYPT_MODE_PRIVATE) && fscrypt_using_inline_encryption(ci)) { + memcpy(key_new.bytes, raw_master_key, ci->ci_mode->keysize); + + for (i = 0; i < ARRAY_SIZE(key_new.words); i++) + __cpu_to_be32s(&key_new.words[i]); err = fscrypt_prepare_inline_crypt_key(&ci->ci_key, - raw_master_key, + key_new.bytes, ci->ci_mode->keysize, false, ci); From 096142a231e67c5266db5f0774766535bf5fc7e7 Mon Sep 17 00:00:00 2001 From: Wanteng Zhang Date: Thu, 13 Aug 2020 12:34:11 +0800 Subject: [PATCH 054/141] ARM: dts: msm: Add multiple dri device nodes for sa8195 lxc gvm Multiple device nodes of dri card will be created for sa8195 lxc gvm Change-Id: I07e45c04867fa1c8ae371247b56cefdfd9694c09 Signed-off-by: Wanteng Zhang --- arch/arm64/boot/dts/qcom/sa8195-vm-lv-lxc.dts | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sa8195-vm-lv-lxc.dts b/arch/arm64/boot/dts/qcom/sa8195-vm-lv-lxc.dts index 99342867dcbb..fc7e93ba2874 100644 --- a/arch/arm64/boot/dts/qcom/sa8195-vm-lv-lxc.dts +++ 
b/arch/arm64/boot/dts/qcom/sa8195-vm-lv-lxc.dts @@ -21,3 +21,20 @@ qcom,pmic-name = "PM8195"; qcom,board-id = <0x1000002 0>; }; + +&soc { + sde_kms_hyp1: qcom,sde_kms_hyp@ae10000 { + compatible = "qcom,sde-kms-hyp"; + qcom,client-id = "7815"; + }; + + sde_kms_hyp2: qcom,sde_kms_hyp@ae20000 { + compatible = "qcom,sde-kms-hyp"; + qcom,client-id = "7818"; + }; + + sde_kms_hyp3: qcom,sde_kms_hyp@ae30000 { + compatible = "qcom,sde-kms-hyp"; + qcom,client-id = "7819"; + }; +}; From d34b4787bd5c718c6fb0809a124bc9c406d7597e Mon Sep 17 00:00:00 2001 From: Puranam V G Tejaswi Date: Thu, 6 Aug 2020 23:35:06 +0530 Subject: [PATCH 055/141] msm: kgsl: Correctly clean up dma buffer attachment in case of error In kgsl_ioctl_gpuobj_import(), user memory of type KGSL_USER_MEM_TYPE_ADDR can also lead to setting up a dma buffer. When attaching mem entry to process fails, dma buffer attachment is cleaned up only in case of KGSL_USER_MEM_TYPE_DMABUF. Similar situation can arise in case of kgsl_ioctl_map_user_mem(). Fix this by obtaining user memory type from the memdesc flags. 
Change-Id: I502bd0ae19241802e8f835f20391b2ce67999418 Signed-off-by: Puranam V G Tejaswi --- drivers/gpu/msm/kgsl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 5ddf8cd3774b..0efbbdb40709 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -2683,7 +2683,7 @@ long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv, return 0; unmap: - if (param->type == KGSL_USER_MEM_TYPE_DMABUF) { + if (kgsl_memdesc_usermem_type(&entry->memdesc) == KGSL_MEM_ENTRY_ION) { kgsl_destroy_ion(entry->priv_data); entry->memdesc.sgt = NULL; } @@ -2997,7 +2997,7 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv, return result; error_attach: - switch (memtype) { + switch (kgsl_memdesc_usermem_type(&entry->memdesc)) { case KGSL_MEM_ENTRY_ION: kgsl_destroy_ion(entry->priv_data); entry->memdesc.sgt = NULL; From 2251f3cadd5454f8b1722ab24ecbed8bf4879888 Mon Sep 17 00:00:00 2001 From: Ajay Agarwal Date: Mon, 17 Aug 2020 12:41:37 +0530 Subject: [PATCH 056/141] ARM: dts: msm: Add vbus_detect as USB extcon for Telematics AU MTP Add vbus_detect as USB extcon phandle to enable USB detection. 
Change-Id: I7379832b52a7b73154e8e991aff4c245e4f6e924 Signed-off-by: Ajay Agarwal --- arch/arm64/boot/dts/qcom/sdxprairie-v2-mtp-au-dsda.dts | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-v2-mtp-au-dsda.dts b/arch/arm64/boot/dts/qcom/sdxprairie-v2-mtp-au-dsda.dts index b81e0dd144ed..8ed717eb02d4 100644 --- a/arch/arm64/boot/dts/qcom/sdxprairie-v2-mtp-au-dsda.dts +++ b/arch/arm64/boot/dts/qcom/sdxprairie-v2-mtp-au-dsda.dts @@ -21,4 +21,10 @@ qcom,board-id = <0x08010008 0x0>; }; +&usb { + extcon = <&vbus_detect>; +}; +&vbus_detect { + status = "okay"; +}; From 0bae5c190df7a00dcfab788e074ee49876f0b0eb Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 6 May 2020 14:15:06 -0700 Subject: [PATCH 057/141] ANDROID: block: backport the ability to specify max_dun_bytes Backport a fix from the v7 inline crypto patchset which ensures that the block layer knows the number of DUN bytes the inline encryption hardware supports, so that hardware isn't used when it shouldn't be. (This unfortunately means introducing some increasing long argument lists; this was all already fixed up in later versions of the patchset.) To avoid breaking the KMI for drivers, don't add a dun_bytes argument to keyslot_manager_create() but rather allow drivers to call keyslot_manager_set_max_dun_bytes() to override the default. Also, don't add dun_bytes as a new field in 'struct blk_crypto_key' but rather pack it into the existing 'hash' field which is for block layer use. 
Bug: 144046242 Bug: 153512828 Change-Id: I285f36557fb3eafc5f2f64727ef1740938b59dd7 Signed-off-by: Eric Biggers Git-commit: 72091967bfbbc37fceb1a3208457ba26633606ae Git-repo: https://android.googlesource.com/kernel/common/+/refs/heads/android-4.14-stable [neersoni@codeaurora.org: back port the changes and update ufshcd-crypto-qti.c file to specify max dun byte support] Signed-off-by: Neeraj Soni --- block/blk-crypto.c | 22 +++++++++++++++++----- block/keyslot-manager.c | 24 ++++++++++++++++++++++-- drivers/md/dm-default-key.c | 6 +++--- drivers/scsi/ufs/ufshcd-crypto-qti.c | 2 ++ drivers/scsi/ufs/ufshcd-crypto.c | 1 + fs/crypto/inline_crypt.c | 24 ++++++++++++++++++++++-- include/linux/bio-crypt-ctx.h | 28 ++++++++++++++++++++++++++++ include/linux/blk-crypto.h | 2 ++ include/linux/keyslot-manager.h | 4 ++++ 9 files changed, 101 insertions(+), 12 deletions(-) diff --git a/block/blk-crypto.c b/block/blk-crypto.c index f56bbec1132f..e07a37cf8b5f 100644 --- a/block/blk-crypto.c +++ b/block/blk-crypto.c @@ -108,9 +108,10 @@ int blk_crypto_submit_bio(struct bio **bio_ptr) /* Get device keyslot if supported */ if (keyslot_manager_crypto_mode_supported(q->ksm, - bc->bc_key->crypto_mode, - bc->bc_key->data_unit_size, - bc->bc_key->is_hw_wrapped)) { + bc->bc_key->crypto_mode, + blk_crypto_key_dun_bytes(bc->bc_key), + bc->bc_key->data_unit_size, + bc->bc_key->is_hw_wrapped)) { err = bio_crypt_ctx_acquire_keyslot(bc, q->ksm); if (!err) return 0; @@ -180,6 +181,8 @@ bool blk_crypto_endio(struct bio *bio) * @is_hw_wrapped has to be set for such keys) * @is_hw_wrapped: Denotes @raw_key is wrapped. * @crypto_mode: identifier for the encryption algorithm to use + * @dun_bytes: number of bytes that will be used to specify the DUN when this + * key is used * @data_unit_size: the data unit size to use for en/decryption * * Return: The blk_crypto_key that was prepared, or an ERR_PTR() on error. 
When @@ -189,10 +192,12 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key, unsigned int raw_key_size, bool is_hw_wrapped, enum blk_crypto_mode_num crypto_mode, + unsigned int dun_bytes, unsigned int data_unit_size) { const struct blk_crypto_mode *mode; static siphash_key_t hash_key; + u32 hash; memset(blk_key, 0, sizeof(*blk_key)); @@ -211,6 +216,9 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, return -EINVAL; } + if (dun_bytes <= 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE) + return -EINVAL; + if (!is_power_of_2(data_unit_size)) return -EINVAL; @@ -227,7 +235,8 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, * precomputed here so that it only needs to be computed once per key. */ get_random_once(&hash_key, sizeof(hash_key)); - blk_key->hash = siphash(raw_key, raw_key_size, &hash_key); + hash = (u32)siphash(raw_key, raw_key_size, &hash_key); + blk_crypto_key_set_hash_and_dun_bytes(blk_key, hash, dun_bytes); return 0; } @@ -236,6 +245,7 @@ EXPORT_SYMBOL_GPL(blk_crypto_init_key); /** * blk_crypto_start_using_mode() - Start using blk-crypto on a device * @crypto_mode: the crypto mode that will be used + * @dun_bytes: number of bytes that will be used to specify the DUN * @data_unit_size: the data unit size that will be used * @is_hw_wrapped_key: whether the key will be hardware-wrapped * @q: the request queue for the device @@ -249,12 +259,13 @@ EXPORT_SYMBOL_GPL(blk_crypto_init_key); * algorithm is disabled in the crypto API; or another -errno code. 
*/ int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode, + unsigned int dun_bytes, unsigned int data_unit_size, bool is_hw_wrapped_key, struct request_queue *q) { if (keyslot_manager_crypto_mode_supported(q->ksm, crypto_mode, - data_unit_size, + dun_bytes, data_unit_size, is_hw_wrapped_key)) return 0; if (is_hw_wrapped_key) { @@ -285,6 +296,7 @@ int blk_crypto_evict_key(struct request_queue *q, { if (q->ksm && keyslot_manager_crypto_mode_supported(q->ksm, key->crypto_mode, + blk_crypto_key_dun_bytes(key), key->data_unit_size, key->is_hw_wrapped)) return keyslot_manager_evict_key(q->ksm, key); diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c index 1999c503b954..13d34b857625 100644 --- a/block/keyslot-manager.c +++ b/block/keyslot-manager.c @@ -45,6 +45,7 @@ struct keyslot_manager { struct keyslot_mgmt_ll_ops ksm_ll_ops; unsigned int features; unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX]; + unsigned int max_dun_bytes_supported; void *ll_priv_data; /* Protects programming and evicting keys from the device */ @@ -123,6 +124,7 @@ struct keyslot_manager *keyslot_manager_create(unsigned int num_slots, ksm->features = features; memcpy(ksm->crypto_mode_supported, crypto_mode_supported, sizeof(ksm->crypto_mode_supported)); + ksm->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE; ksm->ll_priv_data = ll_priv_data; init_rwsem(&ksm->lock); @@ -154,11 +156,19 @@ err_free_ksm: } EXPORT_SYMBOL_GPL(keyslot_manager_create); +void keyslot_manager_set_max_dun_bytes(struct keyslot_manager *ksm, + unsigned int max_dun_bytes) +{ + ksm->max_dun_bytes_supported = max_dun_bytes; +} +EXPORT_SYMBOL_GPL(keyslot_manager_set_max_dun_bytes); + static inline struct hlist_head * hash_bucket_for_key(struct keyslot_manager *ksm, const struct blk_crypto_key *key) { - return &ksm->slot_hashtable[key->hash & (ksm->slot_hashtable_size - 1)]; + return &ksm->slot_hashtable[blk_crypto_key_hash(key) & + (ksm->slot_hashtable_size - 1)]; } static void 
remove_slot_from_lru_list(struct keyslot_manager *ksm, int slot) @@ -331,6 +341,7 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot) * combination is supported by a ksm. * @ksm: The keyslot manager to check * @crypto_mode: The crypto mode to check for. + * @dun_bytes: The number of bytes that will be used to specify the DUN * @data_unit_size: The data_unit_size for the mode. * @is_hw_wrapped_key: Whether a hardware-wrapped key will be used. * @@ -342,6 +353,7 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot) */ bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, enum blk_crypto_mode_num crypto_mode, + unsigned int dun_bytes, unsigned int data_unit_size, bool is_hw_wrapped_key) { @@ -358,7 +370,10 @@ bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, if (!(ksm->features & BLK_CRYPTO_FEATURE_STANDARD_KEYS)) return false; } - return ksm->crypto_mode_supported[crypto_mode] & data_unit_size; + if (!(ksm->crypto_mode_supported[crypto_mode] & data_unit_size)) + return false; + + return ksm->max_dun_bytes_supported >= dun_bytes; } /** @@ -501,6 +516,7 @@ struct keyslot_manager *keyslot_manager_create_passthrough( ksm->features = features; memcpy(ksm->crypto_mode_supported, crypto_mode_supported, sizeof(ksm->crypto_mode_supported)); + ksm->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE; ksm->ll_priv_data = ll_priv_data; init_rwsem(&ksm->lock); @@ -527,12 +543,16 @@ void keyslot_manager_intersect_modes(struct keyslot_manager *parent, unsigned int i; parent->features &= child->features; + parent->max_dun_bytes_supported = + min(parent->max_dun_bytes_supported, + child->max_dun_bytes_supported); for (i = 0; i < ARRAY_SIZE(child->crypto_mode_supported); i++) { parent->crypto_mode_supported[i] &= child->crypto_mode_supported[i]; } } else { parent->features = 0; + parent->max_dun_bytes_supported = 0; memset(parent->crypto_mode_supported, 0, 
sizeof(parent->crypto_mode_supported)); } diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c index 4b47f25a257e..85bc78d4d81c 100644 --- a/drivers/md/dm-default-key.c +++ b/drivers/md/dm-default-key.c @@ -253,14 +253,14 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) err = blk_crypto_init_key(&dkc->key, raw_key, cipher->key_size, dkc->is_hw_wrapped, cipher->mode_num, - dkc->sector_size); + sizeof(u64), dkc->sector_size); if (err) { ti->error = "Error initializing blk-crypto key"; goto bad; } - err = blk_crypto_start_using_mode(cipher->mode_num, dkc->sector_size, - dkc->is_hw_wrapped, + err = blk_crypto_start_using_mode(cipher->mode_num, sizeof(u64), + dkc->sector_size, dkc->is_hw_wrapped, dkc->dev->bdev->bd_queue); if (err) { ti->error = "Error starting to use blk-crypto"; diff --git a/drivers/scsi/ufs/ufshcd-crypto-qti.c b/drivers/scsi/ufs/ufshcd-crypto-qti.c index f06f2899dcac..cfae1e5dede2 100644 --- a/drivers/scsi/ufs/ufshcd-crypto-qti.c +++ b/drivers/scsi/ufs/ufshcd-crypto-qti.c @@ -245,6 +245,8 @@ static int ufshcd_hba_init_crypto_qti_spec(struct ufs_hba *hba, err = -ENOMEM; goto out; } + keyslot_manager_set_max_dun_bytes(hba->ksm, sizeof(u64)); + pr_debug("%s: keyslot manager created\n", __func__); return 0; diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c index 4fb86fbf097e..240745526135 100644 --- a/drivers/scsi/ufs/ufshcd-crypto.c +++ b/drivers/scsi/ufs/ufshcd-crypto.c @@ -346,6 +346,7 @@ int ufshcd_hba_init_crypto_spec(struct ufs_hba *hba, err = -ENOMEM; goto out_free_caps; } + keyslot_manager_set_max_dun_bytes(hba->ksm, sizeof(u64)); return 0; diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c index e1bbaeff1c43..976617112d52 100644 --- a/fs/crypto/inline_crypt.c +++ b/fs/crypto/inline_crypt.c @@ -42,6 +42,17 @@ static void fscrypt_get_devices(struct super_block *sb, int num_devs, sb->s_cop->get_devices(sb, devs); } +static unsigned int 
fscrypt_get_dun_bytes(const struct fscrypt_info *ci) +{ + unsigned int dun_bytes = 8; + + if (fscrypt_policy_flags(&ci->ci_policy) & + FSCRYPT_POLICY_FLAG_DIRECT_KEY) + dun_bytes += FS_KEY_DERIVATION_NONCE_SIZE; + + return dun_bytes; +} + /* Enable inline encryption for this file if supported. */ int fscrypt_select_encryption_impl(struct fscrypt_info *ci, bool is_hw_wrapped_key) @@ -49,6 +60,7 @@ int fscrypt_select_encryption_impl(struct fscrypt_info *ci, const struct inode *inode = ci->ci_inode; struct super_block *sb = inode->i_sb; enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; + unsigned int dun_bytes; struct request_queue **devs; int num_devs; int i; @@ -84,9 +96,12 @@ int fscrypt_select_encryption_impl(struct fscrypt_info *ci, fscrypt_get_devices(sb, num_devs, devs); + dun_bytes = fscrypt_get_dun_bytes(ci); + for (i = 0; i < num_devs; i++) { if (!keyslot_manager_crypto_mode_supported(devs[i]->ksm, crypto_mode, + dun_bytes, sb->s_blocksize, is_hw_wrapped_key)) goto out_free_devs; @@ -107,6 +122,7 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, const struct inode *inode = ci->ci_inode; struct super_block *sb = inode->i_sb; enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; + unsigned int dun_bytes; int num_devs; int queue_refs = 0; struct fscrypt_blk_crypto_key *blk_key; @@ -124,11 +140,14 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, blk_key->num_devs = num_devs; fscrypt_get_devices(sb, num_devs, blk_key->devs); + dun_bytes = fscrypt_get_dun_bytes(ci); + BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE); err = blk_crypto_init_key(&blk_key->base, raw_key, raw_key_size, - is_hw_wrapped, crypto_mode, sb->s_blocksize); + is_hw_wrapped, crypto_mode, dun_bytes, + sb->s_blocksize); if (err) { fscrypt_err(inode, "error %d initializing blk-crypto key", err); goto fail; @@ -149,7 +168,8 @@ int fscrypt_prepare_inline_crypt_key(struct 
fscrypt_prepared_key *prep_key, } queue_refs++; - err = blk_crypto_start_using_mode(crypto_mode, sb->s_blocksize, + err = blk_crypto_start_using_mode(crypto_mode, dun_bytes, + sb->s_blocksize, is_hw_wrapped, blk_key->devs[i]); if (err) { diff --git a/include/linux/bio-crypt-ctx.h b/include/linux/bio-crypt-ctx.h index d10c5ad5e07e..45d331bcc2e4 100644 --- a/include/linux/bio-crypt-ctx.h +++ b/include/linux/bio-crypt-ctx.h @@ -45,7 +45,15 @@ struct blk_crypto_key { unsigned int data_unit_size; unsigned int data_unit_size_bits; unsigned int size; + + /* + * Hack to avoid breaking KMI: pack both hash and dun_bytes into the + * hash field... + */ +#define BLK_CRYPTO_KEY_HASH_MASK 0xffffff +#define BLK_CRYPTO_KEY_DUN_BYTES_SHIFT 24 unsigned int hash; + bool is_hw_wrapped; u8 raw[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE]; }; @@ -53,6 +61,26 @@ struct blk_crypto_key { #define BLK_CRYPTO_MAX_IV_SIZE 32 #define BLK_CRYPTO_DUN_ARRAY_SIZE (BLK_CRYPTO_MAX_IV_SIZE/sizeof(u64)) +static inline void +blk_crypto_key_set_hash_and_dun_bytes(struct blk_crypto_key *key, + u32 hash, unsigned int dun_bytes) +{ + key->hash = (dun_bytes << BLK_CRYPTO_KEY_DUN_BYTES_SHIFT) | + (hash & BLK_CRYPTO_KEY_HASH_MASK); +} + +static inline u32 +blk_crypto_key_hash(const struct blk_crypto_key *key) +{ + return key->hash & BLK_CRYPTO_KEY_HASH_MASK; +} + +static inline unsigned int +blk_crypto_key_dun_bytes(const struct blk_crypto_key *key) +{ + return key->hash >> BLK_CRYPTO_KEY_DUN_BYTES_SHIFT; +} + /** * struct bio_crypt_ctx - an inline encryption context * @bc_key: the key, algorithm, and data unit size to use diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h index 7dc478a8c3ed..6062002555e1 100644 --- a/include/linux/blk-crypto.h +++ b/include/linux/blk-crypto.h @@ -20,9 +20,11 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key, unsigned int raw_key_size, bool is_hw_wrapped, enum blk_crypto_mode_num crypto_mode, + unsigned int dun_bytes, unsigned int 
data_unit_size); int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode, + unsigned int dun_bytes, unsigned int data_unit_size, bool is_hw_wrapped_key, struct request_queue *q); diff --git a/include/linux/keyslot-manager.h b/include/linux/keyslot-manager.h index f022bd6d2497..57000863beb7 100644 --- a/include/linux/keyslot-manager.h +++ b/include/linux/keyslot-manager.h @@ -56,6 +56,9 @@ struct keyslot_manager *keyslot_manager_create(unsigned int num_slots, const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], void *ll_priv_data); +void keyslot_manager_set_max_dun_bytes(struct keyslot_manager *ksm, + unsigned int max_dun_bytes); + int keyslot_manager_get_slot_for_key(struct keyslot_manager *ksm, const struct blk_crypto_key *key); @@ -65,6 +68,7 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot); bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, enum blk_crypto_mode_num crypto_mode, + unsigned int dun_bytes, unsigned int data_unit_size, bool is_hw_wrapped_key); From 9922845d60cbc1b1b8829af84fbf4a63a3879c74 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 6 May 2020 14:15:07 -0700 Subject: [PATCH 058/141] ANDROID: dm-default-key: set dun_bytes more precisely Make dm-default-key set dun_bytes to only what it actually needs, so that it can make use of inline crypto hardware in more cases. 
Bug: 144046242 Bug: 153512828 Change-Id: I338e6444e71be9c7c16ce70172d14a8e05301023 Signed-off-by: Eric Biggers Git-commit: 8711e464f7dfcd3c18cbbbfa95127bc8868e0430 Git-repo: https://android.googlesource.com/kernel/common/+/refs/heads/android-4.14-stable [neersoni@codeaurora.org: back port the changes and fix merge conflicts in dm-default-key.c file] Signed-off-by: Neeraj Soni --- drivers/md/dm-default-key.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c index 85bc78d4d81c..f88c5e2e0331 100644 --- a/drivers/md/dm-default-key.c +++ b/drivers/md/dm-default-key.c @@ -40,6 +40,7 @@ static const struct dm_default_key_cipher { * @sector_size: crypto sector size in bytes (usually 4096) * @sector_bits: log2(sector_size) * @key: the encryption key to use + * @max_dun: the maximum DUN that may be used (computed from other params) */ struct default_key_c { struct dm_dev *dev; @@ -50,6 +51,7 @@ struct default_key_c { unsigned int sector_bits; struct blk_crypto_key key; bool is_hw_wrapped; + u64 max_dun; }; static const struct dm_default_key_cipher * @@ -165,6 +167,7 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) const struct dm_default_key_cipher *cipher; u8 raw_key[DM_DEFAULT_KEY_MAX_WRAPPED_KEY_SIZE]; unsigned int raw_key_size; + unsigned int dun_bytes; unsigned long long tmpll; char dummy; int err; @@ -251,15 +254,19 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } - err = blk_crypto_init_key(&dkc->key, raw_key, cipher->key_size, + dkc->max_dun = (dkc->iv_offset + ti->len - 1) >> + (dkc->sector_bits - SECTOR_SHIFT); + dun_bytes = DIV_ROUND_UP(fls64(dkc->max_dun), 8); + + err = blk_crypto_init_key(&dkc->key, raw_key, raw_key_size, dkc->is_hw_wrapped, cipher->mode_num, - sizeof(u64), dkc->sector_size); + dun_bytes, dkc->sector_size); if (err) { ti->error = "Error initializing blk-crypto key"; goto 
bad; } - err = blk_crypto_start_using_mode(cipher->mode_num, sizeof(u64), + err = blk_crypto_start_using_mode(cipher->mode_num, dun_bytes, dkc->sector_size, dkc->is_hw_wrapped, dkc->dev->bdev->bd_queue); if (err) { @@ -321,6 +328,13 @@ static int default_key_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_KILL; dun[0] >>= dkc->sector_bits - SECTOR_SHIFT; /* crypto sectors */ + /* + * This check isn't necessary as we should have calculated max_dun + * correctly, but be safe. + */ + if (WARN_ON_ONCE(dun[0] > dkc->max_dun)) + return DM_MAPIO_KILL; + bio_crypt_set_ctx(bio, &dkc->key, dun, GFP_NOIO); return DM_MAPIO_REMAPPED; From 9baaaa3e707b6751ee11338192404ed53b4a3b9d Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 6 May 2020 14:15:07 -0700 Subject: [PATCH 059/141] ANDROID: fscrypt: set dun_bytes more precisely Make fscrypt set dun_bytes to only what it actually needs, so that it can make use of inline crypto hardware in more cases. Bug: 144046242 Bug: 153512828 Change-Id: I36f90ea6b64ef51a9d58ffb069d2cba74965c239 Signed-off-by: Eric Biggers Git-commit: 6be68d89b4d524dde1476be8f895a69cc08237ee Git-repo: https://android.googlesource.com/kernel/common/+/refs/heads/android-4.14-stable [neersoni@codeaurora.org: back port the changes and fix merge conflicts in inline-crypt.c file] Signed-off-by: Neeraj Soni --- fs/crypto/inline_crypt.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c index 976617112d52..f96e2972a003 100644 --- a/fs/crypto/inline_crypt.c +++ b/fs/crypto/inline_crypt.c @@ -44,13 +44,20 @@ static void fscrypt_get_devices(struct super_block *sb, int num_devs, static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci) { - unsigned int dun_bytes = 8; + struct super_block *sb = ci->ci_inode->i_sb; + unsigned int flags = fscrypt_policy_flags(&ci->ci_policy); + int ino_bits = 64, lblk_bits = 64; - if (fscrypt_policy_flags(&ci->ci_policy) & - 
FSCRYPT_POLICY_FLAG_DIRECT_KEY) - dun_bytes += FS_KEY_DERIVATION_NONCE_SIZE; + if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) + return offsetofend(union fscrypt_iv, nonce); - return dun_bytes; + if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) + return sizeof(__le64); + + /* Default case: IVs are just the file logical block number */ + if (sb->s_cop->get_ino_and_lblk_bits) + sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits); + return DIV_ROUND_UP(lblk_bits, 8); } /* Enable inline encryption for this file if supported. */ From fb8bfe480c57ef95d9588c1bb5027bcf7867d944 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 15 May 2020 13:41:41 -0700 Subject: [PATCH 060/141] BACKPORT: FROMLIST: fscrypt: add support for IV_INO_LBLK_32 policies The eMMC inline crypto standard will only specify 32 DUN bits (a.k.a. IV bits), unlike UFS's 64. IV_INO_LBLK_64 is therefore not applicable, but an encryption format which uses one key per policy and permits the moving of encrypted file contents (as f2fs's garbage collector requires) is still desirable. To support such hardware, add a new encryption format IV_INO_LBLK_32 that makes the best use of the 32 bits: the IV is set to 'SipHash-2-4(inode_number) + file_logical_block_number mod 2^32', where the SipHash key is derived from the fscrypt master key. We hash only the inode number and not also the block number, because we need to maintain contiguity of DUNs to merge bios. Unlike with IV_INO_LBLK_64, with this format IV reuse is possible; this is unavoidable given the size of the DUN. This means this format should only be used where the requirements of the first paragraph apply. However, the hash spreads out the IVs in the whole usable range, and the use of a keyed hash makes it difficult for an attacker to determine which files use which IVs. 
Besides the above differences, this flag works like IV_INO_LBLK_64 in that on ext4 it is only allowed if the stable_inodes feature has been enabled to prevent inode numbers and the filesystem UUID from changing. Signed-off-by: Eric Biggers Link: https://lore.kernel.org/r/20200515204141.251098-1-ebiggers@kernel.org (Resolved conflicts with inline encryption support. Besides the necessary "straightforward" merge resolutions, also made fscrypt_get_dun_bytes() aware of IV_INO_LBLK_32 and made IV_INO_LBLK_32 usable with wrapped keys.) Test: 'atest vts_kernel_encryption_test' on Cuttlefish with the IV_INO_LBLK_32 test added (http://aosp/1315024). Also tested enabling this in the fstab for Cuttlefish (using http://aosp/1315886). Also ran 'kvm-xfstests -c ext4,f2fs -g encrypt', including my work-in-progress xfstest for IV_INO_LBLK_32. Bug: 144046242 Change-Id: I57df71d502bde0475efc906a0812102063ff2f2a Signed-off-by: Eric Biggers Git-commit: a52238353e6711ae8ef8f3b462cece3b05aead8f Git-repo: https://android.googlesource.com/kernel/common/+/refs/heads/android-4.14-stable [neersoni@codeaurora.org: back port the changes and fixed merge conflicts in fscrypt_privat.h and inline_crypt.c files] Signed-off-by: Neeraj Soni --- Documentation/filesystems/fscrypt.rst | 33 +++++++++++++++-- fs/crypto/crypto.c | 6 ++- fs/crypto/fscrypt_private.h | 18 +++++++-- fs/crypto/inline_crypt.c | 3 ++ fs/crypto/keyring.c | 1 + fs/crypto/keysetup.c | 53 ++++++++++++++++++++++++--- fs/crypto/policy.c | 51 +++++++++++++++++++------- include/uapi/linux/fscrypt.h | 3 +- 8 files changed, 139 insertions(+), 29 deletions(-) diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst index dc444b8d3704..7f9a372031fd 100644 --- a/Documentation/filesystems/fscrypt.rst +++ b/Documentation/filesystems/fscrypt.rst @@ -292,8 +292,22 @@ files' data differently, inode numbers are included in the IVs. Consequently, shrinking the filesystem may not be allowed. 
This format is optimized for use with inline encryption hardware -compliant with the UFS or eMMC standards, which support only 64 IV -bits per I/O request and may have only a small number of keyslots. +compliant with the UFS standard, which supports only 64 IV bits per +I/O request and may have only a small number of keyslots. + +IV_INO_LBLK_32 policies +----------------------- + +IV_INO_LBLK_32 policies work like IV_INO_LBLK_64, except that for +IV_INO_LBLK_32, the inode number is hashed with SipHash-2-4 (where the +SipHash key is derived from the master key) and added to the file +logical block number mod 2^32 to produce a 32-bit IV. + +This format is optimized for use with inline encryption hardware +compliant with the eMMC v5.2 standard, which supports only 32 IV bits +per I/O request and may have only a small number of keyslots. This +format results in some level of IV reuse, so it should only be used +when necessary due to hardware limitations. Key identifiers --------------- @@ -369,6 +383,10 @@ a little endian number, except that: to 32 bits and is placed in bits 0-31 of the IV. The inode number (which is also limited to 32 bits) is placed in bits 32-63. +- With `IV_INO_LBLK_32 policies`_, the logical block number is limited + to 32 bits and is placed in bits 0-31 of the IV. The inode number + is then hashed and added mod 2^32. + Note that because file logical block numbers are included in the IVs, filesystems must enforce that blocks are never shifted around within encrypted files, e.g. via "collapse range" or "insert range". @@ -465,8 +483,15 @@ This structure must be initialized as follows: (0x3). - FSCRYPT_POLICY_FLAG_DIRECT_KEY: See `DIRECT_KEY policies`_. - FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64: See `IV_INO_LBLK_64 - policies`_. This is mutually exclusive with DIRECT_KEY and is not - supported on v1 policies. + policies`_. + - FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32: See `IV_INO_LBLK_32 + policies`_. 
+ + v1 encryption policies only support the PAD_* and DIRECT_KEY flags. + The other flags are only supported by v2 encryption policies. + + The DIRECT_KEY, IV_INO_LBLK_64, and IV_INO_LBLK_32 flags are + mutually exclusive. - For v2 encryption policies, ``__reserved`` must be zeroed. diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index cc8e334165f5..8f3dd023ddff 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -82,8 +82,12 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 || ((fscrypt_policy_contents_mode(&ci->ci_policy) == FSCRYPT_MODE_PRIVATE) && inlinecrypt)) { - WARN_ON_ONCE((u32)lblk_num != lblk_num); + WARN_ON_ONCE(lblk_num > U32_MAX); + WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX); lblk_num |= (u64)ci->ci_inode->i_ino << 32; + } else if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) { + WARN_ON_ONCE(lblk_num > U32_MAX); + lblk_num = (u32)(ci->ci_hashed_ino + lblk_num); } else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE); } diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index ae03c7fc7e52..67bcdfa16095 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -245,7 +245,9 @@ struct fscrypt_info { /* This inode's nonce, copied from the fscrypt_context */ u8 ci_nonce[FS_KEY_DERIVATION_NONCE_SIZE]; - u8 ci_raw_key[FSCRYPT_MAX_KEY_SIZE]; + + /* Hashed inode number. 
Only set for IV_INO_LBLK_32 */ + u32 ci_hashed_ino; }; typedef enum { @@ -317,6 +319,8 @@ extern int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, #define HKDF_CONTEXT_DIRECT_KEY 3 #define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4 #define HKDF_CONTEXT_DIRHASH_KEY 5 +#define HKDF_CONTEXT_IV_INO_LBLK_32_KEY 6 +#define HKDF_CONTEXT_INODE_HASH_KEY 7 extern int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, const u8 *info, unsigned int infolen, @@ -513,11 +517,17 @@ struct fscrypt_master_key { struct list_head mk_decrypted_inodes; spinlock_t mk_decrypted_inodes_lock; - /* Per-mode keys for DIRECT_KEY policies, allocated on-demand */ + /* + * Per-mode encryption keys for the various types of encryption policies + * that use them. Allocated and derived on-demand. + */ struct fscrypt_prepared_key mk_direct_keys[__FSCRYPT_MODE_MAX + 1]; - - /* Per-mode keys for IV_INO_LBLK_64 policies, allocated on-demand */ struct fscrypt_prepared_key mk_iv_ino_lblk_64_keys[__FSCRYPT_MODE_MAX + 1]; + struct fscrypt_prepared_key mk_iv_ino_lblk_32_keys[__FSCRYPT_MODE_MAX + 1]; + + /* Hash key for inode numbers. Initialized only when needed. 
*/ + siphash_key_t mk_ino_hash_key; + bool mk_ino_hash_key_initialized; } __randomize_layout; diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c index f96e2972a003..a58c120d77f1 100644 --- a/fs/crypto/inline_crypt.c +++ b/fs/crypto/inline_crypt.c @@ -54,6 +54,9 @@ static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci) if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) return sizeof(__le64); + if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) + return sizeof(__le32); + /* Default case: IVs are just the file logical block number */ if (sb->s_cop->get_ino_and_lblk_bits) sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits); diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index 9257ea1102b1..fc9ea71b50f7 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -46,6 +46,7 @@ static void free_master_key(struct fscrypt_master_key *mk) for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) { fscrypt_destroy_prepared_key(&mk->mk_direct_keys[i]); fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_64_keys[i]); + fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_32_keys[i]); } key_put(mk->mk_users); diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index a3626425d633..4cac429e7adb 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -56,6 +56,8 @@ struct fscrypt_mode fscrypt_modes[] = { }, }; +static DEFINE_MUTEX(fscrypt_mode_key_setup_mutex); + static struct fscrypt_mode * select_encryption_mode(const union fscrypt_policy *policy, const struct inode *inode) @@ -186,7 +188,7 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci, return 0; } - mutex_lock(&mode_key_setup_mutex); + mutex_lock(&fscrypt_mode_key_setup_mutex); if (fscrypt_is_key_prepared(prep_key, ci)) goto done_unlock; @@ -237,7 +239,7 @@ done_unlock: ci->ci_key = *prep_key; err = 0; out_unlock: - mutex_unlock(&mode_key_setup_mutex); + mutex_unlock(&fscrypt_mode_key_setup_mutex); return err; } @@ -256,15 +258,53 @@ int fscrypt_derive_dirhash_key(struct 
fscrypt_info *ci, return 0; } +static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_info *ci, + struct fscrypt_master_key *mk) +{ + int err; + + err = setup_per_mode_enc_key(ci, mk, mk->mk_iv_ino_lblk_32_keys, + HKDF_CONTEXT_IV_INO_LBLK_32_KEY, true); + if (err) + return err; + + /* pairs with smp_store_release() below */ + if (!smp_load_acquire(&mk->mk_ino_hash_key_initialized)) { + + mutex_lock(&fscrypt_mode_key_setup_mutex); + + if (mk->mk_ino_hash_key_initialized) + goto unlock; + + err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, + HKDF_CONTEXT_INODE_HASH_KEY, NULL, 0, + (u8 *)&mk->mk_ino_hash_key, + sizeof(mk->mk_ino_hash_key)); + if (err) + goto unlock; + /* pairs with smp_load_acquire() above */ + smp_store_release(&mk->mk_ino_hash_key_initialized, true); +unlock: + mutex_unlock(&fscrypt_mode_key_setup_mutex); + if (err) + return err; + } + + ci->ci_hashed_ino = (u32)siphash_1u64(ci->ci_inode->i_ino, + &mk->mk_ino_hash_key); + return 0; +} + static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, struct fscrypt_master_key *mk) { int err; if (mk->mk_secret.is_hw_wrapped && - !(ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)) { + !(ci->ci_policy.v2.flags & (FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 | + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))) { fscrypt_warn(ci->ci_inode, - "Hardware-wrapped keys are only supported with IV_INO_LBLK_64 policies"); + "Hardware-wrapped keys are only supported with IV_INO_LBLK policies"); return -EINVAL; } @@ -285,11 +325,14 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, * IV_INO_LBLK_64: encryption keys are derived from (master_key, * mode_num, filesystem_uuid), and inode number is included in * the IVs. This format is optimized for use with inline - * encryption hardware compliant with the UFS or eMMC standards. + * encryption hardware compliant with the UFS standard. 
*/ err = setup_per_mode_enc_key(ci, mk, mk->mk_iv_ino_lblk_64_keys, HKDF_CONTEXT_IV_INO_LBLK_64_KEY, true); + } else if (ci->ci_policy.v2.flags & + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) { + err = fscrypt_setup_iv_ino_lblk_32_key(ci, mk); } else { u8 derived_key[FSCRYPT_MAX_KEY_SIZE]; diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 10ccf945020c..04d2f531a3a1 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -66,18 +66,14 @@ static bool supported_direct_key_modes(const struct inode *inode, return true; } -static bool supported_iv_ino_lblk_64_policy( - const struct fscrypt_policy_v2 *policy, - const struct inode *inode) +static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy, + const struct inode *inode, + const char *type, + int max_ino_bits, int max_lblk_bits) { struct super_block *sb = inode->i_sb; int ino_bits = 64, lblk_bits = 64; - if (policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { - fscrypt_warn(inode, - "The DIRECT_KEY and IV_INO_LBLK_64 flags are mutually exclusive"); - return false; - } /* * It's unsafe to include inode numbers in the IVs if the filesystem can * potentially renumber inodes, e.g. via filesystem shrinking. 
@@ -85,16 +81,22 @@ static bool supported_iv_ino_lblk_64_policy( if (!sb->s_cop->has_stable_inodes || !sb->s_cop->has_stable_inodes(sb)) { fscrypt_warn(inode, - "Can't use IV_INO_LBLK_64 policy on filesystem '%s' because it doesn't have stable inode numbers", - sb->s_id); + "Can't use %s policy on filesystem '%s' because it doesn't have stable inode numbers", + type, sb->s_id); return false; } if (sb->s_cop->get_ino_and_lblk_bits) sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits); - if (ino_bits > 32 || lblk_bits > 32) { + if (ino_bits > max_ino_bits) { fscrypt_warn(inode, - "Can't use IV_INO_LBLK_64 policy on filesystem '%s' because it doesn't use 32-bit inode and block numbers", - sb->s_id); + "Can't use %s policy on filesystem '%s' because its inode numbers are too long", + type, sb->s_id); + return false; + } + if (lblk_bits > max_lblk_bits) { + fscrypt_warn(inode, + "Can't use %s policy on filesystem '%s' because its block numbers are too long", + type, sb->s_id); return false; } return true; @@ -137,6 +139,8 @@ static bool fscrypt_supported_v1_policy(const struct fscrypt_policy_v1 *policy, static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy, const struct inode *inode) { + int count = 0; + if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, policy->filenames_encryption_mode)) { fscrypt_warn(inode, @@ -152,13 +156,29 @@ static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy, return false; } + count += !!(policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY); + count += !!(policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64); + count += !!(policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32); + if (count > 1) { + fscrypt_warn(inode, "Mutually exclusive encryption flags (0x%02x)", + policy->flags); + return false; + } + if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) && !supported_direct_key_modes(inode, policy->contents_encryption_mode, policy->filenames_encryption_mode)) return false; if 
((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) && - !supported_iv_ino_lblk_64_policy(policy, inode)) + !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_64", + 32, 32)) + return false; + + if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) && + /* This uses hashed inode numbers, so ino_bits doesn't matter. */ + !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_32", + INT_MAX, 32)) return false; if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) { @@ -354,6 +374,9 @@ static int set_encryption_policy(struct inode *inode, policy->v2.master_key_identifier); if (err) return err; + if (policy->v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) + pr_warn_once("%s (pid %d) is setting an IV_INO_LBLK_32 encryption policy. This should only be used if there are certain hardware limitations.\n", + current->comm, current->pid); break; default: WARN_ON(1); diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index b134bfc90912..b2b6ee53d578 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -19,7 +19,8 @@ #define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03 #define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04 #define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 0x08 -#define FSCRYPT_POLICY_FLAGS_VALID 0x0F +#define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32 0x10 +#define FSCRYPT_POLICY_FLAGS_VALID 0x1F /* Encryption algorithms */ #define FSCRYPT_MODE_AES_256_XTS 1 From 8700f864c231d92937be95bf19c56b012dfc7f32 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 19 May 2020 16:11:59 -0700 Subject: [PATCH 061/141] ANDROID: fscrypt: handle direct I/O with IV_INO_LBLK_32 With the existing fscrypt IV generation methods, each file's data blocks have contiguous DUNs. Therefore the direct I/O code "just worked" because it only submits logically contiguous bios. But with IV_INO_LBLK_32, the direct I/O code breaks because the DUN can wrap from 0xffffffff to 0. We can't submit bios across such boundaries. 
This is especially difficult to handle when block_size != PAGE_SIZE, since in that case the DUN can wrap in the middle of a page. Punt on this case for now and just handle block_size == PAGE_SIZE. Add and use a new function fscrypt_dio_supported() to check whether a direct I/O request is unsupported due to encryption constraints. Then, update fs/direct-io.c (used by f2fs, and by ext4 in kernel v5.4 and earlier) and fs/iomap/direct-io.c (used by ext4 in kernel v5.5 and later) to avoid submitting I/O across a DUN discontinuity. (This is needed in ACK now because ACK already supports direct I/O with inline crypto. I'll be sending this upstream along with the encrypted direct I/O support itself once its prerequisites are closer to landing.) (cherry picked from android-mainline commit 8d6c90c9d68b985fa809626d12f8c9aff3c9dcb1) Conflicts: fs/ext4/file.c fs/iomap/direct-io.c (Dropped the iomap changes because in kernel v5.4 and earlier, ext4 doesn't use iomap for direct I/O) Test: For now, just manually tested direct I/O on ext4 and f2fs in the DUN discontinuity case. 
Bug: 144046242 Change-Id: I0c0b0b20a73ade35c3660cc6f9c09d49d3853ba5 Signed-off-by: Eric Biggers Git-commit: 09075917fb5d01f326862e2eb73bf46c393c6ebb Git-repo: https://android.googlesource.com/kernel/common/+/refs/heads/android-4.14-stable [neersoni@codeaurora.org: back ported and fixed the merged conflicts in inline_crypt.c file] Signed-off-by: Neeraj Soni --- fs/crypto/crypto.c | 8 ++++ fs/crypto/inline_crypt.c | 82 ++++++++++++++++++++++++++++++++++++++++ fs/direct-io.c | 10 ++++- fs/ext4/inode.c | 9 ++--- fs/f2fs/f2fs.h | 8 +--- include/linux/fscrypt.h | 19 ++++++++++ 6 files changed, 123 insertions(+), 13 deletions(-) diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 8f3dd023ddff..02ab7b76d157 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -67,6 +67,14 @@ void fscrypt_free_bounce_page(struct page *bounce_page) } EXPORT_SYMBOL(fscrypt_free_bounce_page); +/* + * Generate the IV for the given logical block number within the given file. + * For filenames encryption, lblk_num == 0. + * + * Keep this in sync with fscrypt_limit_dio_pages(). fscrypt_limit_dio_pages() + * needs to know about any IV generation methods where the low bits of IV don't + * simply contain the lblk_num (e.g., IV_INO_LBLK_32). 
+ */ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, const struct fscrypt_info *ci) { diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c index a58c120d77f1..69c281a331e5 100644 --- a/fs/crypto/inline_crypt.c +++ b/fs/crypto/inline_crypt.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "fscrypt_private.h" @@ -429,3 +430,84 @@ bool fscrypt_mergeable_bio_bh(struct bio *bio, return fscrypt_mergeable_bio(bio, inode, next_lblk); } EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh); + +/** + * fscrypt_dio_supported() - check whether a direct I/O request is unsupported + * due to encryption constraints + * @iocb: the file and position the I/O is targeting + * @iter: the I/O data segment(s) + * + * Return: true if direct I/O is supported + */ +bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter) +{ + const struct inode *inode = file_inode(iocb->ki_filp); + const struct fscrypt_info *ci = inode->i_crypt_info; + const unsigned int blocksize = i_blocksize(inode); + + /* If the file is unencrypted, no veto from us. */ + if (!fscrypt_needs_contents_encryption(inode)) + return true; + + /* We only support direct I/O with inline crypto, not fs-layer crypto */ + if (!fscrypt_inode_uses_inline_crypto(inode)) + return false; + + /* + * Since the granularity of encryption is filesystem blocks, the I/O + * must be block aligned -- not just disk sector aligned. + */ + if (!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), blocksize)) + return false; + + /* + * With IV_INO_LBLK_32 and sub-page blocks, the DUN can wrap around in + * the middle of a page. This isn't handled by the direct I/O code yet. 
+ */ + if (blocksize != PAGE_SIZE && + (fscrypt_policy_flags(&ci->ci_policy) & + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) + return false; + + return true; +} +EXPORT_SYMBOL_GPL(fscrypt_dio_supported); + +/** + * fscrypt_limit_dio_pages() - limit I/O pages to avoid discontiguous DUNs + * @inode: the file on which I/O is being done + * @pos: the file position (in bytes) at which the I/O is being done + * @nr_pages: the number of pages we want to submit starting at @pos + * + * For direct I/O: limit the number of pages that will be submitted in the bio + * targeting @pos, in order to avoid crossing a data unit number (DUN) + * discontinuity. This is only needed for certain IV generation methods. + * + * This assumes block_size == PAGE_SIZE; see fscrypt_dio_supported(). + * + * Return: the actual number of pages that can be submitted + */ +int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos, int nr_pages) +{ + const struct fscrypt_info *ci = inode->i_crypt_info; + u32 dun; + + if (!fscrypt_inode_uses_inline_crypto(inode)) + return nr_pages; + + if (nr_pages <= 1) + return nr_pages; + + if (!(fscrypt_policy_flags(&ci->ci_policy) & + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) + return nr_pages; + + if (WARN_ON_ONCE(i_blocksize(inode) != PAGE_SIZE)) + return 1; + + /* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */ + + dun = ci->ci_hashed_ino + (pos >> inode->i_blkbits); + + return min_t(u64, nr_pages, (u64)U32_MAX + 1 - dun); +} diff --git a/fs/direct-io.c b/fs/direct-io.c index 729c59213d2e..094421f05fda 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -815,9 +815,17 @@ static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio, * current logical offset in the file does not equal what would * be the next logical offset in the bio, submit the bio we * have. + * + * When fscrypt inline encryption is used, data unit number + * (DUN) contiguity is also required. Normally that's implied + * by logical contiguity. 
However, certain IV generation + * methods (e.g. IV_INO_LBLK_32) don't guarantee it. So, we + * must explicitly check fscrypt_mergeable_bio() too. */ if (sdio->final_block_in_bio != sdio->cur_page_block || - cur_offset != bio_next_offset) + cur_offset != bio_next_offset || + !fscrypt_mergeable_bio(sdio->bio, dio->inode, + cur_offset >> dio->inode->i_blkbits)) dio_bio_submit(dio, sdio); } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 096e4cc053dc..fe84dd8a74bc 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3855,12 +3855,9 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter) ssize_t ret; int rw = iov_iter_rw(iter); - if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode)) { - if (!fscrypt_inode_uses_inline_crypto(inode) || - !IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), - i_blocksize(inode))) - return 0; - } + if (!fscrypt_dio_supported(iocb, iter)) + return 0; + if (fsverity_active(inode)) return 0; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index a3529e3e7286..f55818a8c263 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -4030,12 +4030,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int rw = iov_iter_rw(iter); - if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && f2fs_encrypted_file(inode)) { - if (!fscrypt_inode_uses_inline_crypto(inode) || - !IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), - F2FS_BLKSIZE)) - return true; - } + if (!fscrypt_dio_supported(iocb, iter)) + return true; if (fsverity_active(inode)) return true; if (f2fs_is_multi_device(sbi)) diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 9f791a4b4ad3..db99db6e9458 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -532,6 +532,11 @@ extern bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, extern bool fscrypt_mergeable_bio_bh(struct bio *bio, const struct buffer_head *next_bh); +bool fscrypt_dio_supported(struct kiocb *iocb, struct 
iov_iter *iter); + +int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos, + int nr_pages); + #else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ static inline bool fscrypt_inode_uses_inline_crypto(const struct inode *inode) { @@ -564,6 +569,20 @@ static inline bool fscrypt_mergeable_bio_bh(struct bio *bio, { return true; } + +static inline bool fscrypt_dio_supported(struct kiocb *iocb, + struct iov_iter *iter) +{ + const struct inode *inode = file_inode(iocb->ki_filp); + + return !fscrypt_needs_contents_encryption(inode); +} + +static inline int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos, + int nr_pages) +{ + return nr_pages; +} #endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ #if IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_DM_DEFAULT_KEY) From b3dc768908f4607b90e2db5c49d580261bd84ead Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Tue, 11 Aug 2020 22:55:15 +0530 Subject: [PATCH 062/141] mmc: host: Set the supported dun size for crypto Driver need to register the supported data unit number (dun) size with keyslot manager so that encryption request with unsupported dun size can fallback to fs or block crypto. Change-Id: If0b724a80049fb3544a6338345a55bf7d1084127 Signed-off-by: Neeraj Soni --- drivers/mmc/host/cmdq_hci-crypto-qti.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/mmc/host/cmdq_hci-crypto-qti.c b/drivers/mmc/host/cmdq_hci-crypto-qti.c index 9921a14c9cef..84718e8da7f3 100644 --- a/drivers/mmc/host/cmdq_hci-crypto-qti.c +++ b/drivers/mmc/host/cmdq_hci-crypto-qti.c @@ -231,6 +231,8 @@ int cmdq_host_init_crypto_qti_spec(struct cmdq_host *host, err = -ENOMEM; goto out; } + keyslot_manager_set_max_dun_bytes(host->ksm, sizeof(u32)); + /* * In case host controller supports cryptographic operations * then, it uses 128bit task descriptor. 
Upper 64 bits of task From 02522c7c1421f542625141e51ec691f47ec9e6a5 Mon Sep 17 00:00:00 2001 From: Sachin Prakash Gejji Date: Mon, 17 Aug 2020 13:38:15 +0530 Subject: [PATCH 063/141] Documentation: devicetree: net: Add doc for switch driver Add device tree documentation for ethernet switch driver needed for mac-to-mac communication. Change-Id: Ib68b0953f87c79660c37073c85663507e4b4c921 Signed-off-by: Sachin Prakash Gejji --- .../bindings/net/qcom,sja1105p-eth-switch.txt | 64 +++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/qcom,sja1105p-eth-switch.txt diff --git a/Documentation/devicetree/bindings/net/qcom,sja1105p-eth-switch.txt b/Documentation/devicetree/bindings/net/qcom,sja1105p-eth-switch.txt new file mode 100644 index 000000000000..701ba523fd59 --- /dev/null +++ b/Documentation/devicetree/bindings/net/qcom,sja1105p-eth-switch.txt @@ -0,0 +1,64 @@ +* NXP SJA1105P 10/100/1000 Ethernet Switch Driver + +Required properties: +### Properties of top level +- compatible: Should be "qcom,nxp,sja1105p-switch". +- reg: Should contain SPI chip select. +- spi-max-frequency: Should contain maximum spi clock frequency + for slave device. +- spi-cpha: SPI configuration to enable shift clock phase (CPHA) mode. +- switch-speed: Should contain switch ports speed 10/100/1000 Mbps. +- pinctrl-names : Names corresponding to the numbered pinctrl states +- pinctrl- : Pinctrl states as described in bindings/pinctrl/pinctrl-bindings.txt +- qcom,reset-gpio: Reference to the GPIO connected to the reset input. 
+ +### Properties of the port-X child node +- `null-phy`: Determines if the port has a PHY connected to it or not +- `phy-ref`: _phandle_ to the connected ethernet PHY + **NOTE**: Must be `0x00` in case there is no PHY connected to port-X + (for example if port-X is a host port or a cascaded port) +- `logical-port-num`: logical port number, used for the port mapping + **NOTE**: Must be `0xff` in case port-X is a cascaded port. + +Example: + + sja1105: ethernet-switch@0{ + compatible = "qcom,nxp,sja1105p-switch"; + reg = <0>; + spi-max-frequency = <12000000>; + spi-cpha; + switch-speed = <1000>; + pinctrl-names = "default"; + pinctrl-0 = <&sja1105_default>; + qcom,reset-gpio = <&tlmm 91 0x1>; + + port-0 { + null-phy = <0x1>; + phy-ref = < 0 >; + logical-port-num = < 0 >; + }; + + port-1 { + null-phy = <0x1>; + phy-ref = < 0 >; + logical-port-num = < 1 >; + }; + + port-2 { + null-phy = <0x1>; + phy-ref = < 0 >; + logical-port-num = < 2 >; + }; + + port-3 { + null-phy = <0x1>; + phy-ref = < 0 >; + logical-port-num = < 3 >; + }; + + port-4 { + null-phy = <0x1>; + phy-ref = < 0 >; + logical-port-num = < 4 >; + }; + }; From 2c947c65640bd97a598397d7443d8a6bbd485d19 Mon Sep 17 00:00:00 2001 From: Vijayavardhan Vennapusa Date: Mon, 17 Aug 2020 15:16:58 +0530 Subject: [PATCH 064/141] UVC: Increase usb requests for better throughput Currently UVC driver is using 4 usb requests for data transfers. Increase number of usb requests to 8 to improve throughput in UVC driver. 
Change-Id: Iea94a1c62e6c615cc88c4cc9f3a53e3719642e0f Signed-off-by: Vijayavardhan Vennapusa --- drivers/usb/gadget/function/uvc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h index 11d70dead32b..bf5f25349531 100644 --- a/drivers/usb/gadget/function/uvc.h +++ b/drivers/usb/gadget/function/uvc.h @@ -94,7 +94,7 @@ extern unsigned int uvc_gadget_trace_param; * Driver specific constants */ -#define UVC_NUM_REQUESTS 4 +#define UVC_NUM_REQUESTS 8 #define UVC_MAX_REQUEST_SIZE 64 #define UVC_MAX_EVENTS 4 From 56d53422cbe03fa99d75b9356e78028d420fc7f1 Mon Sep 17 00:00:00 2001 From: Vijayavardhan Vennapusa Date: Mon, 17 Aug 2020 15:26:45 +0530 Subject: [PATCH 065/141] ARM: msm: dts: Disable U1U2 low power modes for QCS610 Disable U1U2 low power modes for QCS610 which is required for avoiding flickering issues with UVC over isochronous endpoint. Change-Id: Iceb4328141714553574b5729c73775d917688469 Signed-off-by: Vijayavardhan Vennapusa --- arch/arm64/boot/dts/qcom/qcs610-iot.dtsi | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/qcs610-iot.dtsi b/arch/arm64/boot/dts/qcom/qcs610-iot.dtsi index 6c1d8c88de2a..4fc9b0252bf2 100644 --- a/arch/arm64/boot/dts/qcom/qcs610-iot.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs610-iot.dtsi @@ -332,6 +332,12 @@ status = "ok"; }; +&usb0 { + dwc3@a600000 { + snps,usb3-u1u2-disable; + }; +}; + &L16A { regulator-max-microvolt = <3304000>; }; From b9b2adc7101ee1d7e2230c8995ebeaf0a50a0001 Mon Sep 17 00:00:00 2001 From: Protik Biswas Date: Mon, 17 Aug 2020 16:28:46 +0530 Subject: [PATCH 066/141] soc: qcom: bgcom: change BG TWM firmware name Blackghost TWM firmware name has been changed as from META scripts the usage of firmware name having more than 8 characters is not allowed. 
Change-Id: I4c7b9091de68b9ef9a7f0750a01b26923c05299a Signed-off-by: Protik Biswas --- drivers/soc/qcom/bgcom_interface.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/soc/qcom/bgcom_interface.c b/drivers/soc/qcom/bgcom_interface.c index 0705bc7ad16d..c207738e0d23 100644 --- a/drivers/soc/qcom/bgcom_interface.c +++ b/drivers/soc/qcom/bgcom_interface.c @@ -134,9 +134,9 @@ static void bgcom_load_twm_bg_work(struct work_struct *work) } else { dev->bg_twm_wear_load = true; dev->pil_h = subsystem_get_with_fwname("bg-wear", - "bg-twm-wear"); + "bg-twm"); if (!dev->pil_h) - pr_err("failed to load bg-twm-wear\n"); + pr_err("failed to load bg-twm\n"); } } @@ -617,7 +617,7 @@ static int ssr_bg_cb(struct notifier_block *this, break; case SUBSYS_AFTER_SHUTDOWN: if (dev->pending_bg_twm_wear_load) { - /* Load bg-twm-wear */ + /* Load bg-twm */ dev->pending_bg_twm_wear_load = false; queue_work(dev->bgdaemon_wq, &dev->bgdaemon_load_twm_bg_work); From 1b1d175854958a9952ab1f3b1a8f644ff0c2cdc6 Mon Sep 17 00:00:00 2001 From: Sachin Prakash Gejji Date: Tue, 11 Aug 2020 12:16:24 +0530 Subject: [PATCH 067/141] net: stmmac: Add mac2mac feature support Add mac2mac feature support. 
Change-Id: I97329a07d70419c342b289689e8fc8371db30f1a Signed-off-by: Sachin Prakash Gejji --- .../stmicro/stmmac/dwmac-qcom-ethqos.c | 17 +++++++-- .../ethernet/stmicro/stmmac/stmmac_ethtool.c | 35 ++++++++++++++++--- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 34 ++++++++++++++---- .../ethernet/stmicro/stmmac/stmmac_platform.c | 9 +++-- include/linux/stmmac.h | 2 ++ 5 files changed, 82 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index b6c8996c99b5..cf5091a3289d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -2111,8 +2111,13 @@ inline bool qcom_ethqos_is_phy_link_up(struct qcom_ethqos *ethqos) */ struct stmmac_priv *priv = qcom_ethqos_get_priv(ethqos); - return ((priv->oldlink != -1) && - (priv->dev->phydev && priv->dev->phydev->link)); + if (priv->plat->mac2mac_en) { + return true; + } else { + return ((priv->oldlink != -1) && + (priv->dev->phydev && + priv->dev->phydev->link)); + } } static void qcom_ethqos_phy_resume_clks(struct qcom_ethqos *ethqos) @@ -2429,6 +2434,14 @@ static int qcom_ethqos_probe(struct platform_device *pdev) plat_dat->tso_en = of_property_read_bool(np, "snps,tso"); plat_dat->early_eth = ethqos->early_eth_enabled; + /* Get rgmii interface speed for mac2c from device tree */ + if (of_property_read_u32(np, "mac2mac-rgmii-speed", + &plat_dat->mac2mac_rgmii_speed)) + plat_dat->mac2mac_rgmii_speed = -1; + else + ETHQOSINFO("mac2mac rgmii speed = %d\n", + plat_dat->mac2mac_rgmii_speed); + if (of_property_read_bool(pdev->dev.of_node, "qcom,arm-smmu")) { stmmac_emb_smmu_ctx.pdev_master = pdev; ret = of_platform_populate(pdev->dev.of_node, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 9057ef572cf6..089572fafbed 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -286,6 +286,12 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev, struct stmmac_priv *priv = netdev_priv(dev); struct phy_device *phy = dev->phydev; + if (!phy) { + pr_err("%s: %s: PHY is not registered\n", + __func__, dev->name); + return -ENODEV; + } + if (priv->hw->pcs & STMMAC_PCS_RGMII || priv->hw->pcs & STMMAC_PCS_SGMII) { struct rgmii_adv adv; @@ -365,11 +371,6 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev, return 0; } - if (phy == NULL) { - pr_err("%s: %s: PHY is not registered\n", - __func__, dev->name); - return -ENODEV; - } if (!netif_running(dev)) { pr_err("%s: interface is disabled: we cannot track " "link speed / duplex setting\n", dev->name); @@ -388,6 +389,12 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev, int rc; u32 cmd_speed = cmd->base.speed; + if (!phy) { + pr_err("%s: %s: PHY is not registered\n", + __func__, dev->name); + return -ENODEV; + } + if (priv->hw->pcs & STMMAC_PCS_RGMII || priv->hw->pcs & STMMAC_PCS_SGMII) { u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause; @@ -505,6 +512,12 @@ stmmac_set_pauseparam(struct net_device *netdev, struct phy_device *phy = netdev->phydev; int new_pause = FLOW_OFF; + if (!phy) { + pr_err("%s: %s: PHY is not registered\n", + __func__, netdev->name); + return -ENODEV; + } + if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) { struct rgmii_adv adv_lp; @@ -634,6 +647,12 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct stmmac_priv *priv = netdev_priv(dev); + if (!priv->phydev) { + pr_err("%s: %s: PHY is not registered\n", + __func__, dev->name); + return; + } + phy_ethtool_get_wol(priv->phydev, wol); mutex_lock(&priv->lock); if (device_can_wakeup(priv->device)) { @@ -650,6 +669,12 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) u32 emac_wol_support = 0; int ret; 
+ if (!priv->phydev) { + pr_err("%s: %s: PHY is not registered\n", + __func__, dev->name); + return -ENODEV; + } + if (ethqos->phy_state == PHY_IS_OFF) { ETHQOSINFO("Phy is in off state Wol set not possible\n"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f6348e04a4f2..076ca27562b4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2736,9 +2736,10 @@ static int stmmac_open(struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); int ret; - if (priv->hw->pcs != STMMAC_PCS_RGMII && - priv->hw->pcs != STMMAC_PCS_TBI && - priv->hw->pcs != STMMAC_PCS_RTBI) { + if (!priv->plat->mac2mac_en && + (priv->hw->pcs != STMMAC_PCS_RGMII && + priv->hw->pcs != STMMAC_PCS_TBI && + priv->hw->pcs != STMMAC_PCS_RTBI)) { ret = stmmac_init_phy(dev); if (ret) { netdev_err(priv->dev, @@ -2826,6 +2827,26 @@ static int stmmac_open(struct net_device *dev) if (priv->tx_queue[IPA_DMA_TX_CH].skip_sw) ethqos_ipa_offload_event_handler(priv, EV_DEV_OPEN); + if (priv->plat->mac2mac_en) { + u32 ctrl = readl_relaxed(priv->ioaddr + MAC_CTRL_REG); + + ctrl &= ~priv->hw->link.speed_mask; + + if (priv->plat->mac2mac_rgmii_speed == SPEED_1000) { + ctrl |= priv->hw->link.speed1000; + priv->speed = SPEED_1000; + } else if (priv->plat->mac2mac_rgmii_speed == SPEED_100) { + ctrl |= priv->hw->link.speed100; + priv->speed = SPEED_100; + } else { + ctrl |= priv->hw->link.speed10; + priv->speed = SPEED_10; + } + + stmmac_hw_fix_mac_speed(priv); + writel_relaxed(ctrl, priv->ioaddr + MAC_CTRL_REG); + } + return 0; lpiirq_error: @@ -4594,9 +4615,10 @@ int stmmac_dvr_probe(struct device *device, stmmac_check_pcs_mode(priv); - if (priv->hw->pcs != STMMAC_PCS_RGMII && - priv->hw->pcs != STMMAC_PCS_TBI && - priv->hw->pcs != STMMAC_PCS_RTBI) { + if (!priv->plat->mac2mac_en && + (priv->hw->pcs != STMMAC_PCS_RGMII && + priv->hw->pcs != 
STMMAC_PCS_TBI && + priv->hw->pcs != STMMAC_PCS_RTBI)) { /* MDIO bus Registration */ ret = stmmac_mdio_register(ndev); if (ret < 0) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 54893d12a22d..dbbf2857831d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -400,6 +400,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) /* Default to phy auto-detection */ plat->phy_addr = -1; + /* Flag for mac2mac feature support*/ + plat->mac2mac_en = of_property_read_bool(np, "mac2mac"); + /* "snps,phy-addr" is not a standard property. Mark it as deprecated * and warn of its use. Remove this when phy node support is added. */ @@ -407,8 +410,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); /* To Configure PHY by using all device-tree supported properties */ - if (stmmac_dt_phy(plat, np, &pdev->dev)) - return ERR_PTR(-ENODEV); + if (!plat->mac2mac_en) { + if (stmmac_dt_phy(plat, np, &pdev->dev)) + return ERR_PTR(-ENODEV); + } of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 9ed7771ba212..3dc7d602ec15 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -201,5 +201,7 @@ struct plat_stmmacenet_data { bool early_eth; bool crc_strip_en; bool phy_intr_en; + int mac2mac_rgmii_speed; + bool mac2mac_en; }; #endif From c6d20626fb2624038897c0cad14d7e17ee1f63d9 Mon Sep 17 00:00:00 2001 From: Anurag Chouhan Date: Mon, 17 Aug 2020 18:31:23 +0530 Subject: [PATCH 068/141] defconfig: sdm429: Update configs related to DCC Update configs related to DCC for sdm429w target. 
Change-Id: I7dfdc7db7a9d903b5f7bf152f4df18ce7133f379 Signed-off-by: Anurag Chouhan --- arch/arm/configs/vendor/sdm429-bg-perf_defconfig | 2 +- arch/arm/configs/vendor/sdm429-bg_defconfig | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/configs/vendor/sdm429-bg-perf_defconfig b/arch/arm/configs/vendor/sdm429-bg-perf_defconfig index 7bb95bea9784..c2e470bf4344 100644 --- a/arch/arm/configs/vendor/sdm429-bg-perf_defconfig +++ b/arch/arm/configs/vendor/sdm429-bg-perf_defconfig @@ -541,7 +541,7 @@ CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000 CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000 CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y CONFIG_MSM_BOOT_STATS=y -CONFIG_QCOM_DCC_V2=y +CONFIG_QCOM_DCC=y CONFIG_QCOM_SECURE_BUFFER=y CONFIG_ICNSS=y CONFIG_ICNSS_QMI=y diff --git a/arch/arm/configs/vendor/sdm429-bg_defconfig b/arch/arm/configs/vendor/sdm429-bg_defconfig index ff60e088faef..ad05c2c8c721 100644 --- a/arch/arm/configs/vendor/sdm429-bg_defconfig +++ b/arch/arm/configs/vendor/sdm429-bg_defconfig @@ -561,7 +561,7 @@ CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000 CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y CONFIG_MSM_BOOT_STATS=y CONFIG_MSM_CORE_HANG_DETECT=y -CONFIG_QCOM_DCC_V2=y +CONFIG_QCOM_DCC=y CONFIG_MSM_GLADIATOR_HANG_DETECT=y CONFIG_MSM_GLADIATOR_ERP=y CONFIG_PANIC_ON_GLADIATOR_ERROR=y From a82c6c2a5789d9348ec9a27d229a1c4e057b7e65 Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Tue, 12 May 2020 14:02:20 +0530 Subject: [PATCH 069/141] dm: default-key: Adapt legacy disk format for new set of arguments Users now have the option to choose between the legacy and the new version of the on-disk data format. So the arguments passed in the legacy-format case will not match the count check in the default-key driver. So adapt the legacy support to the new set of arguments. Also check for the legacy encryption mode for file encryption.
Change-Id: Ie6f9f683c048a2a1c64d46716433bcff359dc3bf Signed-off-by: Neeraj Soni --- drivers/md/dm-default-key.c | 30 +++++++++++++++++++++++++----- fs/crypto/policy.c | 4 ++++ 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c index c1fe775ef9d5..2309cd40ee7c 100644 --- a/drivers/md/dm-default-key.c +++ b/drivers/md/dm-default-key.c @@ -135,9 +135,11 @@ static int default_key_ctr_optional(struct dm_target *ti, return 0; } -void default_key_adjust_sector_size_and_iv(char **argv, struct dm_target *ti, - struct default_key_c **dkc, u8 *raw, - u32 size) +static void default_key_adjust_sector_size_and_iv(char **argv, + struct dm_target *ti, + struct default_key_c **dkc, + u8 *raw, u32 size, + bool is_legacy) { struct dm_dev *dev; int i; @@ -148,7 +150,7 @@ void default_key_adjust_sector_size_and_iv(char **argv, struct dm_target *ti, dev = (*dkc)->dev; - if (!strcmp(argv[0], "AES-256-XTS")) { + if (is_legacy) { memcpy(key_new.bytes, raw, size); for (i = 0; i < ARRAY_SIZE(key_new.words); i++) @@ -181,6 +183,24 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) unsigned long long tmpll; char dummy; int err; + char *_argv[10]; + bool is_legacy = false; + + if (argc >= 4 && !strcmp(argv[0], "AES-256-XTS")) { + argc = 0; + _argv[argc++] = "aes-xts-plain64"; + _argv[argc++] = argv[1]; + _argv[argc++] = "0"; + _argv[argc++] = argv[2]; + _argv[argc++] = argv[3]; + _argv[argc++] = "3"; + _argv[argc++] = "allow_discards"; + _argv[argc++] = "sector_size:4096"; + _argv[argc++] = "iv_large_sectors"; + _argv[argc] = NULL; + argv = _argv; + is_legacy = true; + } if (argc < 5) { ti->error = "Not enough arguments"; @@ -256,7 +276,7 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) } default_key_adjust_sector_size_and_iv(argv, ti, &dkc, raw_key, - raw_key_size); + raw_key_size, is_legacy); dkc->sector_bits = ilog2(dkc->sector_size); if (ti->len & 
((dkc->sector_size >> SECTOR_SHIFT) - 1)) { diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 10ccf945020c..4ea152ddf268 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -43,6 +43,10 @@ static bool fscrypt_valid_enc_modes(u32 contents_mode, u32 filenames_mode) filenames_mode == FSCRYPT_MODE_ADIANTUM) return true; + if (contents_mode == FSCRYPT_MODE_PRIVATE && + filenames_mode == FSCRYPT_MODE_AES_256_CTS) + return true; + return false; } From 5abedd3ab36eadfc2ed2ab791591b2a91bc5699a Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Mon, 10 Aug 2020 16:50:40 -0700 Subject: [PATCH 070/141] msm: ipa3: add support on detour lan2lan traffic to sw Add the header file change for user-space module to specify is this bridge interface has to detour lan2lan traffic to SW-path. Change-Id: I5b0bf59c312f09bc44a52a74824e4973c1ac23d0 Signed-off-by: Skylar Chang --- include/uapi/linux/msm_ipa.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index 6d86d7357807..276aee4f900f 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -2844,12 +2844,14 @@ struct ipa_ioc_get_vlan_mode { * @vlan_id: vlan ID bridge is mapped to * @bridge_ipv4: bridge interface ipv4 address * @subnet_mask: bridge interface subnet mask + * @lan2lan_sw: indicate lan2lan traffic take sw-path or not */ struct ipa_ioc_bridge_vlan_mapping_info { char bridge_name[IPA_RESOURCE_NAME_MAX]; uint16_t vlan_id; uint32_t bridge_ipv4; uint32_t subnet_mask; + uint8_t lan2lan_sw; }; struct ipa_coalesce_info { From a12323b02f4fe203dd64d6e39322f039af87d2b4 Mon Sep 17 00:00:00 2001 From: Sunil Paidimarri Date: Wed, 22 Jul 2020 17:43:18 -0700 Subject: [PATCH 071/141] msm: eth: Add user space interface for eth Add user space interface using structure. Add messages for apps to read IPA connection state. 
Change-Id: Ia11be481e17b450e65d030a7ccc458bf20c164f6 Acked-by: Rahul Kawadgave Signed-off-by: Sunil Paidimarri --- include/uapi/linux/msm_eth.h | 37 ++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 include/uapi/linux/msm_eth.h diff --git a/include/uapi/linux/msm_eth.h b/include/uapi/linux/msm_eth.h new file mode 100644 index 000000000000..e60214ad5d5a --- /dev/null +++ b/include/uapi/linux/msm_eth.h @@ -0,0 +1,37 @@ +#ifndef _UAPI_MSM_ETH_H_ +#define _UAPI_MSM_ETH_H_ + +#include + +/** + * defines eth_meta_event - Events for eth + * + * CV2X pipe connect: CV2X pipe connected + * CV2X pipe disconnect: CV2X pipe disconnected + */ +#define ETH_EVT_START 0 +#define ETH_EVT_CV2X_PIPE_CONNECTED (ETH_EVT_START + 1) +#define ETH_EVT_CV2X_PIPE_DISCONNECTED (ETH_EVT_CV2X_PIPE_CONNECTED + 1) +#define ETH_EVT_CV2X_MODE_NOT_ENABLED (ETH_EVT_CV2X_PIPE_DISCONNECTED + 1) + +/** + * struct eth_msg_meta - Format of the message meta-data. + * @msg_type: the type of the message + * @rsvd: reserved bits for future use. + * @msg_len: the length of the message in bytes + * + * For push model: + * Client in user-space should issue a read on the device (/dev/emac) with a + * sufficiently large buffer in a continuous loop, call will block when there is + * no message to read. Upon return, client can read the eth_msg_meta from start + * of buffer to find out type and length of message + * size of buffer supplied >= (size of largest message + size of metadata) + * + */ +struct eth_msg_meta { + __u8 msg_type; + __u8 rsvd; + __u16 msg_len; +}; + +#endif /* _UAPI_MSM_ETH_H_ */ From 641be7d2db3518e6ba82e1b46d36aeb0838bb6eb Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Tue, 18 Aug 2020 11:39:49 +0800 Subject: [PATCH 072/141] binderfs: use refcount for binder control devices too Binderfs binder-control devices are cleaned up via binderfs_evict_inode too() which will use refcount_dec_and_test(). 
However, we missed setting the refcount for binderfs binder-control devices and so we underflowed when the binderfs instance got unmounted. Pretty obvious oversight and should have been part of the more general UAF fix. The good news is that having test cases (surprisingly) helps. Technically, we could detect that we're about to cleanup the binder-control dentry in binderfs_evict_inode() and then simply clean it up. But that makes the assumption that the binder driver itself will never make use of a binderfs binder-control device after the binderfs instance it belongs to has been unmounted and the superblock for it been destroyed. While it is unlikely to ever come to this let's be on the safe side. Performance-wise this also really doesn't matter since the binder-control device is only ever really used when creating the binderfs filesystem or creating additional binder devices. Both operations are pretty rare. Change-Id: Ia2eedd8b11621c9358c372b024ffb776055600f6 Fixes: f0fe2c0f050d ("binder: prevent UAF for binderfs devices II") Link: https://lore.kernel.org/r/CA+G9fYusdfg7PMfC9Xce-xLT7NiyKSbgojpK35GOm=Pf9jXXrA@mail.gmail.com Reported-by: Naresh Kamboju Cc: stable@vger.kernel.org Signed-off-by: Christian Brauner Acked-by: Todd Kjos Link: https://lore.kernel.org/r/20200311105309.1742827-1-christian.brauner@ubuntu.com Signed-off-by: Greg Kroah-Hartman Git-repo: git://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git Git-commit: 211b64e4b5b6bd5fdc19cd525c2cc9a90e6b0ec9 Signed-off-by: Lei wang --- drivers/android/binderfs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index c25ab8bc8b4b..bfdbc9c640ff 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -448,6 +448,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb) inode->i_uid = info->root_uid; inode->i_gid = info->root_gid; + refcount_set(&device->ref, 1); device->binderfs_inode = inode; device->miscdev.minor = minor; 
From ead6cfb92a656b6ef0099b4a26f54d18a387b243 Mon Sep 17 00:00:00 2001 From: Venkata Rao Kakani Date: Tue, 18 Aug 2020 10:07:07 +0530 Subject: [PATCH 073/141] ARM: dts: msm: disable avb for lv container disable avb for lv lxc container as avb is not supported for lv gvm. Change-Id: Ie76cde5a8f8c5736f3df5313f9ca82baaa49d27e Signed-off-by: Venkata Rao Kakani --- arch/arm64/boot/dts/qcom/sa8195-vm-lv-lxc.dtsi | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/boot/dts/qcom/sa8195-vm-lv-lxc.dtsi b/arch/arm64/boot/dts/qcom/sa8195-vm-lv-lxc.dtsi index d2eceb74af93..98522181a703 100644 --- a/arch/arm64/boot/dts/qcom/sa8195-vm-lv-lxc.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8195-vm-lv-lxc.dtsi @@ -19,6 +19,7 @@ }; }; + /delete-node/ rename_blk; }; &hab { From 87131b3c9eda8748cddf679712785431b5d8cdf7 Mon Sep 17 00:00:00 2001 From: Terence Ho Date: Thu, 13 Aug 2020 16:45:50 -0400 Subject: [PATCH 074/141] msm: ais: restrict cci user interface to VIDEOC_CAM_CONTROL Restrict cci user interface to VIDEOC_CAM_CONTROL. Kernel clients would access via functions directly. Acked-by: Abderahmane Allalou . 
Change-Id: I607d4a78ad453f4191745d88e942f84bc2488d2c Signed-off-by: Terence Ho --- .../cam_sensor_module/cam_cci/cam_cci_core.c | 19 ++++++---- .../cam_sensor_module/cam_cci/cam_cci_dev.c | 3 -- .../cam_sensor_module/cam_cci/cam_cci_dev.h | 2 - .../cam_sensor_io/cam_sensor_cci_i2c.c | 38 ++++++++++--------- 4 files changed, 33 insertions(+), 29 deletions(-) diff --git a/drivers/media/platform/msm/ais/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/ais/cam_sensor_module/cam_cci/cam_cci_core.c index 45eb1f449d35..419d748f5f1d 100644 --- a/drivers/media/platform/msm/ais/cam_sensor_module/cam_cci/cam_cci_core.c +++ b/drivers/media/platform/msm/ais/cam_sensor_module/cam_cci/cam_cci_core.c @@ -2015,7 +2015,14 @@ int32_t cam_cci_core_cfg(struct v4l2_subdev *sd, struct cam_cci_ctrl *cci_ctrl) { int32_t rc = 0; - struct cci_device *cci_dev = v4l2_get_subdevdata(sd); + struct cci_device *cci_dev; + + if (sd == NULL || cci_ctrl == NULL) { + CAM_ERR(CAM_CCI, "cci_ctrl or sd null"); + rc = -ENODEV; + return rc; + } + cci_dev = v4l2_get_subdevdata(sd); CAM_DBG(CAM_CCI, "cmd %d", cci_ctrl->cmd); @@ -2073,12 +2080,10 @@ int32_t cam_cci_core_cam_ctrl(struct v4l2_subdev *sd, CAM_DBG(CAM_CCI, "cmd %d", cmd->op_code); - if (cmd->op_code != AIS_SENSOR_I2C_POWER_DOWN) { - if (cmd->handle_type != CAM_HANDLE_USER_POINTER) { - CAM_ERR(CAM_CCI, "Invalid handle type: %d", + if (cmd->handle_type != CAM_HANDLE_USER_POINTER) { + CAM_ERR(CAM_CCI, "Invalid handle type: %d", cmd->handle_type); - return -EINVAL; - } + return -EINVAL; } cci_ctrl.cci_info = kzalloc(sizeof(struct cam_sensor_cci_client), @@ -2092,7 +2097,7 @@ int32_t cam_cci_core_cam_ctrl(struct v4l2_subdev *sd, sensor_cap.slot_info = cci_dev->soc_info.index; if (copy_to_user(u64_to_user_ptr(cmd->handle), - &sensor_cap, sizeof(struct cam_sensor_query_cap))) { + &sensor_cap, sizeof(sensor_cap))) { CAM_ERR(CAM_CCI, "Failed Copy to User"); rc = -EFAULT; } diff --git 
a/drivers/media/platform/msm/ais/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/ais/cam_sensor_module/cam_cci/cam_cci_dev.c index 4a6edef2f0fb..6f6b7b2890e9 100644 --- a/drivers/media/platform/msm/ais/cam_sensor_module/cam_cci/cam_cci_dev.c +++ b/drivers/media/platform/msm/ais/cam_sensor_module/cam_cci/cam_cci_dev.c @@ -37,9 +37,6 @@ static long cam_cci_subdev_ioctl(struct v4l2_subdev *sd, } switch (cmd) { - case VIDIOC_MSM_CCI_CFG: - rc = cam_cci_core_cfg(sd, arg); - break; case VIDIOC_CAM_CONTROL: rc = cam_cci_core_cam_ctrl(sd, arg); break; diff --git a/drivers/media/platform/msm/ais/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/ais/cam_sensor_module/cam_cci/cam_cci_dev.h index 1c21e035447e..3e410b30e0ac 100644 --- a/drivers/media/platform/msm/ais/cam_sensor_module/cam_cci/cam_cci_dev.h +++ b/drivers/media/platform/msm/ais/cam_sensor_module/cam_cci/cam_cci_dev.h @@ -324,7 +324,5 @@ static inline struct v4l2_subdev *cam_cci_get_subdev(int cci_dev_index) } #endif -#define VIDIOC_MSM_CCI_CFG \ - _IOWR('V', BASE_VIDIOC_PRIVATE + 23, struct cam_cci_ctrl *) #endif /* _CAM_CCI_DEV_H_ */ diff --git a/drivers/media/platform/msm/ais/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c b/drivers/media/platform/msm/ais/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c index 16edb38d0f88..0078142eead3 100644 --- a/drivers/media/platform/msm/ais/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c +++ b/drivers/media/platform/msm/ais/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018,2020 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -12,7 +12,7 @@ #include "cam_sensor_cmn_header.h" #include "cam_sensor_i2c.h" -#include "cam_cci_dev.h" +#include "cam_cci_core.h" int32_t cam_cci_i2c_read(struct cam_sensor_cci_client *cci_client, uint32_t addr, uint32_t *data, @@ -36,8 +36,8 @@ int32_t cam_cci_i2c_read(struct cam_sensor_cci_client *cci_client, cci_ctrl.cfg.cci_i2c_read_cfg.data_type = data_type; cci_ctrl.cfg.cci_i2c_read_cfg.data = buf; cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = data_type; - rc = v4l2_subdev_call(cci_client->cci_subdev, - core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl); + + rc = cam_cci_core_cfg(cci_client->cci_subdev, &cci_ctrl); if (rc < 0) { CAM_ERR(CAM_SENSOR, "rc = %d", rc); return rc; @@ -88,14 +88,18 @@ int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *cci_client, cci_ctrl.cfg.cci_i2c_read_cfg.data = buf; cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = num_byte; cci_ctrl.status = -EFAULT; - rc = v4l2_subdev_call(cci_client->cci_subdev, - core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl); - rc = cci_ctrl.status; + + rc = cam_cci_core_cfg(cci_client->cci_subdev, &cci_ctrl); CAM_DBG(CAM_SENSOR, "addr = 0x%x, rc = %d", addr, rc); - for (i = 0; i < num_byte; i++) { - data[i] = buf[i]; - CAM_DBG(CAM_SENSOR, "Byte %d: Data: 0x%x\n", i, data[i]); + + if (!rc) { + for (i = 0; i < num_byte; i++) { + data[i] = buf[i]; + CAM_DBG(CAM_SENSOR, "Byte %d: Data: 0x%x", + i, data[i]); + } } + kfree(buf); return rc; } @@ -124,13 +128,13 @@ static int32_t cam_cci_i2c_write_table_cmd( cci_ctrl.cfg.cci_i2c_write_cfg.data_type = write_setting->data_type; cci_ctrl.cfg.cci_i2c_write_cfg.addr_type = write_setting->addr_type; cci_ctrl.cfg.cci_i2c_write_cfg.size = write_setting->size; - rc = v4l2_subdev_call(cci_client->cci_subdev, - core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl); + + rc = cam_cci_core_cfg(cci_client->cci_subdev, &cci_ctrl); if (rc < 0) { 
CAM_ERR(CAM_SENSOR, "Failed rc = %d", rc); return rc; } - rc = cci_ctrl.status; + if (write_setting->delay > 20) msleep(write_setting->delay); else if (write_setting->delay) @@ -229,11 +233,11 @@ int32_t cam_sensor_cci_i2c_util(struct cam_sensor_cci_client *cci_client, cci_ctrl.cmd = cci_cmd; cci_ctrl.cci_info = cci_client; - rc = v4l2_subdev_call(cci_client->cci_subdev, - core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl); + + rc = cam_cci_core_cfg(cci_client->cci_subdev, &cci_ctrl); if (rc < 0) { CAM_ERR(CAM_SENSOR, "Failed rc = %d", rc); - return rc; } - return cci_ctrl.status; + + return rc; } From 81eeebf5588feee5fe959c48032abe3196057373 Mon Sep 17 00:00:00 2001 From: Manaf Meethalavalappu Pallikunhi Date: Mon, 17 Aug 2020 23:49:14 +0530 Subject: [PATCH 075/141] ARM: dts: msm: Update pmic alarm thermal zone mitigation configs for GEN3 The pmic alarm sensor generates interrupt and notifies the thermal framework only for 1st stage alarm. But thermal mitigation is enabled only for second stage alarm for GEN3 targets. Since first trip doesn't have mitigation, passive delay polling is also not enabled for this zone. It leads to a case where alarm sensor reaches above second trip, but mitigation is not applied. Add dummy cooling map configuration for first trip threshold for these alarm sensor thermal zones. It activates passive delay polling from first trip violation and it will be active until it clears first trip violation. 
Change-Id: Iff1b0fcdb5206cb9499e97757b1822bfd2203fdb Signed-off-by: Manaf Meethalavalappu Pallikunhi --- arch/arm64/boot/dts/qcom/sa6155-pmic.dtsi | 9 +++++++ .../boot/dts/qcom/sa8155-pmic-overlay.dtsi | 18 +++++++++++++ arch/arm64/boot/dts/qcom/sa8195-pmic.dtsi | 27 +++++++++++++++++++ 3 files changed, 54 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sa6155-pmic.dtsi b/arch/arm64/boot/dts/qcom/sa6155-pmic.dtsi index ccef59f575bc..7b54f7d59b81 100644 --- a/arch/arm64/boot/dts/qcom/sa6155-pmic.dtsi +++ b/arch/arm64/boot/dts/qcom/sa6155-pmic.dtsi @@ -338,6 +338,15 @@ pm6155-1-tz { cooling-maps { + /* + * trip0 cooling map is dummy node to enable + * passive polling on trip0 violation. + */ + trip0_cpu0 { + trip = <&pm6155_trip0>; + cooling-device = <&CPU0 0 0>; + }; + trip1_cpu0 { trip = <&pm6155_trip1>; cooling-device = diff --git a/arch/arm64/boot/dts/qcom/sa8155-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/sa8155-pmic-overlay.dtsi index 41df55dbe659..516b606cfcd3 100644 --- a/arch/arm64/boot/dts/qcom/sa8155-pmic-overlay.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155-pmic-overlay.dtsi @@ -193,6 +193,15 @@ pm8150_1_gpios: &pm8150_gpios { }; cooling-maps { + /* + * trip0 cooling map is dummy node to enable + * passive polling on trip0 violation. + */ + trip0_cpu0 { + trip = <&pm8150_2_trip0>; + cooling-device = <&CPU0 0 0>; + }; + trip1_cpu0 { trip = <&pm8150_2_trip1>; cooling-device = @@ -253,6 +262,15 @@ pm8150_1_gpios: &pm8150_gpios { pm8150_tz { cooling-maps { + /* + * trip0 cooling map is dummy node to enable + * passive polling on trip0 violation. 
+ */ + trip0_cpu0 { + trip = <&pm8150_trip0>; + cooling-device = <&CPU0 0 0>; + }; + trip1_cpu0 { trip = <&pm8150_trip1>; cooling-device = diff --git a/arch/arm64/boot/dts/qcom/sa8195-pmic.dtsi b/arch/arm64/boot/dts/qcom/sa8195-pmic.dtsi index ebeac6853f38..ff1d341d6729 100644 --- a/arch/arm64/boot/dts/qcom/sa8195-pmic.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8195-pmic.dtsi @@ -122,6 +122,15 @@ }; cooling-maps { + /* + * trip0 cooling map is dummy node to enable + * passive polling on trip0 violation. + */ + trip0_cpu0 { + trip = <&pm8195_1_trip0>; + cooling-device = <&CPU0 0 0>; + }; + trip1_cpu0 { trip = <&pm8195_1_trip1>; cooling-device = @@ -206,6 +215,15 @@ }; cooling-maps { + /* + * trip0 cooling map is dummy node to enable + * passive polling on trip0 violation. + */ + trip0_cpu0 { + trip = <&pm8195_2_trip0>; + cooling-device = <&CPU0 0 0>; + }; + trip1_cpu0 { trip = <&pm8195_2_trip1>; cooling-device = @@ -290,6 +308,15 @@ }; cooling-maps { + /* + * trip0 cooling map is dummy node to enable + * passive polling on trip0 violation. + */ + trip0_cpu0 { + trip = <&pm8195_3_trip0>; + cooling-device = <&CPU0 0 0>; + }; + trip1_cpu0 { trip = <&pm8195_3_trip1>; cooling-device = From 55a2eec506f40694f89c9622be596c42239ea24e Mon Sep 17 00:00:00 2001 From: Arun Prakash Date: Wed, 12 Aug 2020 16:46:51 +0530 Subject: [PATCH 076/141] rpmsg: glink: Enable irq wake for glink interrupt Enable irq wake option for glink interrupt to wake the target from suspended state in case of any glink interrupt from remote sub system. 
Change-Id: I30b99c6841a341d49fadf4ee6cf218aeb6fb3a75 Signed-off-by: Arun Prakash --- drivers/rpmsg/qcom_glink_native.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 2b051b54ae4b..b138be016ca5 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -2040,7 +2040,7 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev, if (vm_support) irqflags = IRQF_TRIGGER_RISING; else - irqflags = IRQF_NO_SUSPEND | IRQF_SHARED; + irqflags = IRQF_SHARED; ret = devm_request_irq(dev, irq, qcom_glink_native_intr, @@ -2053,6 +2053,10 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev, glink->irq = irq; + ret = enable_irq_wake(irq); + if (ret < 0) + dev_err(dev, "enable_irq_wake() failed on %d\n", irq); + size = of_property_count_u32_elems(dev->of_node, "cpu-affinity"); if (size > 0) { arr = kmalloc_array(size, sizeof(u32), GFP_KERNEL); From 733ab12f97e9a24ff52e07c95e7ca20704de1b2e Mon Sep 17 00:00:00 2001 From: Santosh Dronamraju Date: Tue, 18 Aug 2020 13:45:21 +0530 Subject: [PATCH 077/141] defconfig: Enable new file encryption flags for msmnile New file encryption architecture and hardware support for it are enabled with these flags. 
Change-Id: Ib5aafaa4be27c8cc0cf1dcbb5891173e8f2e5488 Signed-off-by: Santosh Dronamraju --- arch/arm64/configs/vendor/sa8155-perf_defconfig | 8 ++++++++ arch/arm64/configs/vendor/sa8155_defconfig | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/arch/arm64/configs/vendor/sa8155-perf_defconfig b/arch/arm64/configs/vendor/sa8155-perf_defconfig index b3dc60f70e86..1a8b42da637a 100644 --- a/arch/arm64/configs/vendor/sa8155-perf_defconfig +++ b/arch/arm64/configs/vendor/sa8155-perf_defconfig @@ -52,6 +52,8 @@ CONFIG_MODVERSIONS=y CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_ARCH_QCOM=y @@ -278,8 +280,11 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_CRYPTO=y +CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -578,6 +583,8 @@ CONFIG_MSM_PERFORMANCE=y CONFIG_MSM_DRM_TECHPACK=y CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_CDSP_RM=y +CONFIG_QTI_CRYPTO_COMMON=y +CONFIG_QTI_CRYPTO_TZ=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y @@ -608,6 +615,7 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y diff --git a/arch/arm64/configs/vendor/sa8155_defconfig b/arch/arm64/configs/vendor/sa8155_defconfig index 13d5861e2017..b7f96a3555da 100644 --- a/arch/arm64/configs/vendor/sa8155_defconfig +++ b/arch/arm64/configs/vendor/sa8155_defconfig @@ -54,6 +54,8 @@ CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y # CONFIG_BLK_DEV_BSG is not set +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y # 
CONFIG_IOSCHED_DEADLINE is not set CONFIG_CFQ_GROUP_IOSCHED=y @@ -291,8 +293,11 @@ CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y +CONFIG_SCSI_UFS_CRYPTO=y +CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -611,6 +616,8 @@ CONFIG_MSM_PERFORMANCE=y CONFIG_MSM_DRM_TECHPACK=y CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_CDSP_RM=y +CONFIG_QTI_CRYPTO_COMMON=y +CONFIG_QTI_CRYPTO_TZ=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y @@ -642,6 +649,7 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y From d0d707d0ce1cfc041fe9bb2d8b1d31bf38928b40 Mon Sep 17 00:00:00 2001 From: gkiranku Date: Wed, 29 Jul 2020 18:28:57 +0530 Subject: [PATCH 078/141] msm: kgsl: skip if requested address doesn't fall in the svm range User should not be provided address out of SVM region. Return error for any such requests from user. 
Change-Id: If149044039b156f8192f405714f5c1a0571004e7 Signed-off-by: gkiranku Signed-off-by: Deepak Kumar --- drivers/gpu/msm/kgsl_iommu.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index ca65e8fc0dfd..a493c4fbe8d8 100644 --- a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -2502,6 +2502,22 @@ static uint64_t kgsl_iommu_find_svm_region(struct kgsl_pagetable *pagetable, return addr; } +static bool iommu_addr_in_svm_ranges(struct kgsl_iommu_pt *pt, + u64 gpuaddr, u64 size) +{ + if ((gpuaddr >= pt->compat_va_start && gpuaddr < pt->compat_va_end) && + ((gpuaddr + size) > pt->compat_va_start && + (gpuaddr + size) <= pt->compat_va_end)) + return true; + + if ((gpuaddr >= pt->svm_start && gpuaddr < pt->svm_end) && + ((gpuaddr + size) > pt->svm_start && + (gpuaddr + size) <= pt->svm_end)) + return true; + + return false; +} + static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable, uint64_t gpuaddr, uint64_t size) { @@ -2509,9 +2525,8 @@ static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable, struct kgsl_iommu_pt *pt = pagetable->priv; struct rb_node *node; - /* Make sure the requested address doesn't fall in the global range */ - if (ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr) || - ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr + size)) + /* Make sure the requested address doesn't fall out of SVM range */ + if (!iommu_addr_in_svm_ranges(pt, gpuaddr, size)) return -ENOMEM; spin_lock(&pagetable->lock); From 583fa31aae546e4784fdbf8deda6ba3aef6688a5 Mon Sep 17 00:00:00 2001 From: Anirudh Ghayal Date: Wed, 12 Aug 2020 21:05:15 +0530 Subject: [PATCH 079/141] power: qpnp-qg/fg-gen3/gen4: Report TIME_TO_FULL_NOW property Report TIME_TO_FULL_NOW power-supply property required by userspace. 
Change-Id: I97d37e8feb50e16d02a122211e90bdcede2b7f12 Signed-off-by: Anirudh Ghayal --- drivers/power/supply/qcom/qpnp-fg-gen3.c | 4 ++++ drivers/power/supply/qcom/qpnp-fg-gen4.c | 4 ++++ drivers/power/supply/qcom/qpnp-qg.c | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c index 8c4a2ace9f56..7134225ffbd5 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen3.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c @@ -3663,6 +3663,9 @@ static int fg_psy_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG: rc = fg_get_time_to_full(fg, &pval->intval); break; + case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: + rc = fg_get_time_to_full(fg, &pval->intval); + break; case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: rc = fg_get_time_to_empty(fg, &pval->intval); break; @@ -3888,6 +3891,7 @@ static enum power_supply_property fg_psy_props[] = { POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW, POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, POWER_SUPPLY_PROP_SOC_REPORTING_READY, POWER_SUPPLY_PROP_DEBUG_BATTERY, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c index 42c0ccf4ba54..85a62678b79b 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen4.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c @@ -4437,6 +4437,9 @@ static int fg_psy_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG: rc = ttf_get_time_to_full(chip->ttf, &pval->intval); break; + case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: + rc = ttf_get_time_to_full(chip->ttf, &pval->intval); + break; case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: rc = ttf_get_time_to_empty(chip->ttf, &pval->intval); break; @@ -4621,6 +4624,7 @@ static enum power_supply_property fg_psy_props[] = { POWER_SUPPLY_PROP_DEBUG_BATTERY, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, 
POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, POWER_SUPPLY_PROP_CC_STEP, POWER_SUPPLY_PROP_CC_STEP_SEL, diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c index e2bff0c0f48c..5651c8c0ccf9 100644 --- a/drivers/power/supply/qcom/qpnp-qg.c +++ b/drivers/power/supply/qcom/qpnp-qg.c @@ -2150,6 +2150,9 @@ static int qg_psy_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG: rc = ttf_get_time_to_full(chip->ttf, &pval->intval); break; + case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: + rc = ttf_get_time_to_full(chip->ttf, &pval->intval); + break; case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: rc = ttf_get_time_to_empty(chip->ttf, &pval->intval); break; @@ -2235,6 +2238,7 @@ static enum power_supply_property qg_psy_props[] = { POWER_SUPPLY_PROP_CHARGE_FULL, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, POWER_SUPPLY_PROP_ESR_ACTUAL, POWER_SUPPLY_PROP_ESR_NOMINAL, From f212f6b4648585d3b3df4d887eb1fcb3201b654e Mon Sep 17 00:00:00 2001 From: Anirudh Ghayal Date: Tue, 18 Aug 2020 16:22:37 +0530 Subject: [PATCH 080/141] power: qpnp-smb2/5: Report TIME_TO_FULL_NOW and CHARGE_FULL_DESIGN Report TIME_TO_FULL_NOW and CHARGE_FULL_DESIGN from battery power-supply class property required by userspace. 
Change-Id: I97d37e8feb50e16d02a122211e90bdcede2b7f13 Signed-off-by: Anirudh Ghayal --- drivers/power/supply/qcom/qpnp-smb2.c | 4 ++++ drivers/power/supply/qcom/qpnp-smb5.c | 10 ++++++++++ 2 files changed, 14 insertions(+) diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c index b54c20d8c04e..c0e59cec26a6 100644 --- a/drivers/power/supply/qcom/qpnp-smb2.c +++ b/drivers/power/supply/qcom/qpnp-smb2.c @@ -1020,6 +1020,8 @@ static enum power_supply_property smb2_batt_props[] = { POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, POWER_SUPPLY_PROP_CHARGE_COUNTER, POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, POWER_SUPPLY_PROP_CYCLE_COUNT, POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE, }; @@ -1130,9 +1132,11 @@ static int smb2_batt_get_prop(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CHARGE_COUNTER: case POWER_SUPPLY_PROP_CHARGE_FULL: + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: case POWER_SUPPLY_PROP_CYCLE_COUNT: case POWER_SUPPLY_PROP_VOLTAGE_NOW: case POWER_SUPPLY_PROP_TEMP: + case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: rc = smblib_get_prop_from_bms(chg, psp, val); break; case POWER_SUPPLY_PROP_CURRENT_NOW: diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c index 176b5258f20f..f0e19d9108fe 100644 --- a/drivers/power/supply/qcom/qpnp-smb5.c +++ b/drivers/power/supply/qcom/qpnp-smb5.c @@ -1587,6 +1587,8 @@ static enum power_supply_property smb5_batt_props[] = { POWER_SUPPLY_PROP_RECHARGE_SOC, POWER_SUPPLY_PROP_CHARGE_FULL, POWER_SUPPLY_PROP_FORCE_RECHARGE, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE, }; @@ -1724,6 +1726,14 @@ static int smb5_batt_get_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_FORCE_RECHARGE: val->intval = 0; break; + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: + rc = smblib_get_prop_from_bms(chg, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, val); + 
break; + case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: + rc = smblib_get_prop_from_bms(chg, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, val); + break; case POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE: val->intval = chg->fcc_stepper_enable; break; From ca9ba89e0cca7d34723312b70cfa44678170d103 Mon Sep 17 00:00:00 2001 From: Arun Prakash Date: Fri, 14 Aug 2020 17:07:28 +0530 Subject: [PATCH 081/141] ARM: dts: msm: Disable disp_rsc for sa8155-capture Disp_rsc was added only to meet the dependency for common bus nodes which refers both apps_rsc and disp_rsc. It is not needed for capture kernel. so keeping it disabled. Change-Id: Ic57b832c442c88beb4e7b950a77caf58c826e264 Signed-off-by: Arun Prakash --- arch/arm64/boot/dts/qcom/sa8155-capture.dtsi | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/boot/dts/qcom/sa8155-capture.dtsi b/arch/arm64/boot/dts/qcom/sa8155-capture.dtsi index b876b34ae0c9..ce36343a5d8d 100644 --- a/arch/arm64/boot/dts/qcom/sa8155-capture.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155-capture.dtsi @@ -186,6 +186,7 @@ , , ; + status = "disabled"; }; qmp_aop: qcom,qmp-aop@c300000 { From 98980485aa23863962799ce8378ea367764e3b36 Mon Sep 17 00:00:00 2001 From: Sunil Paidimarri Date: Wed, 12 Aug 2020 16:54:35 -0700 Subject: [PATCH 082/141] ARM: dts: sdxprairie: Update num of tx queues to 4 Change num of queues supported to 4 on emac. 
Change-Id: Ie4908603956ffb25b5739c3fccfd87739c1492ae Acked-by: Rahul Kawadgave Signed-off-by: Sunil Paidimarri --- arch/arm64/boot/dts/qcom/sdxprairie.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/qcom/sdxprairie.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie.dtsi index 3206654f92a7..06d4c5e2ad92 100644 --- a/arch/arm64/boot/dts/qcom/sdxprairie.dtsi +++ b/arch/arm64/boot/dts/qcom/sdxprairie.dtsi @@ -1459,7 +1459,7 @@ }; mtl_tx_setup: tx-queues-config { - snps,tx-queues-to-use = <5>; + snps,tx-queues-to-use = <4>; snps,tx-sched-sp; queue0 { snps,dcb-algorithm; From ac338cdeb0122d1aabf86abf8f9bce3a8f3b171c Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Mon, 17 Aug 2020 10:58:09 -0700 Subject: [PATCH 083/141] dfc: Enable TX when grant is received When TX is OFF from one client but a positive grant is received from another client, treat it as TX ON. This is to prevent data stall due to a race condition that TX status from different transports could arrive out-of-order. 
Change-Id: Id257b79569390a47f863d713e89b2c868561af7b Acked-by: Weiyi Chen Signed-off-by: Subash Abhinov Kasiviswanathan --- drivers/soc/qcom/dfc_qmi.c | 25 ++++++++++++++++++------- drivers/soc/qcom/qmi_rmnet_i.h | 1 + 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/drivers/soc/qcom/dfc_qmi.c b/drivers/soc/qcom/dfc_qmi.c index ef4470ea9445..8ca4db08fd5b 100644 --- a/drivers/soc/qcom/dfc_qmi.c +++ b/drivers/soc/qcom/dfc_qmi.c @@ -989,7 +989,8 @@ static u32 dfc_adjust_grant(struct rmnet_bearer_map *bearer, static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos, u8 ack_req, u32 ancillary, struct dfc_flow_status_info_type_v01 *fc_info, - bool is_query) + bool is_query, + int index) { struct rmnet_bearer_map *itm = NULL; int rc = 0; @@ -1010,9 +1011,16 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos, if (itm->rat_switch) return 0; - /* If TX is OFF but we received grant, ignore it */ - if (itm->tx_off && fc_info->num_bytes > 0) - return 0; + /* If TX is OFF but we received grant from the same modem, + * ignore it. If the grant is from a different modem, + * assume TX had become ON. 
+ */ + if (itm->tx_off && fc_info->num_bytes > 0) { + if (itm->tx_status_index == index) + return 0; + itm->tx_off = false; + itm->tx_status_index = index; + } /* Adjuste grant for query */ if (dfc_qmap && is_query) { @@ -1120,7 +1128,7 @@ void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc, else dfc_update_fc_map( dev, qos, ack_req, ancillary, flow_status, - is_query); + is_query, dfc->index); spin_unlock_bh(&qos->qos_lock); } @@ -1131,7 +1139,8 @@ clean_out: static void dfc_update_tx_link_status(struct net_device *dev, struct qos_info *qos, u8 tx_status, - struct dfc_bearer_info_type_v01 *binfo) + struct dfc_bearer_info_type_v01 *binfo, + int index) { struct rmnet_bearer_map *itm = NULL; @@ -1139,6 +1148,8 @@ static void dfc_update_tx_link_status(struct net_device *dev, if (!itm) return; + itm->tx_status_index = index; + /* If no change in tx status, ignore */ if (itm->tx_off == !tx_status) return; @@ -1190,7 +1201,7 @@ void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc, spin_lock_bh(&qos->qos_lock); dfc_update_tx_link_status( - dev, qos, ind->tx_status, bearer_info); + dev, qos, ind->tx_status, bearer_info, dfc->index); spin_unlock_bh(&qos->qos_lock); } diff --git a/drivers/soc/qcom/qmi_rmnet_i.h b/drivers/soc/qcom/qmi_rmnet_i.h index d28c5c99aaee..c87eed4f361d 100644 --- a/drivers/soc/qcom/qmi_rmnet_i.h +++ b/drivers/soc/qcom/qmi_rmnet_i.h @@ -51,6 +51,7 @@ struct rmnet_bearer_map { bool tcp_bidir; bool rat_switch; bool tx_off; + int tx_status_index; u32 ack_txid; u32 mq_idx; u32 ack_mq_idx; From 6b58183d2a29c1dd37f453d86ad3894300adf4dd Mon Sep 17 00:00:00 2001 From: Sunil Paidimarri Date: Wed, 12 Aug 2020 17:48:30 -0700 Subject: [PATCH 084/141] Arm: dts: qsc405: Update num of tx queues to 4 Change num of tx queues supported to 4 on emac. 
Change-Id: Ie67040f5bdda03954299ed7bd489e0cdc60700bc Acked-by: Ning Cai Signed-off-by: Sunil Paidimarri --- arch/arm64/boot/dts/qcom/qcs405.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/qcom/qcs405.dtsi b/arch/arm64/boot/dts/qcom/qcs405.dtsi index 45574c4891c6..f3ec33797b6f 100644 --- a/arch/arm64/boot/dts/qcom/qcs405.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405.dtsi @@ -1584,7 +1584,7 @@ }; mtl_tx_setup: tx-queues-config { - snps,tx-queues-to-use = <5>; + snps,tx-queues-to-use = <4>; snps,tx-sched-sp; queue0 { snps,dcb-algorithm; From 6445ed8b088009d9fece6910178e1d88d4fb745d Mon Sep 17 00:00:00 2001 From: Sunil Paidimarri Date: Thu, 16 Jul 2020 19:30:48 -0700 Subject: [PATCH 085/141] ARM: dts: msm: add qmi and v2x over eth support for sa2150p Override mtl_rx_setup and mtl_tx_setup for cv2x. Add qoe_mode and and qoe vlan filtering config. Add cv2x_mode AP and cv2x vlan filtering config. Change-Id: I4e2e466ea6a547894695483508ab286bcfa69626 Acked-by: Ning Cai Signed-off-by: Sunil Paidimarri --- arch/arm64/boot/dts/qcom/qcs405.dtsi | 2 +- arch/arm64/boot/dts/qcom/sa2150p-ccard.dtsi | 28 +++++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/qcom/qcs405.dtsi b/arch/arm64/boot/dts/qcom/qcs405.dtsi index 45574c4891c6..e873c1518f3a 100644 --- a/arch/arm64/boot/dts/qcom/qcs405.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405.dtsi @@ -1611,7 +1611,7 @@ }; }; - ethqos_hw: qcom,ethernet@00020000 { + ethqos_hw: qcom,ethernet@07A80000 { compatible = "qcom,stmmac-ethqos"; reg = <0x07A80000 0x10000>, <0x7A96000 0x100>; diff --git a/arch/arm64/boot/dts/qcom/sa2150p-ccard.dtsi b/arch/arm64/boot/dts/qcom/sa2150p-ccard.dtsi index 29e8955016f2..5988b66988b7 100644 --- a/arch/arm64/boot/dts/qcom/sa2150p-ccard.dtsi +++ b/arch/arm64/boot/dts/qcom/sa2150p-ccard.dtsi @@ -213,11 +213,39 @@ extcon = <&usb2_extcon>; }; +&mtl_rx_setup { + queue2 { + snps,dcb-algorithm; + }; + + queue3 { + snps,dcb-algorithm; 
+ }; +}; + +&mtl_tx_setup { + queue2 { + snps,dcb-algorithm; + }; + + queue3 { + snps,dcb-algorithm; + }; +}; + ðqos_hw { status = "okay"; vreg_emac_phy-supply = <&vreg_emac_phy>; vreg_rgmii_io_pads-supply = <&vreg_rgmii_io_pads>; rxc-skew-ps = <0>; + qcom,qoe_mode = <1>; + qcom,qoe-queue = <2>; + qcom,qoe-vlan-offset = <0>; + snps,mtl-rx-config = <&mtl_rx_setup>; + snps,mtl-tx-config = <&mtl_tx_setup>; + qcom,cv2x_mode = <2>; + qcom,cv2x-queue = <3>; + qcom,cv2x-vlan-offset = <1>; pinctrl-names = "dev-emac-mdc", "dev-emac-mdio", "dev-emac-rgmii_txd0_state", "dev-emac-rgmii_txd1_state", From b55966fcdc98b37041ab1112b60a591310cd5877 Mon Sep 17 00:00:00 2001 From: Sunil Paidimarri Date: Tue, 14 Jul 2020 19:59:31 -0700 Subject: [PATCH 086/141] ARM: dts: msm: add qoe and cv2x over eth support for sa515m Override mtl_rx_setup and mtl_tx_setup for cv2x. Add qoe_mode and and qoe vlan filtering config. Add cv2x_mode mdm and cv2x vlan filtering config. Change-Id: If60646883b25e2d217c2bff0f9b52dc7ac0c19a1 Acked-by: Ning Cai Signed-off-by: Sunil Paidimarri --- .../boot/dts/qcom/sa515m-ccard-eth-ep.dts | 31 +++++++++++++++++++ .../boot/dts/qcom/sa515m-v2-ccard-eth-ep.dts | 31 +++++++++++++++++++ 2 files changed, 62 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sa515m-ccard-eth-ep.dts b/arch/arm64/boot/dts/qcom/sa515m-ccard-eth-ep.dts index 7523ffe8312f..f00de0ec2110 100644 --- a/arch/arm64/boot/dts/qcom/sa515m-ccard-eth-ep.dts +++ b/arch/arm64/boot/dts/qcom/sa515m-ccard-eth-ep.dts @@ -54,3 +54,34 @@ qcom,default-policy-nop; status = "okay"; }; + +&mtl_rx_setup { + queue2 { + snps,dcb-algorithm; + }; + + queue3 { + snps,dcb-algorithm; + }; +}; + +&mtl_tx_setup { + queue2 { + snps,dcb-algorithm; + }; + + queue3 { + snps,dcb-algorithm; + }; +}; + +ðqos_hw { + qcom,qoe_mode = <1>; + qcom,qoe-queue = <2>; + qcom,qoe-vlan-offset = <0>; + snps,mtl-rx-config = <&mtl_rx_setup>; + snps,mtl-tx-config = <&mtl_tx_setup>; + qcom,cv2x_mode = <1>; + qcom,cv2x-queue = <3>; + 
qcom,cv2x-vlan-offset = <1>; +}; diff --git a/arch/arm64/boot/dts/qcom/sa515m-v2-ccard-eth-ep.dts b/arch/arm64/boot/dts/qcom/sa515m-v2-ccard-eth-ep.dts index e0a7d29a6b27..3b2746771558 100644 --- a/arch/arm64/boot/dts/qcom/sa515m-v2-ccard-eth-ep.dts +++ b/arch/arm64/boot/dts/qcom/sa515m-v2-ccard-eth-ep.dts @@ -54,3 +54,34 @@ qcom,default-policy-nop; status = "okay"; }; + +&mtl_rx_setup { + queue2 { + snps,dcb-algorithm; + }; + + queue3 { + snps,dcb-algorithm; + }; +}; + +&mtl_tx_setup { + queue2 { + snps,dcb-algorithm; + }; + + queue3 { + snps,dcb-algorithm; + }; +}; + +ðqos_hw { + qcom,qoe_mode = <1>; + qcom,qoe-queue = <2>; + qcom,qoe-vlan-offset = <0>; + snps,mtl-rx-config = <&mtl_rx_setup>; + snps,mtl-tx-config = <&mtl_tx_setup>; + qcom,cv2x_mode = <1>; + qcom,cv2x-queue = <3>; + qcom,cv2x-vlan-offset = <1>; +}; From 21f8b5053493d80b47f2eae27b32103d88e56c09 Mon Sep 17 00:00:00 2001 From: Anant Goel Date: Mon, 3 Aug 2020 23:32:18 -0700 Subject: [PATCH 087/141] ARM: dts: msm: Add new QUPv3 SIDs for SA8155 VM Add the new SIDs that are used by the QUPv3 tiles for the SA8155 VM platform. 
Change-Id: I167ddf9997a649b32a331637a118dc5c8ae962d5 Signed-off-by: Anant Goel --- arch/arm64/boot/dts/qcom/sa8155-vm.dtsi | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sa8155-vm.dtsi b/arch/arm64/boot/dts/qcom/sa8155-vm.dtsi index 6982f506569d..009d2c30df29 100644 --- a/arch/arm64/boot/dts/qcom/sa8155-vm.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155-vm.dtsi @@ -1046,3 +1046,19 @@ &tlmm { dirconn-list = <37 216 1>; }; + +&iommu_qupv3_0_geni_se_cb { + iommus = <&apps_smmu 0xd8 0x0>; +}; + +&iommu_qupv3_1_geni_se_cb { + iommus = <&apps_smmu 0x618 0x0>; +}; + +&iommu_qupv3_2_geni_se_cb { + iommus = <&apps_smmu 0x7b8 0x0>; +}; + +&iommu_qupv3_3_geni_se_cb { + iommus = <&apps_smmu 0x4f8 0x0>; +}; From 006eb7beec39501b637a5490f61a64de41d6f39f Mon Sep 17 00:00:00 2001 From: Karthik Rudrapatna Date: Thu, 13 Aug 2020 10:10:48 -0700 Subject: [PATCH 088/141] net : stmmac : random mac addr assignment Assigning mac addr during device boot up in emac probe function. 
Change-Id: I0937354a1011ed60ee608693c3d6044d85203984 Signed-off-by: Karthik Rudrapatna --- .../stmicro/stmmac/dwmac-qcom-ethqos.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index b6c8996c99b5..1a6ae88ab643 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -73,7 +73,7 @@ int stmmac_enable_ipc_low; char tmp_buff[MAX_PROC_SIZE]; static struct qmp_pkt pkt; static char qmp_buf[MAX_QMP_MSG_SIZE + 1] = {0}; -static struct ip_params pparams = {"", "", "", ""}; +static struct ip_params pparams; static void qcom_ethqos_read_iomacro_por_values(struct qcom_ethqos *ethqos) { @@ -2283,8 +2283,9 @@ static void ethqos_is_ipv6_NW_stack_ready(struct work_struct *work) flush_delayed_work(ðqos->ipv6_addr_assign_wq); } -static int ethqos_set_early_eth_param(struct stmmac_priv *priv, - struct qcom_ethqos *ethqos) +static void ethqos_set_early_eth_param( + struct stmmac_priv *priv, + struct qcom_ethqos *ethqos) { int ret = 0; @@ -2311,12 +2312,7 @@ static int ethqos_set_early_eth_param(struct stmmac_priv *priv, schedule_delayed_work(ðqos->ipv6_addr_assign_wq, msecs_to_jiffies(1000)); } - - if (pparams.is_valid_mac_addr) { - ether_addr_copy(dev_addr, pparams.mac_addr); - memcpy(priv->dev->dev_addr, dev_addr, ETH_ALEN); - } - return ret; + return; } bool qcom_ethqos_ipa_enabled(void) @@ -2511,6 +2507,11 @@ static int qcom_ethqos_probe(struct platform_device *pdev) ndev = dev_get_drvdata(ðqos->pdev->dev); priv = netdev_priv(ndev); + if (pparams.is_valid_mac_addr) { + ether_addr_copy(dev_addr, pparams.mac_addr); + memcpy(priv->dev->dev_addr, dev_addr, ETH_ALEN); + } + if (ethqos->early_eth_enabled) { /* Initialize work*/ INIT_WORK(ðqos->early_eth, From c000ced86942453919b3269c5f368bc9be3de563 Mon Sep 17 00:00:00 2001 From: Karthik Rudrapatna Date: Thu, 
13 Aug 2020 10:24:31 -0700 Subject: [PATCH 089/141] arch: arm64 : boot: dts : Removing mac addr entry Removed mac addr entry in dtsi file in order to generate random mac addr. Change-Id: Ibfe7a76953d9ff6eec75a1d13323d7d92a530083 Signed-off-by: Karthik Rudrapatna --- arch/arm64/boot/dts/qcom/qcs405.dtsi | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm64/boot/dts/qcom/qcs405.dtsi b/arch/arm64/boot/dts/qcom/qcs405.dtsi index 45574c4891c6..839d8e80f4a5 100644 --- a/arch/arm64/boot/dts/qcom/qcs405.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405.dtsi @@ -1642,7 +1642,6 @@ qcom,bus-vector-names = "0", "10", "100", "1000"; snps,tso; snps,pbl = <32>; - mac-address = [00 55 7B B5 7D f7]; clocks = <&clock_gcc GCC_ETH_AXI_CLK>, <&clock_gcc GCC_ETH_SLAVE_AHB_CLK>, <&clock_gcc GCC_ETH_PTP_CLK>, From 9ab141145ea5c7d4cfa47002ae9b625ef3e773c7 Mon Sep 17 00:00:00 2001 From: Jay Jayanna Date: Fri, 24 Jul 2020 17:04:38 -0700 Subject: [PATCH 090/141] soc: qcom: qrtr: APIs for ethernet transport Provide APIs to be used by the ethernet transport/adaption layer to interface with qrtr-ethernet driver. Ethernet transport/adaption layer will use these APIs to: 1. Let qrtr-ethernet know when the ethernet link is up/down so that endpoint registration/deregistration is done as needed. 2. Provide a function pointer for qrtr-ethernet module to transmit data to the ethernet adaption layer. Change-Id: Ifd1a07aef2fc5cd906128d2bed68574a2ab4c29b Signed-off-by: Jay Jayanna --- include/soc/qcom/qrtr_ethernet.h | 51 ++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 include/soc/qcom/qrtr_ethernet.h diff --git a/include/soc/qcom/qrtr_ethernet.h b/include/soc/qcom/qrtr_ethernet.h new file mode 100644 index 000000000000..023453eaf0b4 --- /dev/null +++ b/include/soc/qcom/qrtr_ethernet.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * QRTR-Ethernet API header + */ + +#ifndef _QRTR_ETHERNET_H +#define _QRTR_ETHERNET_H + +#include + +/** + * qrtr_ethernet_cb_info - struct to pass transport layer information to qrtr + * @eth_send: function pointer to send qrtr packets to transport layer + */ +struct qrtr_ethernet_cb_info { + int (*eth_send)(struct sk_buff *skb); +}; + +/** + * eth_adapt_result - struct to pass on buffer from external ap to qrtr + * buf_addr - address of the buffer that holds the data from external ap + * bytes_xferd - size of the above buffer + */ +struct eth_adapt_result { + void *buf_addr; + size_t bytes_xferd; +}; + +#if IS_ENABLED(CONFIG_QRTR_ETHERNET) || IS_ENABLED(CONFIG_QTI_QRTR_ETHERNET) +void qcom_ethernet_init_cb(struct qrtr_ethernet_cb_info *cbinfo); +void qcom_ethernet_qrtr_dl_cb(struct eth_adapt_result *eth_res); +void qcom_ethernet_qrtr_status_cb(unsigned int event); +#else +static inline void qcom_ethernet_init_cb(struct qrtr_ethernet_cb_info *cbinfo) +{ +} +static inline void qcom_ethernet_qrtr_dl_cb(struct eth_adapt_result *eth_res) {} +static inline void qcom_ethernet_qrtr_status_cb(unsigned int event) {} +#endif /* CONFIG_QRTR_ETHERNET or CONFIG_QTI_QRTR_ETHERNET */ + +#endif /* _QRTR_ETHERNET_H */ From 192c6a05369ebe4ffa2387a306dbba157e9da4d0 Mon Sep 17 00:00:00 2001 From: Yadu MG Date: Mon, 10 Aug 2020 16:50:24 +0800 Subject: [PATCH 091/141] coresight: cti: Move CTI DEVID register read from cti_probe CTI devid register read is currently done in cti_probe. 
This can be done dynamically inside sysfs callbacks rather than of reading from the probe. Also, we must take into to account when accessing IP's which are in low power state. This patch tries to address both cases by moving cti devid register read inside sysfs callback and by using SMP cross calls. Change-Id: Ic963e21f91e14a17673e77dfef7dc6bab141ee65 Signed-off-by: Yadu MG --- drivers/hwtracing/coresight/coresight-cti.c | 50 +++++++++++++++++---- 1 file changed, 41 insertions(+), 9 deletions(-) diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c index 605e76a0f184..4b67f35fd077 100644 --- a/drivers/hwtracing/coresight/coresight-cti.c +++ b/drivers/hwtracing/coresight/coresight-cti.c @@ -94,8 +94,6 @@ struct cti_drvdata { struct coresight_cti cti; int refcnt; int cpu; - unsigned int trig_num_max; - unsigned int ch_num_max; bool cti_save; bool cti_hwclk; bool l2_off; @@ -1363,14 +1361,52 @@ static ssize_t cti_store_disable_gate(struct device *dev, } static DEVICE_ATTR(disable_gate, 0200, NULL, cti_store_disable_gate); -static ssize_t show_info_show(struct device *dev, struct device_attribute *attr, - char *buf) +struct cti_reg { + void __iomem *addr; + u32 data; +}; + +static void do_smp_cross_read(void *data) +{ + struct cti_reg *reg = data; + + reg->data = readl_relaxed(reg->addr); +} + +static u32 cti_devid_cross_read(const struct cti_drvdata *drvdata) +{ + struct cti_reg reg; + + reg.addr = drvdata->base + DEVID; + smp_call_function_single(drvdata->cpu, do_smp_cross_read, ®, 1); + return reg.data; +} + +static ssize_t show_info_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); ssize_t size = 0; + unsigned int ctidevid, trig_num_max, chan_num_max; + + mutex_lock(&drvdata->mutex); + + pm_runtime_get_sync(drvdata->dev); + + if (drvdata->cpu == -ENODEV) + ctidevid = cti_readl(drvdata, DEVID); + else + ctidevid = 
cti_devid_cross_read(drvdata); + + pm_runtime_put_sync(drvdata->dev); + + trig_num_max = (ctidevid & GENMASK(15, 8)) >> 8; + chan_num_max = (ctidevid & GENMASK(21, 16)) >> 16; size = scnprintf(&buf[size], PAGE_SIZE, "%d %d\n", - drvdata->trig_num_max, drvdata->ch_num_max); + trig_num_max, chan_num_max); + + mutex_unlock(&drvdata->mutex); return size; } @@ -1433,7 +1469,6 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id) { int ret; int trig; - unsigned int ctidevid; struct device *dev = &adev->dev; struct coresight_platform_data *pdata; struct cti_drvdata *drvdata; @@ -1541,9 +1576,6 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id) registered++; } - ctidevid = cti_readl(drvdata, DEVID); - drvdata->trig_num_max = (ctidevid & GENMASK(15, 8)) >> 8; - drvdata->ch_num_max = (ctidevid & GENMASK(21, 16)) >> 16; pm_runtime_put(&adev->dev); dev_dbg(dev, "CTI initialized\n"); return 0; From 583d50ff83b82da8da2e7258af84b88ab3318c03 Mon Sep 17 00:00:00 2001 From: Mahesh Reddy Kodidi Date: Fri, 21 Aug 2020 16:30:58 +0530 Subject: [PATCH 092/141] ARM: dts: msm: Changing the pet timeout as per granularity limit With the new timer implementation,there is extra granularity for each level and hence changing the granularity for pet-time * HZ 100 * Level Offset Granularity Range * 0 0 10 ms 0 ms - 630 ms * 1 64 80 ms 640 ms - 5110 ms (640ms - ~5s) * 2 128 640 ms 5120 ms - 40950 ms (~5s - ~40s). 
Change-Id: I945b2a40aaaffe45ee8e030d5c92a17a73272831 Signed-off-by: Mahesh Reddy Kodidi --- arch/arm64/boot/dts/qcom/sdxprairie.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/qcom/sdxprairie.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie.dtsi index 3206654f92a7..88db3c625473 100644 --- a/arch/arm64/boot/dts/qcom/sdxprairie.dtsi +++ b/arch/arm64/boot/dts/qcom/sdxprairie.dtsi @@ -264,7 +264,7 @@ reg-names = "wdt-base"; interrupts = <1 3 0>, <1 2 0>; qcom,bark-time = <11000>; - qcom,pet-time = <10000>; + qcom,pet-time = <9360>; qcom,wakeup-enable; }; From d72ba76b2792e801c8debda55a7c0aaaf276754f Mon Sep 17 00:00:00 2001 From: Karthik Rudrapatna Date: Wed, 19 Aug 2020 21:41:13 -0700 Subject: [PATCH 093/141] net : stmmac: rgmii clock was not setting to low On Link down event the rgmii clock is not setting to low speed. Rgmii clock should change to 50Mhz when Link down event is received. Change-Id: I62da6683642ee5625aadc793bb8ea72e37e60b37 Signed-off-by: Karthik Rudrapatna --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8a1a05b60e97..51bd0b938879 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -295,8 +295,14 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) */ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv) { - if (likely(priv->plat->fix_mac_speed)) - priv->plat->fix_mac_speed(priv->plat->bsp_priv, priv->speed); + if (likely(priv->plat->fix_mac_speed)) { + if (priv->phydev->link) + priv->plat->fix_mac_speed(priv->plat->bsp_priv, + priv->speed); + else + priv->plat->fix_mac_speed(priv->plat->bsp_priv, + SPEED_10); + } } /** @@ -854,6 +860,7 @@ static void stmmac_adjust_link(struct net_device *dev) priv->oldlink = true; } } else 
if (priv->oldlink) { + stmmac_hw_fix_mac_speed(priv); new_state = true; priv->oldlink = false; priv->speed = SPEED_UNKNOWN; From 17cb32c22633086fae94c88b303c292ae5396b8e Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Sat, 22 Aug 2020 18:36:05 -0700 Subject: [PATCH 094/141] msm: ipa3: fix the unmap logic Ethenet driver npn phy address was same for UL/DL pipes, currently this address was unmapped during DL pipe disconnect where UL pipe still avtive, move the logic to do the unmamp to the last pipe disconnect. Change-Id: I697edb844b8134d2a1d52e3c2bcfd0a7066edb2f Signed-off-by: Skylar Chang --- drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c index e3be643c3d0c..601ee86cea12 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c @@ -606,7 +606,7 @@ int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, } /* unmap the DL pipe */ - result = ipa3_smmu_map_uc_ntn_pipes(¶ms->dl, false, true); + result = ipa3_smmu_map_uc_ntn_pipes(¶ms->dl, false, false); if (result) { IPAERR("failed to unmap SMMU for DL %d\n", result); goto fail; @@ -627,7 +627,7 @@ int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, } /* unmap the UL pipe */ - result = ipa3_smmu_map_uc_ntn_pipes(¶ms->ul, false, false); + result = ipa3_smmu_map_uc_ntn_pipes(¶ms->ul, false, true); if (result) { IPAERR("failed to unmap SMMU for UL %d\n", result); goto fail; From 15e40516ac513ea51439776e043304bf9c2e6f45 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Thu, 27 Aug 2020 11:05:05 +0530 Subject: [PATCH 095/141] Revert "Revert "lib/string.c: implement a basic bcmp"" This reverts commit 474119c9cd74ffd59cd1f53cefcb20961c1f41d1. 
Signed-off-by: UtsavBalar1231 --- include/linux/string.h | 3 +++ lib/string.c | 20 ++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/include/linux/string.h b/include/linux/string.h index 1e6dd1b3d232..40318121f97e 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -146,6 +146,9 @@ extern void * memscan(void *,int,__kernel_size_t); #ifndef __HAVE_ARCH_MEMCMP extern int memcmp(const void *,const void *,__kernel_size_t); #endif +#ifndef __HAVE_ARCH_BCMP +extern int bcmp(const void *,const void *,__kernel_size_t); +#endif #ifndef __HAVE_ARCH_MEMCHR extern void * memchr(const void *,int,__kernel_size_t); #endif diff --git a/lib/string.c b/lib/string.c index fbfe0e1b27ae..e5ba3f214d23 100644 --- a/lib/string.c +++ b/lib/string.c @@ -886,6 +886,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count) EXPORT_SYMBOL(memcmp); #endif +#ifndef __HAVE_ARCH_BCMP +/** + * bcmp - returns 0 if and only if the buffers have identical contents. + * @a: pointer to first buffer. + * @b: pointer to second buffer. + * @len: size of buffers. + * + * The sign or magnitude of a non-zero return value has no particular + * meaning, and architectures may implement their own more efficient bcmp(). So + * while this particular implementation is a simple (tail) call to memcmp, do + * not rely on anything but whether the return value is zero or non-zero. + */ +#undef bcmp +int bcmp(const void *a, const void *b, size_t len) +{ + return memcmp(a, b, len); +} +EXPORT_SYMBOL(bcmp); +#endif + #ifndef __HAVE_ARCH_MEMSCAN /** * memscan - Find a character in an area of memory. From 84bb251c0e7b1f493df9b0467fc4d2d2a782e8e1 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Thu, 27 Aug 2020 11:05:14 +0530 Subject: [PATCH 096/141] Revert "Makefile: add -fno-builtin-bcmp" This reverts commit 9764a2f097e2a92bfa5dbef0352769cb9a0a5947. 
Signed-off-by: UtsavBalar1231 --- Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/Makefile b/Makefile index 8ce39e9174d6..094b7fd44a12 100644 --- a/Makefile +++ b/Makefile @@ -527,7 +527,6 @@ ifeq ($(ld-name),lld) KBUILD_CFLAGS += -fuse-ld=lld endif CLANG_FLAGS += -fno-builtin-stpcpy -CLANG_FLAGS += -fno-builtin-bcmp KBUILD_CPPFLAGS += -Qunused-arguments endif From fc0c11e2fe1300dca6f28251c2e884197cd476f7 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Thu, 27 Aug 2020 11:05:18 +0530 Subject: [PATCH 097/141] Revert "Makefile: add -fno-builtin-stpcpy" This reverts commit 1570aa51fb5b6bf251cc8e497aa8731b77c0ad5c. Signed-off-by: UtsavBalar1231 --- Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/Makefile b/Makefile index 094b7fd44a12..5b528c7c8451 100644 --- a/Makefile +++ b/Makefile @@ -526,7 +526,6 @@ export CLANG_FLAGS ifeq ($(ld-name),lld) KBUILD_CFLAGS += -fuse-ld=lld endif -CLANG_FLAGS += -fno-builtin-stpcpy KBUILD_CPPFLAGS += -Qunused-arguments endif From dfaada5971376865656b5e2edf66901ee47a0ae4 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Tue, 25 Aug 2020 06:58:36 -0700 Subject: [PATCH 098/141] lib/string.c: implement stpcpy LLVM implemented a recent "libcall optimization" that lowers calls to `sprintf(dest, "%s", str)` where the return value is used to `stpcpy(dest, str) - dest`. This generally avoids the machinery involved in parsing format strings. `stpcpy` is just like `strcpy` except it returns the pointer to the new tail of `dest`. This optimization was introduced into clang-12. Implement this so that we don't observe linkage failures due to missing symbol definitions for `stpcpy`. Similar to last year's fire drill with: commit 5f074f3e192f ("lib/string.c: implement a basic bcmp") The kernel is somewhere between a "freestanding" environment (no full libc) and "hosted" environment (many symbols from libc exist with the same type, function signature, and semantics). As H. 
Peter Anvin notes, there's not really a great way to inform the compiler that you're targeting a freestanding environment but would like to opt-in to some libcall optimizations (see pr/47280 below), rather than opt-out. Arvind notes, -fno-builtin-* behaves slightly differently between GCC and Clang, and Clang is missing many __builtin_* definitions, which I consider a bug in Clang and am working on fixing. Masahiro summarizes the subtle distinction between compilers justly: To prevent transformation from foo() into bar(), there are two ways in Clang to do that; -fno-builtin-foo, and -fno-builtin-bar. There is only one in GCC; -fno-buitin-foo. (Any difference in that behavior in Clang is likely a bug from a missing __builtin_* definition.) Masahiro also notes: We want to disable optimization from foo() to bar(), but we may still benefit from the optimization from foo() into something else. If GCC implements the same transform, we would run into a problem because it is not -fno-builtin-bar, but -fno-builtin-foo that disables that optimization. In this regard, -fno-builtin-foo would be more future-proof than -fno-built-bar, but -fno-builtin-foo is still potentially overkill. We may want to prevent calls from foo() being optimized into calls to bar(), but we still may want other optimization on calls to foo(). It seems that compilers today don't quite provide the fine grain control over which libcall optimizations pseudo-freestanding environments would prefer. Finally, Kees notes that this interface is unsafe, so we should not encourage its use. As such, I've removed the declaration from any header, but it still needs to be exported to avoid linkage errors in modules. 
Cc: stable@vger.kernel.org Link: https://bugs.llvm.org/show_bug.cgi?id=47162 Link: https://bugs.llvm.org/show_bug.cgi?id=47280 Link: https://github.com/ClangBuiltLinux/linux/issues/1126 Link: https://man7.org/linux/man-pages/man3/stpcpy.3.html Link: https://pubs.opengroup.org/onlinepubs/9699919799/functions/stpcpy.html Link: https://reviews.llvm.org/D85963 Suggested-by: Andy Lavr Suggested-by: Arvind Sankar Suggested-by: Joe Perches Suggested-by: Masahiro Yamada Suggested-by: Rasmus Villemoes Reported-by: Sami Tolvanen Signed-off-by: Nick Desaulniers Signed-off-by: Danny Lin Signed-off-by: UtsavBalar1231 --- lib/string.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/lib/string.c b/lib/string.c index e5ba3f214d23..c9fd2c59c30b 100644 --- a/lib/string.c +++ b/lib/string.c @@ -267,7 +267,30 @@ ssize_t strscpy_pad(char *dest, const char *src, size_t count) return written; } -EXPORT_SYMBOL(strscpy_pad); + +/** + * stpcpy - copy a string from src to dest returning a pointer to the new end + * of dest, including src's %NUL-terminator. May overrun dest. + * @dest: pointer to end of string being copied into. Must be large enough + * to receive copy. + * @src: pointer to the beginning of string being copied from. Must not overlap + * dest. + * + * stpcpy differs from strcpy in a key way: the return value is the new + * %NUL-terminated character. (for strcpy, the return value is a pointer to + * src. This interface is considered unsafe as it doesn't perform bounds + * checking of the inputs. As such it's not recommended for usage. Instead, + * its definition is provided in case the compiler lowers other libcalls to + * stpcpy. 
+ */ +char *stpcpy(char *__restrict__ dest, const char *__restrict__ src); +char *stpcpy(char *__restrict__ dest, const char *__restrict__ src) +{ + while ((*dest++ = *src++) != '\0') + /* nothing */; + return --dest; +} +EXPORT_SYMBOL(stpcpy); #ifndef __HAVE_ARCH_STRCAT /** From ea0e991731b96f33502d5168e0cb7ec6451eded0 Mon Sep 17 00:00:00 2001 From: Danny Lin Date: Tue, 25 Aug 2020 22:28:23 -0700 Subject: [PATCH 099/141] Makefile: Remove obsolete -fno-builtin flag CAF cherry-picked an obsolete LLVMLinux patch from 2014 in commit 96d47aed062d6b0940aa575d6b5b6cd8ccfdbef9 that introduced this unnecessary flag. Now that the functions used by Clang's libcall optimizations have been implemented, we can allow Clang to perform said optimizations for a minor performance bump in certain code paths. Signed-off-by: Danny Lin Signed-off-by: UtsavBalar1231 --- Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/Makefile b/Makefile index 5b528c7c8451..1dde0eb2e3bb 100644 --- a/Makefile +++ b/Makefile @@ -793,7 +793,6 @@ ifeq ($(cc-name),clang) KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) KBUILD_CFLAGS += $(call cc-disable-warning, duplicate-decl-specifier) -KBUILD_CFLAGS += -fno-builtin KBUILD_CFLAGS += $(call cc-option, -Wno-undefined-optimized) KBUILD_CFLAGS += $(call cc-option, -Wno-tautological-constant-out-of-range-compare) KBUILD_CFLAGS += $(call cc-option, -mllvm -disable-struct-const-merge) From 14950f0ccd5ba9e9e9a017c00ec2f2fe99689503 Mon Sep 17 00:00:00 2001 From: Danny Lin Date: Tue, 25 Aug 2020 01:16:42 -0700 Subject: [PATCH 100/141] drm/msm/sde: Clean up non-60 Hz panel reset code Signed-off-by: Danny Lin Signed-off-by: UtsavBalar1231 --- drivers/gpu/drm/msm/sde/sde_encoder.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 72007a282191..4a0ace1488f8 100644 --- 
a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -225,6 +225,7 @@ enum sde_enc_rc_states { * @cur_conn_roi: current connector roi * @prv_conn_roi: previous connector roi to optimize if unchanged * @crtc pointer to drm_crtc + * @first_kickoff_done: boolean for whether the first kickoff is done * @recovery_events_enabled: status of hw recovery feature enable by client * @elevated_ahb_vote: increase AHB bus speed for the first frame * after power collapse @@ -285,6 +286,7 @@ struct sde_encoder_virt { struct sde_rect cur_conn_roi; struct sde_rect prv_conn_roi; struct drm_crtc *crtc; + bool first_kickoff_done; bool recovery_events_enabled; bool elevated_ahb_vote; @@ -4753,17 +4755,15 @@ void sde_encoder_kickoff(struct drm_encoder *drm_enc, bool is_error) * Trigger a panel reset if this is the first kickoff and the refresh * rate is not 60 Hz */ - if (cmpxchg(&first_run, true, false) && + if (!cmpxchg(&sde_enc->first_kickoff_done, false, true) && sde_enc->crtc->mode.vrefresh != 60) { - struct sde_connector *conn = container_of(phys->connector, struct sde_connector, base); + struct sde_connector *conn = to_sde_connector(phys->connector); struct drm_event event = { .type = DRM_EVENT_PANEL_DEAD, .length = sizeof(bool) }; conn->panel_dead = true; - event.type = DRM_EVENT_PANEL_DEAD; - event.length = sizeof(bool); msm_mode_object_event_notify(&conn->base.base, conn->base.dev, &event, (u8 *) &conn->panel_dead); } From 6431d60afe641af7abd0053b745a4fcd3590576a Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Thu, 27 Aug 2020 23:34:47 +0530 Subject: [PATCH 101/141] teckpack: audio: tfa98xx: read original memtrack data from device Signed-off-by: UtsavBalar1231 --- techpack/audio/asoc/codecs/tfa98xx/src/tfa98xx.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/techpack/audio/asoc/codecs/tfa98xx/src/tfa98xx.c b/techpack/audio/asoc/codecs/tfa98xx/src/tfa98xx.c index c6bb4763e5b7..ff861ad08f09 100644 --- 
a/techpack/audio/asoc/codecs/tfa98xx/src/tfa98xx.c +++ b/techpack/audio/asoc/codecs/tfa98xx/src/tfa98xx.c @@ -3806,6 +3806,13 @@ static long tfa98xx_misc_device_control_ioctl(struct file *file, /* clear buffer and read livedata from dsp.*/ memset((char *)(&livedata[0]), 0x00, sizeof(livedata)); + /* read original memtrack data from device. */ + if (Tfa98xx_Error_Ok == tfa98xx_read_memtrack_data(tfa98xx, &livedata[0])) { + pr_debug("Device 0x%x read memtrack data sucessed.\n", tfa98xx->i2c->addr); + } else { + pr_err("Device 0x%x read memtrack data failed.\n", tfa98xx->i2c->addr); + } + /* copy data to user spcace. if copied is successed, will be returned actual size to user space. */ livedata_length_bytes = sizeof(int) * MEMTRACK_ITEM_MAX; From 43a2a257cdfb43dcee100e58541ad429cd704305 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 00:16:18 +0530 Subject: [PATCH 102/141] sm8150: fix gcc LTO warnings Signed-off-by: UtsavBalar1231 --- drivers/clk/clk.c | 2 +- drivers/gpu/drm/drm_plane_helper.c | 2 +- drivers/gpu/drm/msm/sde/sde_encoder.c | 2 +- drivers/gpu/msm/kgsl.c | 2 +- drivers/i2c/busses/i2c-qcom-geni.c | 1 - drivers/spi/spi-geni-qcom.c | 1 - drivers/tty/serial/msm_geni_serial.c | 1 - drivers/usb/gadget/function/Makefile | 2 +- net/ipv4/ip_options.c | 2 +- 9 files changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 7ee62043cab9..296e1f263328 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -69,7 +69,7 @@ static struct hlist_head *all_lists[] = { NULL, }; -static struct hlist_head *orphan_list[] = { +static struct hlist_head __maybe_unused *orphan_list[] = { &clk_orphan_list, NULL, }; diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 759ed93f4ba8..cc58505b3eca 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -332,7 +332,7 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc, }; 
struct drm_connector **connector_list; int num_connectors, ret; - bool visible; + bool visible = false; ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip, diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 4a0ace1488f8..6b03e755790a 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -4709,7 +4709,7 @@ static int _sde_encoder_reset_ctl_hw(struct drm_encoder *drm_enc) void sde_encoder_kickoff(struct drm_encoder *drm_enc, bool is_error) { - static bool first_run = true; + static __maybe_unused bool first_run = true; struct sde_encoder_virt *sde_enc; struct sde_encoder_phys *phys; ktime_t wakeup_time; diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index c346f4174e41..472d5192d2b3 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -4901,7 +4901,7 @@ int kgsl_device_platform_probe(struct kgsl_device *device) { int status = -EINVAL; struct resource *res; - int cpu; + __maybe_unused int cpu; status = _register_device(device); if (status) diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index dd7ee148b780..a9008cf2dd6b 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -803,7 +803,6 @@ static int geni_i2c_probe(struct platform_device *pdev) struct platform_device *wrapper_pdev; struct device_node *wrapper_ph_node; int ret; - char boot_marker[40]; u32 geni_i2c_clk_map_dt[5]; gi2c = devm_kzalloc(&pdev->dev, sizeof(*gi2c), GFP_KERNEL); diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index 2d5ca183da3a..205920d55e03 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -1577,7 +1577,6 @@ static int spi_geni_probe(struct platform_device *pdev) struct platform_device *wrapper_pdev; struct device_node *wrapper_ph_node; bool rt_pri, slave_en; - char boot_marker[40]; spi = spi_alloc_master(&pdev->dev, sizeof(struct 
spi_geni_master)); if (!spi) { diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c index 7ec5da547958..9bbc7cae39d6 100644 --- a/drivers/tty/serial/msm_geni_serial.c +++ b/drivers/tty/serial/msm_geni_serial.c @@ -2409,7 +2409,6 @@ static int msm_geni_serial_probe(struct platform_device *pdev) struct platform_device *wrapper_pdev; struct device_node *wrapper_ph_node; u32 wake_char = 0; - char boot_marker[40]; id = of_match_device(msm_geni_device_tbl, &pdev->dev); if (id) { diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile index 7cd7afe1910e..bdca28e5dfec 100644 --- a/drivers/usb/gadget/function/Makefile +++ b/drivers/usb/gadget/function/Makefile @@ -5,6 +5,7 @@ ccflags-y := -I$(srctree)/drivers/usb/gadget/ ccflags-y += -I$(srctree)/drivers/usb/gadget/udc/ +ccflags-y += -Wno-unused-variable # USB Functions usb_f_acm-y := f_acm.o @@ -33,7 +34,6 @@ obj-$(CONFIG_USB_F_RNDIS) += usb_f_rndis.o usb_f_mass_storage-y := f_mass_storage.o storage_common.o obj-$(CONFIG_USB_F_MASS_STORAGE)+= usb_f_mass_storage.o usb_f_fs-y := f_fs.o -CFLAGS_f_fs.o += $(call cc-disable-warning, unused-variable) obj-$(CONFIG_USB_F_FS) += usb_f_fs.o obj-$(CONFIG_USB_U_AUDIO) += u_audio.o usb_f_uac1-y := f_uac1.o diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 3db31bb9df50..b7fdcde35590 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -496,7 +496,7 @@ void ip_options_undo(struct ip_options *opt) if (opt->srr) { unsigned char *optptr = opt->__data+opt->srr-sizeof(struct iphdr); memmove(optptr+7, optptr+3, optptr[1]-7); - memcpy(optptr+3, &opt->faddr, 4); + memcpy(optptr+3, &opt->faddr, 0); } if (opt->rr_needaddr) { unsigned char *optptr = opt->__data+opt->rr-sizeof(struct iphdr); From 07abce33448013c719583ad277d2f49d1418b24f Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 11:24:23 +0530 Subject: [PATCH 103/141] drm: msm: dsi_parser: Fix strlcpy usage 
../drivers/gpu/drm/msm/dsi-staging/dsi_parser.c:245:33: warning: size argument in 'strlcpy' call appears to be size of the source; expected the size of the destination [-Wstrlcpy-strlcat-size] strlcpy(prop->raw, buf, strlen(buf) + 1); Signed-off-by: UtsavBalar1231 --- drivers/gpu/drm/msm/dsi-staging/dsi_parser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_parser.c b/drivers/gpu/drm/msm/dsi-staging/dsi_parser.c index d6e743754da7..94f787bfeb7f 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_parser.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_parser.c @@ -242,7 +242,7 @@ static bool dsi_parser_parse_prop(struct device *dev, if (!prop->raw) goto end; - strlcpy(prop->raw, buf, strlen(buf) + 1); + strlcpy(prop->raw, buf, strlen(prop->raw)); found = true; From 3758df0721c132de5e8e1c82d824def4a4487610 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 10:30:00 +0530 Subject: [PATCH 104/141] ARM64: configs: raphael: Enable HL/TTL nftable targets - allow support to bully your ISP Signed-off-by: UtsavBalar1231 --- arch/arm64/configs/raphael_defconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/configs/raphael_defconfig b/arch/arm64/configs/raphael_defconfig index 4312cff59179..ecbcdac60936 100644 --- a/arch/arm64/configs/raphael_defconfig +++ b/arch/arm64/configs/raphael_defconfig @@ -1048,7 +1048,7 @@ CONFIG_IP_NF_TARGET_REDIRECT=y CONFIG_IP_NF_MANGLE=y # CONFIG_IP_NF_TARGET_CLUSTERIP is not set # CONFIG_IP_NF_TARGET_ECN is not set -# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_TARGET_TTL=y CONFIG_IP_NF_RAW=y CONFIG_IP_NF_SECURITY=y CONFIG_IP_NF_ARPTABLES=y @@ -1075,7 +1075,7 @@ CONFIG_IP6_NF_MATCH_HL=y # CONFIG_IP6_NF_MATCH_MH is not set CONFIG_IP6_NF_MATCH_RPFILTER=y # CONFIG_IP6_NF_MATCH_RT is not set -# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_TARGET_HL=y CONFIG_IP6_NF_FILTER=y CONFIG_IP6_NF_TARGET_REJECT=y # CONFIG_IP6_NF_TARGET_SYNPROXY is not 
set From 0abb20f803eea37e6b050087060c6d2c134449c7 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Wed, 20 May 2020 11:26:58 +0530 Subject: [PATCH 105/141] ARM64: configs: raphael: Enable Cleancache Signed-off-by: UtsavBalar1231 --- arch/arm64/configs/raphael_defconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/configs/raphael_defconfig b/arch/arm64/configs/raphael_defconfig index ecbcdac60936..2f18beef1e8f 100644 --- a/arch/arm64/configs/raphael_defconfig +++ b/arch/arm64/configs/raphael_defconfig @@ -589,7 +589,7 @@ CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y # CONFIG_MEMORY_FAILURE is not set # CONFIG_TRANSPARENT_HUGEPAGE is not set # CONFIG_ARCH_WANTS_THP_SWAP is not set -# CONFIG_CLEANCACHE is not set +CONFIG_CLEANCACHE=y # CONFIG_FRONTSWAP is not set CONFIG_CMA=y CONFIG_CMA_AREAS=7 From fda23b62d1c2db5daa2a55006776607601817162 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 10:33:22 +0530 Subject: [PATCH 106/141] ARM64: configs: raphael: Enable generic sound device drivers some external DAC relies on this This reverts commit 88c9604f4768782ef40d15802d64214a20f3ca3c. 
Signed-off-by: UtsavBalar1231 --- arch/arm64/configs/raphael_defconfig | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/arm64/configs/raphael_defconfig b/arch/arm64/configs/raphael_defconfig index 2f18beef1e8f..f5057689aa89 100644 --- a/arch/arm64/configs/raphael_defconfig +++ b/arch/arm64/configs/raphael_defconfig @@ -3076,13 +3076,18 @@ CONFIG_SND_PROC_FS=y # CONFIG_SND_SEQUENCER is not set # CONFIG_SND_OPL3_LIB_SEQ is not set # CONFIG_SND_OPL4_LIB_SEQ is not set -# CONFIG_SND_DRIVERS is not set +CONFIG_SND_DRIVERS=y +# CONFIG_SND_DUMMY is not set +# CONFIG_SND_ALOOP is not set +# CONFIG_SND_MTPAV is not set +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_MPU401 is not set # # HD-Audio # CONFIG_SND_HDA_PREALLOC_SIZE=64 -# CONFIG_SND_SPI is not set +CONFIG_SND_SPI=y CONFIG_SND_USB=y CONFIG_SND_USB_AUDIO=y # CONFIG_SND_USB_UA101 is not set From 080317a09c9bb324935f277f5b40db054a4d81e4 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 10:33:48 +0530 Subject: [PATCH 107/141] ARM64: configs: raphael: Enable External SOCs Control Support Some external DAC relies on this This reverts commit 8d80225448100e7ec3ae29633076566f73ed6f3d. 
Signed-off-by: UtsavBalar1231 --- arch/arm64/configs/raphael_defconfig | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/arm64/configs/raphael_defconfig b/arch/arm64/configs/raphael_defconfig index f5057689aa89..c77ca7415813 100644 --- a/arch/arm64/configs/raphael_defconfig +++ b/arch/arm64/configs/raphael_defconfig @@ -4708,7 +4708,14 @@ CONFIG_NVMEM_SPMI_SDAM=y # CONFIG_FSI is not set # CONFIG_TEE is not set CONFIG_SENSORS_SSC=y -# CONFIG_ESOC is not set +CONFIG_ESOC=y +CONFIG_ESOC_DEV=y +CONFIG_ESOC_CLIENT=y +# CONFIG_ESOC_DEBUG is not set +CONFIG_ESOC_MDM_4x=y +CONFIG_ESOC_MDM_DRV=y +CONFIG_ESOC_MDM_DBG_ENG=y +# CONFIG_MDM_DBG_REQ_ENG is not set # # Qualcomm RmNet extensions From 75417a9d9c4b6b71b6e0cafaed04223b43f10cb5 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 16:59:46 +0530 Subject: [PATCH 108/141] Revert "sched: walt: hardcode sched_coloc_downmigrate_ns to 40ms" This increased active power consumption This reverts commit deec7514320fb222f1223bf1a70591914baee2e6. Signed-off-by: UtsavBalar1231 --- kernel/sched/walt.c | 2 +- kernel/sysctl.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c index 53b5acb6bc32..407d4a8a47c0 100644 --- a/kernel/sched/walt.c +++ b/kernel/sched/walt.c @@ -2662,7 +2662,7 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp, * Enable colocation and frequency aggregation for all threads in a process. * The children inherits the group id from the parent.
*/ -unsigned int __read_mostly sysctl_sched_coloc_downmigrate_ns = 400000000; +unsigned int __read_mostly sysctl_sched_coloc_downmigrate_ns; struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID]; static LIST_HEAD(active_related_thread_groups); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index b4e966df0fa5..887a8b950179 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -457,7 +457,7 @@ static struct ctl_table kern_table[] = { .procname = "sched_coloc_downmigrate_ns", .data = &sysctl_sched_coloc_downmigrate_ns, .maxlen = sizeof(unsigned int), - .mode = 0444, + .mode = 0644, .proc_handler = proc_douintvec_minmax, }, { From 078f4fdc8323932dd79e4c3291ed3776297811b1 Mon Sep 17 00:00:00 2001 From: Yaroslav Furman Date: Tue, 18 Aug 2020 19:16:53 +0300 Subject: [PATCH 109/141] selinux: avc: fix build with CONFIG_AUDIT=y Signed-off-by: Yaroslav Furman Signed-off-by: Adam W. Willis Signed-off-by: UtsavBalar1231 --- security/selinux/avc.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/security/selinux/avc.c b/security/selinux/avc.c index bf84e966512c..31e6c0c9687a 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c @@ -757,6 +757,9 @@ static void avc_audit_pre_callback(struct audit_buffer *ab, void *a) static void avc_audit_post_callback(struct audit_buffer *ab, void *a) { struct common_audit_data *ad = a; + u32 scontext_len; + int rc; + audit_log_format(ab, " "); avc_dump_query(ab, ad->selinux_audit_data->state, ad->selinux_audit_data->ssid, From 11757f68852ccbd28590c348abe525ed1b645205 Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Fri, 28 Aug 2020 17:16:53 +0530 Subject: [PATCH 110/141] kbuild: lto: remove duplicate dependencies from .mod files With LTO, llvm-nm prints out symbols for each archive member separately, which results in a lot of duplicate dependencies in the .mod file when CONFIG_TRIM_UNUSED_SYMS is enabled. 
When a module consists of several compilation units, the output can exceed the default xargs command size limit and split the dependency list to multiple lines, which results in used symbols getting trimmed. This change removes duplicate dependencies, which will reduce the probability of this happening and makes .mod files smaller and easier to read. Signed-off-by: Sami Tolvanen Signed-off-by: UtsavBalar1231 --- scripts/Makefile.build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/Makefile.build b/scripts/Makefile.build index dadc24989390..559d7bf30d8f 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -351,7 +351,7 @@ endef # List module undefined symbols (or empty line if not enabled) ifdef CONFIG_TRIM_UNUSED_KSYMS -cmd_undef_syms = $(NM) $@ | sed -n 's/^ \+U //p' | xargs echo +cmd_undef_syms = $(NM) $@ | sed -n 's/^ \+U //p' | sort -u | xargs echo else cmd_undef_syms = echo endif From b62835c05f3a86fc42533c3d06616575778d528f Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Sat, 29 Aug 2020 10:28:54 +0530 Subject: [PATCH 111/141] Revert "defconfig: Enable new file encryption flags for msmnile" This reverts commit 733ab12f97e9a24ff52e07c95e7ca20704de1b2e. 
Signed-off-by: UtsavBalar1231 --- arch/arm64/configs/vendor/sa8155-perf_defconfig | 8 -------- arch/arm64/configs/vendor/sa8155_defconfig | 8 -------- 2 files changed, 16 deletions(-) diff --git a/arch/arm64/configs/vendor/sa8155-perf_defconfig b/arch/arm64/configs/vendor/sa8155-perf_defconfig index 1a8b42da637a..b3dc60f70e86 100644 --- a/arch/arm64/configs/vendor/sa8155-perf_defconfig +++ b/arch/arm64/configs/vendor/sa8155-perf_defconfig @@ -52,8 +52,6 @@ CONFIG_MODVERSIONS=y CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y -CONFIG_BLK_INLINE_ENCRYPTION=y -CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_ARCH_QCOM=y @@ -280,11 +278,8 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_CRYPTO=y -CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -583,8 +578,6 @@ CONFIG_MSM_PERFORMANCE=y CONFIG_MSM_DRM_TECHPACK=y CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_CDSP_RM=y -CONFIG_QTI_CRYPTO_COMMON=y -CONFIG_QTI_CRYPTO_TZ=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y @@ -615,7 +608,6 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y diff --git a/arch/arm64/configs/vendor/sa8155_defconfig b/arch/arm64/configs/vendor/sa8155_defconfig index b7f96a3555da..13d5861e2017 100644 --- a/arch/arm64/configs/vendor/sa8155_defconfig +++ b/arch/arm64/configs/vendor/sa8155_defconfig @@ -54,8 +54,6 @@ CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y # CONFIG_BLK_DEV_BSG is not set -CONFIG_BLK_INLINE_ENCRYPTION=y -CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_DEADLINE is not set CONFIG_CFQ_GROUP_IOSCHED=y @@ -293,11 
+291,8 @@ CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y -CONFIG_SCSI_UFS_CRYPTO=y -CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -616,8 +611,6 @@ CONFIG_MSM_PERFORMANCE=y CONFIG_MSM_DRM_TECHPACK=y CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_CDSP_RM=y -CONFIG_QTI_CRYPTO_COMMON=y -CONFIG_QTI_CRYPTO_TZ=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y @@ -649,7 +642,6 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y -CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y From ea117e33802088f9c9f887940d7efb708b469c77 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Sat, 29 Aug 2020 10:27:45 +0530 Subject: [PATCH 112/141] Revert "dm: default-key: Adapt legacy disk format for new set of arguments" This reverts commit a82c6c2a5789d9348ec9a27d229a1c4e057b7e65. 
Signed-off-by: UtsavBalar1231 --- drivers/md/dm-default-key.c | 30 +++++------------------------- fs/crypto/policy.c | 4 ---- 2 files changed, 5 insertions(+), 29 deletions(-) diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c index dba7543a1bd5..775e9a09a4fc 100644 --- a/drivers/md/dm-default-key.c +++ b/drivers/md/dm-default-key.c @@ -137,11 +137,9 @@ static int default_key_ctr_optional(struct dm_target *ti, return 0; } -static void default_key_adjust_sector_size_and_iv(char **argv, - struct dm_target *ti, - struct default_key_c **dkc, - u8 *raw, u32 size, - bool is_legacy) +void default_key_adjust_sector_size_and_iv(char **argv, struct dm_target *ti, + struct default_key_c **dkc, u8 *raw, + u32 size) { struct dm_dev *dev; int i; @@ -152,7 +150,7 @@ static void default_key_adjust_sector_size_and_iv(char **argv, dev = (*dkc)->dev; - if (is_legacy) { + if (!strcmp(argv[0], "AES-256-XTS")) { memcpy(key_new.bytes, raw, size); for (i = 0; i < ARRAY_SIZE(key_new.words); i++) @@ -186,24 +184,6 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) unsigned long long tmpll; char dummy; int err; - char *_argv[10]; - bool is_legacy = false; - - if (argc >= 4 && !strcmp(argv[0], "AES-256-XTS")) { - argc = 0; - _argv[argc++] = "aes-xts-plain64"; - _argv[argc++] = argv[1]; - _argv[argc++] = "0"; - _argv[argc++] = argv[2]; - _argv[argc++] = argv[3]; - _argv[argc++] = "3"; - _argv[argc++] = "allow_discards"; - _argv[argc++] = "sector_size:4096"; - _argv[argc++] = "iv_large_sectors"; - _argv[argc] = NULL; - argv = _argv; - is_legacy = true; - } if (argc < 5) { ti->error = "Not enough arguments"; @@ -279,7 +259,7 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) } default_key_adjust_sector_size_and_iv(argv, ti, &dkc, raw_key, - raw_key_size, is_legacy); + raw_key_size); dkc->sector_bits = ilog2(dkc->sector_size); if (ti->len & ((dkc->sector_size >> SECTOR_SHIFT) - 1)) { diff --git 
a/fs/crypto/policy.c b/fs/crypto/policy.c index 804595aaea03..04d2f531a3a1 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -43,10 +43,6 @@ static bool fscrypt_valid_enc_modes(u32 contents_mode, u32 filenames_mode) filenames_mode == FSCRYPT_MODE_ADIANTUM) return true; - if (contents_mode == FSCRYPT_MODE_PRIVATE && - filenames_mode == FSCRYPT_MODE_AES_256_CTS) - return true; - return false; } From bdb23afe7f68e9da8585df42f7c9847edd5026d8 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Sat, 29 Aug 2020 10:30:33 +0530 Subject: [PATCH 113/141] Revert "ANDROID: fscrypt: handle direct I/O with IV_INO_LBLK_32" This reverts commit 8700f864c231d92937be95bf19c56b012dfc7f32. Signed-off-by: UtsavBalar1231 --- fs/crypto/crypto.c | 8 ---- fs/crypto/inline_crypt.c | 82 ---------------------------------------- fs/direct-io.c | 10 +---- fs/ext4/inode.c | 9 +++-- fs/f2fs/f2fs.h | 8 +++- include/linux/fscrypt.h | 19 ---------- 6 files changed, 13 insertions(+), 123 deletions(-) diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 02ab7b76d157..8f3dd023ddff 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -67,14 +67,6 @@ void fscrypt_free_bounce_page(struct page *bounce_page) } EXPORT_SYMBOL(fscrypt_free_bounce_page); -/* - * Generate the IV for the given logical block number within the given file. - * For filenames encryption, lblk_num == 0. - * - * Keep this in sync with fscrypt_limit_dio_pages(). fscrypt_limit_dio_pages() - * needs to know about any IV generation methods where the low bits of IV don't - * simply contain the lblk_num (e.g., IV_INO_LBLK_32). 
- */ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, const struct fscrypt_info *ci) { diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c index 69c281a331e5..a58c120d77f1 100644 --- a/fs/crypto/inline_crypt.c +++ b/fs/crypto/inline_crypt.c @@ -17,7 +17,6 @@ #include #include #include -#include #include "fscrypt_private.h" @@ -430,84 +429,3 @@ bool fscrypt_mergeable_bio_bh(struct bio *bio, return fscrypt_mergeable_bio(bio, inode, next_lblk); } EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh); - -/** - * fscrypt_dio_supported() - check whether a direct I/O request is unsupported - * due to encryption constraints - * @iocb: the file and position the I/O is targeting - * @iter: the I/O data segment(s) - * - * Return: true if direct I/O is supported - */ -bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter) -{ - const struct inode *inode = file_inode(iocb->ki_filp); - const struct fscrypt_info *ci = inode->i_crypt_info; - const unsigned int blocksize = i_blocksize(inode); - - /* If the file is unencrypted, no veto from us. */ - if (!fscrypt_needs_contents_encryption(inode)) - return true; - - /* We only support direct I/O with inline crypto, not fs-layer crypto */ - if (!fscrypt_inode_uses_inline_crypto(inode)) - return false; - - /* - * Since the granularity of encryption is filesystem blocks, the I/O - * must be block aligned -- not just disk sector aligned. - */ - if (!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), blocksize)) - return false; - - /* - * With IV_INO_LBLK_32 and sub-page blocks, the DUN can wrap around in - * the middle of a page. This isn't handled by the direct I/O code yet. 
- */ - if (blocksize != PAGE_SIZE && - (fscrypt_policy_flags(&ci->ci_policy) & - FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) - return false; - - return true; -} -EXPORT_SYMBOL_GPL(fscrypt_dio_supported); - -/** - * fscrypt_limit_dio_pages() - limit I/O pages to avoid discontiguous DUNs - * @inode: the file on which I/O is being done - * @pos: the file position (in bytes) at which the I/O is being done - * @nr_pages: the number of pages we want to submit starting at @pos - * - * For direct I/O: limit the number of pages that will be submitted in the bio - * targeting @pos, in order to avoid crossing a data unit number (DUN) - * discontinuity. This is only needed for certain IV generation methods. - * - * This assumes block_size == PAGE_SIZE; see fscrypt_dio_supported(). - * - * Return: the actual number of pages that can be submitted - */ -int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos, int nr_pages) -{ - const struct fscrypt_info *ci = inode->i_crypt_info; - u32 dun; - - if (!fscrypt_inode_uses_inline_crypto(inode)) - return nr_pages; - - if (nr_pages <= 1) - return nr_pages; - - if (!(fscrypt_policy_flags(&ci->ci_policy) & - FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) - return nr_pages; - - if (WARN_ON_ONCE(i_blocksize(inode) != PAGE_SIZE)) - return 1; - - /* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */ - - dun = ci->ci_hashed_ino + (pos >> inode->i_blkbits); - - return min_t(u64, nr_pages, (u64)U32_MAX + 1 - dun); -} diff --git a/fs/direct-io.c b/fs/direct-io.c index 094421f05fda..729c59213d2e 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -815,17 +815,9 @@ static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio, * current logical offset in the file does not equal what would * be the next logical offset in the bio, submit the bio we * have. - * - * When fscrypt inline encryption is used, data unit number - * (DUN) contiguity is also required. Normally that's implied - * by logical contiguity. 
However, certain IV generation - * methods (e.g. IV_INO_LBLK_32) don't guarantee it. So, we - * must explicitly check fscrypt_mergeable_bio() too. */ if (sdio->final_block_in_bio != sdio->cur_page_block || - cur_offset != bio_next_offset || - !fscrypt_mergeable_bio(sdio->bio, dio->inode, - cur_offset >> dio->inode->i_blkbits)) + cur_offset != bio_next_offset) dio_bio_submit(dio, sdio); } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index fe84dd8a74bc..096e4cc053dc 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3855,9 +3855,12 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter) ssize_t ret; int rw = iov_iter_rw(iter); - if (!fscrypt_dio_supported(iocb, iter)) - return 0; - + if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode)) { + if (!fscrypt_inode_uses_inline_crypto(inode) || + !IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), + i_blocksize(inode))) + return 0; + } if (fsverity_active(inode)) return 0; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index f55818a8c263..a3529e3e7286 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -4030,8 +4030,12 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int rw = iov_iter_rw(iter); - if (!fscrypt_dio_supported(iocb, iter)) - return true; + if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && f2fs_encrypted_file(inode)) { + if (!fscrypt_inode_uses_inline_crypto(inode) || + !IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), + F2FS_BLKSIZE)) + return true; + } if (fsverity_active(inode)) return true; if (f2fs_is_multi_device(sbi)) diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index db99db6e9458..9f791a4b4ad3 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -532,11 +532,6 @@ extern bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, extern bool fscrypt_mergeable_bio_bh(struct bio *bio, const struct buffer_head *next_bh); -bool fscrypt_dio_supported(struct kiocb *iocb, struct 
iov_iter *iter); - -int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos, - int nr_pages); - #else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ static inline bool fscrypt_inode_uses_inline_crypto(const struct inode *inode) { @@ -569,20 +564,6 @@ static inline bool fscrypt_mergeable_bio_bh(struct bio *bio, { return true; } - -static inline bool fscrypt_dio_supported(struct kiocb *iocb, - struct iov_iter *iter) -{ - const struct inode *inode = file_inode(iocb->ki_filp); - - return !fscrypt_needs_contents_encryption(inode); -} - -static inline int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos, - int nr_pages) -{ - return nr_pages; -} #endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ #if IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_DM_DEFAULT_KEY) From 764c27615d3f071ba0677adebd25f88a3de14809 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Sat, 29 Aug 2020 10:30:47 +0530 Subject: [PATCH 114/141] Revert "BACKPORT: FROMLIST: fscrypt: add support for IV_INO_LBLK_32 policies" This reverts commit fb8bfe480c57ef95d9588c1bb5027bcf7867d944. Signed-off-by: UtsavBalar1231 --- Documentation/filesystems/fscrypt.rst | 33 ++--------------- fs/crypto/crypto.c | 6 +-- fs/crypto/fscrypt_private.h | 18 ++------- fs/crypto/inline_crypt.c | 3 -- fs/crypto/keyring.c | 1 - fs/crypto/keysetup.c | 53 +++------------------------ fs/crypto/policy.c | 51 +++++++------------------- include/uapi/linux/fscrypt.h | 3 +- 8 files changed, 29 insertions(+), 139 deletions(-) diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst index 7f9a372031fd..dc444b8d3704 100644 --- a/Documentation/filesystems/fscrypt.rst +++ b/Documentation/filesystems/fscrypt.rst @@ -292,22 +292,8 @@ files' data differently, inode numbers are included in the IVs. Consequently, shrinking the filesystem may not be allowed. 
This format is optimized for use with inline encryption hardware -compliant with the UFS standard, which supports only 64 IV bits per -I/O request and may have only a small number of keyslots. - -IV_INO_LBLK_32 policies ------------------------ - -IV_INO_LBLK_32 policies work like IV_INO_LBLK_64, except that for -IV_INO_LBLK_32, the inode number is hashed with SipHash-2-4 (where the -SipHash key is derived from the master key) and added to the file -logical block number mod 2^32 to produce a 32-bit IV. - -This format is optimized for use with inline encryption hardware -compliant with the eMMC v5.2 standard, which supports only 32 IV bits -per I/O request and may have only a small number of keyslots. This -format results in some level of IV reuse, so it should only be used -when necessary due to hardware limitations. +compliant with the UFS or eMMC standards, which support only 64 IV +bits per I/O request and may have only a small number of keyslots. Key identifiers --------------- @@ -383,10 +369,6 @@ a little endian number, except that: to 32 bits and is placed in bits 0-31 of the IV. The inode number (which is also limited to 32 bits) is placed in bits 32-63. -- With `IV_INO_LBLK_32 policies`_, the logical block number is limited - to 32 bits and is placed in bits 0-31 of the IV. The inode number - is then hashed and added mod 2^32. - Note that because file logical block numbers are included in the IVs, filesystems must enforce that blocks are never shifted around within encrypted files, e.g. via "collapse range" or "insert range". @@ -483,15 +465,8 @@ This structure must be initialized as follows: (0x3). - FSCRYPT_POLICY_FLAG_DIRECT_KEY: See `DIRECT_KEY policies`_. - FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64: See `IV_INO_LBLK_64 - policies`_. - - FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32: See `IV_INO_LBLK_32 - policies`_. - - v1 encryption policies only support the PAD_* and DIRECT_KEY flags. - The other flags are only supported by v2 encryption policies. 
- - The DIRECT_KEY, IV_INO_LBLK_64, and IV_INO_LBLK_32 flags are - mutually exclusive. + policies`_. This is mutually exclusive with DIRECT_KEY and is not + supported on v1 policies. - For v2 encryption policies, ``__reserved`` must be zeroed. diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 8f3dd023ddff..cc8e334165f5 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -82,12 +82,8 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 || ((fscrypt_policy_contents_mode(&ci->ci_policy) == FSCRYPT_MODE_PRIVATE) && inlinecrypt)) { - WARN_ON_ONCE(lblk_num > U32_MAX); - WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX); + WARN_ON_ONCE((u32)lblk_num != lblk_num); lblk_num |= (u64)ci->ci_inode->i_ino << 32; - } else if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) { - WARN_ON_ONCE(lblk_num > U32_MAX); - lblk_num = (u32)(ci->ci_hashed_ino + lblk_num); } else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE); } diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 67bcdfa16095..ae03c7fc7e52 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -245,9 +245,7 @@ struct fscrypt_info { /* This inode's nonce, copied from the fscrypt_context */ u8 ci_nonce[FS_KEY_DERIVATION_NONCE_SIZE]; - - /* Hashed inode number. 
Only set for IV_INO_LBLK_32 */ - u32 ci_hashed_ino; + u8 ci_raw_key[FSCRYPT_MAX_KEY_SIZE]; }; typedef enum { @@ -319,8 +317,6 @@ extern int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, #define HKDF_CONTEXT_DIRECT_KEY 3 #define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4 #define HKDF_CONTEXT_DIRHASH_KEY 5 -#define HKDF_CONTEXT_IV_INO_LBLK_32_KEY 6 -#define HKDF_CONTEXT_INODE_HASH_KEY 7 extern int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, const u8 *info, unsigned int infolen, @@ -517,17 +513,11 @@ struct fscrypt_master_key { struct list_head mk_decrypted_inodes; spinlock_t mk_decrypted_inodes_lock; - /* - * Per-mode encryption keys for the various types of encryption policies - * that use them. Allocated and derived on-demand. - */ + /* Per-mode keys for DIRECT_KEY policies, allocated on-demand */ struct fscrypt_prepared_key mk_direct_keys[__FSCRYPT_MODE_MAX + 1]; - struct fscrypt_prepared_key mk_iv_ino_lblk_64_keys[__FSCRYPT_MODE_MAX + 1]; - struct fscrypt_prepared_key mk_iv_ino_lblk_32_keys[__FSCRYPT_MODE_MAX + 1]; - /* Hash key for inode numbers. Initialized only when needed. 
*/ - siphash_key_t mk_ino_hash_key; - bool mk_ino_hash_key_initialized; + /* Per-mode keys for IV_INO_LBLK_64 policies, allocated on-demand */ + struct fscrypt_prepared_key mk_iv_ino_lblk_64_keys[__FSCRYPT_MODE_MAX + 1]; } __randomize_layout; diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c index a58c120d77f1..f96e2972a003 100644 --- a/fs/crypto/inline_crypt.c +++ b/fs/crypto/inline_crypt.c @@ -54,9 +54,6 @@ static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci) if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) return sizeof(__le64); - if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) - return sizeof(__le32); - /* Default case: IVs are just the file logical block number */ if (sb->s_cop->get_ino_and_lblk_bits) sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits); diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index fc9ea71b50f7..9257ea1102b1 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -46,7 +46,6 @@ static void free_master_key(struct fscrypt_master_key *mk) for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) { fscrypt_destroy_prepared_key(&mk->mk_direct_keys[i]); fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_64_keys[i]); - fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_32_keys[i]); } key_put(mk->mk_users); diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index 4cac429e7adb..a3626425d633 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -56,8 +56,6 @@ struct fscrypt_mode fscrypt_modes[] = { }, }; -static DEFINE_MUTEX(fscrypt_mode_key_setup_mutex); - static struct fscrypt_mode * select_encryption_mode(const union fscrypt_policy *policy, const struct inode *inode) @@ -188,7 +186,7 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci, return 0; } - mutex_lock(&fscrypt_mode_key_setup_mutex); + mutex_lock(&mode_key_setup_mutex); if (fscrypt_is_key_prepared(prep_key, ci)) goto done_unlock; @@ -239,7 +237,7 @@ done_unlock: ci->ci_key = *prep_key; err = 0; out_unlock: - 
mutex_unlock(&fscrypt_mode_key_setup_mutex); + mutex_unlock(&mode_key_setup_mutex); return err; } @@ -258,53 +256,15 @@ int fscrypt_derive_dirhash_key(struct fscrypt_info *ci, return 0; } -static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_info *ci, - struct fscrypt_master_key *mk) -{ - int err; - - err = setup_per_mode_enc_key(ci, mk, mk->mk_iv_ino_lblk_32_keys, - HKDF_CONTEXT_IV_INO_LBLK_32_KEY, true); - if (err) - return err; - - /* pairs with smp_store_release() below */ - if (!smp_load_acquire(&mk->mk_ino_hash_key_initialized)) { - - mutex_lock(&fscrypt_mode_key_setup_mutex); - - if (mk->mk_ino_hash_key_initialized) - goto unlock; - - err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, - HKDF_CONTEXT_INODE_HASH_KEY, NULL, 0, - (u8 *)&mk->mk_ino_hash_key, - sizeof(mk->mk_ino_hash_key)); - if (err) - goto unlock; - /* pairs with smp_load_acquire() above */ - smp_store_release(&mk->mk_ino_hash_key_initialized, true); -unlock: - mutex_unlock(&fscrypt_mode_key_setup_mutex); - if (err) - return err; - } - - ci->ci_hashed_ino = (u32)siphash_1u64(ci->ci_inode->i_ino, - &mk->mk_ino_hash_key); - return 0; -} - static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, struct fscrypt_master_key *mk) { int err; if (mk->mk_secret.is_hw_wrapped && - !(ci->ci_policy.v2.flags & (FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 | - FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))) { + !(ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)) { fscrypt_warn(ci->ci_inode, - "Hardware-wrapped keys are only supported with IV_INO_LBLK policies"); + "Hardware-wrapped keys are only supported with IV_INO_LBLK_64 policies"); return -EINVAL; } @@ -325,14 +285,11 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, * IV_INO_LBLK_64: encryption keys are derived from (master_key, * mode_num, filesystem_uuid), and inode number is included in * the IVs. This format is optimized for use with inline - * encryption hardware compliant with the UFS standard. 
+ * encryption hardware compliant with the UFS or eMMC standards. */ err = setup_per_mode_enc_key(ci, mk, mk->mk_iv_ino_lblk_64_keys, HKDF_CONTEXT_IV_INO_LBLK_64_KEY, true); - } else if (ci->ci_policy.v2.flags & - FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) { - err = fscrypt_setup_iv_ino_lblk_32_key(ci, mk); } else { u8 derived_key[FSCRYPT_MAX_KEY_SIZE]; diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 04d2f531a3a1..10ccf945020c 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -66,14 +66,18 @@ static bool supported_direct_key_modes(const struct inode *inode, return true; } -static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy, - const struct inode *inode, - const char *type, - int max_ino_bits, int max_lblk_bits) +static bool supported_iv_ino_lblk_64_policy( + const struct fscrypt_policy_v2 *policy, + const struct inode *inode) { struct super_block *sb = inode->i_sb; int ino_bits = 64, lblk_bits = 64; + if (policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { + fscrypt_warn(inode, + "The DIRECT_KEY and IV_INO_LBLK_64 flags are mutually exclusive"); + return false; + } /* * It's unsafe to include inode numbers in the IVs if the filesystem can * potentially renumber inodes, e.g. via filesystem shrinking. 
@@ -81,22 +85,16 @@ static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy, if (!sb->s_cop->has_stable_inodes || !sb->s_cop->has_stable_inodes(sb)) { fscrypt_warn(inode, - "Can't use %s policy on filesystem '%s' because it doesn't have stable inode numbers", - type, sb->s_id); + "Can't use IV_INO_LBLK_64 policy on filesystem '%s' because it doesn't have stable inode numbers", + sb->s_id); return false; } if (sb->s_cop->get_ino_and_lblk_bits) sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits); - if (ino_bits > max_ino_bits) { + if (ino_bits > 32 || lblk_bits > 32) { fscrypt_warn(inode, - "Can't use %s policy on filesystem '%s' because its inode numbers are too long", - type, sb->s_id); - return false; - } - if (lblk_bits > max_lblk_bits) { - fscrypt_warn(inode, - "Can't use %s policy on filesystem '%s' because its block numbers are too long", - type, sb->s_id); + "Can't use IV_INO_LBLK_64 policy on filesystem '%s' because it doesn't use 32-bit inode and block numbers", + sb->s_id); return false; } return true; @@ -139,8 +137,6 @@ static bool fscrypt_supported_v1_policy(const struct fscrypt_policy_v1 *policy, static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy, const struct inode *inode) { - int count = 0; - if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, policy->filenames_encryption_mode)) { fscrypt_warn(inode, @@ -156,29 +152,13 @@ static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy, return false; } - count += !!(policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY); - count += !!(policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64); - count += !!(policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32); - if (count > 1) { - fscrypt_warn(inode, "Mutually exclusive encryption flags (0x%02x)", - policy->flags); - return false; - } - if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) && !supported_direct_key_modes(inode, policy->contents_encryption_mode, 
policy->filenames_encryption_mode)) return false; if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) && - !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_64", - 32, 32)) - return false; - - if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) && - /* This uses hashed inode numbers, so ino_bits doesn't matter. */ - !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_32", - INT_MAX, 32)) + !supported_iv_ino_lblk_64_policy(policy, inode)) return false; if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) { @@ -374,9 +354,6 @@ static int set_encryption_policy(struct inode *inode, policy->v2.master_key_identifier); if (err) return err; - if (policy->v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) - pr_warn_once("%s (pid %d) is setting an IV_INO_LBLK_32 encryption policy. This should only be used if there are certain hardware limitations.\n", - current->comm, current->pid); break; default: WARN_ON(1); diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index b2b6ee53d578..b134bfc90912 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -19,8 +19,7 @@ #define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03 #define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04 #define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 0x08 -#define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32 0x10 -#define FSCRYPT_POLICY_FLAGS_VALID 0x1F +#define FSCRYPT_POLICY_FLAGS_VALID 0x0F /* Encryption algorithms */ #define FSCRYPT_MODE_AES_256_XTS 1 From d7f12961eae7116fad30507d7f0fbc9884985567 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Sat, 29 Aug 2020 10:30:59 +0530 Subject: [PATCH 115/141] Revert "ANDROID: fscrypt: set dun_bytes more precisely" This reverts commit 9baaaa3e707b6751ee11338192404ed53b4a3b9d. 
Signed-off-by: UtsavBalar1231 --- fs/crypto/inline_crypt.c | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c index f96e2972a003..976617112d52 100644 --- a/fs/crypto/inline_crypt.c +++ b/fs/crypto/inline_crypt.c @@ -44,20 +44,13 @@ static void fscrypt_get_devices(struct super_block *sb, int num_devs, static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci) { - struct super_block *sb = ci->ci_inode->i_sb; - unsigned int flags = fscrypt_policy_flags(&ci->ci_policy); - int ino_bits = 64, lblk_bits = 64; + unsigned int dun_bytes = 8; - if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) - return offsetofend(union fscrypt_iv, nonce); + if (fscrypt_policy_flags(&ci->ci_policy) & + FSCRYPT_POLICY_FLAG_DIRECT_KEY) + dun_bytes += FS_KEY_DERIVATION_NONCE_SIZE; - if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) - return sizeof(__le64); - - /* Default case: IVs are just the file logical block number */ - if (sb->s_cop->get_ino_and_lblk_bits) - sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits); - return DIV_ROUND_UP(lblk_bits, 8); + return dun_bytes; } /* Enable inline encryption for this file if supported. */ From f7a0aa9c314b42380bcf83c3b3a65f09d3cf500c Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Sat, 29 Aug 2020 10:31:14 +0530 Subject: [PATCH 116/141] Revert "ANDROID: dm-default-key: set dun_bytes more precisely" This reverts commit 9922845d60cbc1b1b8829af84fbf4a63a3879c74. 
Signed-off-by: UtsavBalar1231 --- drivers/md/dm-default-key.c | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c index 775e9a09a4fc..0dfeb3d6ded5 100644 --- a/drivers/md/dm-default-key.c +++ b/drivers/md/dm-default-key.c @@ -40,7 +40,6 @@ static const struct dm_default_key_cipher { * @sector_size: crypto sector size in bytes (usually 4096) * @sector_bits: log2(sector_size) * @key: the encryption key to use - * @max_dun: the maximum DUN that may be used (computed from other params) */ struct default_key_c { struct dm_dev *dev; @@ -51,7 +50,6 @@ struct default_key_c { unsigned int sector_bits; struct blk_crypto_key key; bool is_hw_wrapped; - u64 max_dun; }; static const struct dm_default_key_cipher * @@ -180,7 +178,6 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) const struct dm_default_key_cipher *cipher; u8 raw_key[DM_DEFAULT_KEY_MAX_WRAPPED_KEY_SIZE]; unsigned int raw_key_size; - unsigned int dun_bytes; unsigned long long tmpll; char dummy; int err; @@ -268,19 +265,15 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } - dkc->max_dun = (dkc->iv_offset + ti->len - 1) >> - (dkc->sector_bits - SECTOR_SHIFT); - dun_bytes = DIV_ROUND_UP(fls64(dkc->max_dun), 8); - - err = blk_crypto_init_key(&dkc->key, raw_key, raw_key_size, + err = blk_crypto_init_key(&dkc->key, raw_key, cipher->key_size, dkc->is_hw_wrapped, cipher->mode_num, - dun_bytes, dkc->sector_size); + sizeof(u64), dkc->sector_size); if (err) { ti->error = "Error initializing blk-crypto key"; goto bad; } - err = blk_crypto_start_using_mode(cipher->mode_num, dun_bytes, + err = blk_crypto_start_using_mode(cipher->mode_num, sizeof(u64), dkc->sector_size, dkc->is_hw_wrapped, dkc->dev->bdev->bd_queue); if (err) { @@ -342,13 +335,6 @@ static int default_key_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_KILL; dun[0] >>= 
dkc->sector_bits - SECTOR_SHIFT; /* crypto sectors */ - /* - * This check isn't necessary as we should have calculated max_dun - * correctly, but be safe. - */ - if (WARN_ON_ONCE(dun[0] > dkc->max_dun)) - return DM_MAPIO_KILL; - bio_crypt_set_ctx(bio, &dkc->key, dun, GFP_NOIO); return DM_MAPIO_REMAPPED; From cfb28547cefe6008c5018f93cb6eb19edcae74d3 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Sat, 29 Aug 2020 10:31:24 +0530 Subject: [PATCH 117/141] Revert "ANDROID: block: backport the ability to specify max_dun_bytes" This reverts commit 0bae5c190df7a00dcfab788e074ee49876f0b0eb. Signed-off-by: UtsavBalar1231 --- block/blk-crypto.c | 22 +++++----------------- block/keyslot-manager.c | 24 ++---------------------- drivers/md/dm-default-key.c | 6 +++--- drivers/scsi/ufs/ufshcd-crypto-qti.c | 2 -- drivers/scsi/ufs/ufshcd-crypto.c | 1 - fs/crypto/inline_crypt.c | 24 ++---------------------- include/linux/bio-crypt-ctx.h | 28 ---------------------------- include/linux/blk-crypto.h | 2 -- include/linux/keyslot-manager.h | 4 ---- 9 files changed, 12 insertions(+), 101 deletions(-) diff --git a/block/blk-crypto.c b/block/blk-crypto.c index e07a37cf8b5f..f56bbec1132f 100644 --- a/block/blk-crypto.c +++ b/block/blk-crypto.c @@ -108,10 +108,9 @@ int blk_crypto_submit_bio(struct bio **bio_ptr) /* Get device keyslot if supported */ if (keyslot_manager_crypto_mode_supported(q->ksm, - bc->bc_key->crypto_mode, - blk_crypto_key_dun_bytes(bc->bc_key), - bc->bc_key->data_unit_size, - bc->bc_key->is_hw_wrapped)) { + bc->bc_key->crypto_mode, + bc->bc_key->data_unit_size, + bc->bc_key->is_hw_wrapped)) { err = bio_crypt_ctx_acquire_keyslot(bc, q->ksm); if (!err) return 0; @@ -181,8 +180,6 @@ bool blk_crypto_endio(struct bio *bio) * @is_hw_wrapped has to be set for such keys) * @is_hw_wrapped: Denotes @raw_key is wrapped. 
* @crypto_mode: identifier for the encryption algorithm to use - * @dun_bytes: number of bytes that will be used to specify the DUN when this - * key is used * @data_unit_size: the data unit size to use for en/decryption * * Return: The blk_crypto_key that was prepared, or an ERR_PTR() on error. When @@ -192,12 +189,10 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key, unsigned int raw_key_size, bool is_hw_wrapped, enum blk_crypto_mode_num crypto_mode, - unsigned int dun_bytes, unsigned int data_unit_size) { const struct blk_crypto_mode *mode; static siphash_key_t hash_key; - u32 hash; memset(blk_key, 0, sizeof(*blk_key)); @@ -216,9 +211,6 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, return -EINVAL; } - if (dun_bytes <= 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE) - return -EINVAL; - if (!is_power_of_2(data_unit_size)) return -EINVAL; @@ -235,8 +227,7 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, * precomputed here so that it only needs to be computed once per key. */ get_random_once(&hash_key, sizeof(hash_key)); - hash = (u32)siphash(raw_key, raw_key_size, &hash_key); - blk_crypto_key_set_hash_and_dun_bytes(blk_key, hash, dun_bytes); + blk_key->hash = siphash(raw_key, raw_key_size, &hash_key); return 0; } @@ -245,7 +236,6 @@ EXPORT_SYMBOL_GPL(blk_crypto_init_key); /** * blk_crypto_start_using_mode() - Start using blk-crypto on a device * @crypto_mode: the crypto mode that will be used - * @dun_bytes: number of bytes that will be used to specify the DUN * @data_unit_size: the data unit size that will be used * @is_hw_wrapped_key: whether the key will be hardware-wrapped * @q: the request queue for the device @@ -259,13 +249,12 @@ EXPORT_SYMBOL_GPL(blk_crypto_init_key); * algorithm is disabled in the crypto API; or another -errno code. 
*/ int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode, - unsigned int dun_bytes, unsigned int data_unit_size, bool is_hw_wrapped_key, struct request_queue *q) { if (keyslot_manager_crypto_mode_supported(q->ksm, crypto_mode, - dun_bytes, data_unit_size, + data_unit_size, is_hw_wrapped_key)) return 0; if (is_hw_wrapped_key) { @@ -296,7 +285,6 @@ int blk_crypto_evict_key(struct request_queue *q, { if (q->ksm && keyslot_manager_crypto_mode_supported(q->ksm, key->crypto_mode, - blk_crypto_key_dun_bytes(key), key->data_unit_size, key->is_hw_wrapped)) return keyslot_manager_evict_key(q->ksm, key); diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c index 13d34b857625..1999c503b954 100644 --- a/block/keyslot-manager.c +++ b/block/keyslot-manager.c @@ -45,7 +45,6 @@ struct keyslot_manager { struct keyslot_mgmt_ll_ops ksm_ll_ops; unsigned int features; unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX]; - unsigned int max_dun_bytes_supported; void *ll_priv_data; /* Protects programming and evicting keys from the device */ @@ -124,7 +123,6 @@ struct keyslot_manager *keyslot_manager_create(unsigned int num_slots, ksm->features = features; memcpy(ksm->crypto_mode_supported, crypto_mode_supported, sizeof(ksm->crypto_mode_supported)); - ksm->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE; ksm->ll_priv_data = ll_priv_data; init_rwsem(&ksm->lock); @@ -156,19 +154,11 @@ err_free_ksm: } EXPORT_SYMBOL_GPL(keyslot_manager_create); -void keyslot_manager_set_max_dun_bytes(struct keyslot_manager *ksm, - unsigned int max_dun_bytes) -{ - ksm->max_dun_bytes_supported = max_dun_bytes; -} -EXPORT_SYMBOL_GPL(keyslot_manager_set_max_dun_bytes); - static inline struct hlist_head * hash_bucket_for_key(struct keyslot_manager *ksm, const struct blk_crypto_key *key) { - return &ksm->slot_hashtable[blk_crypto_key_hash(key) & - (ksm->slot_hashtable_size - 1)]; + return &ksm->slot_hashtable[key->hash & (ksm->slot_hashtable_size - 1)]; } static void 
remove_slot_from_lru_list(struct keyslot_manager *ksm, int slot) @@ -341,7 +331,6 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot) * combination is supported by a ksm. * @ksm: The keyslot manager to check * @crypto_mode: The crypto mode to check for. - * @dun_bytes: The number of bytes that will be used to specify the DUN * @data_unit_size: The data_unit_size for the mode. * @is_hw_wrapped_key: Whether a hardware-wrapped key will be used. * @@ -353,7 +342,6 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot) */ bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, enum blk_crypto_mode_num crypto_mode, - unsigned int dun_bytes, unsigned int data_unit_size, bool is_hw_wrapped_key) { @@ -370,10 +358,7 @@ bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, if (!(ksm->features & BLK_CRYPTO_FEATURE_STANDARD_KEYS)) return false; } - if (!(ksm->crypto_mode_supported[crypto_mode] & data_unit_size)) - return false; - - return ksm->max_dun_bytes_supported >= dun_bytes; + return ksm->crypto_mode_supported[crypto_mode] & data_unit_size; } /** @@ -516,7 +501,6 @@ struct keyslot_manager *keyslot_manager_create_passthrough( ksm->features = features; memcpy(ksm->crypto_mode_supported, crypto_mode_supported, sizeof(ksm->crypto_mode_supported)); - ksm->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE; ksm->ll_priv_data = ll_priv_data; init_rwsem(&ksm->lock); @@ -543,16 +527,12 @@ void keyslot_manager_intersect_modes(struct keyslot_manager *parent, unsigned int i; parent->features &= child->features; - parent->max_dun_bytes_supported = - min(parent->max_dun_bytes_supported, - child->max_dun_bytes_supported); for (i = 0; i < ARRAY_SIZE(child->crypto_mode_supported); i++) { parent->crypto_mode_supported[i] &= child->crypto_mode_supported[i]; } } else { parent->features = 0; - parent->max_dun_bytes_supported = 0; memset(parent->crypto_mode_supported, 0, 
sizeof(parent->crypto_mode_supported)); } diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c index 0dfeb3d6ded5..c1fe775ef9d5 100644 --- a/drivers/md/dm-default-key.c +++ b/drivers/md/dm-default-key.c @@ -267,14 +267,14 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) err = blk_crypto_init_key(&dkc->key, raw_key, cipher->key_size, dkc->is_hw_wrapped, cipher->mode_num, - sizeof(u64), dkc->sector_size); + dkc->sector_size); if (err) { ti->error = "Error initializing blk-crypto key"; goto bad; } - err = blk_crypto_start_using_mode(cipher->mode_num, sizeof(u64), - dkc->sector_size, dkc->is_hw_wrapped, + err = blk_crypto_start_using_mode(cipher->mode_num, dkc->sector_size, + dkc->is_hw_wrapped, dkc->dev->bdev->bd_queue); if (err) { ti->error = "Error starting to use blk-crypto"; diff --git a/drivers/scsi/ufs/ufshcd-crypto-qti.c b/drivers/scsi/ufs/ufshcd-crypto-qti.c index cfae1e5dede2..f06f2899dcac 100644 --- a/drivers/scsi/ufs/ufshcd-crypto-qti.c +++ b/drivers/scsi/ufs/ufshcd-crypto-qti.c @@ -245,8 +245,6 @@ static int ufshcd_hba_init_crypto_qti_spec(struct ufs_hba *hba, err = -ENOMEM; goto out; } - keyslot_manager_set_max_dun_bytes(hba->ksm, sizeof(u64)); - pr_debug("%s: keyslot manager created\n", __func__); return 0; diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c index 240745526135..4fb86fbf097e 100644 --- a/drivers/scsi/ufs/ufshcd-crypto.c +++ b/drivers/scsi/ufs/ufshcd-crypto.c @@ -346,7 +346,6 @@ int ufshcd_hba_init_crypto_spec(struct ufs_hba *hba, err = -ENOMEM; goto out_free_caps; } - keyslot_manager_set_max_dun_bytes(hba->ksm, sizeof(u64)); return 0; diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c index 976617112d52..e1bbaeff1c43 100644 --- a/fs/crypto/inline_crypt.c +++ b/fs/crypto/inline_crypt.c @@ -42,17 +42,6 @@ static void fscrypt_get_devices(struct super_block *sb, int num_devs, sb->s_cop->get_devices(sb, devs); } -static unsigned int 
fscrypt_get_dun_bytes(const struct fscrypt_info *ci) -{ - unsigned int dun_bytes = 8; - - if (fscrypt_policy_flags(&ci->ci_policy) & - FSCRYPT_POLICY_FLAG_DIRECT_KEY) - dun_bytes += FS_KEY_DERIVATION_NONCE_SIZE; - - return dun_bytes; -} - /* Enable inline encryption for this file if supported. */ int fscrypt_select_encryption_impl(struct fscrypt_info *ci, bool is_hw_wrapped_key) @@ -60,7 +49,6 @@ int fscrypt_select_encryption_impl(struct fscrypt_info *ci, const struct inode *inode = ci->ci_inode; struct super_block *sb = inode->i_sb; enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; - unsigned int dun_bytes; struct request_queue **devs; int num_devs; int i; @@ -96,12 +84,9 @@ int fscrypt_select_encryption_impl(struct fscrypt_info *ci, fscrypt_get_devices(sb, num_devs, devs); - dun_bytes = fscrypt_get_dun_bytes(ci); - for (i = 0; i < num_devs; i++) { if (!keyslot_manager_crypto_mode_supported(devs[i]->ksm, crypto_mode, - dun_bytes, sb->s_blocksize, is_hw_wrapped_key)) goto out_free_devs; @@ -122,7 +107,6 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, const struct inode *inode = ci->ci_inode; struct super_block *sb = inode->i_sb; enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; - unsigned int dun_bytes; int num_devs; int queue_refs = 0; struct fscrypt_blk_crypto_key *blk_key; @@ -140,14 +124,11 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, blk_key->num_devs = num_devs; fscrypt_get_devices(sb, num_devs, blk_key->devs); - dun_bytes = fscrypt_get_dun_bytes(ci); - BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE); err = blk_crypto_init_key(&blk_key->base, raw_key, raw_key_size, - is_hw_wrapped, crypto_mode, dun_bytes, - sb->s_blocksize); + is_hw_wrapped, crypto_mode, sb->s_blocksize); if (err) { fscrypt_err(inode, "error %d initializing blk-crypto key", err); goto fail; @@ -168,8 +149,7 @@ int fscrypt_prepare_inline_crypt_key(struct 
fscrypt_prepared_key *prep_key, } queue_refs++; - err = blk_crypto_start_using_mode(crypto_mode, dun_bytes, - sb->s_blocksize, + err = blk_crypto_start_using_mode(crypto_mode, sb->s_blocksize, is_hw_wrapped, blk_key->devs[i]); if (err) { diff --git a/include/linux/bio-crypt-ctx.h b/include/linux/bio-crypt-ctx.h index 45d331bcc2e4..d10c5ad5e07e 100644 --- a/include/linux/bio-crypt-ctx.h +++ b/include/linux/bio-crypt-ctx.h @@ -45,15 +45,7 @@ struct blk_crypto_key { unsigned int data_unit_size; unsigned int data_unit_size_bits; unsigned int size; - - /* - * Hack to avoid breaking KMI: pack both hash and dun_bytes into the - * hash field... - */ -#define BLK_CRYPTO_KEY_HASH_MASK 0xffffff -#define BLK_CRYPTO_KEY_DUN_BYTES_SHIFT 24 unsigned int hash; - bool is_hw_wrapped; u8 raw[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE]; }; @@ -61,26 +53,6 @@ struct blk_crypto_key { #define BLK_CRYPTO_MAX_IV_SIZE 32 #define BLK_CRYPTO_DUN_ARRAY_SIZE (BLK_CRYPTO_MAX_IV_SIZE/sizeof(u64)) -static inline void -blk_crypto_key_set_hash_and_dun_bytes(struct blk_crypto_key *key, - u32 hash, unsigned int dun_bytes) -{ - key->hash = (dun_bytes << BLK_CRYPTO_KEY_DUN_BYTES_SHIFT) | - (hash & BLK_CRYPTO_KEY_HASH_MASK); -} - -static inline u32 -blk_crypto_key_hash(const struct blk_crypto_key *key) -{ - return key->hash & BLK_CRYPTO_KEY_HASH_MASK; -} - -static inline unsigned int -blk_crypto_key_dun_bytes(const struct blk_crypto_key *key) -{ - return key->hash >> BLK_CRYPTO_KEY_DUN_BYTES_SHIFT; -} - /** * struct bio_crypt_ctx - an inline encryption context * @bc_key: the key, algorithm, and data unit size to use diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h index 6062002555e1..7dc478a8c3ed 100644 --- a/include/linux/blk-crypto.h +++ b/include/linux/blk-crypto.h @@ -20,11 +20,9 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key, unsigned int raw_key_size, bool is_hw_wrapped, enum blk_crypto_mode_num crypto_mode, - unsigned int dun_bytes, unsigned int 
data_unit_size); int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode, - unsigned int dun_bytes, unsigned int data_unit_size, bool is_hw_wrapped_key, struct request_queue *q); diff --git a/include/linux/keyslot-manager.h b/include/linux/keyslot-manager.h index 57000863beb7..f022bd6d2497 100644 --- a/include/linux/keyslot-manager.h +++ b/include/linux/keyslot-manager.h @@ -56,9 +56,6 @@ struct keyslot_manager *keyslot_manager_create(unsigned int num_slots, const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], void *ll_priv_data); -void keyslot_manager_set_max_dun_bytes(struct keyslot_manager *ksm, - unsigned int max_dun_bytes); - int keyslot_manager_get_slot_for_key(struct keyslot_manager *ksm, const struct blk_crypto_key *key); @@ -68,7 +65,6 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot); bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, enum blk_crypto_mode_num crypto_mode, - unsigned int dun_bytes, unsigned int data_unit_size, bool is_hw_wrapped_key); From d2ac4991b52bbb992f27fb70fec376e5213b4888 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Sat, 29 Aug 2020 10:32:02 +0530 Subject: [PATCH 118/141] Revert "Use correct endianness for encryption keys" This reverts commit a78753197b7cfd67d4c929075d55504317fcf637. 
Signed-off-by: UtsavBalar1231 --- drivers/md/dm-default-key.c | 18 ++---------------- drivers/soc/qcom/crypto-qti-tz.c | 13 +------------ fs/crypto/keysetup_v1.c | 12 +----------- 3 files changed, 4 insertions(+), 39 deletions(-) diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c index c1fe775ef9d5..4b47f25a257e 100644 --- a/drivers/md/dm-default-key.c +++ b/drivers/md/dm-default-key.c @@ -136,26 +136,13 @@ static int default_key_ctr_optional(struct dm_target *ti, } void default_key_adjust_sector_size_and_iv(char **argv, struct dm_target *ti, - struct default_key_c **dkc, u8 *raw, - u32 size) + struct default_key_c **dkc) { struct dm_dev *dev; - int i; - union { - u8 bytes[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE]; - u32 words[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE / sizeof(u32)]; - } key_new; dev = (*dkc)->dev; if (!strcmp(argv[0], "AES-256-XTS")) { - memcpy(key_new.bytes, raw, size); - - for (i = 0; i < ARRAY_SIZE(key_new.words); i++) - __cpu_to_be32s(&key_new.words[i]); - - memcpy(raw, key_new.bytes, size); - if (ti->len & (((*dkc)->sector_size >> SECTOR_SHIFT) - 1)) (*dkc)->sector_size = SECTOR_SIZE; @@ -255,8 +242,7 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } - default_key_adjust_sector_size_and_iv(argv, ti, &dkc, raw_key, - raw_key_size); + default_key_adjust_sector_size_and_iv(argv, ti, &dkc); dkc->sector_bits = ilog2(dkc->sector_size); if (ti->len & ((dkc->sector_size >> SECTOR_SHIFT) - 1)) { diff --git a/drivers/soc/qcom/crypto-qti-tz.c b/drivers/soc/qcom/crypto-qti-tz.c index 0ebdd1a1c9f8..154a08389274 100644 --- a/drivers/soc/qcom/crypto-qti-tz.c +++ b/drivers/soc/qcom/crypto-qti-tz.c @@ -35,21 +35,10 @@ int crypto_qti_program_key(struct crypto_vops_qti_entry *ice_entry, uint32_t smc_id = 0; char *tzbuf = NULL; struct scm_desc desc = {0}; - int i; - union { - u8 bytes[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE]; - u32 words[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE / sizeof(u32)]; - } key_new; tzbuf = ice_buffer; - 
memcpy(key_new.bytes, key->raw, key->size); - if (!key->is_hw_wrapped) { - for (i = 0; i < ARRAY_SIZE(key_new.words); i++) - __cpu_to_be32s(&key_new.words[i]); - } - - memcpy(tzbuf, key_new.bytes, key->size); + memcpy(tzbuf, key->raw, key->size); dmac_flush_range(tzbuf, tzbuf + key->size); smc_id = TZ_ES_CONFIG_SET_ICE_KEY_ID; diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index 38e54313653d..ac549eeb1444 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -25,7 +25,6 @@ #include #include #include -#include #include "fscrypt_private.h" @@ -269,23 +268,14 @@ static int setup_v1_file_key_derived(struct fscrypt_info *ci, { u8 *derived_key; int err; - int i; - union { - u8 bytes[FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE]; - u32 words[FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE / sizeof(u32)]; - } key_new; /*Support legacy ice based content encryption mode*/ if ((fscrypt_policy_contents_mode(&ci->ci_policy) == FSCRYPT_MODE_PRIVATE) && fscrypt_using_inline_encryption(ci)) { - memcpy(key_new.bytes, raw_master_key, ci->ci_mode->keysize); - - for (i = 0; i < ARRAY_SIZE(key_new.words); i++) - __cpu_to_be32s(&key_new.words[i]); err = fscrypt_prepare_inline_crypt_key(&ci->ci_key, - key_new.bytes, + raw_master_key, ci->ci_mode->keysize, false, ci); From 7ad73e130644155f6b571c44c30fff614aa38ea8 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 19:54:51 +0530 Subject: [PATCH 119/141] Revert "ARM: dts: Make crypto address part of host controller node" This reverts commit 4abe03d70b5af292fb823b1f250989510aa99232. 
Signed-off-by: UtsavBalar1231 --- arch/arm64/boot/dts/qcom/atoll.dtsi | 9 +++++---- arch/arm64/boot/dts/qcom/sdmmagpie.dtsi | 9 +++++---- arch/arm64/boot/dts/qcom/sm6150.dtsi | 9 +++++---- arch/arm64/boot/dts/qcom/sm8150.dtsi | 5 +++-- 4 files changed, 18 insertions(+), 14 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/atoll.dtsi b/arch/arm64/boot/dts/qcom/atoll.dtsi index 4f2f8b1c7b09..3c8b470205c1 100644 --- a/arch/arm64/boot/dts/qcom/atoll.dtsi +++ b/arch/arm64/boot/dts/qcom/atoll.dtsi @@ -2679,12 +2679,13 @@ sdhc_1: sdhci@7c4000 { compatible = "qcom,sdhci-msm-v5"; - reg = <0x7c4000 0x1000>, <0x7c5000 0x1000>, <0x7c8000 0x8000>; - reg-names = "hc_mem", "cmdq_mem", "cmdq_ice"; + reg = <0x7c4000 0x1000>, <0x7c5000 0x1000>; + reg-names = "hc_mem", "cmdq_mem"; interrupts = , ; interrupt-names = "hc_irq", "pwr_irq"; + sdhc-msm-crypto = <&sdcc1_ice>; qcom,bus-width = <8>; qcom,large-address-bus; @@ -2834,11 +2835,11 @@ ufshc_mem: ufshc@1d84000 { compatible = "qcom,ufshc"; - reg = <0x1d84000 0x3000>, <0x1d90000 0x8000>; - reg-names = "ufs_mem", "ufs_ice"; + reg = <0x1d84000 0x3000>; interrupts = <0 265 0>; phys = <&ufsphy_mem>; phy-names = "ufsphy"; + ufs-qcom-crypto = <&ufs_ice>; lanes-per-direction = <1>; dev-ref-clk-freq = <0>; /* 19.2 MHz */ diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi index da9805d4cd81..3259d7cccc7e 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi @@ -2121,12 +2121,13 @@ sdhc_1: sdhci@7c4000 { compatible = "qcom,sdhci-msm-v5"; - reg = <0x7c4000 0x1000>, <0x7c5000 0x1000>, <0x7C8000 0x8000>; - reg-names = "hc_mem", "cmdq_mem", "cmdq_ice"; + reg = <0x7c4000 0x1000>, <0x7c5000 0x1000>; + reg-names = "hc_mem", "cmdq_mem"; interrupts = , ; interrupt-names = "hc_irq", "pwr_irq"; + sdhc-msm-crypto = <&sdcc1_ice>; qcom,bus-width = <8>; qcom,large-address-bus; @@ -2338,11 +2339,11 @@ ufshc_mem: ufshc@1d84000 { compatible = "qcom,ufshc"; - reg = 
<0x1d84000 0x3000>, <0x1d90000 0x8000>; - reg-names = "ufs_mem", "ufs_ice"; + reg = <0x1d84000 0x3000>; interrupts = <0 265 0>; phys = <&ufsphy_mem>; phy-names = "ufsphy"; + ufs-qcom-crypto = <&ufs_ice>; lanes-per-direction = <1>; dev-ref-clk-freq = <0>; /* 19.2 MHz */ diff --git a/arch/arm64/boot/dts/qcom/sm6150.dtsi b/arch/arm64/boot/dts/qcom/sm6150.dtsi index 6822b9dc4ce0..ee7812be6fcc 100644 --- a/arch/arm64/boot/dts/qcom/sm6150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150.dtsi @@ -1409,11 +1409,12 @@ sdhc_1: sdhci@7c4000 { compatible = "qcom,sdhci-msm-v5"; - reg = <0x7c4000 0x1000>, <0x7c5000 0x1000>, <0x7C8000 0x8000>; - reg-names = "hc_mem", "cmdq_mem", "cmdq_ice"; + reg = <0x7c4000 0x1000>, <0x7c5000 0x1000>; + reg-names = "hc_mem", "cmdq_mem"; interrupts = <0 641 0>, <0 644 0>; interrupt-names = "hc_irq", "pwr_irq"; + sdhc-msm-crypto = <&sdcc1_ice>; qcom,bus-width = <8>; qcom,large-address-bus; @@ -1622,11 +1623,11 @@ ufshc_mem: ufshc@1d84000 { compatible = "qcom,ufshc"; - reg = <0x1d84000 0x3000>, <0x1d90000 0x8000>; - reg-names = "ufs_mem", "ufs_ice"; + reg = <0x1d84000 0x3000>; interrupts = <0 265 0>; phys = <&ufsphy_mem>; phy-names = "ufsphy"; + ufs-qcom-crypto = <&ufs_ice>; lanes-per-direction = <1>; dev-ref-clk-freq = <0>; /* 19.2 MHz */ diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index 8428b869d768..bcc15b5f454e 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -2281,6 +2281,7 @@ reg = <0x1d87000 0xda8>; /* PHY regs */ reg-names = "phy_mem"; #phy-cells = <0>; + ufs-qcom-crypto = <&ufs_ice>; lanes-per-direction = <2>; @@ -2296,11 +2297,11 @@ ufshc_mem: ufshc@1d84000 { compatible = "qcom,ufshc"; - reg = <0x1d84000 0x2500>, <0x1d90000 0x8000>; - reg-names = "ufs_mem", "ufs_ice"; + reg = <0x1d84000 0x2500>; interrupts = <0 265 0>; phys = <&ufsphy_mem>; phy-names = "ufsphy"; + ufs-qcom-crypto = <&ufs_ice>; lanes-per-direction = <2>; dev-ref-clk-freq = <0>; /* 19.2 
MHz */ From d6d7cc1b300910be55ea5bf320c49a529af04f9b Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 19:54:52 +0530 Subject: [PATCH 120/141] Revert "defconfig: Enable new file encryption flags" This reverts commit 162c3e7e6bdfa29163ab15fb32e02bdb2ffd71e4. Signed-off-by: UtsavBalar1231 --- arch/arm64/configs/vendor/atoll-perf_defconfig | 10 ---------- arch/arm64/configs/vendor/atoll_defconfig | 10 ---------- arch/arm64/configs/vendor/sdmsteppe-perf_defconfig | 10 ---------- arch/arm64/configs/vendor/sdmsteppe_defconfig | 10 ---------- arch/arm64/configs/vendor/sm8150-perf_defconfig | 8 -------- arch/arm64/configs/vendor/sm8150_defconfig | 8 -------- 6 files changed, 56 deletions(-) diff --git a/arch/arm64/configs/vendor/atoll-perf_defconfig b/arch/arm64/configs/vendor/atoll-perf_defconfig index 1298d7b34fba..8703fffe9ba7 100644 --- a/arch/arm64/configs/vendor/atoll-perf_defconfig +++ b/arch/arm64/configs/vendor/atoll-perf_defconfig @@ -52,8 +52,6 @@ CONFIG_MODVERSIONS=y CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y -CONFIG_BLK_INLINE_ENCRYPTION=y -CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_ARCH_QCOM=y @@ -282,12 +280,9 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_CRYPTO=y -CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -513,8 +508,6 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y CONFIG_MMC_CQ_HCI=y -CONFIG_MMC_CQ_HCI_CRYPTO=y -CONFIG_MMC_CQ_HCI_CRYPTO_QTI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_QPNP_FLASH_V2=y @@ -643,8 +636,6 @@ CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_QCOM_CDSP_RM=y CONFIG_QCOM_CX_IPEAK=y -CONFIG_QTI_CRYPTO_COMMON=y -CONFIG_QTI_CRYPTO_TZ=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y 
CONFIG_ARM_MEMLAT_MON=y @@ -686,7 +677,6 @@ CONFIG_EXT4_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y -CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_FS_VERITY=y CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y CONFIG_QUOTA=y diff --git a/arch/arm64/configs/vendor/atoll_defconfig b/arch/arm64/configs/vendor/atoll_defconfig index f60a0c9906ed..35b0bb68edfe 100644 --- a/arch/arm64/configs/vendor/atoll_defconfig +++ b/arch/arm64/configs/vendor/atoll_defconfig @@ -55,8 +55,6 @@ CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y # CONFIG_BLK_DEV_BSG is not set -CONFIG_BLK_INLINE_ENCRYPTION=y -CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_DEADLINE is not set CONFIG_CFQ_GROUP_IOSCHED=y @@ -293,12 +291,9 @@ CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y -CONFIG_SCSI_UFS_CRYPTO=y -CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -528,8 +523,6 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y CONFIG_MMC_CQ_HCI=y -CONFIG_MMC_CQ_HCI_CRYPTO=y -CONFIG_MMC_CQ_HCI_CRYPTO_QTI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_QPNP_FLASH_V2=y @@ -670,8 +663,6 @@ CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_QCOM_CDSP_RM=y CONFIG_QCOM_CX_IPEAK=y -CONFIG_QTI_CRYPTO_COMMON=y -CONFIG_QTI_CRYPTO_TZ=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y @@ -716,7 +707,6 @@ CONFIG_EXT4_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y -CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_FS_VERITY=y CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y CONFIG_QUOTA=y diff --git a/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig b/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig index 825622c8c55a..f06dced29a56 100644 --- 
a/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig @@ -51,8 +51,6 @@ CONFIG_MODVERSIONS=y CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y -CONFIG_BLK_INLINE_ENCRYPTION=y -CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_ARCH_QCOM=y @@ -276,12 +274,9 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_CRYPTO=y -CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -499,8 +494,6 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y CONFIG_MMC_CQ_HCI=y -CONFIG_MMC_CQ_HCI_CRYPTO=y -CONFIG_MMC_CQ_HCI_CRYPTO_QTI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_QPNP_FLASH_V2=y @@ -622,8 +615,6 @@ CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_QCOM_CDSP_RM=y CONFIG_QCOM_CX_IPEAK=y -CONFIG_QTI_CRYPTO_COMMON=y -CONFIG_QTI_CRYPTO_TZ=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y @@ -664,7 +655,6 @@ CONFIG_EXT4_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y -CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_FS_VERITY=y CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y CONFIG_QUOTA=y diff --git a/arch/arm64/configs/vendor/sdmsteppe_defconfig b/arch/arm64/configs/vendor/sdmsteppe_defconfig index f7c6eefdc4cd..e23a67e5fa74 100644 --- a/arch/arm64/configs/vendor/sdmsteppe_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe_defconfig @@ -53,8 +53,6 @@ CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y # CONFIG_BLK_DEV_BSG is not set -CONFIG_BLK_INLINE_ENCRYPTION=y -CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_DEADLINE is not set CONFIG_CFQ_GROUP_IOSCHED=y @@ -287,12 +285,9 @@ CONFIG_SCSI_UFSHCD=y 
CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y -CONFIG_SCSI_UFS_CRYPTO=y -CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -523,8 +518,6 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y CONFIG_MMC_CQ_HCI=y -CONFIG_MMC_CQ_HCI_CRYPTO=y -CONFIG_MMC_CQ_HCI_CRYPTO_QTI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_QPNP_FLASH_V2=y @@ -657,8 +650,6 @@ CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_QCOM_CDSP_RM=y CONFIG_QCOM_CX_IPEAK=y -CONFIG_QTI_CRYPTO_COMMON=y -CONFIG_QTI_CRYPTO_TZ=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y @@ -701,7 +692,6 @@ CONFIG_EXT4_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y -CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_FS_VERITY=y CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y CONFIG_QUOTA=y diff --git a/arch/arm64/configs/vendor/sm8150-perf_defconfig b/arch/arm64/configs/vendor/sm8150-perf_defconfig index 16fcd3c04a7f..c5444dbd97af 100644 --- a/arch/arm64/configs/vendor/sm8150-perf_defconfig +++ b/arch/arm64/configs/vendor/sm8150-perf_defconfig @@ -53,8 +53,6 @@ CONFIG_MODVERSIONS=y CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y -CONFIG_BLK_INLINE_ENCRYPTION=y -CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_ARCH_QCOM=y @@ -287,12 +285,9 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_CRYPTO=y -CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -629,8 +624,6 @@ CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_QCOM_CDSP_RM=y CONFIG_QCOM_AOP_DDR_MESSAGING=y CONFIG_QCOM_AOP_DDRSS_COMMANDS=y -CONFIG_QTI_CRYPTO_COMMON=y 
-CONFIG_QTI_CRYPTO_TZ=y CONFIG_QCOM_HYP_CORE_CTL=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y @@ -671,7 +664,6 @@ CONFIG_EXT4_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y -CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_FS_VERITY=y CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y CONFIG_QUOTA=y diff --git a/arch/arm64/configs/vendor/sm8150_defconfig b/arch/arm64/configs/vendor/sm8150_defconfig index 3a528fdee801..53fd6b98411a 100644 --- a/arch/arm64/configs/vendor/sm8150_defconfig +++ b/arch/arm64/configs/vendor/sm8150_defconfig @@ -56,8 +56,6 @@ CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y # CONFIG_BLK_DEV_BSG is not set -CONFIG_BLK_INLINE_ENCRYPTION=y -CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_DEADLINE is not set CONFIG_CFQ_GROUP_IOSCHED=y @@ -300,12 +298,9 @@ CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y -CONFIG_SCSI_UFS_CRYPTO=y -CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -657,8 +652,6 @@ CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_QCOM_CDSP_RM=y CONFIG_QCOM_AOP_DDR_MESSAGING=y CONFIG_QCOM_AOP_DDRSS_COMMANDS=y -CONFIG_QTI_CRYPTO_COMMON=y -CONFIG_QTI_CRYPTO_TZ=y CONFIG_QCOM_HYP_CORE_CTL=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y @@ -701,7 +694,6 @@ CONFIG_EXT4_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y -CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_FS_VERITY=y CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y CONFIG_QUOTA=y From 950b515a085286c14e40989cb0ae8ceb824929ac Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 19:54:53 +0530 Subject: [PATCH 121/141] Revert "dm: Support legacy on disk format in dm-default-key" This reverts commit ba3bee4b2734aeb6ec3fcdf72a90fc6f8c95f74e. 
Signed-off-by: UtsavBalar1231 --- block/blk-crypto-fallback.c | 3 ++- drivers/md/dm-default-key.c | 19 ------------------- 2 files changed, 2 insertions(+), 20 deletions(-) diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index 18b6851d8301..ad83e1077ba3 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -600,7 +600,8 @@ int __init blk_crypto_fallback_init(void) crypto_mode_supported[i] = 0xFFFFFFFF; crypto_mode_supported[BLK_ENCRYPTION_MODE_INVALID] = 0; - blk_crypto_ksm = keyslot_manager_create(blk_crypto_num_keyslots, + blk_crypto_ksm = keyslot_manager_create( + NULL, blk_crypto_num_keyslots, &blk_crypto_ksm_ll_ops, BLK_CRYPTO_FEATURE_STANDARD_KEYS, crypto_mode_supported, NULL); diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c index 4b47f25a257e..3d0bd0645f7a 100644 --- a/drivers/md/dm-default-key.c +++ b/drivers/md/dm-default-key.c @@ -135,22 +135,6 @@ static int default_key_ctr_optional(struct dm_target *ti, return 0; } -void default_key_adjust_sector_size_and_iv(char **argv, struct dm_target *ti, - struct default_key_c **dkc) -{ - struct dm_dev *dev; - - dev = (*dkc)->dev; - - if (!strcmp(argv[0], "AES-256-XTS")) { - if (ti->len & (((*dkc)->sector_size >> SECTOR_SHIFT) - 1)) - (*dkc)->sector_size = SECTOR_SIZE; - - if (dev->bdev->bd_part) - (*dkc)->iv_offset += dev->bdev->bd_part->start_sect; - } -} - /* * Construct a default-key mapping: * @@ -241,9 +225,6 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (err) goto bad; } - - default_key_adjust_sector_size_and_iv(argv, ti, &dkc); - dkc->sector_bits = ilog2(dkc->sector_size); if (ti->len & ((dkc->sector_size >> SECTOR_SHIFT) - 1)) { ti->error = "Device size is not a multiple of sector_size"; From 4f40e609526bcb9c4782fab95a4f2b67b6d2afb1 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 19:54:53 +0530 Subject: [PATCH 122/141] Revert "fscrypt: support legacy inline crypto mode" This 
reverts commit da172629102626cf266fd37b9f434d34adb2ec56. Signed-off-by: UtsavBalar1231 --- fs/crypto/crypto.c | 9 +-------- fs/crypto/keyring.c | 2 +- fs/crypto/keysetup.c | 6 ++---- fs/crypto/keysetup_v1.c | 12 ------------ include/uapi/linux/fscrypt.h | 3 ++- 5 files changed, 6 insertions(+), 26 deletions(-) diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index cc8e334165f5..ed6ea28dbdad 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -72,16 +72,9 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, { u8 flags = fscrypt_policy_flags(&ci->ci_policy); - bool inlinecrypt = false; - -#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT - inlinecrypt = ci->ci_inlinecrypt; -#endif memset(iv, 0, ci->ci_mode->ivsize); - if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 || - ((fscrypt_policy_contents_mode(&ci->ci_policy) == - FSCRYPT_MODE_PRIVATE) && inlinecrypt)) { + if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) { WARN_ON_ONCE((u32)lblk_num != lblk_num); lblk_num |= (u64)ci->ci_inode->i_ino << 32; } else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index 9257ea1102b1..0081fd48e96f 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -652,7 +652,7 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) goto out_wipe_secret; err = -EINVAL; - if (arg.__flags & ~__FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) + if (arg.__flags) goto out_wipe_secret; break; case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index a3626425d633..c6ce78afbf8f 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -48,11 +48,9 @@ struct fscrypt_mode fscrypt_modes[] = { .blk_crypto_mode = BLK_ENCRYPTION_MODE_ADIANTUM, }, [FSCRYPT_MODE_PRIVATE] = { - .friendly_name = "ice", - .cipher_str = "xts(aes)", + .friendly_name = "ICE", + .cipher_str = "bugon", .keysize = 64, - .ivsize = 16, - .blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS, }, }; diff --git 
a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index ac549eeb1444..3f7bb48f7317 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -269,18 +269,6 @@ static int setup_v1_file_key_derived(struct fscrypt_info *ci, u8 *derived_key; int err; - /*Support legacy ice based content encryption mode*/ - if ((fscrypt_policy_contents_mode(&ci->ci_policy) == - FSCRYPT_MODE_PRIVATE) && - fscrypt_using_inline_encryption(ci)) { - - err = fscrypt_prepare_inline_crypt_key(&ci->ci_key, - raw_master_key, - ci->ci_mode->keysize, - false, - ci); - return err; - } /* * This cannot be a stack buffer because it will be passed to the * scatterlist crypto API during derive_key_aes(). diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index b134bfc90912..1b580ac60f98 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -27,8 +27,9 @@ #define FSCRYPT_MODE_AES_128_CBC 5 #define FSCRYPT_MODE_AES_128_CTS 6 #define FSCRYPT_MODE_ADIANTUM 9 +#define __FSCRYPT_MODE_MAX 9 #define FSCRYPT_MODE_PRIVATE 127 -#define __FSCRYPT_MODE_MAX 127 + /* * Legacy policy version; ad-hoc KDF and no key verification. * For new encrypted directories, use fscrypt_policy_v2 instead. From 7655190fc728c6cb90de7ee69834a4cb660da4e9 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Sat, 29 Aug 2020 10:33:17 +0530 Subject: [PATCH 123/141] Revert "mmc: host: Fix the condition to parse crypto clocks" This reverts commit 7bc85c41a81b4ac6f9eb13cfda40d1d2adb10d5c. 
Signed-off-by: UtsavBalar1231 --- drivers/mmc/host/sdhci-msm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 29a87b31b6de..9e9fb3afb0ec 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -2055,7 +2055,7 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev, } } - if (!sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates", + if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates", &ice_clk_table, &ice_clk_table_len, 0)) { if (ice_clk_table && ice_clk_table_len) { if (ice_clk_table_len != 2) { From 105c0cb220ea2c87386cba885d5e3d6d0fb0aeb0 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Sat, 29 Aug 2020 10:28:26 +0530 Subject: [PATCH 124/141] Revert "mmc: host: Set the supported dun size for crypto" This reverts commit b3dc768908f4607b90e2db5c49d580261bd84ead. Signed-off-by: UtsavBalar1231 --- drivers/mmc/host/cmdq_hci-crypto-qti.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/mmc/host/cmdq_hci-crypto-qti.c b/drivers/mmc/host/cmdq_hci-crypto-qti.c index 84718e8da7f3..9921a14c9cef 100644 --- a/drivers/mmc/host/cmdq_hci-crypto-qti.c +++ b/drivers/mmc/host/cmdq_hci-crypto-qti.c @@ -231,8 +231,6 @@ int cmdq_host_init_crypto_qti_spec(struct cmdq_host *host, err = -ENOMEM; goto out; } - keyslot_manager_set_max_dun_bytes(host->ksm, sizeof(u32)); - /* * In case host controller supports cryptographic operations * then, it uses 128bit task descriptor. Upper 64 bits of task From 185eb33fcc0f31a2d2ed9459270868965eb494ba Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 19:54:54 +0530 Subject: [PATCH 125/141] Revert "mmc: host: Fix the offset for ICE address" This reverts commit 2f70573ca48451e34f5a7336c25b8628c402e590. 
Signed-off-by: UtsavBalar1231 --- drivers/mmc/host/cmdq_hci.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c index 2d182bcb401f..a7b7597d96a3 100644 --- a/drivers/mmc/host/cmdq_hci.c +++ b/drivers/mmc/host/cmdq_hci.c @@ -411,11 +411,6 @@ static int cmdq_enable(struct mmc_host *mmc) if (cmdq_host_is_crypto_supported(cq_host)) { cmdq_crypto_enable(cq_host); cqcfg |= CQ_ICE_ENABLE; - /* For SDHC v5.0 onwards, ICE 3.0 specific registers are added - * in CQ register space, due to which few CQ registers are - * shifted. Set offset_changed boolean to use updated address. - */ - cq_host->offset_changed = true; } cmdq_writel(cq_host, cqcfg, CQCFG); From 0bb874e7e83dde730dfbc7517e23d019a64d467e Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 19:54:55 +0530 Subject: [PATCH 126/141] Revert "mmc: host: Add variant ops for cqhci crypto" This reverts commit 8e301b525543c67c6e4710e7334e33acc649ef22. Signed-off-by: UtsavBalar1231 --- drivers/mmc/host/Kconfig | 8 - drivers/mmc/host/Makefile | 1 - drivers/mmc/host/cmdq_hci-crypto-qti.c | 304 ------------------------- drivers/mmc/host/cmdq_hci-crypto-qti.h | 33 --- drivers/mmc/host/sdhci-msm.c | 8 - 5 files changed, 354 deletions(-) delete mode 100644 drivers/mmc/host/cmdq_hci-crypto-qti.c delete mode 100644 drivers/mmc/host/cmdq_hci-crypto-qti.h diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index f361eed35180..1919fbaffc8d 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -922,11 +922,3 @@ config MMC_CQ_HCI_CRYPTO Enabling this makes it possible for the kernel to use the crypto capabilities of the CQHCI device (if present) to perform crypto operations on data being transferred to/from the device. 
- -config MMC_CQ_HCI_CRYPTO_QTI - bool "Vendor specific CQHCI Crypto Engine Support" - depends on MMC_CQ_HCI_CRYPTO - help - Enable Vendor Crypto Engine Support in CQHCI - Enabling this allows kernel to use CQHCI crypto operations defined - and implemented by QTI. diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 3b2f1dd243c3..7ab3a706bd38 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -92,7 +92,6 @@ obj-$(CONFIG_MMC_CQ_HCI) += cmdq_hci.o obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o obj-$(CONFIG_MMC_CQ_HCI_CRYPTO) += cmdq_hci-crypto.o -obj-$(CONFIG_MMC_CQ_HCI_CRYPTO_QTI) += cmdq_hci-crypto-qti.o ifeq ($(CONFIG_CB710_DEBUG),y) CFLAGS-cb710-mmc += -DDEBUG diff --git a/drivers/mmc/host/cmdq_hci-crypto-qti.c b/drivers/mmc/host/cmdq_hci-crypto-qti.c deleted file mode 100644 index 9921a14c9cef..000000000000 --- a/drivers/mmc/host/cmdq_hci-crypto-qti.c +++ /dev/null @@ -1,304 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2020, Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#include -#include "sdhci.h" -#include "sdhci-pltfm.h" -#include "sdhci-msm.h" -#include "cmdq_hci-crypto-qti.h" -#include - -#define RAW_SECRET_SIZE 32 -#define MINIMUM_DUN_SIZE 512 -#define MAXIMUM_DUN_SIZE 65536 - -static struct cmdq_host_crypto_variant_ops cmdq_crypto_qti_variant_ops = { - .host_init_crypto = cmdq_crypto_qti_init_crypto, - .enable = cmdq_crypto_qti_enable, - .disable = cmdq_crypto_qti_disable, - .resume = cmdq_crypto_qti_resume, - .debug = cmdq_crypto_qti_debug, -}; - -static bool ice_cap_idx_valid(struct cmdq_host *host, - unsigned int cap_idx) -{ - return cap_idx < host->crypto_capabilities.num_crypto_cap; -} - -static uint8_t get_data_unit_size_mask(unsigned int data_unit_size) -{ - if (data_unit_size < MINIMUM_DUN_SIZE || - data_unit_size > MAXIMUM_DUN_SIZE || - !is_power_of_2(data_unit_size)) - return 0; - - return data_unit_size / MINIMUM_DUN_SIZE; -} - - -void cmdq_crypto_qti_enable(struct cmdq_host *host) -{ - int err = 0; - - if (!cmdq_host_is_crypto_supported(host)) - return; - - host->caps |= CMDQ_CAP_CRYPTO_SUPPORT; - - err = crypto_qti_enable(host->crypto_vops->priv); - if (err) { - pr_err("%s: Error enabling crypto, err %d\n", - __func__, err); - cmdq_crypto_qti_disable(host); - } -} - -void cmdq_crypto_qti_disable(struct cmdq_host *host) -{ - /* cmdq_crypto_disable_spec(host) and - * crypto_qti_disable(host->crypto_vops->priv) - * are needed here? 
- */ -} - -static int cmdq_crypto_qti_keyslot_program(struct keyslot_manager *ksm, - const struct blk_crypto_key *key, - unsigned int slot) -{ - struct cmdq_host *host = keyslot_manager_private(ksm); - int err = 0; - u8 data_unit_mask; - int crypto_alg_id; - - crypto_alg_id = cmdq_crypto_cap_find(host, key->crypto_mode, - key->data_unit_size); - - if (!cmdq_is_crypto_enabled(host) || - !cmdq_keyslot_valid(host, slot) || - !ice_cap_idx_valid(host, crypto_alg_id)) { - return -EINVAL; - } - - data_unit_mask = get_data_unit_size_mask(key->data_unit_size); - - if (!(data_unit_mask & - host->crypto_cap_array[crypto_alg_id].sdus_mask)) { - return -EINVAL; - } - - err = crypto_qti_keyslot_program(host->crypto_vops->priv, key, - slot, data_unit_mask, crypto_alg_id); - if (err) - pr_err("%s: failed with error %d\n", __func__, err); - - return err; -} - -static int cmdq_crypto_qti_keyslot_evict(struct keyslot_manager *ksm, - const struct blk_crypto_key *key, - unsigned int slot) -{ - int err = 0; - struct cmdq_host *host = keyslot_manager_private(ksm); - - if (!cmdq_is_crypto_enabled(host) || - !cmdq_keyslot_valid(host, slot)) - return -EINVAL; - - err = crypto_qti_keyslot_evict(host->crypto_vops->priv, slot); - if (err) - pr_err("%s: failed with error %d\n", __func__, err); - - return err; -} - -static int cmdq_crypto_qti_derive_raw_secret(struct keyslot_manager *ksm, - const u8 *wrapped_key, unsigned int wrapped_key_size, - u8 *secret, unsigned int secret_size) -{ - int err = 0; - - if (wrapped_key_size <= RAW_SECRET_SIZE) { - pr_err("%s: Invalid wrapped_key_size: %u\n", __func__, - wrapped_key_size); - err = -EINVAL; - return err; - } - if (secret_size != RAW_SECRET_SIZE) { - pr_err("%s: Invalid secret size: %u\n", __func__, secret_size); - err = -EINVAL; - return err; - } - memcpy(secret, wrapped_key, secret_size); - return 0; -} - -static const struct keyslot_mgmt_ll_ops cmdq_crypto_qti_ksm_ops = { - .keyslot_program = cmdq_crypto_qti_keyslot_program, - .keyslot_evict = 
cmdq_crypto_qti_keyslot_evict, - .derive_raw_secret = cmdq_crypto_qti_derive_raw_secret -}; - -enum blk_crypto_mode_num cmdq_blk_crypto_qti_mode_num_for_alg_dusize( - enum cmdq_crypto_alg cmdq_crypto_alg, - enum cmdq_crypto_key_size key_size) -{ - /* - * Currently the only mode that eMMC and blk-crypto both support. - */ - if (cmdq_crypto_alg == CMDQ_CRYPTO_ALG_AES_XTS && - key_size == CMDQ_CRYPTO_KEY_SIZE_256) - return BLK_ENCRYPTION_MODE_AES_256_XTS; - - return BLK_ENCRYPTION_MODE_INVALID; -} - -int cmdq_host_init_crypto_qti_spec(struct cmdq_host *host, - const struct keyslot_mgmt_ll_ops *ksm_ops) -{ - int cap_idx = 0; - int err = 0; - unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX]; - enum blk_crypto_mode_num blk_mode_num; - - /* Default to disabling crypto */ - host->caps &= ~CMDQ_CAP_CRYPTO_SUPPORT; - - if (!(cmdq_readl(host, CQCAP) & CQ_CAP_CS)) { - pr_debug("%s no crypto capability\n", __func__); - err = -ENODEV; - goto out; - } - - /* - * Crypto Capabilities should never be 0, because the - * config_array_ptr > 04h. So we use a 0 value to indicate that - * crypto init failed, and can't be enabled. 
- */ - host->crypto_capabilities.reg_val = cmdq_readl(host, CQ_CCAP); - host->crypto_cfg_register = - (u32)host->crypto_capabilities.config_array_ptr * 0x100; - host->crypto_cap_array = - devm_kcalloc(mmc_dev(host->mmc), - host->crypto_capabilities.num_crypto_cap, - sizeof(host->crypto_cap_array[0]), GFP_KERNEL); - if (!host->crypto_cap_array) { - err = -ENOMEM; - pr_err("%s failed to allocate memory\n", __func__); - goto out; - } - - memset(crypto_modes_supported, 0, sizeof(crypto_modes_supported)); - - /* - * Store all the capabilities now so that we don't need to repeatedly - * access the device each time we want to know its capabilities - */ - for (cap_idx = 0; cap_idx < host->crypto_capabilities.num_crypto_cap; - cap_idx++) { - host->crypto_cap_array[cap_idx].reg_val = - cpu_to_le32(cmdq_readl(host, - CQ_CRYPTOCAP + - cap_idx * sizeof(__le32))); - blk_mode_num = cmdq_blk_crypto_qti_mode_num_for_alg_dusize( - host->crypto_cap_array[cap_idx].algorithm_id, - host->crypto_cap_array[cap_idx].key_size); - if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID) - continue; - crypto_modes_supported[blk_mode_num] |= - host->crypto_cap_array[cap_idx].sdus_mask * 512; - } - - host->ksm = keyslot_manager_create(cmdq_num_keyslots(host), ksm_ops, - BLK_CRYPTO_FEATURE_STANDARD_KEYS | - BLK_CRYPTO_FEATURE_WRAPPED_KEYS, - crypto_modes_supported, host); - - if (!host->ksm) { - err = -ENOMEM; - goto out; - } - /* - * In case host controller supports cryptographic operations - * then, it uses 128bit task descriptor. Upper 64 bits of task - * descriptor would be used to pass crypto specific informaton. 
- */ - host->caps |= CMDQ_TASK_DESC_SZ_128; - - return 0; - -out: - /* Indicate that init failed by setting crypto_capabilities to 0 */ - host->crypto_capabilities.reg_val = 0; - return err; -} - -int cmdq_crypto_qti_init_crypto(struct cmdq_host *host, - const struct keyslot_mgmt_ll_ops *ksm_ops) -{ - int err = 0; - struct sdhci_host *sdhci = mmc_priv(host->mmc); - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci); - struct sdhci_msm_host *msm_host = pltfm_host->priv; - struct resource *cmdq_ice_memres = NULL; - - cmdq_ice_memres = platform_get_resource_byname(msm_host->pdev, - IORESOURCE_MEM, - "cmdq_ice"); - if (!cmdq_ice_memres) { - pr_debug("%s ICE not supported\n", __func__); - host->icemmio = NULL; - return PTR_ERR(cmdq_ice_memres); - } - - host->icemmio = devm_ioremap(&msm_host->pdev->dev, - cmdq_ice_memres->start, - resource_size(cmdq_ice_memres)); - if (!host->icemmio) { - pr_err("%s failed to remap ice regs\n", __func__); - return PTR_ERR(host->icemmio); - } - - err = cmdq_host_init_crypto_qti_spec(host, &cmdq_crypto_qti_ksm_ops); - if (err) { - pr_err("%s: Error initiating crypto capabilities, err %d\n", - __func__, err); - return err; - } - - err = crypto_qti_init_crypto(&msm_host->pdev->dev, - host->icemmio, (void **)&host->crypto_vops->priv); - if (err) { - pr_err("%s: Error initiating crypto, err %d\n", - __func__, err); - } - return err; -} - -int cmdq_crypto_qti_debug(struct cmdq_host *host) -{ - return crypto_qti_debug(host->crypto_vops->priv); -} - -void cmdq_crypto_qti_set_vops(struct cmdq_host *host) -{ - return cmdq_crypto_set_vops(host, &cmdq_crypto_qti_variant_ops); -} - -int cmdq_crypto_qti_resume(struct cmdq_host *host) -{ - return crypto_qti_resume(host->crypto_vops->priv); -} diff --git a/drivers/mmc/host/cmdq_hci-crypto-qti.h b/drivers/mmc/host/cmdq_hci-crypto-qti.h deleted file mode 100644 index e63465bca3e2..000000000000 --- a/drivers/mmc/host/cmdq_hci-crypto-qti.h +++ /dev/null @@ -1,33 +0,0 @@ -/* Copyright (c) 2020, The Linux 
Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _CMDQ_HCI_CRYPTO_QTI_H -#define _CMDQ_HCI_CRYPTO_QTI_H - -#include "cmdq_hci-crypto.h" - -void cmdq_crypto_qti_enable(struct cmdq_host *host); - -void cmdq_crypto_qti_disable(struct cmdq_host *host); - -#ifdef CONFIG_BLK_INLINE_ENCRYPTION -int cmdq_crypto_qti_init_crypto(struct cmdq_host *host, - const struct keyslot_mgmt_ll_ops *ksm_ops); -#endif - -int cmdq_crypto_qti_debug(struct cmdq_host *host); - -void cmdq_crypto_qti_set_vops(struct cmdq_host *host); - -int cmdq_crypto_qti_resume(struct cmdq_host *host); - -#endif /* _CMDQ_HCI_CRYPTO_QTI_H */ diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 9e9fb3afb0ec..ed15b5bc8018 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -43,7 +43,6 @@ #include "sdhci-msm.h" #include "cmdq_hci.h" -#include "cmdq_hci-crypto-qti.h" #define QOS_REMOVE_DELAY_MS 10 #define CORE_POWER 0x0 @@ -4645,13 +4644,6 @@ static void sdhci_msm_cmdq_init(struct sdhci_host *host, } else { msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE; } - /* - * Set the vendor specific ops needed for ICE. - * Default implementation if the ops are not set. 
- */ -#ifdef CONFIG_MMC_CQ_HCI_CRYPTO_QTI - cmdq_crypto_qti_set_vops(host->cq_host); -#endif } #else static void sdhci_msm_cmdq_init(struct sdhci_host *host, From 61b234edc5a673eb95c3ee3f953027a528d8b2fb Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 19:54:55 +0530 Subject: [PATCH 127/141] Revert "mmc: cqhci: Add inline crypto support to cqhci" This reverts commit a058c82783f224cb476b32a0be6460740d047ddc. Signed-off-by: UtsavBalar1231 --- drivers/mmc/host/cmdq_hci.c | 68 +------------------------------------ drivers/mmc/host/cmdq_hci.h | 20 ----------- 2 files changed, 1 insertion(+), 87 deletions(-) diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c index a7b7597d96a3..f1e4ba86f5d4 100644 --- a/drivers/mmc/host/cmdq_hci.c +++ b/drivers/mmc/host/cmdq_hci.c @@ -29,10 +29,8 @@ #include #include "cmdq_hci.h" -#include "cmdq_hci-crypto.h" #include "sdhci.h" #include "sdhci-msm.h" -#include "../core/queue.h" #define DCMD_SLOT 31 #define NUM_SLOTS 32 @@ -279,8 +277,6 @@ static void cmdq_dumpregs(struct cmdq_host *cq_host) cmdq_readl(cq_host, CQ_VENDOR_CFG + offset)); pr_err(DRV_NAME ": ===========================================\n"); - cmdq_crypto_debug(cq_host); - cmdq_dump_task_history(cq_host); if (cq_host->ops->dump_vendor_regs) cq_host->ops->dump_vendor_regs(mmc); @@ -408,11 +404,6 @@ static int cmdq_enable(struct mmc_host *mmc) cqcfg = ((cq_host->caps & CMDQ_TASK_DESC_SZ_128 ? CQ_TASK_DESC_SZ : 0) | (dcmd_enable ? 
CQ_DCMD : 0)); - if (cmdq_host_is_crypto_supported(cq_host)) { - cmdq_crypto_enable(cq_host); - cqcfg |= CQ_ICE_ENABLE; - } - cmdq_writel(cq_host, cqcfg, CQCFG); /* enable CQ_HOST */ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCFG) | CQ_ENABLE, @@ -482,9 +473,6 @@ static void cmdq_disable_nosync(struct mmc_host *mmc, bool soft) { struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc); - if (cmdq_host_is_crypto_supported(cq_host)) - cmdq_crypto_disable(cq_host); - if (soft) { cmdq_writel(cq_host, cmdq_readl( cq_host, CQCFG) & ~(CQ_ENABLE), @@ -524,8 +512,6 @@ static void cmdq_reset(struct mmc_host *mmc, bool soft) cmdq_disable(mmc, true); - cmdq_crypto_reset(cq_host); - if (cq_host->ops->reset) { ret = cq_host->ops->reset(mmc); if (ret) { @@ -555,29 +541,6 @@ static void cmdq_reset(struct mmc_host *mmc, bool soft) mmc_host_clr_cq_disable(mmc); } -static inline void cmdq_prep_crypto_desc(struct cmdq_host *cq_host, - u64 *task_desc, u64 ice_ctx) -{ - u64 *ice_desc = NULL; - - if (cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) { - /* - * Get the address of ice context for the given task descriptor. 
- * ice context is present in the upper 64bits of task descriptor - * ice_conext_base_address = task_desc + 8-bytes - */ - ice_desc = (u64 *)((u8 *)task_desc + - CQ_TASK_DESC_ICE_PARAM_OFFSET); - memset(ice_desc, 0, CQ_TASK_DESC_ICE_PARAMS_SIZE); - - /* - * Assign upper 64bits data of task descritor with ice context - */ - if (ice_ctx) - *ice_desc = ice_ctx; - } -} - static void cmdq_prep_task_desc(struct mmc_request *mrq, u64 *data, bool intr, bool qbr) { @@ -779,7 +742,6 @@ static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq) u32 tag = mrq->cmdq_req->tag; struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc); struct sdhci_host *host = mmc_priv(mmc); - u64 ice_ctx = 0; if (!cq_host->enabled) { pr_err("%s: CMDQ host not enabled yet !!!\n", @@ -798,22 +760,12 @@ static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq) goto ring_doorbell; } - err = cmdq_crypto_get_ctx(cq_host, mrq, &ice_ctx); - if (err) { - mmc->err_stats[MMC_ERR_ICE_CFG]++; - pr_err("%s: failed to retrieve crypto ctx for tag %d\n", - mmc_hostname(mmc), tag); - goto ice_err; - } - task_desc = (__le64 __force *)get_desc(cq_host, tag); cmdq_prep_task_desc(mrq, &data, 1, (mrq->cmdq_req->cmdq_req_flags & QBR)); *task_desc = cpu_to_le64(data); - cmdq_prep_crypto_desc(cq_host, task_desc, ice_ctx); - cmdq_log_task_desc_history(cq_host, *task_desc, false); err = cmdq_prep_tran_desc(mrq, cq_host, tag); @@ -840,12 +792,8 @@ ring_doorbell: /* Commit the doorbell write immediately */ wmb(); - return err; - -ice_err: if (err) cmdq_runtime_pm_put(cq_host); - out: return err; } @@ -862,8 +810,6 @@ static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag) if (tag == cq_host->dcmd_slot) mrq->cmd->resp[0] = cmdq_readl(cq_host, CQCRDCT); - cmdq_complete_crypto_desc(cq_host, mrq, NULL); - if (mrq->cmdq_req->cmdq_req_flags & DCMD) cmdq_writel(cq_host, cmdq_readl(cq_host, CQ_VENDOR_CFG + offset) | @@ -1334,15 +1280,7 @@ static int cmdq_late_init(struct mmc_host 
*mmc) static void cqhci_crypto_update_queue(struct mmc_host *mmc, struct request_queue *queue) { - struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc); - - if (cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) { - if (queue) - cmdq_crypto_setup_rq_keyslot_manager(cq_host, queue); - else - pr_err("%s can not register keyslot manager\n", - __func__); - } + //struct cqhci_host *cq_host = mmc->cqe_private; } static const struct mmc_cmdq_host_ops cmdq_host_ops = { @@ -1409,10 +1347,6 @@ int cmdq_init(struct cmdq_host *cq_host, struct mmc_host *mmc, if (!cq_host->mrq_slot) return -ENOMEM; - err = cmdq_host_init_crypto(cq_host); - if (err) - pr_err("%s: CMDQ Crypto init failed err %d\n", err); - init_completion(&cq_host->halt_comp); return err; } diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h index 79e7acc69e6a..65828d8026dc 100644 --- a/drivers/mmc/host/cmdq_hci.h +++ b/drivers/mmc/host/cmdq_hci.h @@ -28,7 +28,6 @@ #define CQ_DCMD 0x00001000 #define CQ_TASK_DESC_SZ 0x00000100 #define CQ_ENABLE 0x00000001 -#define CQ_ICE_ENABLE 0x00000002 /* control */ #define CQCTL 0x0C @@ -148,14 +147,6 @@ #define DAT_LENGTH(x) ((x & 0xFFFF) << 16) #define DAT_ADDR_LO(x) ((x & 0xFFFFFFFF) << 32) #define DAT_ADDR_HI(x) ((x & 0xFFFFFFFF) << 0) -#define DATA_UNIT_NUM(x) (((u64)(x) & 0xFFFFFFFF) << 0) -#define CRYPTO_CONFIG_INDEX(x) (((u64)(x) & 0xFF) << 32) -#define CRYPTO_ENABLE(x) (((u64)(x) & 0x1) << 47) - -/* ICE context is present in the upper 64bits of task descriptor */ -#define CQ_TASK_DESC_ICE_PARAM_OFFSET 8 -/* ICE descriptor size */ -#define CQ_TASK_DESC_ICE_PARAMS_SIZE 8 /* * Add new macro for updated CQ vendor specific @@ -256,7 +247,6 @@ struct task_history { struct cmdq_host { const struct cmdq_host_ops *ops; void __iomem *mmio; - void __iomem *icemmio; struct mmc_host *mmc; /* 64 bit DMA */ @@ -266,7 +256,6 @@ struct cmdq_host { u32 dcmd_slot; u32 caps; #define CMDQ_TASK_DESC_SZ_128 0x1 -#define CMDQ_CAP_CRYPTO_SUPPORT 0x2 u32 quirks; 
#define CMDQ_QUIRK_SHORT_TXFR_DESC_SZ 0x1 @@ -301,15 +290,6 @@ struct cmdq_host { struct completion halt_comp; struct mmc_request **mrq_slot; void *private; - const struct cmdq_host_crypto_variant_ops *crypto_vops; -#ifdef CONFIG_MMC_CQ_HCI_CRYPTO - union cmdq_crypto_capabilities crypto_capabilities; - union cmdq_crypto_cap_entry *crypto_cap_array; - u32 crypto_cfg_register; -#ifdef CONFIG_BLK_INLINE_ENCRYPTION - struct keyslot_manager *ksm; -#endif /* CONFIG_BLK_INLINE_ENCRYPTION */ -#endif /* CONFIG_SCSI_CQHCI_CRYPTO */ }; struct cmdq_host_ops { From 58509dbed5301733d0f75d255a25fbdfa7399b7b Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 19:54:56 +0530 Subject: [PATCH 128/141] Revert "mmc: cqhci: Add eMMC crypto APIs" This reverts commit e718389e5ebf677f3cac233b9243ad408165c221. Signed-off-by: UtsavBalar1231 --- drivers/mmc/host/Kconfig | 9 - drivers/mmc/host/Makefile | 2 +- drivers/mmc/host/cmdq_hci-crypto.c | 536 ----------------------------- drivers/mmc/host/cmdq_hci-crypto.h | 188 ---------- drivers/mmc/host/cmdq_hci.h | 28 -- 5 files changed, 1 insertion(+), 762 deletions(-) delete mode 100644 drivers/mmc/host/cmdq_hci-crypto.c delete mode 100644 drivers/mmc/host/cmdq_hci-crypto.h diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 1919fbaffc8d..979b909704df 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -913,12 +913,3 @@ config MMC_SDHCI_XENON This selects Marvell Xenon eMMC/SD/SDIO SDHCI. If you have a controller with this interface, say Y or M here. If unsure, say N. - -config MMC_CQ_HCI_CRYPTO - bool "CQHCI Crypto Engine Support" - depends on MMC_CQ_HCI && BLK_INLINE_ENCRYPTION - help - Enable Crypto Engine Support in CQHCI. - Enabling this makes it possible for the kernel to use the crypto - capabilities of the CQHCI device (if present) to perform crypto - operations on data being transferred to/from the device. 
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 7ab3a706bd38..6389e8125299 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -85,13 +85,13 @@ obj-$(CONFIG_MMC_SDHCI_OF_AT91) += sdhci-of-at91.o obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o +obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o obj-$(CONFIG_MMC_CQ_HCI) += cmdq_hci.o obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o -obj-$(CONFIG_MMC_CQ_HCI_CRYPTO) += cmdq_hci-crypto.o ifeq ($(CONFIG_CB710_DEBUG),y) CFLAGS-cb710-mmc += -DDEBUG diff --git a/drivers/mmc/host/cmdq_hci-crypto.c b/drivers/mmc/host/cmdq_hci-crypto.c deleted file mode 100644 index 26f84001f064..000000000000 --- a/drivers/mmc/host/cmdq_hci-crypto.c +++ /dev/null @@ -1,536 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Copyright (c) 2020 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * drivers/mmc/host/cmdq-crypto.c - Qualcomm Technologies, Inc. - * - * Original source is taken from: - * https://android.googlesource.com/kernel/common/+/4bac1109a10c55d49c0aa4f7ebdc4bc53cc368e8 - * The driver caters to crypto engine support for UFS controllers. 
- * The crypto engine programming sequence, HW functionality and register - * offset is almost same in UFS and eMMC controllers. - */ - -#include -#include "cmdq_hci-crypto.h" -#include "../core/queue.h" - -static bool cmdq_cap_idx_valid(struct cmdq_host *host, unsigned int cap_idx) -{ - return cap_idx < host->crypto_capabilities.num_crypto_cap; -} - -static u8 get_data_unit_size_mask(unsigned int data_unit_size) -{ - if (data_unit_size < 512 || data_unit_size > 65536 || - !is_power_of_2(data_unit_size)) - return 0; - - return data_unit_size / 512; -} - -static size_t get_keysize_bytes(enum cmdq_crypto_key_size size) -{ - switch (size) { - case CMDQ_CRYPTO_KEY_SIZE_128: - return 16; - case CMDQ_CRYPTO_KEY_SIZE_192: - return 24; - case CMDQ_CRYPTO_KEY_SIZE_256: - return 32; - case CMDQ_CRYPTO_KEY_SIZE_512: - return 64; - default: - return 0; - } -} - -int cmdq_crypto_cap_find(void *host_p, enum blk_crypto_mode_num crypto_mode, - unsigned int data_unit_size) -{ - struct cmdq_host *host = host_p; - enum cmdq_crypto_alg cmdq_alg; - u8 data_unit_mask; - int cap_idx; - enum cmdq_crypto_key_size cmdq_key_size; - union cmdq_crypto_cap_entry *ccap_array = host->crypto_cap_array; - - if (!cmdq_host_is_crypto_supported(host)) - return -EINVAL; - - switch (crypto_mode) { - case BLK_ENCRYPTION_MODE_AES_256_XTS: - cmdq_alg = CMDQ_CRYPTO_ALG_AES_XTS; - cmdq_key_size = CMDQ_CRYPTO_KEY_SIZE_256; - break; - default: - return -EINVAL; - } - - data_unit_mask = get_data_unit_size_mask(data_unit_size); - - for (cap_idx = 0; cap_idx < host->crypto_capabilities.num_crypto_cap; - cap_idx++) { - if (ccap_array[cap_idx].algorithm_id == cmdq_alg && - (ccap_array[cap_idx].sdus_mask & data_unit_mask) && - ccap_array[cap_idx].key_size == cmdq_key_size) - return cap_idx; - } - - return -EINVAL; -} -EXPORT_SYMBOL(cmdq_crypto_cap_find); - -/** - * cmdq_crypto_cfg_entry_write_key - Write a key into a crypto_cfg_entry - * - * Writes the key with the appropriate format - for AES_XTS, - * the first 
half of the key is copied as is, the second half is - * copied with an offset halfway into the cfg->crypto_key array. - * For the other supported crypto algs, the key is just copied. - * - * @cfg: The crypto config to write to - * @key: The key to write - * @cap: The crypto capability (which specifies the crypto alg and key size) - * - * Returns 0 on success, or -EINVAL - */ -static int cmdq_crypto_cfg_entry_write_key(union cmdq_crypto_cfg_entry *cfg, - const u8 *key, - union cmdq_crypto_cap_entry cap) -{ - size_t key_size_bytes = get_keysize_bytes(cap.key_size); - - if (key_size_bytes == 0) - return -EINVAL; - - switch (cap.algorithm_id) { - case CMDQ_CRYPTO_ALG_AES_XTS: - key_size_bytes *= 2; - if (key_size_bytes > CMDQ_CRYPTO_KEY_MAX_SIZE) - return -EINVAL; - - memcpy(cfg->crypto_key, key, key_size_bytes/2); - memcpy(cfg->crypto_key + CMDQ_CRYPTO_KEY_MAX_SIZE/2, - key + key_size_bytes/2, key_size_bytes/2); - return 0; - case CMDQ_CRYPTO_ALG_BITLOCKER_AES_CBC: - /* fall through */ - case CMDQ_CRYPTO_ALG_AES_ECB: - /* fall through */ - case CMDQ_CRYPTO_ALG_ESSIV_AES_CBC: - memcpy(cfg->crypto_key, key, key_size_bytes); - return 0; - } - - return -EINVAL; -} - -static void cmdq_program_key(struct cmdq_host *host, - const union cmdq_crypto_cfg_entry *cfg, - int slot) -{ - int i; - u32 slot_offset = host->crypto_cfg_register + slot * sizeof(*cfg); - - if (host->crypto_vops && host->crypto_vops->program_key) - host->crypto_vops->program_key(host, cfg, slot); - - /* Clear the dword 16 */ - cmdq_writel(host, 0, slot_offset + 16 * sizeof(cfg->reg_val[0])); - /* Ensure that CFGE is cleared before programming the key */ - wmb(); - for (i = 0; i < 16; i++) { - cmdq_writel(host, le32_to_cpu(cfg->reg_val[i]), - slot_offset + i * sizeof(cfg->reg_val[0])); - /* Spec says each dword in key must be written sequentially */ - wmb(); - } - /* Write dword 17 */ - cmdq_writel(host, le32_to_cpu(cfg->reg_val[17]), - slot_offset + 17 * sizeof(cfg->reg_val[0])); - /* Dword 16 must be 
written last */ - wmb(); - /* Write dword 16 */ - cmdq_writel(host, le32_to_cpu(cfg->reg_val[16]), - slot_offset + 16 * sizeof(cfg->reg_val[0])); - /*Ensure that dword 16 is written */ - wmb(); -} - -static void cmdq_crypto_clear_keyslot(struct cmdq_host *host, int slot) -{ - union cmdq_crypto_cfg_entry cfg = { {0} }; - - cmdq_program_key(host, &cfg, slot); -} - -static void cmdq_crypto_clear_all_keyslots(struct cmdq_host *host) -{ - int slot; - - for (slot = 0; slot < cmdq_num_keyslots(host); slot++) - cmdq_crypto_clear_keyslot(host, slot); -} - -static int cmdq_crypto_keyslot_program(struct keyslot_manager *ksm, - const struct blk_crypto_key *key, - unsigned int slot) -{ - struct cmdq_host *host = keyslot_manager_private(ksm); - int err = 0; - u8 data_unit_mask; - union cmdq_crypto_cfg_entry cfg; - int cap_idx; - - cap_idx = cmdq_crypto_cap_find(host, key->crypto_mode, - key->data_unit_size); - - if (!cmdq_is_crypto_enabled(host) || - !cmdq_keyslot_valid(host, slot) || - !cmdq_cap_idx_valid(host, cap_idx)) - return -EINVAL; - - data_unit_mask = get_data_unit_size_mask(key->data_unit_size); - - if (!(data_unit_mask & host->crypto_cap_array[cap_idx].sdus_mask)) - return -EINVAL; - - memset(&cfg, 0, sizeof(cfg)); - cfg.data_unit_size = data_unit_mask; - cfg.crypto_cap_idx = cap_idx; - cfg.config_enable |= CMDQ_CRYPTO_CONFIGURATION_ENABLE; - - err = cmdq_crypto_cfg_entry_write_key(&cfg, key->raw, - host->crypto_cap_array[cap_idx]); - if (err) - return err; - - cmdq_program_key(host, &cfg, slot); - - memzero_explicit(&cfg, sizeof(cfg)); - - return 0; -} - -static int cmdq_crypto_keyslot_evict(struct keyslot_manager *ksm, - const struct blk_crypto_key *key, - unsigned int slot) -{ - struct cmdq_host *host = keyslot_manager_private(ksm); - - if (!cmdq_is_crypto_enabled(host) || - !cmdq_keyslot_valid(host, slot)) - return -EINVAL; - - /* - * Clear the crypto cfg on the device. Clearing CFGE - * might not be sufficient, so just clear the entire cfg. 
- */ - cmdq_crypto_clear_keyslot(host, slot); - - return 0; -} - -/* Functions implementing eMMC v5.2 specification behaviour */ -void cmdq_crypto_enable_spec(struct cmdq_host *host) -{ - if (!cmdq_host_is_crypto_supported(host)) - return; - - host->caps |= CMDQ_CAP_CRYPTO_SUPPORT; -} -EXPORT_SYMBOL(cmdq_crypto_enable_spec); - -void cmdq_crypto_disable_spec(struct cmdq_host *host) -{ - host->caps &= ~CMDQ_CAP_CRYPTO_SUPPORT; -} -EXPORT_SYMBOL(cmdq_crypto_disable_spec); - -static const struct keyslot_mgmt_ll_ops cmdq_ksm_ops = { - .keyslot_program = cmdq_crypto_keyslot_program, - .keyslot_evict = cmdq_crypto_keyslot_evict, -}; - -enum blk_crypto_mode_num cmdq_crypto_blk_crypto_mode_num_for_alg_dusize( - enum cmdq_crypto_alg cmdq_crypto_alg, - enum cmdq_crypto_key_size key_size) -{ - /* - * Currently the only mode that eMMC and blk-crypto both support. - */ - if (cmdq_crypto_alg == CMDQ_CRYPTO_ALG_AES_XTS && - key_size == CMDQ_CRYPTO_KEY_SIZE_256) - return BLK_ENCRYPTION_MODE_AES_256_XTS; - - return BLK_ENCRYPTION_MODE_INVALID; -} - -/** - * cmdq_host_init_crypto - Read crypto capabilities, init crypto fields in host - * @host: Per adapter instance - * - * Returns 0 on success. Returns -ENODEV if such capabilities don't exist, and - * -ENOMEM upon OOM. - */ -int cmdq_host_init_crypto_spec(struct cmdq_host *host, - const struct keyslot_mgmt_ll_ops *ksm_ops) -{ - int cap_idx = 0; - int err = 0; - unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX]; - enum blk_crypto_mode_num blk_mode_num; - - /* Default to disabling crypto */ - host->caps &= ~CMDQ_CAP_CRYPTO_SUPPORT; - - if (!(cmdq_readl(host, CQCAP) & CQ_CAP_CS)) { - pr_err("%s no crypto capability\n", __func__); - err = -ENODEV; - goto out; - } - - /* - * Crypto Capabilities should never be 0, because the - * config_array_ptr > 04h. So we use a 0 value to indicate that - * crypto init failed, and can't be enabled. 
- */ - host->crypto_capabilities.reg_val = cmdq_readl(host, CQ_CCAP); - host->crypto_cfg_register = - (u32)host->crypto_capabilities.config_array_ptr * 0x100; - host->crypto_cap_array = - devm_kcalloc(mmc_dev(host->mmc), - host->crypto_capabilities.num_crypto_cap, - sizeof(host->crypto_cap_array[0]), GFP_KERNEL); - if (!host->crypto_cap_array) { - err = -ENOMEM; - pr_err("%s no memory cap\n", __func__); - goto out; - } - - memset(crypto_modes_supported, 0, sizeof(crypto_modes_supported)); - - /* - * Store all the capabilities now so that we don't need to repeatedly - * access the device each time we want to know its capabilities - */ - for (cap_idx = 0; cap_idx < host->crypto_capabilities.num_crypto_cap; - cap_idx++) { - host->crypto_cap_array[cap_idx].reg_val = - cpu_to_le32(cmdq_readl(host, - CQ_CRYPTOCAP + - cap_idx * sizeof(__le32))); - blk_mode_num = cmdq_crypto_blk_crypto_mode_num_for_alg_dusize( - host->crypto_cap_array[cap_idx].algorithm_id, - host->crypto_cap_array[cap_idx].key_size); - if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID) - continue; - crypto_modes_supported[blk_mode_num] |= - host->crypto_cap_array[cap_idx].sdus_mask * 512; - } - - cmdq_crypto_clear_all_keyslots(host); - - host->ksm = keyslot_manager_create(cmdq_num_keyslots(host), ksm_ops, - BLK_CRYPTO_FEATURE_STANDARD_KEYS, - crypto_modes_supported, host); - - if (!host->ksm) { - err = -ENOMEM; - goto out_free_caps; - } - /* - * In case host controller supports cryptographic operations - * then, it uses 128bit task descriptor. Upper 64 bits of task - * descriptor would be used to pass crypto specific informaton. - */ - host->caps |= CMDQ_TASK_DESC_SZ_128; - - return 0; -out_free_caps: - devm_kfree(mmc_dev(host->mmc), host->crypto_cap_array); -out: - // TODO: print error? 
- /* Indicate that init failed by setting crypto_capabilities to 0 */ - host->crypto_capabilities.reg_val = 0; - return err; -} -EXPORT_SYMBOL(cmdq_host_init_crypto_spec); - -void cmdq_crypto_setup_rq_keyslot_manager_spec(struct cmdq_host *host, - struct request_queue *q) -{ - if (!cmdq_host_is_crypto_supported(host) || !q) - return; - - q->ksm = host->ksm; -} -EXPORT_SYMBOL(cmdq_crypto_setup_rq_keyslot_manager_spec); - -void cmdq_crypto_destroy_rq_keyslot_manager_spec(struct cmdq_host *host, - struct request_queue *q) -{ - keyslot_manager_destroy(host->ksm); -} -EXPORT_SYMBOL(cmdq_crypto_destroy_rq_keyslot_manager_spec); - -int cmdq_prepare_crypto_desc_spec(struct cmdq_host *host, - struct mmc_request *mrq, - u64 *ice_ctx) -{ - struct bio_crypt_ctx *bc; - struct request *req = mrq->req; - - if (!req->bio || - !bio_crypt_should_process(req)) { - *ice_ctx = 0; - return 0; - } - if (WARN_ON(!cmdq_is_crypto_enabled(host))) { - /* - * Upper layer asked us to do inline encryption - * but that isn't enabled, so we fail this request. 
- */ - return -EINVAL; - } - - bc = req->bio->bi_crypt_context; - - if (!cmdq_keyslot_valid(host, bc->bc_keyslot)) - return -EINVAL; - - if (ice_ctx) { - *ice_ctx = DATA_UNIT_NUM(bc->bc_dun[0]) | - CRYPTO_CONFIG_INDEX(bc->bc_keyslot) | - CRYPTO_ENABLE(true); - } - - return 0; -} -EXPORT_SYMBOL(cmdq_prepare_crypto_desc_spec); - -/* Crypto Variant Ops Support */ - -void cmdq_crypto_enable(struct cmdq_host *host) -{ - if (host->crypto_vops && host->crypto_vops->enable) - return host->crypto_vops->enable(host); - - return cmdq_crypto_enable_spec(host); -} - -void cmdq_crypto_disable(struct cmdq_host *host) -{ - if (host->crypto_vops && host->crypto_vops->disable) - return host->crypto_vops->disable(host); - - return cmdq_crypto_disable_spec(host); -} - -int cmdq_host_init_crypto(struct cmdq_host *host) -{ - if (host->crypto_vops && host->crypto_vops->host_init_crypto) - return host->crypto_vops->host_init_crypto(host, - &cmdq_ksm_ops); - - return cmdq_host_init_crypto_spec(host, &cmdq_ksm_ops); -} - -void cmdq_crypto_setup_rq_keyslot_manager(struct cmdq_host *host, - struct request_queue *q) -{ - if (host->crypto_vops && host->crypto_vops->setup_rq_keyslot_manager) - return host->crypto_vops->setup_rq_keyslot_manager(host, q); - - return cmdq_crypto_setup_rq_keyslot_manager_spec(host, q); -} - -void cmdq_crypto_destroy_rq_keyslot_manager(struct cmdq_host *host, - struct request_queue *q) -{ - if (host->crypto_vops && host->crypto_vops->destroy_rq_keyslot_manager) - return host->crypto_vops->destroy_rq_keyslot_manager(host, q); - - return cmdq_crypto_destroy_rq_keyslot_manager_spec(host, q); -} - -int cmdq_crypto_get_ctx(struct cmdq_host *host, - struct mmc_request *mrq, - u64 *ice_ctx) -{ - if (host->crypto_vops && host->crypto_vops->prepare_crypto_desc) - return host->crypto_vops->prepare_crypto_desc(host, mrq, - ice_ctx); - - return cmdq_prepare_crypto_desc_spec(host, mrq, ice_ctx); -} - -int cmdq_complete_crypto_desc(struct cmdq_host *host, - struct mmc_request 
*mrq, - u64 *ice_ctx) -{ - if (host->crypto_vops && host->crypto_vops->complete_crypto_desc) - return host->crypto_vops->complete_crypto_desc(host, mrq, - ice_ctx); - - return 0; -} - -void cmdq_crypto_debug(struct cmdq_host *host) -{ - if (host->crypto_vops && host->crypto_vops->debug) - host->crypto_vops->debug(host); -} - -void cmdq_crypto_set_vops(struct cmdq_host *host, - struct cmdq_host_crypto_variant_ops *crypto_vops) -{ - if (host) - host->crypto_vops = crypto_vops; -} - -int cmdq_crypto_suspend(struct cmdq_host *host) -{ - if (host->crypto_vops && host->crypto_vops->suspend) - return host->crypto_vops->suspend(host); - - return 0; -} - -int cmdq_crypto_resume(struct cmdq_host *host) -{ - if (host->crypto_vops && host->crypto_vops->resume) - return host->crypto_vops->resume(host); - - return 0; -} - -int cmdq_crypto_reset(struct cmdq_host *host) -{ - if (host->crypto_vops && host->crypto_vops->reset) - return host->crypto_vops->reset(host); - - return 0; -} - -int cmdq_crypto_recovery_finish(struct cmdq_host *host) -{ - if (host->crypto_vops && host->crypto_vops->recovery_finish) - return host->crypto_vops->recovery_finish(host); - - /* Reset/Recovery might clear all keys, so reprogram all the keys. */ - keyslot_manager_reprogram_all_keys(host->ksm); - - return 0; -} diff --git a/drivers/mmc/host/cmdq_hci-crypto.h b/drivers/mmc/host/cmdq_hci-crypto.h deleted file mode 100644 index 8fb44d1eff8b..000000000000 --- a/drivers/mmc/host/cmdq_hci-crypto.h +++ /dev/null @@ -1,188 +0,0 @@ -/* Copyright 2019 Google LLC - * - * Copyright (c) 2020 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _CMDQ_CRYPTO_H -#define _CMDQ_CRYPTO_H - -#ifdef CONFIG_MMC_CQ_HCI_CRYPTO -#include -#include "cmdq_hci.h" - -static inline int cmdq_num_keyslots(struct cmdq_host *host) -{ - return host->crypto_capabilities.config_count + 1; -} - -static inline bool cmdq_keyslot_valid(struct cmdq_host *host, - unsigned int slot) -{ - /* - * The actual number of configurations supported is (CFGC+1), so slot - * numbers range from 0 to config_count inclusive. - */ - return slot < cmdq_num_keyslots(host); -} - -static inline bool cmdq_host_is_crypto_supported(struct cmdq_host *host) -{ - return host->crypto_capabilities.reg_val != 0; -} - -static inline bool cmdq_is_crypto_enabled(struct cmdq_host *host) -{ - return host->caps & CMDQ_CAP_CRYPTO_SUPPORT; -} - -/* Functions implementing eMMC v5.2 specification behaviour */ -int cmdq_prepare_crypto_desc_spec(struct cmdq_host *host, - struct mmc_request *mrq, - u64 *ice_ctx); - -void cmdq_crypto_enable_spec(struct cmdq_host *host); - -void cmdq_crypto_disable_spec(struct cmdq_host *host); - -int cmdq_host_init_crypto_spec(struct cmdq_host *host, - const struct keyslot_mgmt_ll_ops *ksm_ops); - -void cmdq_crypto_setup_rq_keyslot_manager_spec(struct cmdq_host *host, - struct request_queue *q); - -void cmdq_crypto_destroy_rq_keyslot_manager_spec(struct cmdq_host *host, - struct request_queue *q); - -void cmdq_crypto_set_vops(struct cmdq_host *host, - struct cmdq_host_crypto_variant_ops *crypto_vops); - -/* Crypto Variant Ops Support */ - -void cmdq_crypto_enable(struct cmdq_host *host); - -void cmdq_crypto_disable(struct cmdq_host *host); - -int cmdq_host_init_crypto(struct cmdq_host *host); - -void cmdq_crypto_setup_rq_keyslot_manager(struct cmdq_host *host, - struct 
request_queue *q); - -void cmdq_crypto_destroy_rq_keyslot_manager(struct cmdq_host *host, - struct request_queue *q); - -int cmdq_crypto_get_ctx(struct cmdq_host *host, - struct mmc_request *mrq, - u64 *ice_ctx); - -int cmdq_complete_crypto_desc(struct cmdq_host *host, - struct mmc_request *mrq, - u64 *ice_ctx); - -void cmdq_crypto_debug(struct cmdq_host *host); - -int cmdq_crypto_suspend(struct cmdq_host *host); - -int cmdq_crypto_resume(struct cmdq_host *host); - -int cmdq_crypto_reset(struct cmdq_host *host); - -int cmdq_crypto_recovery_finish(struct cmdq_host *host); - -int cmdq_crypto_cap_find(void *host_p, enum blk_crypto_mode_num crypto_mode, - unsigned int data_unit_size); - -#else /* CONFIG_MMC_CQ_HCI_CRYPTO */ - -static inline bool cmdq_keyslot_valid(struct cmdq_host *host, - unsigned int slot) -{ - return false; -} - -static inline bool cmdq_host_is_crypto_supported(struct cmdq_host *host) -{ - return false; -} - -static inline bool cmdq_is_crypto_enabled(struct cmdq_host *host) -{ - return false; -} - -static inline void cmdq_crypto_enable(struct cmdq_host *host) { } - -static inline int cmdq_crypto_cap_find(void *host_p, - enum blk_crypto_mode_num crypto_mode, - unsigned int data_unit_size) -{ - return 0; -} - -static inline void cmdq_crypto_disable(struct cmdq_host *host) { } - -static inline int cmdq_host_init_crypto(struct cmdq_host *host) -{ - return 0; -} - -static inline void cmdq_crypto_setup_rq_keyslot_manager( - struct cmdq_host *host, - struct request_queue *q) { } - -static inline void -cmdq_crypto_destroy_rq_keyslot_manager(struct cmdq_host *host, - struct request_queue *q) { } - -static inline int cmdq_crypto_get_ctx(struct cmdq_host *host, - struct mmc_request *mrq, - u64 *ice_ctx) -{ - *ice_ctx = 0; - return 0; -} - -static inline int cmdq_complete_crypto_desc(struct cmdq_host *host, - struct mmc_request *mrq, - u64 *ice_ctx) -{ - return 0; -} - -static inline void cmdq_crypto_debug(struct cmdq_host *host) { } - -static inline void 
cmdq_crypto_set_vops(struct cmdq_host *host, - struct cmdq_host_crypto_variant_ops *crypto_vops) { } - -static inline int cmdq_crypto_suspend(struct cmdq_host *host) -{ - return 0; -} - -static inline int cmdq_crypto_resume(struct cmdq_host *host) -{ - return 0; -} - -static inline int cmdq_crypto_reset(struct cmdq_host *host) -{ - return 0; -} - -static inline int cmdq_crypto_recovery_finish(struct cmdq_host *host) -{ - return 0; -} - -#endif /* CONFIG_MMC_CMDQ_CRYPTO */ -#endif /* _CMDQ_CRYPTO_H */ - - diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h index 65828d8026dc..56932b7f42f0 100644 --- a/drivers/mmc/host/cmdq_hci.h +++ b/drivers/mmc/host/cmdq_hci.h @@ -13,7 +13,6 @@ #define LINUX_MMC_CQ_HCI_H #include #include -#include /* registers */ /* version */ @@ -156,8 +155,6 @@ #define CQ_VENDOR_CFG 0x100 #define CMDQ_SEND_STATUS_TRIGGER (1 << 31) -struct cmdq_host; - /* CCAP - Crypto Capability 100h */ union cmdq_crypto_capabilities { __le32 reg_val; @@ -214,31 +211,6 @@ union cmdq_crypto_cfg_entry { }; }; -struct cmdq_host_crypto_variant_ops { - void (*setup_rq_keyslot_manager)(struct cmdq_host *host, - struct request_queue *q); - void (*destroy_rq_keyslot_manager)(struct cmdq_host *host, - struct request_queue *q); -#ifdef CONFIG_BLK_INLINE_ENCRYPTION - int (*host_init_crypto)(struct cmdq_host *host, - const struct keyslot_mgmt_ll_ops *ksm_ops); -#endif - void (*enable)(struct cmdq_host *host); - void (*disable)(struct cmdq_host *host); - int (*suspend)(struct cmdq_host *host); - int (*resume)(struct cmdq_host *host); - int (*debug)(struct cmdq_host *host); - int (*prepare_crypto_desc)(struct cmdq_host *host, - struct mmc_request *mrq, u64 *ice_ctx); - int (*complete_crypto_desc)(struct cmdq_host *host, - struct mmc_request *mrq, u64 *ice_ctx); - int (*reset)(struct cmdq_host *host); - int (*recovery_finish)(struct cmdq_host *host); - int (*program_key)(struct cmdq_host *host, - const union cmdq_crypto_cfg_entry *cfg, int slot); - void 
*priv; -}; - struct task_history { u64 task; bool is_dcmd; From 2d7d753d65ddf160a02bbd4b45c543fc3996ff3c Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 19:54:56 +0530 Subject: [PATCH 129/141] Revert "mmc: cqhci: eMMC JEDEC v5.2 crypto spec addition" This reverts commit 98c06766bef41b5c080338d65124d87438b7676e. Signed-off-by: UtsavBalar1231 --- drivers/mmc/host/cmdq_hci.h | 60 ------------------------------------- 1 file changed, 60 deletions(-) diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h index 56932b7f42f0..0b7c38710c34 100644 --- a/drivers/mmc/host/cmdq_hci.h +++ b/drivers/mmc/host/cmdq_hci.h @@ -12,16 +12,12 @@ #ifndef LINUX_MMC_CQ_HCI_H #define LINUX_MMC_CQ_HCI_H #include -#include /* registers */ /* version */ #define CQVER 0x00 /* capabilities */ #define CQCAP 0x04 -#define CQ_CAP_CS (1 << 28) -#define CQ_CCAP 0x100 -#define CQ_CRYPTOCAP 0x104 /* configuration */ #define CQCFG 0x08 #define CQ_DCMD 0x00001000 @@ -155,62 +151,6 @@ #define CQ_VENDOR_CFG 0x100 #define CMDQ_SEND_STATUS_TRIGGER (1 << 31) -/* CCAP - Crypto Capability 100h */ -union cmdq_crypto_capabilities { - __le32 reg_val; - struct { - u8 num_crypto_cap; - u8 config_count; - u8 reserved; - u8 config_array_ptr; - }; -}; - -enum cmdq_crypto_key_size { - CMDQ_CRYPTO_KEY_SIZE_INVALID = 0x0, - CMDQ_CRYPTO_KEY_SIZE_128 = 0x1, - CMDQ_CRYPTO_KEY_SIZE_192 = 0x2, - CMDQ_CRYPTO_KEY_SIZE_256 = 0x3, - CMDQ_CRYPTO_KEY_SIZE_512 = 0x4, -}; - -enum cmdq_crypto_alg { - CMDQ_CRYPTO_ALG_AES_XTS = 0x0, - CMDQ_CRYPTO_ALG_BITLOCKER_AES_CBC = 0x1, - CMDQ_CRYPTO_ALG_AES_ECB = 0x2, - CMDQ_CRYPTO_ALG_ESSIV_AES_CBC = 0x3, -}; - -/* x-CRYPTOCAP - Crypto Capability X */ -union cmdq_crypto_cap_entry { - __le32 reg_val; - struct { - u8 algorithm_id; - u8 sdus_mask; /* Supported data unit size mask */ - u8 key_size; - u8 reserved; - }; -}; - -#define CMDQ_CRYPTO_CONFIGURATION_ENABLE (1 << 7) -#define CMDQ_CRYPTO_KEY_MAX_SIZE 64 - -/* x-CRYPTOCFG - Crypto Configuration X */ -union 
cmdq_crypto_cfg_entry { - __le32 reg_val[32]; - struct { - u8 crypto_key[CMDQ_CRYPTO_KEY_MAX_SIZE]; - u8 data_unit_size; - u8 crypto_cap_idx; - u8 reserved_1; - u8 config_enable; - u8 reserved_multi_host; - u8 reserved_2; - u8 vsb[2]; - u8 reserved_3[56]; - }; -}; - struct task_history { u64 task; bool is_dcmd; From e0fd587c43446ec2bf7bf5445584d283130c620f Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 19:54:57 +0530 Subject: [PATCH 130/141] Revert "mmc: host: Use request queue pointer for mmc crypto" This reverts commit ffc41530ad3b923d41a64cd890060607de770df0. Signed-off-by: UtsavBalar1231 --- drivers/mmc/core/queue.c | 3 --- drivers/mmc/host/cmdq_hci.c | 7 ------- include/linux/mmc/host.h | 7 ------- 3 files changed, 17 deletions(-) diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index ba338d2a1c00..ecc794323729 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -437,9 +437,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, /* hook for pm qos cmdq init */ if (card->host->cmdq_ops->init) card->host->cmdq_ops->init(card->host); - if (host->cmdq_ops->cqe_crypto_update_queue) - host->cmdq_ops->cqe_crypto_update_queue(host, - mq->queue); mq->thread = kthread_run(mmc_cmdq_thread, mq, "mmc-cmdqd/%d%s", host->index, diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c index f1e4ba86f5d4..87c1cb7abf39 100644 --- a/drivers/mmc/host/cmdq_hci.c +++ b/drivers/mmc/host/cmdq_hci.c @@ -1277,12 +1277,6 @@ static int cmdq_late_init(struct mmc_host *mmc) return 0; } -static void cqhci_crypto_update_queue(struct mmc_host *mmc, - struct request_queue *queue) -{ - //struct cqhci_host *cq_host = mmc->cqe_private; -} - static const struct mmc_cmdq_host_ops cmdq_host_ops = { .init = cmdq_late_init, .enable = cmdq_enable, @@ -1292,7 +1286,6 @@ static const struct mmc_cmdq_host_ops cmdq_host_ops = { .halt = cmdq_halt, .reset = cmdq_reset, .dumpstate = cmdq_dumpstate, - .cqe_crypto_update_queue 
= cqhci_crypto_update_queue, }; struct cmdq_host *cmdq_pltfm_init(struct platform_device *pdev) diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index ab2c6af4dca1..3c2b261b9c79 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -122,13 +122,6 @@ struct mmc_cmdq_host_ops { int (*halt)(struct mmc_host *host, bool halt); void (*reset)(struct mmc_host *host, bool soft); void (*dumpstate)(struct mmc_host *host); - /* - * Update the request queue with keyslot manager details. This keyslot - * manager will be used by block crypto to configure the crypto Engine - * for data encryption. - */ - void (*cqe_crypto_update_queue)(struct mmc_host *host, - struct request_queue *queue); }; struct mmc_host_ops { From d2a66f921d629795dacf26f1e59a738c766b3f44 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 19:54:57 +0530 Subject: [PATCH 131/141] Revert "Variant ops for UFS crypto and new crypto lib" This reverts commit e62b481e0fdfb96cc198ae6af0ce8b350bf693ef. 
Signed-off-by: UtsavBalar1231 --- drivers/scsi/ufs/Kconfig | 8 - drivers/scsi/ufs/Makefile | 1 - drivers/scsi/ufs/ufs-qcom.c | 7 - drivers/scsi/ufs/ufshcd-crypto-qti.c | 304 ---------------- drivers/scsi/ufs/ufshcd-crypto-qti.h | 50 --- drivers/scsi/ufs/ufshcd-crypto.c | 8 +- drivers/soc/qcom/Kconfig | 17 - drivers/soc/qcom/Makefile | 2 - drivers/soc/qcom/crypto-qti-common.c | 467 ------------------------- drivers/soc/qcom/crypto-qti-ice-regs.h | 163 --------- drivers/soc/qcom/crypto-qti-platform.h | 47 --- drivers/soc/qcom/crypto-qti-tz.c | 101 ------ drivers/soc/qcom/crypto-qti-tz.h | 71 ---- include/linux/crypto-qti-common.h | 95 ----- 14 files changed, 4 insertions(+), 1337 deletions(-) delete mode 100644 drivers/scsi/ufs/ufshcd-crypto-qti.c delete mode 100644 drivers/scsi/ufs/ufshcd-crypto-qti.h delete mode 100644 drivers/soc/qcom/crypto-qti-common.c delete mode 100644 drivers/soc/qcom/crypto-qti-ice-regs.h delete mode 100644 drivers/soc/qcom/crypto-qti-platform.h delete mode 100644 drivers/soc/qcom/crypto-qti-tz.c delete mode 100644 drivers/soc/qcom/crypto-qti-tz.h delete mode 100644 include/linux/crypto-qti-common.h diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 8fa2313508ea..e63ed53620d7 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -131,11 +131,3 @@ config SCSI_UFS_CRYPTO Enabling this makes it possible for the kernel to use the crypto capabilities of the UFS device (if present) to perform crypto operations on data being transferred to/from the device. - -config SCSI_UFS_CRYPTO_QTI - tristate "Vendor specific UFS Crypto Engine Support" - depends on SCSI_UFS_CRYPTO - help - Enable Vendor Crypto Engine Support in UFS - Enabling this allows kernel to use UFS crypto operations defined - and implemented by QTI. 
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index fe4c092c006b..93a2e1a10335 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -10,4 +10,3 @@ obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o obj-$(CONFIG_SCSI_UFS_TEST) += ufs_test.o obj-$(CONFIG_DEBUG_FS) += ufs-debugfs.o ufs-qcom-debugfs.o ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o -ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO_QTI) += ufshcd-crypto-qti.o diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 4b76913104f7..c93a6f5048d4 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -30,7 +30,6 @@ #include "ufshci.h" #include "ufs-qcom-debugfs.h" #include "ufs_quirks.h" -#include "ufshcd-crypto-qti.h" #define MAX_PROP_SIZE 32 #define VDDP_REF_CLK_MIN_UV 1200000 @@ -2104,12 +2103,6 @@ static int ufs_qcom_init(struct ufs_hba *hba) /* restore the secure configuration */ ufs_qcom_update_sec_cfg(hba, true); - /* - * Set the vendor specific ops needed for ICE. - * Default implementation if the ops are not set. - */ - ufshcd_crypto_qti_set_vops(hba); - err = ufs_qcom_bus_register(host); if (err) goto out_variant_clear; diff --git a/drivers/scsi/ufs/ufshcd-crypto-qti.c b/drivers/scsi/ufs/ufshcd-crypto-qti.c deleted file mode 100644 index f06f2899dcac..000000000000 --- a/drivers/scsi/ufs/ufshcd-crypto-qti.c +++ /dev/null @@ -1,304 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2020, Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#include -#include -#include - -#include "ufshcd-crypto-qti.h" - -#define MINIMUM_DUN_SIZE 512 -#define MAXIMUM_DUN_SIZE 65536 - -#define NUM_KEYSLOTS(hba) (hba->crypto_capabilities.config_count + 1) - -static struct ufs_hba_crypto_variant_ops ufshcd_crypto_qti_variant_ops = { - .hba_init_crypto = ufshcd_crypto_qti_init_crypto, - .enable = ufshcd_crypto_qti_enable, - .disable = ufshcd_crypto_qti_disable, - .resume = ufshcd_crypto_qti_resume, - .debug = ufshcd_crypto_qti_debug, -}; - -static uint8_t get_data_unit_size_mask(unsigned int data_unit_size) -{ - if (data_unit_size < MINIMUM_DUN_SIZE || - data_unit_size > MAXIMUM_DUN_SIZE || - !is_power_of_2(data_unit_size)) - return 0; - - return data_unit_size / MINIMUM_DUN_SIZE; -} - -static bool ice_cap_idx_valid(struct ufs_hba *hba, - unsigned int cap_idx) -{ - return cap_idx < hba->crypto_capabilities.num_crypto_cap; -} - -void ufshcd_crypto_qti_enable(struct ufs_hba *hba) -{ - int err = 0; - - if (!ufshcd_hba_is_crypto_supported(hba)) - return; - - err = crypto_qti_enable(hba->crypto_vops->priv); - if (err) { - pr_err("%s: Error enabling crypto, err %d\n", - __func__, err); - ufshcd_crypto_qti_disable(hba); - } - - ufshcd_crypto_enable_spec(hba); - -} - -void ufshcd_crypto_qti_disable(struct ufs_hba *hba) -{ - ufshcd_crypto_disable_spec(hba); - crypto_qti_disable(hba->crypto_vops->priv); -} - - -static int ufshcd_crypto_qti_keyslot_program(struct keyslot_manager *ksm, - const struct blk_crypto_key *key, - unsigned int slot) -{ - struct ufs_hba *hba = keyslot_manager_private(ksm); - int err = 0; - u8 data_unit_mask; - int crypto_alg_id; - - crypto_alg_id = ufshcd_crypto_cap_find(hba, key->crypto_mode, - key->data_unit_size); - - if (!ufshcd_is_crypto_enabled(hba) || - !ufshcd_keyslot_valid(hba, slot) || - !ice_cap_idx_valid(hba, crypto_alg_id)) - return -EINVAL; - - data_unit_mask = get_data_unit_size_mask(key->data_unit_size); - - if (!(data_unit_mask & - hba->crypto_cap_array[crypto_alg_id].sdus_mask)) 
- return -EINVAL; - - pm_runtime_get_sync(hba->dev); - err = ufshcd_hold(hba, false); - if (err) { - pr_err("%s: failed to enable clocks, err %d\n", __func__, err); - return err; - } - - err = crypto_qti_keyslot_program(hba->crypto_vops->priv, key, slot, - data_unit_mask, crypto_alg_id); - if (err) { - pr_err("%s: failed with error %d\n", __func__, err); - ufshcd_release(hba, false); - pm_runtime_put_sync(hba->dev); - return err; - } - - ufshcd_release(hba, false); - pm_runtime_put_sync(hba->dev); - - return 0; -} - -static int ufshcd_crypto_qti_keyslot_evict(struct keyslot_manager *ksm, - const struct blk_crypto_key *key, - unsigned int slot) -{ - int err = 0; - struct ufs_hba *hba = keyslot_manager_private(ksm); - - if (!ufshcd_is_crypto_enabled(hba) || - !ufshcd_keyslot_valid(hba, slot)) - return -EINVAL; - - pm_runtime_get_sync(hba->dev); - err = ufshcd_hold(hba, false); - if (err) { - pr_err("%s: failed to enable clocks, err %d\n", __func__, err); - return err; - } - - err = crypto_qti_keyslot_evict(hba->crypto_vops->priv, slot); - if (err) { - pr_err("%s: failed with error %d\n", - __func__, err); - ufshcd_release(hba, false); - pm_runtime_put_sync(hba->dev); - return err; - } - - ufshcd_release(hba, false); - pm_runtime_put_sync(hba->dev); - - return err; -} - -static int ufshcd_crypto_qti_derive_raw_secret(struct keyslot_manager *ksm, - const u8 *wrapped_key, - unsigned int wrapped_key_size, - u8 *secret, - unsigned int secret_size) -{ - return crypto_qti_derive_raw_secret(wrapped_key, wrapped_key_size, - secret, secret_size); -} - -static const struct keyslot_mgmt_ll_ops ufshcd_crypto_qti_ksm_ops = { - .keyslot_program = ufshcd_crypto_qti_keyslot_program, - .keyslot_evict = ufshcd_crypto_qti_keyslot_evict, - .derive_raw_secret = ufshcd_crypto_qti_derive_raw_secret, -}; - -static enum blk_crypto_mode_num ufshcd_blk_crypto_qti_mode_num_for_alg_dusize( - enum ufs_crypto_alg ufs_crypto_alg, - enum ufs_crypto_key_size key_size) -{ - /* - * This is currently the 
only mode that UFS and blk-crypto both support. - */ - if (ufs_crypto_alg == UFS_CRYPTO_ALG_AES_XTS && - key_size == UFS_CRYPTO_KEY_SIZE_256) - return BLK_ENCRYPTION_MODE_AES_256_XTS; - - return BLK_ENCRYPTION_MODE_INVALID; -} - -static int ufshcd_hba_init_crypto_qti_spec(struct ufs_hba *hba, - const struct keyslot_mgmt_ll_ops *ksm_ops) -{ - int cap_idx = 0; - int err = 0; - unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX]; - enum blk_crypto_mode_num blk_mode_num; - - /* Default to disabling crypto */ - hba->caps &= ~UFSHCD_CAP_CRYPTO; - - if (!(hba->capabilities & MASK_CRYPTO_SUPPORT)) { - err = -ENODEV; - goto out; - } - - /* - * Crypto Capabilities should never be 0, because the - * config_array_ptr > 04h. So we use a 0 value to indicate that - * crypto init failed, and can't be enabled. - */ - hba->crypto_capabilities.reg_val = - cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP)); - hba->crypto_cfg_register = - (u32)hba->crypto_capabilities.config_array_ptr * 0x100; - hba->crypto_cap_array = - devm_kcalloc(hba->dev, - hba->crypto_capabilities.num_crypto_cap, - sizeof(hba->crypto_cap_array[0]), - GFP_KERNEL); - if (!hba->crypto_cap_array) { - err = -ENOMEM; - goto out; - } - - memset(crypto_modes_supported, 0, sizeof(crypto_modes_supported)); - /* - * Store all the capabilities now so that we don't need to repeatedly - * access the device each time we want to know its capabilities - */ - for (cap_idx = 0; cap_idx < hba->crypto_capabilities.num_crypto_cap; - cap_idx++) { - hba->crypto_cap_array[cap_idx].reg_val = - cpu_to_le32(ufshcd_readl(hba, - REG_UFS_CRYPTOCAP + - cap_idx * sizeof(__le32))); - blk_mode_num = ufshcd_blk_crypto_qti_mode_num_for_alg_dusize( - hba->crypto_cap_array[cap_idx].algorithm_id, - hba->crypto_cap_array[cap_idx].key_size); - if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID) - continue; - crypto_modes_supported[blk_mode_num] |= - hba->crypto_cap_array[cap_idx].sdus_mask * 512; - } - - hba->ksm = 
keyslot_manager_create(ufshcd_num_keyslots(hba), ksm_ops, - BLK_CRYPTO_FEATURE_STANDARD_KEYS | - BLK_CRYPTO_FEATURE_WRAPPED_KEYS, - crypto_modes_supported, hba); - - if (!hba->ksm) { - err = -ENOMEM; - goto out; - } - pr_debug("%s: keyslot manager created\n", __func__); - - return 0; - -out: - /* Indicate that init failed by setting crypto_capabilities to 0 */ - hba->crypto_capabilities.reg_val = 0; - return err; -} - -int ufshcd_crypto_qti_init_crypto(struct ufs_hba *hba, - const struct keyslot_mgmt_ll_ops *ksm_ops) -{ - int err = 0; - struct platform_device *pdev = to_platform_device(hba->dev); - void __iomem *mmio_base; - struct resource *mem_res; - - mem_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "ufs_ice"); - mmio_base = devm_ioremap_resource(hba->dev, mem_res); - if (IS_ERR(mmio_base)) { - pr_err("%s: Unable to get ufs_crypto mmio base\n", __func__); - return PTR_ERR(mmio_base); - } - - err = ufshcd_hba_init_crypto_qti_spec(hba, &ufshcd_crypto_qti_ksm_ops); - if (err) { - pr_err("%s: Error initiating crypto capabilities, err %d\n", - __func__, err); - return err; - } - - err = crypto_qti_init_crypto(hba->dev, - mmio_base, (void **)&hba->crypto_vops->priv); - if (err) { - pr_err("%s: Error initiating crypto, err %d\n", - __func__, err); - } - return err; -} - -int ufshcd_crypto_qti_debug(struct ufs_hba *hba) -{ - return crypto_qti_debug(hba->crypto_vops->priv); -} - -void ufshcd_crypto_qti_set_vops(struct ufs_hba *hba) -{ - return ufshcd_crypto_set_vops(hba, &ufshcd_crypto_qti_variant_ops); -} - -int ufshcd_crypto_qti_resume(struct ufs_hba *hba, - enum ufs_pm_op pm_op) -{ - return crypto_qti_resume(hba->crypto_vops->priv); -} diff --git a/drivers/scsi/ufs/ufshcd-crypto-qti.h b/drivers/scsi/ufs/ufshcd-crypto-qti.h deleted file mode 100644 index 1e75ce0a5c92..000000000000 --- a/drivers/scsi/ufs/ufshcd-crypto-qti.h +++ /dev/null @@ -1,50 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _UFSHCD_CRYPTO_QTI_H -#define _UFSHCD_CRYPTO_QTI_H - -#include "ufshcd.h" -#include "ufshcd-crypto.h" - -void ufshcd_crypto_qti_enable(struct ufs_hba *hba); - -void ufshcd_crypto_qti_disable(struct ufs_hba *hba); - -int ufshcd_crypto_qti_init_crypto(struct ufs_hba *hba, - const struct keyslot_mgmt_ll_ops *ksm_ops); - -void ufshcd_crypto_qti_setup_rq_keyslot_manager(struct ufs_hba *hba, - struct request_queue *q); - -void ufshcd_crypto_qti_destroy_rq_keyslot_manager(struct ufs_hba *hba, - struct request_queue *q); - -int ufshcd_crypto_qti_prepare_lrbp_crypto(struct ufs_hba *hba, - struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp); - -int ufshcd_crypto_qti_complete_lrbp_crypto(struct ufs_hba *hba, - struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp); - -int ufshcd_crypto_qti_debug(struct ufs_hba *hba); - -int ufshcd_crypto_qti_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op); - -int ufshcd_crypto_qti_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op); - -#ifdef CONFIG_SCSI_UFS_CRYPTO_QTI -void ufshcd_crypto_qti_set_vops(struct ufs_hba *hba); -#else -static inline void ufshcd_crypto_qti_set_vops(struct ufs_hba *hba) -{} -#endif /* CONFIG_SCSI_UFS_CRYPTO_QTI */ -#endif /* _UFSHCD_CRYPTO_QTI_H */ diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c index 4fb86fbf097e..28abedfbf609 100644 --- a/drivers/scsi/ufs/ufshcd-crypto.c +++ b/drivers/scsi/ufs/ufshcd-crypto.c @@ -128,8 +128,8 @@ static int ufshcd_program_key(struct ufs_hba *hba, pm_runtime_get_sync(hba->dev); 
ufshcd_hold(hba, false); - if (hba->var->vops->program_key) { - err = hba->var->vops->program_key(hba, cfg, slot); + if (hba->vops->program_key) { + err = hba->vops->program_key(hba, cfg, slot); goto out; } @@ -154,14 +154,14 @@ static int ufshcd_program_key(struct ufs_hba *hba, wmb(); err = 0; out: - ufshcd_release(hba, false); + ufshcd_release(hba); pm_runtime_put_sync(hba->dev); return err; } static void ufshcd_clear_keyslot(struct ufs_hba *hba, int slot) { - union ufs_crypto_cfg_entry cfg = { {0} }; + union ufs_crypto_cfg_entry cfg = { 0 }; int err; err = ufshcd_program_key(hba, &cfg, slot); diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index fea849d1aa27..888290ee002f 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -1059,23 +1059,6 @@ config QCOM_SOC_INFO based on the chip ID and querying the SoC revision. This information is loaded by the bootloader into SMEM during the boot up process. -config QTI_CRYPTO_COMMON - tristate "Enable common crypto functionality used for FBE" - depends on BLK_INLINE_ENCRYPTION - help - Say 'Y' to enable the common crypto implementation to be used by - different storage layers such as UFS and EMMC for file based hardware - encryption. This library implements API to program and evict - keys using Trustzone or Hardware Key Manager. - -config QTI_CRYPTO_TZ - tristate "Enable Trustzone to be used for FBE" - depends on QTI_CRYPTO_COMMON - help - Say 'Y' to enable routing crypto requests to Trustzone while - performing hardware based file encryption. This means keys are - programmed and managed through SCM calls to TZ where ICE driver - will configure keys. 
endmenu config QCOM_HYP_CORE_CTL diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 862f1d1c705f..69d21a8a9ef0 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -120,5 +120,3 @@ obj-$(CONFIG_CPU_V7) += idle-v7.o obj-$(CONFIG_MSM_BAM_DMUX) += bam_dmux.o obj-$(CONFIG_WCNSS_CORE) += wcnss/ obj-$(CONFIG_RENAME_BLOCK_DEVICE) += rename_block_device.o -obj-$(CONFIG_QTI_CRYPTO_COMMON) += crypto-qti-common.o -obj-$(CONFIG_QTI_CRYPTO_TZ) += crypto-qti-tz.o diff --git a/drivers/soc/qcom/crypto-qti-common.c b/drivers/soc/qcom/crypto-qti-common.c deleted file mode 100644 index cd2eaef78a10..000000000000 --- a/drivers/soc/qcom/crypto-qti-common.c +++ /dev/null @@ -1,467 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2020, Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#include -#include "crypto-qti-ice-regs.h" -#include "crypto-qti-platform.h" - -static int ice_check_fuse_setting(struct crypto_vops_qti_entry *ice_entry) -{ - uint32_t regval; - uint32_t major, minor; - - major = (ice_entry->ice_hw_version & ICE_CORE_MAJOR_REV_MASK) >> - ICE_CORE_MAJOR_REV; - minor = (ice_entry->ice_hw_version & ICE_CORE_MINOR_REV_MASK) >> - ICE_CORE_MINOR_REV; - - /* Check fuse setting is not supported on ICE 3.2 onwards */ - if ((major == 0x03) && (minor >= 0x02)) - return 0; - regval = ice_readl(ice_entry, ICE_REGS_FUSE_SETTING); - regval &= (ICE_FUSE_SETTING_MASK | - ICE_FORCE_HW_KEY0_SETTING_MASK | - ICE_FORCE_HW_KEY1_SETTING_MASK); - - if (regval) { - pr_err("%s: error: ICE_ERROR_HW_DISABLE_FUSE_BLOWN\n", - __func__); - return -EPERM; - } - return 0; -} - -static int ice_check_version(struct crypto_vops_qti_entry *ice_entry) -{ - uint32_t version, major, minor, step; - - version = ice_readl(ice_entry, ICE_REGS_VERSION); - major = (version & ICE_CORE_MAJOR_REV_MASK) >> ICE_CORE_MAJOR_REV; - minor = (version & ICE_CORE_MINOR_REV_MASK) >> ICE_CORE_MINOR_REV; - step = (version & ICE_CORE_STEP_REV_MASK) >> ICE_CORE_STEP_REV; - - if (major < ICE_CORE_CURRENT_MAJOR_VERSION) { - pr_err("%s: Unknown ICE device at %lu, rev %d.%d.%d\n", - __func__, (unsigned long)ice_entry->icemmio_base, - major, minor, step); - return -ENODEV; - } - - ice_entry->ice_hw_version = version; - - return 0; -} - -int crypto_qti_init_crypto(struct device *dev, void __iomem *mmio_base, - void **priv_data) -{ - int err = 0; - struct crypto_vops_qti_entry *ice_entry; - - ice_entry = devm_kzalloc(dev, - sizeof(struct crypto_vops_qti_entry), - GFP_KERNEL); - if (!ice_entry) - return -ENOMEM; - - ice_entry->icemmio_base = mmio_base; - ice_entry->flags = 0; - - err = ice_check_version(ice_entry); - if (err) { - pr_err("%s: check version failed, err %d\n", __func__, err); - return err; - } - - err = ice_check_fuse_setting(ice_entry); - if (err) - return err; - - 
*priv_data = (void *)ice_entry; - - return err; -} - -static void ice_low_power_and_optimization_enable( - struct crypto_vops_qti_entry *ice_entry) -{ - uint32_t regval; - - regval = ice_readl(ice_entry, ICE_REGS_ADVANCED_CONTROL); - /* Enable low power mode sequence - * [0]-0,[1]-0,[2]-0,[3]-7,[4]-0,[5]-0,[6]-0,[7]-0, - * Enable CONFIG_CLK_GATING, STREAM2_CLK_GATING and STREAM1_CLK_GATING - */ - regval |= 0x7000; - /* Optimization enable sequence - */ - regval |= 0xD807100; - ice_writel(ice_entry, regval, ICE_REGS_ADVANCED_CONTROL); - /* - * Memory barrier - to ensure write completion before next transaction - */ - wmb(); -} - -static int ice_wait_bist_status(struct crypto_vops_qti_entry *ice_entry) -{ - int count; - uint32_t regval; - - for (count = 0; count < QTI_ICE_MAX_BIST_CHECK_COUNT; count++) { - regval = ice_readl(ice_entry, ICE_REGS_BIST_STATUS); - if (!(regval & ICE_BIST_STATUS_MASK)) - break; - udelay(50); - } - - if (regval) { - pr_err("%s: wait bist status failed, reg %d\n", - __func__, regval); - return -ETIMEDOUT; - } - - return 0; -} - -static void ice_enable_intr(struct crypto_vops_qti_entry *ice_entry) -{ - uint32_t regval; - - regval = ice_readl(ice_entry, ICE_REGS_NON_SEC_IRQ_MASK); - regval &= ~ICE_REGS_NON_SEC_IRQ_MASK; - ice_writel(ice_entry, regval, ICE_REGS_NON_SEC_IRQ_MASK); - /* - * Memory barrier - to ensure write completion before next transaction - */ - wmb(); -} - -static void ice_disable_intr(struct crypto_vops_qti_entry *ice_entry) -{ - uint32_t regval; - - regval = ice_readl(ice_entry, ICE_REGS_NON_SEC_IRQ_MASK); - regval |= ICE_REGS_NON_SEC_IRQ_MASK; - ice_writel(ice_entry, regval, ICE_REGS_NON_SEC_IRQ_MASK); - /* - * Memory barrier - to ensure write completion before next transaction - */ - wmb(); -} - -int crypto_qti_enable(void *priv_data) -{ - int err = 0; - struct crypto_vops_qti_entry *ice_entry; - - ice_entry = (struct crypto_vops_qti_entry *) priv_data; - if (!ice_entry) { - pr_err("%s: vops ice data is invalid\n", 
__func__); - return -EINVAL; - } - - ice_low_power_and_optimization_enable(ice_entry); - err = ice_wait_bist_status(ice_entry); - if (err) - return err; - ice_enable_intr(ice_entry); - - return err; -} - -void crypto_qti_disable(void *priv_data) -{ - struct crypto_vops_qti_entry *ice_entry; - - ice_entry = (struct crypto_vops_qti_entry *) priv_data; - if (!ice_entry) { - pr_err("%s: vops ice data is invalid\n", __func__); - return; - } - - crypto_qti_disable_platform(ice_entry); - ice_disable_intr(ice_entry); -} - -int crypto_qti_resume(void *priv_data) -{ - int err = 0; - struct crypto_vops_qti_entry *ice_entry; - - ice_entry = (struct crypto_vops_qti_entry *) priv_data; - if (!ice_entry) { - pr_err("%s: vops ice data is invalid\n", __func__); - return -EINVAL; - } - - err = ice_wait_bist_status(ice_entry); - - return err; -} - -static void ice_dump_test_bus(struct crypto_vops_qti_entry *ice_entry) -{ - uint32_t regval = 0x1; - uint32_t val; - uint8_t bus_selector; - uint8_t stream_selector; - - pr_err("ICE TEST BUS DUMP:\n"); - - for (bus_selector = 0; bus_selector <= 0xF; bus_selector++) { - regval = 0x1; /* enable test bus */ - regval |= bus_selector << 28; - if (bus_selector == 0xD) - continue; - ice_writel(ice_entry, regval, ICE_REGS_TEST_BUS_CONTROL); - /* - * make sure test bus selector is written before reading - * the test bus register - */ - wmb(); - val = ice_readl(ice_entry, ICE_REGS_TEST_BUS_REG); - pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n", - regval, val); - } - - pr_err("ICE TEST BUS DUMP (ICE_STREAM1_DATAPATH_TEST_BUS):\n"); - for (stream_selector = 0; stream_selector <= 0xF; stream_selector++) { - regval = 0xD0000001; /* enable stream test bus */ - regval |= stream_selector << 16; - ice_writel(ice_entry, regval, ICE_REGS_TEST_BUS_CONTROL); - /* - * make sure test bus selector is written before reading - * the test bus register - */ - wmb(); - val = ice_readl(ice_entry, ICE_REGS_TEST_BUS_REG); - 
pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n", - regval, val); - } -} - - -int crypto_qti_debug(void *priv_data) -{ - struct crypto_vops_qti_entry *ice_entry; - - ice_entry = (struct crypto_vops_qti_entry *) priv_data; - if (!ice_entry) { - pr_err("%s: vops ice data is invalid\n", __func__); - return -EINVAL; - } - - pr_err("%s: ICE Control: 0x%08x | ICE Reset: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_CONTROL), - ice_readl(ice_entry, ICE_REGS_RESET)); - - pr_err("%s: ICE Version: 0x%08x | ICE FUSE: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_VERSION), - ice_readl(ice_entry, ICE_REGS_FUSE_SETTING)); - - pr_err("%s: ICE Param1: 0x%08x | ICE Param2: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_PARAMETERS_1), - ice_readl(ice_entry, ICE_REGS_PARAMETERS_2)); - - pr_err("%s: ICE Param3: 0x%08x | ICE Param4: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_PARAMETERS_3), - ice_readl(ice_entry, ICE_REGS_PARAMETERS_4)); - - pr_err("%s: ICE Param5: 0x%08x | ICE IRQ STTS: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_PARAMETERS_5), - ice_readl(ice_entry, ICE_REGS_NON_SEC_IRQ_STTS)); - - pr_err("%s: ICE IRQ MASK: 0x%08x | ICE IRQ CLR: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_NON_SEC_IRQ_MASK), - ice_readl(ice_entry, ICE_REGS_NON_SEC_IRQ_CLR)); - - pr_err("%s: ICE INVALID CCFG ERR STTS: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_INVALID_CCFG_ERR_STTS)); - - pr_err("%s: ICE BIST Sts: 0x%08x | ICE Bypass Sts: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_BIST_STATUS), - ice_readl(ice_entry, ICE_REGS_BYPASS_STATUS)); - - pr_err("%s: ICE ADV CTRL: 0x%08x | ICE ENDIAN SWAP: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_ADVANCED_CONTROL), - ice_readl(ice_entry, ICE_REGS_ENDIAN_SWAP)); - - pr_err("%s: ICE_STM1_ERR_SYND1: 0x%08x | 
ICE_STM1_ERR_SYND2: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_STREAM1_ERROR_SYNDROME1), - ice_readl(ice_entry, ICE_REGS_STREAM1_ERROR_SYNDROME2)); - - pr_err("%s: ICE_STM2_ERR_SYND1: 0x%08x | ICE_STM2_ERR_SYND2: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_STREAM2_ERROR_SYNDROME1), - ice_readl(ice_entry, ICE_REGS_STREAM2_ERROR_SYNDROME2)); - - pr_err("%s: ICE_STM1_COUNTER1: 0x%08x | ICE_STM1_COUNTER2: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS1), - ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS2)); - - pr_err("%s: ICE_STM1_COUNTER3: 0x%08x | ICE_STM1_COUNTER4: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS3), - ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS4)); - - pr_err("%s: ICE_STM2_COUNTER1: 0x%08x | ICE_STM2_COUNTER2: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS1), - ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS2)); - - pr_err("%s: ICE_STM2_COUNTER3: 0x%08x | ICE_STM2_COUNTER4: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS3), - ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS4)); - - pr_err("%s: ICE_STM1_CTR5_MSB: 0x%08x | ICE_STM1_CTR5_LSB: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS5_MSB), - ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS5_LSB)); - - pr_err("%s: ICE_STM1_CTR6_MSB: 0x%08x | ICE_STM1_CTR6_LSB: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS6_MSB), - ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS6_LSB)); - - pr_err("%s: ICE_STM1_CTR7_MSB: 0x%08x | ICE_STM1_CTR7_LSB: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS7_MSB), - ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS7_LSB)); - - pr_err("%s: ICE_STM1_CTR8_MSB: 0x%08x | ICE_STM1_CTR8_LSB: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, 
ICE_REGS_STREAM1_COUNTERS8_MSB), - ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS8_LSB)); - - pr_err("%s: ICE_STM1_CTR9_MSB: 0x%08x | ICE_STM1_CTR9_LSB: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS9_MSB), - ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS9_LSB)); - - pr_err("%s: ICE_STM2_CTR5_MSB: 0x%08x | ICE_STM2_CTR5_LSB: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS5_MSB), - ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS5_LSB)); - - pr_err("%s: ICE_STM2_CTR6_MSB: 0x%08x | ICE_STM2_CTR6_LSB: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS6_MSB), - ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS6_LSB)); - - pr_err("%s: ICE_STM2_CTR7_MSB: 0x%08x | ICE_STM2_CTR7_LSB: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS7_MSB), - ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS7_LSB)); - - pr_err("%s: ICE_STM2_CTR8_MSB: 0x%08x | ICE_STM2_CTR8_LSB: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS8_MSB), - ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS8_LSB)); - - pr_err("%s: ICE_STM2_CTR9_MSB: 0x%08x | ICE_STM2_CTR9_LSB: 0x%08x\n", - ice_entry->ice_dev_type, - ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS9_MSB), - ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS9_LSB)); - - ice_dump_test_bus(ice_entry); - - return 0; -} - -int crypto_qti_keyslot_program(void *priv_data, - const struct blk_crypto_key *key, - unsigned int slot, - u8 data_unit_mask, int capid) -{ - int err = 0; - struct crypto_vops_qti_entry *ice_entry; - - ice_entry = (struct crypto_vops_qti_entry *) priv_data; - if (!ice_entry) { - pr_err("%s: vops ice data is invalid\n", __func__); - return -EINVAL; - } - - err = crypto_qti_program_key(ice_entry, key, slot, - data_unit_mask, capid); - if (err) { - pr_err("%s: program key failed with error %d\n", __func__, err); - err = crypto_qti_invalidate_key(ice_entry, slot); - if 
(err) { - pr_err("%s: invalidate key failed with error %d\n", - __func__, err); - return err; - } - } - - return err; -} - -int crypto_qti_keyslot_evict(void *priv_data, unsigned int slot) -{ - int err = 0; - struct crypto_vops_qti_entry *ice_entry; - - ice_entry = (struct crypto_vops_qti_entry *) priv_data; - if (!ice_entry) { - pr_err("%s: vops ice data is invalid\n", __func__); - return -EINVAL; - } - - err = crypto_qti_invalidate_key(ice_entry, slot); - if (err) { - pr_err("%s: invalidate key failed with error %d\n", - __func__, err); - return err; - } - - return err; -} - -int crypto_qti_derive_raw_secret(const u8 *wrapped_key, - unsigned int wrapped_key_size, u8 *secret, - unsigned int secret_size) -{ - int err = 0; - - if (wrapped_key_size <= RAW_SECRET_SIZE) { - pr_err("%s: Invalid wrapped_key_size: %u\n", - __func__, wrapped_key_size); - err = -EINVAL; - return err; - } - if (secret_size != RAW_SECRET_SIZE) { - pr_err("%s: Invalid secret size: %u\n", __func__, secret_size); - err = -EINVAL; - return err; - } - - memcpy(secret, wrapped_key, secret_size); - - return err; -} diff --git a/drivers/soc/qcom/crypto-qti-ice-regs.h b/drivers/soc/qcom/crypto-qti-ice-regs.h deleted file mode 100644 index d9e4cf2ad75f..000000000000 --- a/drivers/soc/qcom/crypto-qti-ice-regs.h +++ /dev/null @@ -1,163 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef _CRYPTO_INLINE_CRYPTO_ENGINE_REGS_H_ -#define _CRYPTO_INLINE_CRYPTO_ENGINE_REGS_H_ - -#include - -/* Register bits for ICE version */ -#define ICE_CORE_CURRENT_MAJOR_VERSION 0x03 - -#define ICE_CORE_STEP_REV_MASK 0xFFFF -#define ICE_CORE_STEP_REV 0 /* bit 15-0 */ -#define ICE_CORE_MAJOR_REV_MASK 0xFF000000 -#define ICE_CORE_MAJOR_REV 24 /* bit 31-24 */ -#define ICE_CORE_MINOR_REV_MASK 0xFF0000 -#define ICE_CORE_MINOR_REV 16 /* bit 23-16 */ - -#define ICE_BIST_STATUS_MASK (0xF0000000) /* bits 28-31 */ - -#define ICE_FUSE_SETTING_MASK 0x1 -#define ICE_FORCE_HW_KEY0_SETTING_MASK 0x2 -#define ICE_FORCE_HW_KEY1_SETTING_MASK 0x4 - -/* QTI ICE Registers from SWI */ -#define ICE_REGS_CONTROL 0x0000 -#define ICE_REGS_RESET 0x0004 -#define ICE_REGS_VERSION 0x0008 -#define ICE_REGS_FUSE_SETTING 0x0010 -#define ICE_REGS_PARAMETERS_1 0x0014 -#define ICE_REGS_PARAMETERS_2 0x0018 -#define ICE_REGS_PARAMETERS_3 0x001C -#define ICE_REGS_PARAMETERS_4 0x0020 -#define ICE_REGS_PARAMETERS_5 0x0024 - - -/* QTI ICE v3.X only */ -#define ICE_GENERAL_ERR_STTS 0x0040 -#define ICE_INVALID_CCFG_ERR_STTS 0x0030 -#define ICE_GENERAL_ERR_MASK 0x0044 - - -/* QTI ICE v2.X only */ -#define ICE_REGS_NON_SEC_IRQ_STTS 0x0040 -#define ICE_REGS_NON_SEC_IRQ_MASK 0x0044 - - -#define ICE_REGS_NON_SEC_IRQ_CLR 0x0048 -#define ICE_REGS_STREAM1_ERROR_SYNDROME1 0x0050 -#define ICE_REGS_STREAM1_ERROR_SYNDROME2 0x0054 -#define ICE_REGS_STREAM2_ERROR_SYNDROME1 0x0058 -#define ICE_REGS_STREAM2_ERROR_SYNDROME2 0x005C -#define ICE_REGS_STREAM1_BIST_ERROR_VEC 0x0060 -#define ICE_REGS_STREAM2_BIST_ERROR_VEC 0x0064 -#define ICE_REGS_STREAM1_BIST_FINISH_VEC 0x0068 -#define ICE_REGS_STREAM2_BIST_FINISH_VEC 0x006C -#define ICE_REGS_BIST_STATUS 0x0070 -#define ICE_REGS_BYPASS_STATUS 0x0074 -#define ICE_REGS_ADVANCED_CONTROL 0x1000 -#define ICE_REGS_ENDIAN_SWAP 0x1004 -#define ICE_REGS_TEST_BUS_CONTROL 0x1010 -#define ICE_REGS_TEST_BUS_REG 0x1014 -#define ICE_REGS_STREAM1_COUNTERS1 0x1100 -#define 
ICE_REGS_STREAM1_COUNTERS2 0x1104 -#define ICE_REGS_STREAM1_COUNTERS3 0x1108 -#define ICE_REGS_STREAM1_COUNTERS4 0x110C -#define ICE_REGS_STREAM1_COUNTERS5_MSB 0x1110 -#define ICE_REGS_STREAM1_COUNTERS5_LSB 0x1114 -#define ICE_REGS_STREAM1_COUNTERS6_MSB 0x1118 -#define ICE_REGS_STREAM1_COUNTERS6_LSB 0x111C -#define ICE_REGS_STREAM1_COUNTERS7_MSB 0x1120 -#define ICE_REGS_STREAM1_COUNTERS7_LSB 0x1124 -#define ICE_REGS_STREAM1_COUNTERS8_MSB 0x1128 -#define ICE_REGS_STREAM1_COUNTERS8_LSB 0x112C -#define ICE_REGS_STREAM1_COUNTERS9_MSB 0x1130 -#define ICE_REGS_STREAM1_COUNTERS9_LSB 0x1134 -#define ICE_REGS_STREAM2_COUNTERS1 0x1200 -#define ICE_REGS_STREAM2_COUNTERS2 0x1204 -#define ICE_REGS_STREAM2_COUNTERS3 0x1208 -#define ICE_REGS_STREAM2_COUNTERS4 0x120C -#define ICE_REGS_STREAM2_COUNTERS5_MSB 0x1210 -#define ICE_REGS_STREAM2_COUNTERS5_LSB 0x1214 -#define ICE_REGS_STREAM2_COUNTERS6_MSB 0x1218 -#define ICE_REGS_STREAM2_COUNTERS6_LSB 0x121C -#define ICE_REGS_STREAM2_COUNTERS7_MSB 0x1220 -#define ICE_REGS_STREAM2_COUNTERS7_LSB 0x1224 -#define ICE_REGS_STREAM2_COUNTERS8_MSB 0x1228 -#define ICE_REGS_STREAM2_COUNTERS8_LSB 0x122C -#define ICE_REGS_STREAM2_COUNTERS9_MSB 0x1230 -#define ICE_REGS_STREAM2_COUNTERS9_LSB 0x1234 - -#define ICE_STREAM1_PREMATURE_LBA_CHANGE (1L << 0) -#define ICE_STREAM2_PREMATURE_LBA_CHANGE (1L << 1) -#define ICE_STREAM1_NOT_EXPECTED_LBO (1L << 2) -#define ICE_STREAM2_NOT_EXPECTED_LBO (1L << 3) -#define ICE_STREAM1_NOT_EXPECTED_DUN (1L << 4) -#define ICE_STREAM2_NOT_EXPECTED_DUN (1L << 5) -#define ICE_STREAM1_NOT_EXPECTED_DUS (1L << 6) -#define ICE_STREAM2_NOT_EXPECTED_DUS (1L << 7) -#define ICE_STREAM1_NOT_EXPECTED_DBO (1L << 8) -#define ICE_STREAM2_NOT_EXPECTED_DBO (1L << 9) -#define ICE_STREAM1_NOT_EXPECTED_ENC_SEL (1L << 10) -#define ICE_STREAM2_NOT_EXPECTED_ENC_SEL (1L << 11) -#define ICE_STREAM1_NOT_EXPECTED_CONF_IDX (1L << 12) -#define ICE_STREAM2_NOT_EXPECTED_CONF_IDX (1L << 13) -#define ICE_STREAM1_NOT_EXPECTED_NEW_TRNS (1L << 14) -#define 
ICE_STREAM2_NOT_EXPECTED_NEW_TRNS (1L << 15) - -#define ICE_NON_SEC_IRQ_MASK \ - (ICE_STREAM1_PREMATURE_LBA_CHANGE |\ - ICE_STREAM2_PREMATURE_LBA_CHANGE |\ - ICE_STREAM1_NOT_EXPECTED_LBO |\ - ICE_STREAM2_NOT_EXPECTED_LBO |\ - ICE_STREAM1_NOT_EXPECTED_DUN |\ - ICE_STREAM2_NOT_EXPECTED_DUN |\ - ICE_STREAM2_NOT_EXPECTED_DUS |\ - ICE_STREAM1_NOT_EXPECTED_DBO |\ - ICE_STREAM2_NOT_EXPECTED_DBO |\ - ICE_STREAM1_NOT_EXPECTED_ENC_SEL |\ - ICE_STREAM2_NOT_EXPECTED_ENC_SEL |\ - ICE_STREAM1_NOT_EXPECTED_CONF_IDX |\ - ICE_STREAM1_NOT_EXPECTED_NEW_TRNS |\ - ICE_STREAM2_NOT_EXPECTED_NEW_TRNS) - -/* QTI ICE registers from secure side */ -#define ICE_TEST_BUS_REG_SECURE_INTR (1L << 28) -#define ICE_TEST_BUS_REG_NON_SECURE_INTR (1L << 2) - -#define ICE_LUT_KEYS_CRYPTOCFG_R_16 0x4040 -#define ICE_LUT_KEYS_CRYPTOCFG_R_17 0x4044 -#define ICE_LUT_KEYS_CRYPTOCFG_OFFSET 0x80 - - -#define ICE_LUT_KEYS_ICE_SEC_IRQ_STTS 0x6200 -#define ICE_LUT_KEYS_ICE_SEC_IRQ_MASK 0x6204 -#define ICE_LUT_KEYS_ICE_SEC_IRQ_CLR 0x6208 - -#define ICE_STREAM1_PARTIALLY_SET_KEY_USED (1L << 0) -#define ICE_STREAM2_PARTIALLY_SET_KEY_USED (1L << 1) -#define ICE_QTIC_DBG_OPEN_EVENT (1L << 30) -#define ICE_KEYS_RAM_RESET_COMPLETED (1L << 31) - -#define ICE_SEC_IRQ_MASK \ - (ICE_STREAM1_PARTIALLY_SET_KEY_USED |\ - ICE_STREAM2_PARTIALLY_SET_KEY_USED |\ - ICE_QTIC_DBG_OPEN_EVENT | \ - ICE_KEYS_RAM_RESET_COMPLETED) - -#define ice_writel(ice_entry, val, reg) \ - writel_relaxed((val), (ice_entry)->icemmio_base + (reg)) -#define ice_readl(ice_entry, reg) \ - readl_relaxed((ice_entry)->icemmio_base + (reg)) - -#endif /* _CRYPTO_INLINE_CRYPTO_ENGINE_REGS_H_ */ diff --git a/drivers/soc/qcom/crypto-qti-platform.h b/drivers/soc/qcom/crypto-qti-platform.h deleted file mode 100644 index a37e34895ee7..000000000000 --- a/drivers/soc/qcom/crypto-qti-platform.h +++ /dev/null @@ -1,47 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _CRYPTO_QTI_PLATFORM_H -#define _CRYPTO_QTI_PLATFORM_H - -#include -#include -#include -#include - -#if IS_ENABLED(CONFIG_QTI_CRYPTO_TZ) -int crypto_qti_program_key(struct crypto_vops_qti_entry *ice_entry, - const struct blk_crypto_key *key, unsigned int slot, - unsigned int data_unit_mask, int capid); -int crypto_qti_invalidate_key(struct crypto_vops_qti_entry *ice_entry, - unsigned int slot); -#else -static inline int crypto_qti_program_key( - struct crypto_vops_qti_entry *ice_entry, - const struct blk_crypto_key *key, - unsigned int slot, unsigned int data_unit_mask, - int capid) -{ - return 0; -} -static inline int crypto_qti_invalidate_key( - struct crypto_vops_qti_entry *ice_entry, unsigned int slot) -{ - return 0; -} -#endif /* CONFIG_QTI_CRYPTO_TZ */ - -static inline void crypto_qti_disable_platform( - struct crypto_vops_qti_entry *ice_entry) -{} - -#endif /* _CRYPTO_QTI_PLATFORM_H */ diff --git a/drivers/soc/qcom/crypto-qti-tz.c b/drivers/soc/qcom/crypto-qti-tz.c deleted file mode 100644 index 154a08389274..000000000000 --- a/drivers/soc/qcom/crypto-qti-tz.c +++ /dev/null @@ -1,101 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2020, Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#include -#include -#include -#include -#include "crypto-qti-platform.h" -#include "crypto-qti-tz.h" - -unsigned int storage_type = SDCC_CE; - -#define ICE_BUFFER_SIZE 128 - -static uint8_t ice_buffer[ICE_BUFFER_SIZE]; - -int crypto_qti_program_key(struct crypto_vops_qti_entry *ice_entry, - const struct blk_crypto_key *key, - unsigned int slot, unsigned int data_unit_mask, - int capid) -{ - int err = 0; - uint32_t smc_id = 0; - char *tzbuf = NULL; - struct scm_desc desc = {0}; - - tzbuf = ice_buffer; - - memcpy(tzbuf, key->raw, key->size); - dmac_flush_range(tzbuf, tzbuf + key->size); - - smc_id = TZ_ES_CONFIG_SET_ICE_KEY_ID; - desc.arginfo = TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID; - desc.args[0] = slot; - desc.args[1] = virt_to_phys(tzbuf); - desc.args[2] = ICE_BUFFER_SIZE; - desc.args[3] = ICE_CIPHER_MODE_XTS_256; - desc.args[4] = data_unit_mask; - - - err = scm_call2_noretry(smc_id, &desc); - if (err) - pr_err("%s:SCM call Error: 0x%x slot %d\n", - __func__, err, slot); - - return err; -} - -int crypto_qti_invalidate_key( - struct crypto_vops_qti_entry *ice_entry, unsigned int slot) -{ - int err = 0; - uint32_t smc_id = 0; - struct scm_desc desc = {0}; - - smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID; - - desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID; - desc.args[0] = slot; - - err = scm_call2_noretry(smc_id, &desc); - if (err) - pr_err("%s:SCM call Error: 0x%x\n", __func__, err); - return err; -} - -static int crypto_qti_storage_type(unsigned int *s_type) -{ - char boot[20] = {'\0'}; - char *match = (char *)strnstr(saved_command_line, - "androidboot.bootdevice=", - strlen(saved_command_line)); - if (match) { - memcpy(boot, (match + strlen("androidboot.bootdevice=")), - sizeof(boot) - 1); - if (strnstr(boot, 
"ufs", strlen(boot))) - *s_type = UFS_CE; - - return 0; - } - return -EINVAL; -} - -static int __init crypto_qti_init(void) -{ - return crypto_qti_storage_type(&storage_type); -} - -module_init(crypto_qti_init); diff --git a/drivers/soc/qcom/crypto-qti-tz.h b/drivers/soc/qcom/crypto-qti-tz.h deleted file mode 100644 index bcb946096072..000000000000 --- a/drivers/soc/qcom/crypto-qti-tz.h +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include - -#ifndef _CRYPTO_QTI_TZ_H -#define _CRYPTO_QTI_TZ_H - -#define TZ_ES_INVALIDATE_ICE_KEY 0x3 -#define TZ_ES_CONFIG_SET_ICE_KEY 0x4 -#define TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE 0x5 -#define TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE 0x6 - -#define TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE_ID \ - TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, \ - TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE) - -#define TZ_ES_CONFIG_SET_ICE_KEY_ID \ - TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, \ - TZ_ES_CONFIG_SET_ICE_KEY) - -#define TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE_ID \ - TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \ - TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE) - -#define TZ_ES_INVALIDATE_ICE_KEY_ID \ - TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \ - TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY) - -#define TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID \ - TZ_SYSCALL_CREATE_PARAM_ID_1( \ - TZ_SYSCALL_PARAM_TYPE_VAL) - -#define TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID \ - TZ_SYSCALL_CREATE_PARAM_ID_5( \ - TZ_SYSCALL_PARAM_TYPE_VAL, \ - TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, 
\ - TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL) - -#define TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE_PARAM_ID \ - TZ_SYSCALL_CREATE_PARAM_ID_2( \ - TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL) - -#define TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE_PARAM_ID \ - TZ_SYSCALL_CREATE_PARAM_ID_6( \ - TZ_SYSCALL_PARAM_TYPE_VAL, \ - TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \ - TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \ - TZ_SYSCALL_PARAM_TYPE_VAL) - -enum { - ICE_CIPHER_MODE_XTS_128 = 0, - ICE_CIPHER_MODE_CBC_128 = 1, - ICE_CIPHER_MODE_XTS_256 = 3, - ICE_CIPHER_MODE_CBC_256 = 4 -}; - -#define UFS_CE 10 -#define SDCC_CE 20 -#define UFS_CARD_CE 30 - -#endif /* _CRYPTO_QTI_TZ_H */ diff --git a/include/linux/crypto-qti-common.h b/include/linux/crypto-qti-common.h deleted file mode 100644 index dd4122f4d9a8..000000000000 --- a/include/linux/crypto-qti-common.h +++ /dev/null @@ -1,95 +0,0 @@ -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef _CRYPTO_QTI_COMMON_H -#define _CRYPTO_QTI_COMMON_H - -#include -#include -#include -#include -#include - -#define RAW_SECRET_SIZE 32 -#define QTI_ICE_MAX_BIST_CHECK_COUNT 100 -#define QTI_ICE_TYPE_NAME_LEN 8 - -struct crypto_vops_qti_entry { - void __iomem *icemmio_base; - uint32_t ice_hw_version; - uint8_t ice_dev_type[QTI_ICE_TYPE_NAME_LEN]; - uint32_t flags; -}; - -#if IS_ENABLED(CONFIG_QTI_CRYPTO_COMMON) -// crypto-qti-common.c -int crypto_qti_init_crypto(struct device *dev, void __iomem *mmio_base, - void **priv_data); -int crypto_qti_enable(void *priv_data); -void crypto_qti_disable(void *priv_data); -int crypto_qti_resume(void *priv_data); -int crypto_qti_debug(void *priv_data); -int crypto_qti_keyslot_program(void *priv_data, - const struct blk_crypto_key *key, - unsigned int slot, u8 data_unit_mask, - int capid); -int crypto_qti_keyslot_evict(void *priv_data, unsigned int slot); -int crypto_qti_derive_raw_secret(const u8 *wrapped_key, - unsigned int wrapped_key_size, u8 *secret, - unsigned int secret_size); - -#else -static inline int crypto_qti_init_crypto(struct device *dev, - void __iomem *mmio_base, - void **priv_data) -{ - return 0; -} -static inline int crypto_qti_enable(void *priv_data) -{ - return 0; -} -static inline void crypto_qti_disable(void *priv_data) -{ - return 0; -} -static inline int crypto_qti_resume(void *priv_data) -{ - return 0; -} -static inline int crypto_qti_debug(void *priv_data) -{ - return 0; -} -static inline int crypto_qti_keyslot_program(void *priv_data, - const struct blk_crypto_key *key, - unsigned int slot, - u8 data_unit_mask, - int capid) -{ - return 0; -} -static inline int crypto_qti_keyslot_evict(void *priv_data, unsigned int slot) -{ - return 0; -} -static inline int crypto_qti_derive_raw_secret(const u8 *wrapped_key, - unsigned int wrapped_key_size, - u8 *secret, - unsigned int secret_size) -{ - return 0; -} - -#endif /* CONFIG_QTI_CRYPTO_COMMON */ - -#endif /* _CRYPTO_QTI_COMMON_H */ From 
a3b838cd9e48ea8d03e5873e9d2aadc32b169719 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 19:54:58 +0530 Subject: [PATCH 132/141] Revert "Revert "Reverting crypto patches"" This reverts commit b8722ec54a0aac3351c940f723356627e31a8327. Signed-off-by: UtsavBalar1231 --- Documentation/filesystems/fscrypt.rst | 86 ++----- block/blk-crypto-fallback.c | 50 ++-- block/blk-crypto-internal.h | 9 - block/blk-crypto.c | 55 +---- block/keyslot-manager.c | 30 +-- drivers/md/dm-default-key.c | 29 +-- drivers/md/dm.c | 80 +------ drivers/scsi/ufs/ufshcd-crypto.c | 12 +- drivers/scsi/ufs/ufshcd-crypto.h | 8 - drivers/scsi/ufs/ufshcd.c | 2 +- drivers/scsi/ufs/ufshcd.h | 1 - fs/crypto/Kconfig | 22 +- fs/crypto/bio.c | 175 ++++---------- fs/crypto/crypto.c | 57 ++++- fs/crypto/fname.c | 314 ++++++-------------------- fs/crypto/fscrypt_private.h | 87 ++++--- fs/crypto/hkdf.c | 2 +- fs/crypto/hooks.c | 48 +--- fs/crypto/inline_crypt.c | 74 ++---- fs/crypto/keyring.c | 22 +- fs/crypto/keysetup.c | 141 +++++------- fs/crypto/keysetup_v1.c | 21 +- fs/crypto/policy.c | 191 +++++----------- fs/ext4/Kconfig | 1 - fs/ext4/dir.c | 9 +- fs/ext4/ioctl.c | 6 - fs/ext4/namei.c | 1 - fs/ext4/super.c | 5 + fs/f2fs/Kconfig | 1 - fs/f2fs/dir.c | 74 +++--- fs/f2fs/f2fs.h | 14 +- fs/f2fs/file.c | 11 - fs/f2fs/hash.c | 25 +- fs/f2fs/inline.c | 9 +- fs/f2fs/namei.c | 1 - fs/f2fs/super.c | 7 + fs/inode.c | 3 +- fs/libfs.c | 50 ---- fs/ubifs/Kconfig | 1 - fs/ubifs/dir.c | 20 +- include/linux/bio-crypt-ctx.h | 3 - include/linux/blk-crypto.h | 18 +- include/linux/fs.h | 2 - include/linux/fscrypt.h | 134 ++++++----- include/linux/keyslot-manager.h | 14 +- include/uapi/linux/fscrypt.h | 2 - 46 files changed, 608 insertions(+), 1319 deletions(-) diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst index dc444b8d3704..471a511c7508 100644 --- a/Documentation/filesystems/fscrypt.rst +++ b/Documentation/filesystems/fscrypt.rst @@ -234,8 +234,8 @@ HKDF is more 
flexible, is nonreversible, and evenly distributes entropy from the master key. HKDF is also standardized and widely used by other software, whereas the AES-128-ECB based KDF is ad-hoc. -Per-file encryption keys ------------------------- +Per-file keys +------------- Since each master key can protect many files, it is necessary to "tweak" the encryption of each file so that the same plaintext in two @@ -268,9 +268,9 @@ is greater than that of an AES-256-XTS key. Therefore, to improve performance and save memory, for Adiantum a "direct key" configuration is supported. When the user has enabled this by setting FSCRYPT_POLICY_FLAG_DIRECT_KEY in the fscrypt policy, -per-file encryption keys are not used. Instead, whenever any data -(contents or filenames) is encrypted, the file's 16-byte nonce is -included in the IV. Moreover: +per-file keys are not used. Instead, whenever any data (contents or +filenames) is encrypted, the file's 16-byte nonce is included in the +IV. Moreover: - For v1 encryption policies, the encryption is done directly with the master key. Because of this, users **must not** use the same master @@ -302,16 +302,6 @@ For master keys used for v2 encryption policies, a unique 16-byte "key identifier" is also derived using the KDF. This value is stored in the clear, since it is needed to reliably identify the key itself. -Dirhash keys ------------- - -For directories that are indexed using a secret-keyed dirhash over the -plaintext filenames, the KDF is also used to derive a 128-bit -SipHash-2-4 key per directory in order to hash filenames. This works -just like deriving a per-file encryption key, except that a different -KDF context is used. Currently, only casefolded ("case-insensitive") -encrypted directories use this style of hashing. - Encryption modes and usage ========================== @@ -335,11 +325,11 @@ used. Adiantum is a (primarily) stream cipher-based mode that is fast even on CPUs without dedicated crypto instructions. 
It's also a true wide-block mode, unlike XTS. It can also eliminate the need to derive -per-file encryption keys. However, it depends on the security of two -primitives, XChaCha12 and AES-256, rather than just one. See the -paper "Adiantum: length-preserving encryption for entry-level -processors" (https://eprint.iacr.org/2018/720.pdf) for more details. -To use Adiantum, CONFIG_CRYPTO_ADIANTUM must be enabled. Also, fast +per-file keys. However, it depends on the security of two primitives, +XChaCha12 and AES-256, rather than just one. See the paper +"Adiantum: length-preserving encryption for entry-level processors" +(https://eprint.iacr.org/2018/720.pdf) for more details. To use +Adiantum, CONFIG_CRYPTO_ADIANTUM must be enabled. Also, fast implementations of ChaCha and NHPoly1305 should be enabled, e.g. CONFIG_CRYPTO_CHACHA20_NEON and CONFIG_CRYPTO_NHPOLY1305_NEON for ARM. @@ -523,9 +513,7 @@ FS_IOC_SET_ENCRYPTION_POLICY can fail with the following errors: - ``EEXIST``: the file is already encrypted with an encryption policy different from the one specified - ``EINVAL``: an invalid encryption policy was specified (invalid - version, mode(s), or flags; or reserved bits were set); or a v1 - encryption policy was specified but the directory has the casefold - flag enabled (casefolding is incompatible with v1 policies). + version, mode(s), or flags; or reserved bits were set) - ``ENOKEY``: a v2 encryption policy was specified, but the key with the specified ``master_key_identifier`` has not been added, nor does the process have the CAP_FOWNER capability in the initial user @@ -633,17 +621,6 @@ from a passphrase or other low-entropy user credential. FS_IOC_GET_ENCRYPTION_PWSALT is deprecated. Instead, prefer to generate and manage any needed salt(s) in userspace. -Getting a file's encryption nonce ---------------------------------- - -Since Linux v5.7, the ioctl FS_IOC_GET_ENCRYPTION_NONCE is supported. 
-On encrypted files and directories it gets the inode's 16-byte nonce. -On unencrypted files and directories, it fails with ENODATA. - -This ioctl can be useful for automated tests which verify that the -encryption is being done correctly. It is not needed for normal use -of fscrypt. - Adding keys ----------- @@ -661,8 +638,7 @@ follows:: struct fscrypt_add_key_arg { struct fscrypt_key_specifier key_spec; __u32 raw_size; - __u32 key_id; - __u32 __reserved[8]; + __u32 __reserved[9]; __u8 raw[]; }; @@ -679,12 +655,6 @@ follows:: } u; }; - struct fscrypt_provisioning_key_payload { - __u32 type; - __u32 __reserved; - __u8 raw[]; - }; - :c:type:`struct fscrypt_add_key_arg` must be zeroed, then initialized as follows: @@ -707,26 +677,9 @@ as follows: ``Documentation/security/keys/core.rst``). - ``raw_size`` must be the size of the ``raw`` key provided, in bytes. - Alternatively, if ``key_id`` is nonzero, this field must be 0, since - in that case the size is implied by the specified Linux keyring key. - -- ``key_id`` is 0 if the raw key is given directly in the ``raw`` - field. Otherwise ``key_id`` is the ID of a Linux keyring key of - type "fscrypt-provisioning" whose payload is a :c:type:`struct - fscrypt_provisioning_key_payload` whose ``raw`` field contains the - raw key and whose ``type`` field matches ``key_spec.type``. Since - ``raw`` is variable-length, the total size of this key's payload - must be ``sizeof(struct fscrypt_provisioning_key_payload)`` plus the - raw key size. The process must have Search permission on this key. - - Most users should leave this 0 and specify the raw key directly. - The support for specifying a Linux keyring key is intended mainly to - allow re-adding keys after a filesystem is unmounted and re-mounted, - without having to store the raw keys in userspace memory. - ``raw`` is a variable-length field which must contain the actual - key, ``raw_size`` bytes long. Alternatively, if ``key_id`` is - nonzero, then this field is unused. 
+ key, ``raw_size`` bytes long. For v2 policy keys, the kernel keeps track of which user (identified by effective user ID) added the key, and only allows the key to be @@ -748,16 +701,11 @@ FS_IOC_ADD_ENCRYPTION_KEY can fail with the following errors: - ``EACCES``: FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR was specified, but the caller does not have the CAP_SYS_ADMIN capability in the initial - user namespace; or the raw key was specified by Linux key ID but the - process lacks Search permission on the key. + user namespace - ``EDQUOT``: the key quota for this user would be exceeded by adding the key - ``EINVAL``: invalid key size or key specifier type, or reserved bits were set -- ``EKEYREJECTED``: the raw key was specified by Linux key ID, but the - key has the wrong type -- ``ENOKEY``: the raw key was specified by Linux key ID, but no key - exists with that ID - ``ENOTTY``: this type of filesystem does not implement encryption - ``EOPNOTSUPP``: the kernel was not configured with encryption support for this filesystem, or the filesystem superblock has not @@ -1160,8 +1108,8 @@ The context structs contain the same information as the corresponding policy structs (see `Setting an encryption policy`_), except that the context structs also contain a nonce. The nonce is randomly generated by the kernel and is used as KDF input or as a tweak to cause -different files to be encrypted differently; see `Per-file encryption -keys`_ and `DIRECT_KEY policies`_. +different files to be encrypted differently; see `Per-file keys`_ and +`DIRECT_KEY policies`_. Data path changes ----------------- @@ -1213,7 +1161,7 @@ filesystem-specific hash(es) needed for directory lookups. This allows the filesystem to still, with a high degree of confidence, map the filename given in ->lookup() back to a particular directory entry that was previously listed by readdir(). See :c:type:`struct -fscrypt_nokey_name` in the source for more details. +fscrypt_digested_name` in the source for more details. 
Note that the precise way that filenames are presented to userspace without the key is subject to change in the future. It is only meant diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index ad83e1077ba3..cce3317cba80 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -487,13 +487,21 @@ out: return false; } -/* - * Prepare blk-crypto-fallback for the specified crypto mode. - * Returns -ENOPKG if the needed crypto API support is missing. +/** + * blk_crypto_start_using_mode() - Start using a crypto algorithm on a device + * @mode_num: the blk_crypto_mode we want to allocate ciphers for. + * @data_unit_size: the data unit size that will be used + * @q: the request queue for the device + * + * Upper layers must call this function to ensure that a the crypto API fallback + * has transforms for this algorithm, if they become necessary. + * + * Return: 0 on success and -err on error. */ -int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num) +int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num, + unsigned int data_unit_size, + struct request_queue *q) { - const char *cipher_str = blk_crypto_modes[mode_num].cipher_str; struct blk_crypto_keyslot *slotp; unsigned int i; int err = 0; @@ -506,20 +514,25 @@ int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num) if (likely(smp_load_acquire(&tfms_inited[mode_num]))) return 0; + /* + * If the keyslot manager of the request queue supports this + * crypto mode, then we don't need to allocate this mode. 
+ */ + if (keyslot_manager_crypto_mode_supported(q->ksm, mode_num, + data_unit_size)) + return 0; + mutex_lock(&tfms_init_lock); if (likely(tfms_inited[mode_num])) goto out; for (i = 0; i < blk_crypto_num_keyslots; i++) { slotp = &blk_crypto_keyslots[i]; - slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0); + slotp->tfms[mode_num] = crypto_alloc_skcipher( + blk_crypto_modes[mode_num].cipher_str, + 0, 0); if (IS_ERR(slotp->tfms[mode_num])) { err = PTR_ERR(slotp->tfms[mode_num]); - if (err == -ENOENT) { - pr_warn_once("Missing crypto API support for \"%s\"\n", - cipher_str); - err = -ENOPKG; - } slotp->tfms[mode_num] = NULL; goto out_free_tfms; } @@ -545,6 +558,7 @@ out: mutex_unlock(&tfms_init_lock); return err; } +EXPORT_SYMBOL_GPL(blk_crypto_start_using_mode); int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key) { @@ -557,12 +571,6 @@ int blk_crypto_fallback_submit_bio(struct bio **bio_ptr) struct bio_crypt_ctx *bc = bio->bi_crypt_context; struct bio_fallback_crypt_ctx *f_ctx; - if (bc->bc_key->is_hw_wrapped) { - pr_warn_once("HW wrapped key cannot be used with fallback.\n"); - bio->bi_status = BLK_STS_NOTSUPP; - return -EOPNOTSUPP; - } - if (!tfms_inited[bc->bc_key->crypto_mode]) { bio->bi_status = BLK_STS_IOERR; return -EIO; @@ -600,11 +608,9 @@ int __init blk_crypto_fallback_init(void) crypto_mode_supported[i] = 0xFFFFFFFF; crypto_mode_supported[BLK_ENCRYPTION_MODE_INVALID] = 0; - blk_crypto_ksm = keyslot_manager_create( - NULL, blk_crypto_num_keyslots, - &blk_crypto_ksm_ll_ops, - BLK_CRYPTO_FEATURE_STANDARD_KEYS, - crypto_mode_supported, NULL); + blk_crypto_ksm = keyslot_manager_create(blk_crypto_num_keyslots, + &blk_crypto_ksm_ll_ops, + crypto_mode_supported, NULL); if (!blk_crypto_ksm) return -ENOMEM; diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h index 4da998c803f2..40d826b743da 100644 --- a/block/blk-crypto-internal.h +++ b/block/blk-crypto-internal.h @@ -19,8 +19,6 @@ extern const struct blk_crypto_mode 
blk_crypto_modes[]; #ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK -int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num); - int blk_crypto_fallback_submit_bio(struct bio **bio_ptr); bool blk_crypto_queue_decrypt_bio(struct bio *bio); @@ -31,13 +29,6 @@ bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc); #else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */ -static inline int -blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num) -{ - pr_warn_once("crypto API fallback is disabled\n"); - return -ENOPKG; -} - static inline bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc) { return false; diff --git a/block/blk-crypto.c b/block/blk-crypto.c index f56bbec1132f..a8de0d9680e0 100644 --- a/block/blk-crypto.c +++ b/block/blk-crypto.c @@ -109,8 +109,7 @@ int blk_crypto_submit_bio(struct bio **bio_ptr) /* Get device keyslot if supported */ if (keyslot_manager_crypto_mode_supported(q->ksm, bc->bc_key->crypto_mode, - bc->bc_key->data_unit_size, - bc->bc_key->is_hw_wrapped)) { + bc->bc_key->data_unit_size)) { err = bio_crypt_ctx_acquire_keyslot(bc, q->ksm); if (!err) return 0; @@ -176,9 +175,7 @@ bool blk_crypto_endio(struct bio *bio) * @raw_key_size: Size of raw key. Must be at least the required size for the * chosen @crypto_mode; see blk_crypto_modes[]. (It's allowed * to be longer than the mode's actual key size, in order to - * support inline encryption hardware that accepts wrapped keys. - * @is_hw_wrapped has to be set for such keys) - * @is_hw_wrapped: Denotes @raw_key is wrapped. + * support inline encryption hardware that accepts wrapped keys.) 
* @crypto_mode: identifier for the encryption algorithm to use * @data_unit_size: the data unit size to use for en/decryption * @@ -187,7 +184,6 @@ bool blk_crypto_endio(struct bio *bio) */ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key, unsigned int raw_key_size, - bool is_hw_wrapped, enum blk_crypto_mode_num crypto_mode, unsigned int data_unit_size) { @@ -202,14 +198,9 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE); mode = &blk_crypto_modes[crypto_mode]; - if (is_hw_wrapped) { - if (raw_key_size < mode->keysize || - raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE) - return -EINVAL; - } else { - if (raw_key_size != mode->keysize) - return -EINVAL; - } + if (raw_key_size < mode->keysize || + raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE) + return -EINVAL; if (!is_power_of_2(data_unit_size)) return -EINVAL; @@ -218,7 +209,6 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, blk_key->data_unit_size = data_unit_size; blk_key->data_unit_size_bits = ilog2(data_unit_size); blk_key->size = raw_key_size; - blk_key->is_hw_wrapped = is_hw_wrapped; memcpy(blk_key->raw, raw_key, raw_key_size); /* @@ -233,38 +223,6 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, } EXPORT_SYMBOL_GPL(blk_crypto_init_key); -/** - * blk_crypto_start_using_mode() - Start using blk-crypto on a device - * @crypto_mode: the crypto mode that will be used - * @data_unit_size: the data unit size that will be used - * @is_hw_wrapped_key: whether the key will be hardware-wrapped - * @q: the request queue for the device - * - * Upper layers must call this function to ensure that either the hardware - * supports the needed crypto settings, or the crypto API fallback has - * transforms for the needed mode allocated and ready to go. 
- * - * Return: 0 on success; -ENOPKG if the hardware doesn't support the crypto - * settings and blk-crypto-fallback is either disabled or the needed - * algorithm is disabled in the crypto API; or another -errno code. - */ -int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode, - unsigned int data_unit_size, - bool is_hw_wrapped_key, - struct request_queue *q) -{ - if (keyslot_manager_crypto_mode_supported(q->ksm, crypto_mode, - data_unit_size, - is_hw_wrapped_key)) - return 0; - if (is_hw_wrapped_key) { - pr_warn_once("hardware doesn't support wrapped keys\n"); - return -EOPNOTSUPP; - } - return blk_crypto_fallback_start_using_mode(crypto_mode); -} -EXPORT_SYMBOL_GPL(blk_crypto_start_using_mode); - /** * blk_crypto_evict_key() - Evict a key from any inline encryption hardware * it may have been programmed into @@ -285,8 +243,7 @@ int blk_crypto_evict_key(struct request_queue *q, { if (q->ksm && keyslot_manager_crypto_mode_supported(q->ksm, key->crypto_mode, - key->data_unit_size, - key->is_hw_wrapped)) + key->data_unit_size)) return keyslot_manager_evict_key(q->ksm, key); return blk_crypto_fallback_evict_key(key); diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c index 1999c503b954..7e42813c9de0 100644 --- a/block/keyslot-manager.c +++ b/block/keyslot-manager.c @@ -43,7 +43,6 @@ struct keyslot { struct keyslot_manager { unsigned int num_slots; struct keyslot_mgmt_ll_ops ksm_ll_ops; - unsigned int features; unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX]; void *ll_priv_data; @@ -79,8 +78,6 @@ static inline bool keyslot_manager_is_passthrough(struct keyslot_manager *ksm) * @ksm_ll_ops: The struct keyslot_mgmt_ll_ops for the device that this keyslot * manager will use to perform operations like programming and * evicting keys. - * @features: The supported features as a bitmask of BLK_CRYPTO_FEATURE_* flags. - * Most drivers should set BLK_CRYPTO_FEATURE_STANDARD_KEYS here. 
* @crypto_mode_supported: Array of size BLK_ENCRYPTION_MODE_MAX of * bitmasks that represents whether a crypto mode * and data unit size are supported. The i'th bit @@ -98,7 +95,6 @@ static inline bool keyslot_manager_is_passthrough(struct keyslot_manager *ksm) */ struct keyslot_manager *keyslot_manager_create(unsigned int num_slots, const struct keyslot_mgmt_ll_ops *ksm_ll_ops, - unsigned int features, const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], void *ll_priv_data) { @@ -120,7 +116,6 @@ struct keyslot_manager *keyslot_manager_create(unsigned int num_slots, ksm->num_slots = num_slots; ksm->ksm_ll_ops = *ksm_ll_ops; - ksm->features = features; memcpy(ksm->crypto_mode_supported, crypto_mode_supported, sizeof(ksm->crypto_mode_supported)); ksm->ll_priv_data = ll_priv_data; @@ -326,24 +321,23 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot) } /** - * keyslot_manager_crypto_mode_supported() - Find out if a crypto_mode / - * data unit size / is_hw_wrapped_key - * combination is supported by a ksm. + * keyslot_manager_crypto_mode_supported() - Find out if a crypto_mode/data + * unit size combination is supported + * by a ksm. * @ksm: The keyslot manager to check * @crypto_mode: The crypto mode to check for. * @data_unit_size: The data_unit_size for the mode. - * @is_hw_wrapped_key: Whether a hardware-wrapped key will be used. * * Calls and returns the result of the crypto_mode_supported function specified * by the ksm. * * Context: Process context. - * Return: Whether or not this ksm supports the specified crypto settings. + * Return: Whether or not this ksm supports the specified crypto_mode/ + * data_unit_size combo. 
*/ bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, enum blk_crypto_mode_num crypto_mode, - unsigned int data_unit_size, - bool is_hw_wrapped_key) + unsigned int data_unit_size) { if (!ksm) return false; @@ -351,13 +345,6 @@ bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, return false; if (WARN_ON(!is_power_of_2(data_unit_size))) return false; - if (is_hw_wrapped_key) { - if (!(ksm->features & BLK_CRYPTO_FEATURE_WRAPPED_KEYS)) - return false; - } else { - if (!(ksm->features & BLK_CRYPTO_FEATURE_STANDARD_KEYS)) - return false; - } return ksm->crypto_mode_supported[crypto_mode] & data_unit_size; } @@ -470,7 +457,6 @@ EXPORT_SYMBOL_GPL(keyslot_manager_destroy); /** * keyslot_manager_create_passthrough() - Create a passthrough keyslot manager * @ksm_ll_ops: The struct keyslot_mgmt_ll_ops - * @features: Bitmask of BLK_CRYPTO_FEATURE_* flags * @crypto_mode_supported: Bitmasks for supported encryption modes * @ll_priv_data: Private data passed as is to the functions in ksm_ll_ops. 
* @@ -487,7 +473,6 @@ EXPORT_SYMBOL_GPL(keyslot_manager_destroy); */ struct keyslot_manager *keyslot_manager_create_passthrough( const struct keyslot_mgmt_ll_ops *ksm_ll_ops, - unsigned int features, const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], void *ll_priv_data) { @@ -498,7 +483,6 @@ struct keyslot_manager *keyslot_manager_create_passthrough( return NULL; ksm->ksm_ll_ops = *ksm_ll_ops; - ksm->features = features; memcpy(ksm->crypto_mode_supported, crypto_mode_supported, sizeof(ksm->crypto_mode_supported)); ksm->ll_priv_data = ll_priv_data; @@ -526,13 +510,11 @@ void keyslot_manager_intersect_modes(struct keyslot_manager *parent, if (child) { unsigned int i; - parent->features &= child->features; for (i = 0; i < ARRAY_SIZE(child->crypto_mode_supported); i++) { parent->crypto_mode_supported[i] &= child->crypto_mode_supported[i]; } } else { - parent->features = 0; memset(parent->crypto_mode_supported, 0, sizeof(parent->crypto_mode_supported)); } diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c index 3d0bd0645f7a..43a30c076aa6 100644 --- a/drivers/md/dm-default-key.c +++ b/drivers/md/dm-default-key.c @@ -9,7 +9,7 @@ #define DM_MSG_PREFIX "default-key" -#define DM_DEFAULT_KEY_MAX_WRAPPED_KEY_SIZE 128 +#define DM_DEFAULT_KEY_MAX_KEY_SIZE 64 #define SECTOR_SIZE (1 << SECTOR_SHIFT) @@ -49,7 +49,6 @@ struct default_key_c { unsigned int sector_size; unsigned int sector_bits; struct blk_crypto_key key; - bool is_hw_wrapped; }; static const struct dm_default_key_cipher * @@ -85,7 +84,7 @@ static int default_key_ctr_optional(struct dm_target *ti, struct default_key_c *dkc = ti->private; struct dm_arg_set as; static const struct dm_arg _args[] = { - {0, 4, "Invalid number of feature args"}, + {0, 3, "Invalid number of feature args"}, }; unsigned int opt_params; const char *opt_string; @@ -118,8 +117,6 @@ static int default_key_ctr_optional(struct dm_target *ti, } } else if (!strcmp(opt_string, "iv_large_sectors")) { iv_large_sectors 
= true; - } else if (!strcmp(opt_string, "wrappedkey_v0")) { - dkc->is_hw_wrapped = true; } else { ti->error = "Invalid feature arguments"; return -EINVAL; @@ -147,8 +144,7 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct default_key_c *dkc; const struct dm_default_key_cipher *cipher; - u8 raw_key[DM_DEFAULT_KEY_MAX_WRAPPED_KEY_SIZE]; - unsigned int raw_key_size; + u8 raw_key[DM_DEFAULT_KEY_MAX_KEY_SIZE]; unsigned long long tmpll; char dummy; int err; @@ -180,15 +176,12 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) } /* */ - raw_key_size = strlen(argv[1]); - if (raw_key_size > 2 * DM_DEFAULT_KEY_MAX_WRAPPED_KEY_SIZE || - raw_key_size % 2) { - ti->error = "Invalid keysize"; + if (strlen(argv[1]) != 2 * cipher->key_size) { + ti->error = "Incorrect key size for cipher"; err = -EINVAL; goto bad; } - raw_key_size /= 2; - if (hex2bin(raw_key, argv[1], raw_key_size) != 0) { + if (hex2bin(raw_key, argv[1], cipher->key_size) != 0) { ti->error = "Malformed key string"; err = -EINVAL; goto bad; @@ -233,15 +226,13 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) } err = blk_crypto_init_key(&dkc->key, raw_key, cipher->key_size, - dkc->is_hw_wrapped, cipher->mode_num, - dkc->sector_size); + cipher->mode_num, dkc->sector_size); if (err) { ti->error = "Error initializing blk-crypto key"; goto bad; } err = blk_crypto_start_using_mode(cipher->mode_num, dkc->sector_size, - dkc->is_hw_wrapped, dkc->dev->bdev->bd_queue); if (err) { ti->error = "Error starting to use blk-crypto"; @@ -328,8 +319,6 @@ static void default_key_status(struct dm_target *ti, status_type_t type, num_feature_args += !!ti->num_discard_bios; if (dkc->sector_size != SECTOR_SIZE) num_feature_args += 2; - if (dkc->is_hw_wrapped) - num_feature_args += 1; if (num_feature_args != 0) { DMEMIT(" %d", num_feature_args); if (ti->num_discard_bios) @@ -338,8 +327,6 @@ static void default_key_status(struct 
dm_target *ti, status_type_t type, DMEMIT(" sector_size:%u", dkc->sector_size); DMEMIT(" iv_large_sectors"); } - if (dkc->is_hw_wrapped) - DMEMIT(" wrappedkey_v0"); } break; } @@ -385,7 +372,7 @@ static void default_key_io_hints(struct dm_target *ti, static struct target_type default_key_target = { .name = "default-key", - .version = {2, 1, 0}, + .version = {2, 0, 0}, .module = THIS_MODULE, .ctr = default_key_ctr, .dtr = default_key_dtr, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index bb78417a249b..0189f70e87a0 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2088,97 +2088,21 @@ static int dm_keyslot_evict(struct keyslot_manager *ksm, return args.err; } -struct dm_derive_raw_secret_args { - const u8 *wrapped_key; - unsigned int wrapped_key_size; - u8 *secret; - unsigned int secret_size; - int err; -}; - -static int dm_derive_raw_secret_callback(struct dm_target *ti, - struct dm_dev *dev, sector_t start, - sector_t len, void *data) -{ - struct dm_derive_raw_secret_args *args = data; - struct request_queue *q = dev->bdev->bd_queue; - - if (!args->err) - return 0; - - if (!q->ksm) { - args->err = -EOPNOTSUPP; - return 0; - } - - args->err = keyslot_manager_derive_raw_secret(q->ksm, args->wrapped_key, - args->wrapped_key_size, - args->secret, - args->secret_size); - /* Try another device in case this fails. */ - return 0; -} - -/* - * Retrieve the raw_secret from the underlying device. 
Given that - * only only one raw_secret can exist for a particular wrappedkey, - * retrieve it only from the first device that supports derive_raw_secret() - */ -static int dm_derive_raw_secret(struct keyslot_manager *ksm, - const u8 *wrapped_key, - unsigned int wrapped_key_size, - u8 *secret, unsigned int secret_size) -{ - struct mapped_device *md = keyslot_manager_private(ksm); - struct dm_derive_raw_secret_args args = { - .wrapped_key = wrapped_key, - .wrapped_key_size = wrapped_key_size, - .secret = secret, - .secret_size = secret_size, - .err = -EOPNOTSUPP, - }; - struct dm_table *t; - int srcu_idx; - int i; - struct dm_target *ti; - - t = dm_get_live_table(md, &srcu_idx); - if (!t) - return -EOPNOTSUPP; - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); - if (!ti->type->iterate_devices) - continue; - ti->type->iterate_devices(ti, dm_derive_raw_secret_callback, - &args); - if (!args.err) - break; - } - dm_put_live_table(md, srcu_idx); - return args.err; -} - static struct keyslot_mgmt_ll_ops dm_ksm_ll_ops = { .keyslot_evict = dm_keyslot_evict, - .derive_raw_secret = dm_derive_raw_secret, }; static int dm_init_inline_encryption(struct mapped_device *md) { - unsigned int features; unsigned int mode_masks[BLK_ENCRYPTION_MODE_MAX]; /* - * Initially declare support for all crypto settings. Anything - * unsupported by a child device will be removed later when calculating - * the device restrictions. + * Start out with all crypto mode support bits set. Any unsupported + * bits will be cleared later when calculating the device restrictions. 
*/ - features = BLK_CRYPTO_FEATURE_STANDARD_KEYS | - BLK_CRYPTO_FEATURE_WRAPPED_KEYS; memset(mode_masks, 0xFF, sizeof(mode_masks)); md->queue->ksm = keyslot_manager_create_passthrough(&dm_ksm_ll_ops, - features, mode_masks, md); if (!md->queue->ksm) return -ENOMEM; diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c index 28abedfbf609..276b49ad13be 100644 --- a/drivers/scsi/ufs/ufshcd-crypto.c +++ b/drivers/scsi/ufs/ufshcd-crypto.c @@ -337,9 +337,7 @@ int ufshcd_hba_init_crypto_spec(struct ufs_hba *hba, ufshcd_clear_all_keyslots(hba); - hba->ksm = keyslot_manager_create(ufshcd_num_keyslots(hba), - ksm_ops, - BLK_CRYPTO_FEATURE_STANDARD_KEYS, + hba->ksm = keyslot_manager_create(ufshcd_num_keyslots(hba), ksm_ops, crypto_modes_supported, hba); if (!hba->ksm) { @@ -460,14 +458,6 @@ int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba, return ufshcd_prepare_lrbp_crypto_spec(hba, cmd, lrbp); } -int ufshcd_map_sg_crypto(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) -{ - if (hba->crypto_vops && hba->crypto_vops->map_sg_crypto) - return hba->crypto_vops->map_sg_crypto(hba, lrbp); - - return 0; -} - int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba, struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp) diff --git a/drivers/scsi/ufs/ufshcd-crypto.h b/drivers/scsi/ufs/ufshcd-crypto.h index f223a06fbf93..95f37c9f7672 100644 --- a/drivers/scsi/ufs/ufshcd-crypto.h +++ b/drivers/scsi/ufs/ufshcd-crypto.h @@ -80,8 +80,6 @@ int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba, struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp); -int ufshcd_map_sg_crypto(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); - int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba, struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp); @@ -135,12 +133,6 @@ static inline int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba, return 0; } -static inline int ufshcd_map_sg_crypto(struct ufs_hba *hba, - struct ufshcd_lrb *lrbp) -{ - return 0; -} - static inline bool ufshcd_lrbp_crypto_enabled(struct 
ufshcd_lrb *lrbp) { return false; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index abedcca0e793..55f2f1645c1f 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3321,7 +3321,7 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) lrbp->utr_descriptor_ptr->prd_table_length = 0; } - return ufshcd_map_sg_crypto(hba, lrbp); + return 0; } /** diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 7621eaf51404..790e2be33995 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -417,7 +417,6 @@ struct ufs_hba_crypto_variant_ops { int (*prepare_lrbp_crypto)(struct ufs_hba *hba, struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp); - int (*map_sg_crypto)(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); int (*complete_lrbp_crypto)(struct ufs_hba *hba, struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp); diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig index 97c0a113f4cc..0701bb90f99c 100644 --- a/fs/crypto/Kconfig +++ b/fs/crypto/Kconfig @@ -1,8 +1,13 @@ config FS_ENCRYPTION bool "FS Encryption (Per-file encryption)" select CRYPTO - select CRYPTO_HASH - select CRYPTO_BLKCIPHER + select CRYPTO_AES + select CRYPTO_CBC + select CRYPTO_ECB + select CRYPTO_XTS + select CRYPTO_CTS + select CRYPTO_SHA512 + select CRYPTO_HMAC select KEYS help Enable encryption of files and directories. This @@ -11,19 +16,6 @@ config FS_ENCRYPTION decrypted pages in the page cache. Currently Ext4, F2FS and UBIFS make use of this feature. -# Filesystems supporting encryption must select this if FS_ENCRYPTION. This -# allows the algorithms to be built as modules when all the filesystems are. 
-config FS_ENCRYPTION_ALGS - tristate - select CRYPTO_AES - select CRYPTO_CBC - select CRYPTO_CTS - select CRYPTO_ECB - select CRYPTO_HMAC - select CRYPTO_SHA256 - select CRYPTO_SHA512 - select CRYPTO_XTS - config FS_ENCRYPTION_INLINE_CRYPT bool "Enable fscrypt to use inline crypto" depends on FS_ENCRYPTION && BLK_INLINE_ENCRYPTION diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index aa36d245f548..9601e4bfc004 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c @@ -41,154 +41,63 @@ void fscrypt_decrypt_bio(struct bio *bio) } EXPORT_SYMBOL(fscrypt_decrypt_bio); -static int fscrypt_zeroout_range_inlinecrypt(const struct inode *inode, - pgoff_t lblk, - sector_t pblk, unsigned int len) -{ - const unsigned int blockbits = inode->i_blkbits; - const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits; - const unsigned int blocks_per_page = 1 << blocks_per_page_bits; - unsigned int i; - struct bio *bio; - int ret, err; - - /* This always succeeds since __GFP_DIRECT_RECLAIM is set. */ - bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES); - - do { - bio_set_dev(bio, inode->i_sb->s_bdev); - bio->bi_iter.bi_sector = pblk << (blockbits - 9); - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); - fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS); - - i = 0; - do { - unsigned int blocks_this_page = - min(len, blocks_per_page); - unsigned int bytes_this_page = - blocks_this_page << blockbits; - - ret = bio_add_page(bio, ZERO_PAGE(0), - bytes_this_page, 0); - if (WARN_ON(ret != bytes_this_page)) { - err = -EIO; - goto out; - } - lblk += blocks_this_page; - pblk += blocks_this_page; - len -= blocks_this_page; - } while (++i != BIO_MAX_PAGES && len != 0); - - err = submit_bio_wait(bio); - if (err) - goto out; - bio_reset(bio); - } while (len != 0); - err = 0; -out: - bio_put(bio); - return err; -} - -/** - * fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file - * @inode: the file's inode - * @lblk: the first file logical block to zero out - * @pblk: the first filesystem 
physical block to zero out - * @len: number of blocks to zero out - * - * Zero out filesystem blocks in an encrypted regular file on-disk, i.e. write - * ciphertext blocks which decrypt to the all-zeroes block. The blocks must be - * both logically and physically contiguous. It's also assumed that the - * filesystem only uses a single block device, ->s_bdev. - * - * Note that since each block uses a different IV, this involves writing a - * different ciphertext to each block; we can't simply reuse the same one. - * - * Return: 0 on success; -errno on failure. - */ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, - sector_t pblk, unsigned int len) + sector_t pblk, unsigned int len) { const unsigned int blockbits = inode->i_blkbits; const unsigned int blocksize = 1 << blockbits; - const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits; - const unsigned int blocks_per_page = 1 << blocks_per_page_bits; - struct page *pages[16]; /* write up to 16 pages at a time */ - unsigned int nr_pages; - unsigned int i; - unsigned int offset; + const bool inlinecrypt = fscrypt_inode_uses_inline_crypto(inode); + struct page *ciphertext_page; struct bio *bio; - int ret, err; + int ret, err = 0; - if (len == 0) - return 0; - - if (fscrypt_inode_uses_inline_crypto(inode)) - return fscrypt_zeroout_range_inlinecrypt(inode, lblk, pblk, - len); - - BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_PAGES); - nr_pages = min_t(unsigned int, ARRAY_SIZE(pages), - (len + blocks_per_page - 1) >> blocks_per_page_bits); - - /* - * We need at least one page for ciphertext. Allocate the first one - * from a mempool, with __GFP_DIRECT_RECLAIM set so that it can't fail. - * - * Any additional page allocations are allowed to fail, as they only - * help performance, and waiting on the mempool for them could deadlock. - */ - for (i = 0; i < nr_pages; i++) { - pages[i] = fscrypt_alloc_bounce_page(i == 0 ? 
GFP_NOFS : - GFP_NOWAIT | __GFP_NOWARN); - if (!pages[i]) - break; + if (inlinecrypt) { + ciphertext_page = ZERO_PAGE(0); + } else { + ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT); + if (!ciphertext_page) + return -ENOMEM; } - nr_pages = i; - if (WARN_ON(nr_pages <= 0)) - return -EINVAL; - /* This always succeeds since __GFP_DIRECT_RECLAIM is set. */ - bio = bio_alloc(GFP_NOFS, nr_pages); + while (len--) { + if (!inlinecrypt) { + err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk, + ZERO_PAGE(0), ciphertext_page, + blocksize, 0, GFP_NOFS); + if (err) + goto errout; + } + + bio = bio_alloc(GFP_NOWAIT, 1); + if (!bio) { + err = -ENOMEM; + goto errout; + } + fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOIO); - do { bio_set_dev(bio, inode->i_sb->s_bdev); bio->bi_iter.bi_sector = pblk << (blockbits - 9); bio_set_op_attrs(bio, REQ_OP_WRITE, 0); - - i = 0; - offset = 0; - do { - err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk, - ZERO_PAGE(0), pages[i], - blocksize, offset, GFP_NOFS); - if (err) - goto out; - lblk++; - pblk++; - len--; - offset += blocksize; - if (offset == PAGE_SIZE || len == 0) { - ret = bio_add_page(bio, pages[i++], offset, 0); - if (WARN_ON(ret != offset)) { - err = -EIO; - goto out; - } - offset = 0; - } - } while (i != nr_pages && len != 0); - + ret = bio_add_page(bio, ciphertext_page, blocksize, 0); + if (WARN_ON(ret != blocksize)) { + /* should never happen! 
*/ + bio_put(bio); + err = -EIO; + goto errout; + } err = submit_bio_wait(bio); + if (err == 0 && bio->bi_status) + err = -EIO; + bio_put(bio); if (err) - goto out; - bio_reset(bio); - } while (len != 0); + goto errout; + lblk++; + pblk++; + } err = 0; -out: - bio_put(bio); - for (i = 0; i < nr_pages; i++) - fscrypt_free_bounce_page(pages[i]); +errout: + if (!inlinecrypt) + fscrypt_free_bounce_page(ciphertext_page); return err; } EXPORT_SYMBOL(fscrypt_zeroout_range); diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index ed6ea28dbdad..41b4fe15b4b6 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -24,6 +24,8 @@ #include #include #include +#include +#include #include #include "fscrypt_private.h" @@ -137,7 +139,7 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw, * multiple of the filesystem's block size. * @offs: Byte offset within @page of the first block to encrypt. Must be * a multiple of the filesystem's block size. - * @gfp_flags: Memory allocation flags. See details below. + * @gfp_flags: Memory allocation flags * * A new bounce page is allocated, and the specified block(s) are encrypted into * it. In the bounce page, the ciphertext block(s) will be located at the same @@ -147,11 +149,6 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw, * * This is for use by the filesystem's ->writepages() method. * - * The bounce page allocation is mempool-backed, so it will always succeed when - * @gfp_flags includes __GFP_DIRECT_RECLAIM, e.g. when it's GFP_NOFS. However, - * only the first page of each bio can be allocated this way. To prevent - * deadlocks, for any additional pages a mask like GFP_NOWAIT must be used. 
- * * Return: the new encrypted bounce page on success; an ERR_PTR() on failure */ struct page *fscrypt_encrypt_pagecache_blocks(struct page *page, @@ -288,6 +285,54 @@ int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page, } EXPORT_SYMBOL(fscrypt_decrypt_block_inplace); +/* + * Validate dentries in encrypted directories to make sure we aren't potentially + * caching stale dentries after a key has been added. + */ +static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) +{ + struct dentry *dir; + int err; + int valid; + + /* + * Plaintext names are always valid, since fscrypt doesn't support + * reverting to ciphertext names without evicting the directory's inode + * -- which implies eviction of the dentries in the directory. + */ + if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME)) + return 1; + + /* + * Ciphertext name; valid if the directory's key is still unavailable. + * + * Although fscrypt forbids rename() on ciphertext names, we still must + * use dget_parent() here rather than use ->d_parent directly. That's + * because a corrupted fs image may contain directory hard links, which + * the VFS handles by moving the directory's dentry tree in the dcache + * each time ->lookup() finds the directory and it already has a dentry + * elsewhere. Thus ->d_parent can be changing, and we must safely grab + * a reference to some ->d_parent to prevent it from being freed. + */ + + if (flags & LOOKUP_RCU) + return -ECHILD; + + dir = dget_parent(dentry); + err = fscrypt_get_encryption_info(d_inode(dir)); + valid = !fscrypt_has_encryption_key(d_inode(dir)); + dput(dir); + + if (err < 0) + return err; + + return valid; +} + +const struct dentry_operations fscrypt_d_ops = { + .d_revalidate = fscrypt_d_revalidate, +}; + /** * fscrypt_initialize() - allocate major buffers for fs encryption. 
* @cop_flags: fscrypt operations flags diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 63bfe5e8accd..3aafddaab703 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -11,88 +11,10 @@ * This has not yet undergone a rigorous security audit. */ -#include #include -#include -#include #include #include "fscrypt_private.h" -/** - * struct fscrypt_nokey_name - identifier for directory entry when key is absent - * - * When userspace lists an encrypted directory without access to the key, the - * filesystem must present a unique "no-key name" for each filename that allows - * it to find the directory entry again if requested. Naively, that would just - * mean using the ciphertext filenames. However, since the ciphertext filenames - * can contain illegal characters ('\0' and '/'), they must be encoded in some - * way. We use base64. But that can cause names to exceed NAME_MAX (255 - * bytes), so we also need to use a strong hash to abbreviate long names. - * - * The filesystem may also need another kind of hash, the "dirhash", to quickly - * find the directory entry. Since filesystems normally compute the dirhash - * over the on-disk filename (i.e. the ciphertext), it's not computable from - * no-key names that abbreviate the ciphertext using the strong hash to fit in - * NAME_MAX. It's also not computable if it's a keyed hash taken over the - * plaintext (but it may still be available in the on-disk directory entry); - * casefolded directories use this type of dirhash. At least in these cases, - * each no-key name must include the name's dirhash too. - * - * To meet all these requirements, we base64-encode the following - * variable-length structure. It contains the dirhash, or 0's if the filesystem - * didn't provide one; up to 149 bytes of the ciphertext name; and for - * ciphertexts longer than 149 bytes, also the SHA-256 of the remaining bytes. 
- * - * This ensures that each no-key name contains everything needed to find the - * directory entry again, contains only legal characters, doesn't exceed - * NAME_MAX, is unambiguous unless there's a SHA-256 collision, and that we only - * take the performance hit of SHA-256 on very long filenames (which are rare). - */ -struct fscrypt_nokey_name { - u32 dirhash[2]; - u8 bytes[149]; - u8 sha256[SHA256_DIGEST_SIZE]; -}; /* 189 bytes => 252 bytes base64-encoded, which is <= NAME_MAX (255) */ - -/* - * Decoded size of max-size nokey name, i.e. a name that was abbreviated using - * the strong hash and thus includes the 'sha256' field. This isn't simply - * sizeof(struct fscrypt_nokey_name), as the padding at the end isn't included. - */ -#define FSCRYPT_NOKEY_NAME_MAX offsetofend(struct fscrypt_nokey_name, sha256) - -static struct crypto_shash *sha256_hash_tfm; - -static int fscrypt_do_sha256(const u8 *data, unsigned int data_len, u8 *result) -{ - struct crypto_shash *tfm = READ_ONCE(sha256_hash_tfm); - - if (unlikely(!tfm)) { - struct crypto_shash *prev_tfm; - - tfm = crypto_alloc_shash("sha256", 0, 0); - if (IS_ERR(tfm)) { - fscrypt_err(NULL, - "Error allocating SHA-256 transform: %ld", - PTR_ERR(tfm)); - return PTR_ERR(tfm); - } - prev_tfm = cmpxchg(&sha256_hash_tfm, NULL, tfm); - if (prev_tfm) { - crypto_free_shash(tfm); - tfm = prev_tfm; - } - } - { - SHASH_DESC_ON_STACK(desc, tfm); - - desc->tfm = tfm; - desc->flags = 0; - - return crypto_shash_digest(desc, data, data_len, result); - } -} - static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) { if (str->len == 1 && str->name[0] == '.') @@ -105,19 +27,19 @@ static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) } /** - * fscrypt_fname_encrypt() - encrypt a filename + * fname_encrypt() - encrypt a filename * * The output buffer must be at least as large as the input buffer. * Any extra space is filled with NUL padding before encryption. 
* * Return: 0 on success, -errno on failure */ -int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, - u8 *out, unsigned int olen) +int fname_encrypt(struct inode *inode, const struct qstr *iname, + u8 *out, unsigned int olen) { struct skcipher_request *req = NULL; DECLARE_CRYPTO_WAIT(wait); - const struct fscrypt_info *ci = inode->i_crypt_info; + struct fscrypt_info *ci = inode->i_crypt_info; struct crypto_skcipher *tfm = ci->ci_key.tfm; union fscrypt_iv iv; struct scatterlist sg; @@ -163,14 +85,14 @@ int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, * * Return: 0 on success, -errno on failure */ -static int fname_decrypt(const struct inode *inode, - const struct fscrypt_str *iname, - struct fscrypt_str *oname) +static int fname_decrypt(struct inode *inode, + const struct fscrypt_str *iname, + struct fscrypt_str *oname) { struct skcipher_request *req = NULL; DECLARE_CRYPTO_WAIT(wait); struct scatterlist src_sg, dst_sg; - const struct fscrypt_info *ci = inode->i_crypt_info; + struct fscrypt_info *ci = inode->i_crypt_info; struct crypto_skcipher *tfm = ci->ci_key.tfm; union fscrypt_iv iv; int res; @@ -284,7 +206,9 @@ int fscrypt_fname_alloc_buffer(const struct inode *inode, u32 max_encrypted_len, struct fscrypt_str *crypto_str) { - const u32 max_encoded_len = BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX); + const u32 max_encoded_len = + max_t(u32, BASE64_CHARS(FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE), + 1 + BASE64_CHARS(sizeof(struct fscrypt_digested_name))); u32 max_presented_len; max_presented_len = max(max_encoded_len, max_encrypted_len); @@ -317,21 +241,19 @@ EXPORT_SYMBOL(fscrypt_fname_free_buffer); * * The caller must have allocated sufficient memory for the @oname string. * - * If the key is available, we'll decrypt the disk name. Otherwise, we'll - * encode it for presentation in fscrypt_nokey_name format. - * See struct fscrypt_nokey_name for details. 
+ * If the key is available, we'll decrypt the disk name; otherwise, we'll encode + * it for presentation. Short names are directly base64-encoded, while long + * names are encoded in fscrypt_digested_name format. * * Return: 0 on success, -errno on failure */ -int fscrypt_fname_disk_to_usr(const struct inode *inode, - u32 hash, u32 minor_hash, - const struct fscrypt_str *iname, - struct fscrypt_str *oname) +int fscrypt_fname_disk_to_usr(struct inode *inode, + u32 hash, u32 minor_hash, + const struct fscrypt_str *iname, + struct fscrypt_str *oname) { const struct qstr qname = FSTR_TO_QSTR(iname); - struct fscrypt_nokey_name nokey_name; - u32 size; /* size of the unencoded no-key name */ - int err; + struct fscrypt_digested_name digested_name; if (fscrypt_is_dot_dotdot(&qname)) { oname->name[0] = '.'; @@ -346,37 +268,24 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode, if (fscrypt_has_encryption_key(inode)) return fname_decrypt(inode, iname, oname); - /* - * Sanity check that struct fscrypt_nokey_name doesn't have padding - * between fields and that its encoded size never exceeds NAME_MAX. 
- */ - BUILD_BUG_ON(offsetofend(struct fscrypt_nokey_name, dirhash) != - offsetof(struct fscrypt_nokey_name, bytes)); - BUILD_BUG_ON(offsetofend(struct fscrypt_nokey_name, bytes) != - offsetof(struct fscrypt_nokey_name, sha256)); - BUILD_BUG_ON(BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX) > NAME_MAX); - + if (iname->len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) { + oname->len = base64_encode(iname->name, iname->len, + oname->name); + return 0; + } if (hash) { - nokey_name.dirhash[0] = hash; - nokey_name.dirhash[1] = minor_hash; + digested_name.hash = hash; + digested_name.minor_hash = minor_hash; } else { - nokey_name.dirhash[0] = 0; - nokey_name.dirhash[1] = 0; + digested_name.hash = 0; + digested_name.minor_hash = 0; } - if (iname->len <= sizeof(nokey_name.bytes)) { - memcpy(nokey_name.bytes, iname->name, iname->len); - size = offsetof(struct fscrypt_nokey_name, bytes[iname->len]); - } else { - memcpy(nokey_name.bytes, iname->name, sizeof(nokey_name.bytes)); - /* Compute strong hash of remaining part of name. */ - err = fscrypt_do_sha256(&iname->name[sizeof(nokey_name.bytes)], - iname->len - sizeof(nokey_name.bytes), - nokey_name.sha256); - if (err) - return err; - size = FSCRYPT_NOKEY_NAME_MAX; - } - oname->len = base64_encode((const u8 *)&nokey_name, size, oname->name); + memcpy(digested_name.digest, + FSCRYPT_FNAME_DIGEST(iname->name, iname->len), + FSCRYPT_FNAME_DIGEST_SIZE); + oname->name[0] = '_'; + oname->len = 1 + base64_encode((const u8 *)&digested_name, + sizeof(digested_name), oname->name + 1); return 0; } EXPORT_SYMBOL(fscrypt_fname_disk_to_usr); @@ -397,7 +306,8 @@ EXPORT_SYMBOL(fscrypt_fname_disk_to_usr); * get the disk_name. * * Else, for keyless @lookup operations, @iname is the presented ciphertext, so - * we decode it to get the fscrypt_nokey_name. Non-@lookup operations will be + * we decode it to get either the ciphertext disk_name (for short names) or the + * fscrypt_digested_name (for long names). 
Non-@lookup operations will be * impossible in this case, so we fail them with ENOKEY. * * If successful, fscrypt_free_filename() must be called later to clean up. @@ -407,8 +317,8 @@ EXPORT_SYMBOL(fscrypt_fname_disk_to_usr); int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, int lookup, struct fscrypt_name *fname) { - struct fscrypt_nokey_name *nokey_name; int ret; + int digested; memset(fname, 0, sizeof(struct fscrypt_name)); fname->usr_fname = iname; @@ -432,8 +342,8 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, if (!fname->crypto_buf.name) return -ENOMEM; - ret = fscrypt_fname_encrypt(dir, iname, fname->crypto_buf.name, - fname->crypto_buf.len); + ret = fname_encrypt(dir, iname, fname->crypto_buf.name, + fname->crypto_buf.len); if (ret) goto errout; fname->disk_name.name = fname->crypto_buf.name; @@ -448,31 +358,40 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, * We don't have the key and we are doing a lookup; decode the * user-supplied name */ + if (iname->name[0] == '_') { + if (iname->len != + 1 + BASE64_CHARS(sizeof(struct fscrypt_digested_name))) + return -ENOENT; + digested = 1; + } else { + if (iname->len > + BASE64_CHARS(FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)) + return -ENOENT; + digested = 0; + } - if (iname->len > BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX)) - return -ENOENT; - - fname->crypto_buf.name = kmalloc(FSCRYPT_NOKEY_NAME_MAX, GFP_KERNEL); + fname->crypto_buf.name = + kmalloc(max_t(size_t, FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE, + sizeof(struct fscrypt_digested_name)), + GFP_KERNEL); if (fname->crypto_buf.name == NULL) return -ENOMEM; - ret = base64_decode(iname->name, iname->len, fname->crypto_buf.name); - if (ret < (int)offsetof(struct fscrypt_nokey_name, bytes[1]) || - (ret > offsetof(struct fscrypt_nokey_name, sha256) && - ret != FSCRYPT_NOKEY_NAME_MAX)) { + ret = base64_decode(iname->name + digested, iname->len - digested, + fname->crypto_buf.name); + if (ret < 0) { ret = 
-ENOENT; goto errout; } fname->crypto_buf.len = ret; - - nokey_name = (void *)fname->crypto_buf.name; - fname->hash = nokey_name->dirhash[0]; - fname->minor_hash = nokey_name->dirhash[1]; - if (ret != FSCRYPT_NOKEY_NAME_MAX) { - /* The full ciphertext filename is available. */ - fname->disk_name.name = nokey_name->bytes; - fname->disk_name.len = - ret - offsetof(struct fscrypt_nokey_name, bytes); + if (digested) { + const struct fscrypt_digested_name *n = + (const void *)fname->crypto_buf.name; + fname->hash = n->hash; + fname->minor_hash = n->minor_hash; + } else { + fname->disk_name.name = fname->crypto_buf.name; + fname->disk_name.len = fname->crypto_buf.len; } return 0; @@ -481,106 +400,3 @@ errout: return ret; } EXPORT_SYMBOL(fscrypt_setup_filename); - -/** - * fscrypt_match_name() - test whether the given name matches a directory entry - * @fname: the name being searched for - * @de_name: the name from the directory entry - * @de_name_len: the length of @de_name in bytes - * - * Normally @fname->disk_name will be set, and in that case we simply compare - * that to the name stored in the directory entry. The only exception is that - * if we don't have the key for an encrypted directory and the name we're - * looking for is very long, then we won't have the full disk_name and instead - * we'll need to match against a fscrypt_nokey_name that includes a strong hash. - * - * Return: %true if the name matches, otherwise %false. 
- */ -bool fscrypt_match_name(const struct fscrypt_name *fname, - const u8 *de_name, u32 de_name_len) -{ - const struct fscrypt_nokey_name *nokey_name = - (const void *)fname->crypto_buf.name; - u8 sha256[SHA256_DIGEST_SIZE]; - - if (likely(fname->disk_name.name)) { - if (de_name_len != fname->disk_name.len) - return false; - return !memcmp(de_name, fname->disk_name.name, de_name_len); - } - if (de_name_len <= sizeof(nokey_name->bytes)) - return false; - if (memcmp(de_name, nokey_name->bytes, sizeof(nokey_name->bytes))) - return false; - if (fscrypt_do_sha256(&de_name[sizeof(nokey_name->bytes)], - de_name_len - sizeof(nokey_name->bytes), sha256)) - return false; - return !memcmp(sha256, nokey_name->sha256, sizeof(sha256)); -} -EXPORT_SYMBOL_GPL(fscrypt_match_name); - -/** - * fscrypt_fname_siphash() - calculate the SipHash of a filename - * @dir: the parent directory - * @name: the filename to calculate the SipHash of - * - * Given a plaintext filename @name and a directory @dir which uses SipHash as - * its dirhash method and has had its fscrypt key set up, this function - * calculates the SipHash of that name using the directory's secret dirhash key. - * - * Return: the SipHash of @name using the hash key of @dir - */ -u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name) -{ - const struct fscrypt_info *ci = dir->i_crypt_info; - - WARN_ON(!ci->ci_dirhash_key_initialized); - - return siphash(name->name, name->len, &ci->ci_dirhash_key); -} -EXPORT_SYMBOL_GPL(fscrypt_fname_siphash); - -/* - * Validate dentries in encrypted directories to make sure we aren't potentially - * caching stale dentries after a key has been added. 
- */ -int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) -{ - struct dentry *dir; - int err; - int valid; - - /* - * Plaintext names are always valid, since fscrypt doesn't support - * reverting to ciphertext names without evicting the directory's inode - * -- which implies eviction of the dentries in the directory. - */ - if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME)) - return 1; - - /* - * Ciphertext name; valid if the directory's key is still unavailable. - * - * Although fscrypt forbids rename() on ciphertext names, we still must - * use dget_parent() here rather than use ->d_parent directly. That's - * because a corrupted fs image may contain directory hard links, which - * the VFS handles by moving the directory's dentry tree in the dcache - * each time ->lookup() finds the directory and it already has a dentry - * elsewhere. Thus ->d_parent can be changing, and we must safely grab - * a reference to some ->d_parent to prevent it from being freed. - */ - - if (flags & LOOKUP_RCU) - return -ECHILD; - - dir = dget_parent(dentry); - err = fscrypt_get_encryption_info(d_inode(dir)); - valid = !fscrypt_has_encryption_key(d_inode(dir)); - dput(dir); - - if (err < 0) - return err; - - return valid; -} -EXPORT_SYMBOL(fscrypt_d_revalidate); diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index ae03c7fc7e52..739d8a9d24f5 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -12,7 +12,6 @@ #define _FSCRYPT_PRIVATE_H #include -#include #include #include @@ -78,26 +77,6 @@ static inline int fscrypt_context_size(const union fscrypt_context *ctx) return 0; } -/* Check whether an fscrypt_context has a recognized version number and size */ -static inline bool fscrypt_context_is_valid(const union fscrypt_context *ctx, - int ctx_size) -{ - return ctx_size >= 1 && ctx_size == fscrypt_context_size(ctx); -} - -/* Retrieve the context's nonce, assuming the context was already validated */ -static inline const u8 
*fscrypt_context_nonce(const union fscrypt_context *ctx) -{ - switch (ctx->version) { - case FSCRYPT_CONTEXT_V1: - return ctx->v1.nonce; - case FSCRYPT_CONTEXT_V2: - return ctx->v2.nonce; - } - WARN_ON(1); - return NULL; -} - #undef fscrypt_policy union fscrypt_policy { u8 version; @@ -159,6 +138,12 @@ fscrypt_policy_flags(const union fscrypt_policy *policy) BUG(); } +static inline bool +fscrypt_is_direct_key_policy(const union fscrypt_policy *policy) +{ + return fscrypt_policy_flags(policy) & FSCRYPT_POLICY_FLAG_DIRECT_KEY; +} + /** * For encrypted symlinks, the ciphertext length is stored at the beginning * of the string in little-endian format. @@ -232,14 +217,6 @@ struct fscrypt_info { */ struct fscrypt_direct_key *ci_direct_key; - /* - * This inode's hash key for filenames. This is a 128-bit SipHash-2-4 - * key. This is only set for directories that use a keyed dirhash over - * the plaintext filenames -- currently just casefolded directories. - */ - siphash_key_t ci_dirhash_key; - bool ci_dirhash_key_initialized; - /* The encryption policy used by this inode */ union fscrypt_policy ci_policy; @@ -253,6 +230,24 @@ typedef enum { FS_ENCRYPT, } fscrypt_direction_t; +static inline bool fscrypt_valid_enc_modes(u32 contents_mode, + u32 filenames_mode) +{ + if (contents_mode == FSCRYPT_MODE_AES_128_CBC && + filenames_mode == FSCRYPT_MODE_AES_128_CTS) + return true; + + if (contents_mode == FSCRYPT_MODE_AES_256_XTS && + filenames_mode == FSCRYPT_MODE_AES_256_CTS) + return true; + + if (contents_mode == FSCRYPT_MODE_ADIANTUM && + filenames_mode == FSCRYPT_MODE_ADIANTUM) + return true; + + return false; +} + /* crypto.c */ extern struct kmem_cache *fscrypt_info_cachep; extern int fscrypt_initialize(unsigned int cop_flags); @@ -262,6 +257,7 @@ extern int fscrypt_crypt_block(const struct inode *inode, unsigned int len, unsigned int offs, gfp_t gfp_flags); extern struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags); +extern const struct dentry_operations fscrypt_d_ops; 
extern void __printf(3, 4) __cold fscrypt_msg(const struct inode *inode, const char *level, const char *fmt, ...); @@ -289,9 +285,8 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, const struct fscrypt_info *ci); /* fname.c */ -extern int fscrypt_fname_encrypt(const struct inode *inode, - const struct qstr *iname, - u8 *out, unsigned int olen); +extern int fname_encrypt(struct inode *inode, const struct qstr *iname, + u8 *out, unsigned int olen); extern bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len, u32 max_len, u32 *encrypted_len_ret); @@ -313,12 +308,11 @@ extern int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, * output doesn't reveal another. */ #define HKDF_CONTEXT_KEY_IDENTIFIER 1 -#define HKDF_CONTEXT_PER_FILE_ENC_KEY 2 +#define HKDF_CONTEXT_PER_FILE_KEY 2 #define HKDF_CONTEXT_DIRECT_KEY 3 #define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4 -#define HKDF_CONTEXT_DIRHASH_KEY 5 -extern int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, +extern int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context, const u8 *info, unsigned int infolen, u8 *okm, unsigned int okmlen); @@ -326,8 +320,7 @@ extern void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf); /* inline_crypt.c */ #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT -extern int fscrypt_select_encryption_impl(struct fscrypt_info *ci, - bool is_hw_wrapped_key); +extern void fscrypt_select_encryption_impl(struct fscrypt_info *ci); static inline bool fscrypt_using_inline_encryption(const struct fscrypt_info *ci) @@ -339,7 +332,6 @@ extern int fscrypt_prepare_inline_crypt_key( struct fscrypt_prepared_key *prep_key, const u8 *raw_key, unsigned int raw_key_size, - bool is_hw_wrapped, const struct fscrypt_info *ci); extern void fscrypt_destroy_inline_crypt_key( @@ -371,10 +363,8 @@ fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, #else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ -static inline int fscrypt_select_encryption_impl(struct 
fscrypt_info *ci, - bool is_hw_wrapped_key) +static inline void fscrypt_select_encryption_impl(struct fscrypt_info *ci) { - return 0; } static inline bool fscrypt_using_inline_encryption( @@ -386,7 +376,6 @@ static inline bool fscrypt_using_inline_encryption( static inline int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, const u8 *raw_key, unsigned int raw_key_size, - bool is_hw_wrapped, const struct fscrypt_info *ci) { WARN_ON(1); @@ -579,18 +568,20 @@ struct fscrypt_mode { extern struct fscrypt_mode fscrypt_modes[]; +static inline bool +fscrypt_mode_supports_direct_key(const struct fscrypt_mode *mode) +{ + return mode->ivsize >= offsetofend(union fscrypt_iv, nonce); +} + extern int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, const u8 *raw_key, unsigned int raw_key_size, - bool is_hw_wrapped, const struct fscrypt_info *ci); extern void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key); -extern int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, - const u8 *raw_key); - -extern int fscrypt_derive_dirhash_key(struct fscrypt_info *ci, - const struct fscrypt_master_key *mk); +extern int fscrypt_set_derived_key(struct fscrypt_info *ci, + const u8 *derived_key); /* keysetup_v1.c */ diff --git a/fs/crypto/hkdf.c b/fs/crypto/hkdf.c index fd7f67628561..2c026009c6e7 100644 --- a/fs/crypto/hkdf.c +++ b/fs/crypto/hkdf.c @@ -113,7 +113,7 @@ out: * adds to its application-specific info strings to guarantee that it doesn't * accidentally repeat an info string when using HKDF for different purposes.) */ -int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, +int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context, const u8 *info, unsigned int infolen, u8 *okm, unsigned int okmlen) { diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c index a6396bf721ac..30b1ca661249 100644 --- a/fs/crypto/hooks.c +++ b/fs/crypto/hooks.c @@ -4,8 +4,6 @@ * Encryption hooks for higher-level filesystem operations. 
*/ -#include - #include "fscrypt_private.h" /** @@ -117,53 +115,12 @@ int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, spin_lock(&dentry->d_lock); dentry->d_flags |= DCACHE_ENCRYPTED_NAME; spin_unlock(&dentry->d_lock); + d_set_d_op(dentry, &fscrypt_d_ops); } return err; } EXPORT_SYMBOL_GPL(__fscrypt_prepare_lookup); -/** - * fscrypt_prepare_setflags() - prepare to change flags with FS_IOC_SETFLAGS - * @inode: the inode on which flags are being changed - * @oldflags: the old flags - * @flags: the new flags - * - * The caller should be holding i_rwsem for write. - * - * Return: 0 on success; -errno if the flags change isn't allowed or if - * another error occurs. - */ -int fscrypt_prepare_setflags(struct inode *inode, - unsigned int oldflags, unsigned int flags) -{ - struct fscrypt_info *ci; - struct fscrypt_master_key *mk; - int err; - - /* - * When the CASEFOLD flag is set on an encrypted directory, we must - * derive the secret key needed for the dirhash. This is only possible - * if the directory uses a v2 encryption policy. 
- */ - if (IS_ENCRYPTED(inode) && (flags & ~oldflags & FS_CASEFOLD_FL)) { - err = fscrypt_require_key(inode); - if (err) - return err; - ci = inode->i_crypt_info; - if (ci->ci_policy.version != FSCRYPT_POLICY_V2) - return -EINVAL; - mk = ci->ci_master_key->payload.data[0]; - down_read(&mk->mk_secret_sem); - if (is_master_key_secret_present(&mk->mk_secret)) - err = fscrypt_derive_dirhash_key(ci, mk); - else - err = -ENOKEY; - up_read(&mk->mk_secret_sem); - return err; - } - return 0; -} - int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, unsigned int max_len, struct fscrypt_str *disk_link) @@ -230,8 +187,7 @@ int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, ciphertext_len = disk_link->len - sizeof(*sd); sd->len = cpu_to_le16(ciphertext_len); - err = fscrypt_fname_encrypt(inode, &iname, sd->encrypted_path, - ciphertext_len); + err = fname_encrypt(inode, &iname, sd->encrypted_path, ciphertext_len); if (err) goto err_free_sd; diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c index e1bbaeff1c43..92c471d3db73 100644 --- a/fs/crypto/inline_crypt.c +++ b/fs/crypto/inline_crypt.c @@ -26,94 +26,44 @@ struct fscrypt_blk_crypto_key { struct request_queue *devs[]; }; -static int fscrypt_get_num_devices(struct super_block *sb) -{ - if (sb->s_cop->get_num_devices) - return sb->s_cop->get_num_devices(sb); - return 1; -} - -static void fscrypt_get_devices(struct super_block *sb, int num_devs, - struct request_queue **devs) -{ - if (num_devs == 1) - devs[0] = bdev_get_queue(sb->s_bdev); - else - sb->s_cop->get_devices(sb, devs); -} - /* Enable inline encryption for this file if supported. 
*/ -int fscrypt_select_encryption_impl(struct fscrypt_info *ci, - bool is_hw_wrapped_key) +void fscrypt_select_encryption_impl(struct fscrypt_info *ci) { const struct inode *inode = ci->ci_inode; struct super_block *sb = inode->i_sb; - enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; - struct request_queue **devs; - int num_devs; - int i; /* The file must need contents encryption, not filenames encryption */ if (!S_ISREG(inode->i_mode)) - return 0; + return; /* blk-crypto must implement the needed encryption algorithm */ - if (crypto_mode == BLK_ENCRYPTION_MODE_INVALID) - return 0; + if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID) + return; /* The filesystem must be mounted with -o inlinecrypt */ if (!sb->s_cop->inline_crypt_enabled || !sb->s_cop->inline_crypt_enabled(sb)) - return 0; - - /* - * The needed encryption settings must be supported either by - * blk-crypto-fallback, or by hardware on all the filesystem's devices. - */ - - if (IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) && - !is_hw_wrapped_key) { - ci->ci_inlinecrypt = true; - return 0; - } - - num_devs = fscrypt_get_num_devices(sb); - devs = kmalloc_array(num_devs, sizeof(*devs), GFP_NOFS); - if (!devs) - return -ENOMEM; - - fscrypt_get_devices(sb, num_devs, devs); - - for (i = 0; i < num_devs; i++) { - if (!keyslot_manager_crypto_mode_supported(devs[i]->ksm, - crypto_mode, - sb->s_blocksize, - is_hw_wrapped_key)) - goto out_free_devs; - } + return; ci->ci_inlinecrypt = true; -out_free_devs: - kfree(devs); - return 0; } int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, const u8 *raw_key, unsigned int raw_key_size, - bool is_hw_wrapped, const struct fscrypt_info *ci) { const struct inode *inode = ci->ci_inode; struct super_block *sb = inode->i_sb; enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; - int num_devs; + int num_devs = 1; int queue_refs = 0; struct fscrypt_blk_crypto_key *blk_key; int err; int i; - num_devs 
= fscrypt_get_num_devices(sb); + if (sb->s_cop->get_num_devices) + num_devs = sb->s_cop->get_num_devices(sb); if (WARN_ON(num_devs < 1)) return -EINVAL; @@ -122,13 +72,16 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, return -ENOMEM; blk_key->num_devs = num_devs; - fscrypt_get_devices(sb, num_devs, blk_key->devs); + if (num_devs == 1) + blk_key->devs[0] = bdev_get_queue(sb->s_bdev); + else + sb->s_cop->get_devices(sb, blk_key->devs); BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE); err = blk_crypto_init_key(&blk_key->base, raw_key, raw_key_size, - is_hw_wrapped, crypto_mode, sb->s_blocksize); + crypto_mode, sb->s_blocksize); if (err) { fscrypt_err(inode, "error %d initializing blk-crypto key", err); goto fail; @@ -150,7 +103,6 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, queue_refs++; err = blk_crypto_start_using_mode(crypto_mode, sb->s_blocksize, - is_hw_wrapped, blk_key->devs[i]); if (err) { fscrypt_err(inode, diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index 0081fd48e96f..40ea4bc1059d 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -569,7 +569,6 @@ out_put: key_ref_put(ref); return err; } - /* Size of software "secret" derived from hardware-wrapped key */ #define RAW_SECRET_SIZE 32 @@ -617,7 +616,11 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; + BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE < + FSCRYPT_MAX_KEY_SIZE); + memset(&secret, 0, sizeof(secret)); + if (arg.key_id) { if (arg.raw_size != 0) return -EINVAL; @@ -626,7 +629,7 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) goto out_wipe_secret; err = -EINVAL; if (!(arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) && - secret.size > FSCRYPT_MAX_KEY_SIZE) + secret.size > FSCRYPT_MAX_KEY_SIZE) goto out_wipe_secret; } else { if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE || @@ -809,6 
+812,9 @@ static int check_for_busy_inodes(struct super_block *sb, struct list_head *pos; size_t busy_count = 0; unsigned long ino; + struct dentry *dentry; + char _path[256]; + char *path = NULL; spin_lock(&mk->mk_decrypted_inodes_lock); @@ -827,14 +833,22 @@ static int check_for_busy_inodes(struct super_block *sb, struct fscrypt_info, ci_master_key_link)->ci_inode; ino = inode->i_ino; + dentry = d_find_alias(inode); } spin_unlock(&mk->mk_decrypted_inodes_lock); + if (dentry) { + path = dentry_path(dentry, _path, sizeof(_path)); + dput(dentry); + } + if (IS_ERR_OR_NULL(path)) + path = "(unknown)"; + fscrypt_warn(NULL, - "%s: %zu inode(s) still busy after removing key with %s %*phN, including ino %lu", + "%s: %zu inode(s) still busy after removing key with %s %*phN, including ino %lu (%s)", sb->s_id, busy_count, master_key_spec_type(&mk->mk_spec), master_key_spec_len(&mk->mk_spec), (u8 *)&mk->mk_spec.u, - ino); + ino, path); return -EBUSY; } diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index c6ce78afbf8f..b51fc41395e0 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -97,11 +97,8 @@ fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key, * first time a mode is used. 
*/ pr_info("fscrypt: %s using implementation \"%s\"\n", - mode->friendly_name, crypto_skcipher_driver_name(tfm)); - } - if (WARN_ON(crypto_skcipher_ivsize(tfm) != mode->ivsize)) { - err = -EINVAL; - goto err_free_tfm; + mode->friendly_name, + crypto_skcipher_alg(tfm)->base.cra_driver_name); } crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); err = crypto_skcipher_setkey(tfm, raw_key, mode->keysize); @@ -122,15 +119,15 @@ err_free_tfm: */ int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, const u8 *raw_key, unsigned int raw_key_size, - bool is_hw_wrapped, const struct fscrypt_info *ci) + const struct fscrypt_info *ci) { struct crypto_skcipher *tfm; if (fscrypt_using_inline_encryption(ci)) return fscrypt_prepare_inline_crypt_key(prep_key, - raw_key, raw_key_size, is_hw_wrapped, ci); + raw_key, raw_key_size, ci); - if (WARN_ON(is_hw_wrapped || raw_key_size != ci->ci_mode->keysize)) + if (WARN_ON(raw_key_size != ci->ci_mode->keysize)) return -EINVAL; tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode); @@ -151,18 +148,18 @@ void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key) fscrypt_destroy_inline_crypt_key(prep_key); } -/* Given a per-file encryption key, set up the file's crypto transform object */ -int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key) +/* Given the per-file key, set up the file's crypto transform object */ +int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key) { ci->ci_owns_key = true; - return fscrypt_prepare_key(&ci->ci_key, raw_key, ci->ci_mode->keysize, - false /*is_hw_wrapped*/, ci); + return fscrypt_prepare_key(&ci->ci_key, derived_key, + ci->ci_mode->keysize, ci); } -static int setup_per_mode_enc_key(struct fscrypt_info *ci, - struct fscrypt_master_key *mk, - struct fscrypt_prepared_key *keys, - u8 hkdf_context, bool include_fs_uuid) +static int setup_per_mode_key(struct fscrypt_info *ci, + struct fscrypt_master_key *mk, + struct 
fscrypt_prepared_key *keys, + u8 hkdf_context, bool include_fs_uuid) { static DEFINE_MUTEX(mode_key_setup_mutex); const struct inode *inode = ci->ci_inode; @@ -207,7 +204,7 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci, } } err = fscrypt_prepare_key(prep_key, mk->mk_secret.raw, - mk->mk_secret.size, true, ci); + mk->mk_secret.size, ci); if (err) goto out_unlock; } else { @@ -226,7 +223,7 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci, if (err) goto out_unlock; err = fscrypt_prepare_key(prep_key, mode_key, mode->keysize, - false /*is_hw_wrapped*/, ci); + ci); memzero_explicit(mode_key, mode->keysize); if (err) goto out_unlock; @@ -239,24 +236,10 @@ out_unlock: return err; } -int fscrypt_derive_dirhash_key(struct fscrypt_info *ci, - const struct fscrypt_master_key *mk) -{ - int err; - - err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, HKDF_CONTEXT_DIRHASH_KEY, - ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE, - (u8 *)&ci->ci_dirhash_key, - sizeof(ci->ci_dirhash_key)); - if (err) - return err; - ci->ci_dirhash_key_initialized = true; - return 0; -} - static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, struct fscrypt_master_key *mk) { + u8 derived_key[FSCRYPT_MAX_KEY_SIZE]; int err; if (mk->mk_secret.is_hw_wrapped && @@ -268,15 +251,21 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { /* - * DIRECT_KEY: instead of deriving per-file encryption keys, the - * per-file nonce will be included in all the IVs. But unlike - * v1 policies, for v2 policies in this case we don't encrypt - * with the master key directly but rather derive a per-mode - * encryption key. This ensures that the master key is - * consistently used only for HKDF, avoiding key reuse issues. + * DIRECT_KEY: instead of deriving per-file keys, the per-file + * nonce will be included in all the IVs. 
But unlike v1 + * policies, for v2 policies in this case we don't encrypt with + * the master key directly but rather derive a per-mode key. + * This ensures that the master key is consistently used only + * for HKDF, avoiding key reuse issues. */ - err = setup_per_mode_enc_key(ci, mk, mk->mk_direct_keys, - HKDF_CONTEXT_DIRECT_KEY, false); + if (!fscrypt_mode_supports_direct_key(ci->ci_mode)) { + fscrypt_warn(ci->ci_inode, + "Direct key flag not allowed with %s", + ci->ci_mode->friendly_name); + return -EINVAL; + } + return setup_per_mode_key(ci, mk, mk->mk_direct_keys, + HKDF_CONTEXT_DIRECT_KEY, false); } else if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) { /* @@ -285,34 +274,21 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, * the IVs. This format is optimized for use with inline * encryption hardware compliant with the UFS or eMMC standards. */ - err = setup_per_mode_enc_key(ci, mk, mk->mk_iv_ino_lblk_64_keys, - HKDF_CONTEXT_IV_INO_LBLK_64_KEY, - true); - } else { - u8 derived_key[FSCRYPT_MAX_KEY_SIZE]; - - err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, - HKDF_CONTEXT_PER_FILE_ENC_KEY, - ci->ci_nonce, - FS_KEY_DERIVATION_NONCE_SIZE, - derived_key, ci->ci_mode->keysize); - if (err) - return err; - - err = fscrypt_set_per_file_enc_key(ci, derived_key); - memzero_explicit(derived_key, ci->ci_mode->keysize); + return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_keys, + HKDF_CONTEXT_IV_INO_LBLK_64_KEY, + true); } + + err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, + HKDF_CONTEXT_PER_FILE_KEY, + ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE, + derived_key, ci->ci_mode->keysize); if (err) return err; - /* Derive a secret dirhash key for directories that need it. 
*/ - if (S_ISDIR(ci->ci_inode->i_mode) && IS_CASEFOLDED(ci->ci_inode)) { - err = fscrypt_derive_dirhash_key(ci, mk); - if (err) - return err; - } - - return 0; + err = fscrypt_set_derived_key(ci, derived_key); + memzero_explicit(derived_key, ci->ci_mode->keysize); + return err; } /* @@ -333,6 +309,8 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, struct fscrypt_key_specifier mk_spec; int err; + fscrypt_select_encryption_impl(ci); + switch (ci->ci_policy.version) { case FSCRYPT_POLICY_V1: mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR; @@ -357,10 +335,6 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, ci->ci_policy.version != FSCRYPT_POLICY_V1) return PTR_ERR(key); - err = fscrypt_select_encryption_impl(ci, false); - if (err) - return err; - /* * As a legacy fallback for v1 policies, search for the key in * the current task's subscribed keyrings too. Don't move this @@ -395,10 +369,6 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, goto out_release_key; } - err = fscrypt_select_encryption_impl(ci, mk->mk_secret.is_hw_wrapped); - if (err) - goto out_release_key; - switch (ci->ci_policy.version) { case FSCRYPT_POLICY_V1: err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.raw); @@ -505,8 +475,20 @@ int fscrypt_get_encryption_info(struct inode *inode) goto out; } - memcpy(crypt_info->ci_nonce, fscrypt_context_nonce(&ctx), - FS_KEY_DERIVATION_NONCE_SIZE); + switch (ctx.version) { + case FSCRYPT_CONTEXT_V1: + memcpy(crypt_info->ci_nonce, ctx.v1.nonce, + FS_KEY_DERIVATION_NONCE_SIZE); + break; + case FSCRYPT_CONTEXT_V2: + memcpy(crypt_info->ci_nonce, ctx.v2.nonce, + FS_KEY_DERIVATION_NONCE_SIZE); + break; + default: + WARN_ON(1); + res = -EINVAL; + goto out; + } if (!fscrypt_supported_policy(&crypt_info->ci_policy, inode)) { res = -EINVAL; @@ -606,15 +588,6 @@ int fscrypt_drop_inode(struct inode *inode) return 0; mk = ci->ci_master_key->payload.data[0]; - /* - * With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all 
inodes - * protected by the key were cleaned by sync_filesystem(). But if - * userspace is still using the files, inodes can be dirtied between - * then and now. We mustn't lose any writes, so skip dirty inodes here. - */ - if (inode->i_state & I_DIRTY_ALL) - return 0; - /* * Note: since we aren't holding ->mk_secret_sem, the result here can * immediately become outdated. But there's no correctness problem with diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index 3f7bb48f7317..47591c54dc3d 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -9,7 +9,7 @@ * This file implements compatibility functions for the original encryption * policy version ("v1"), including: * - * - Deriving per-file encryption keys using the AES-128-ECB based KDF + * - Deriving per-file keys using the AES-128-ECB based KDF * (rather than the new method of using HKDF-SHA512) * * - Retrieving fscrypt master keys from process-subscribed keyrings @@ -234,7 +234,7 @@ fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key) refcount_set(&dk->dk_refcount, 1); dk->dk_mode = ci->ci_mode; err = fscrypt_prepare_key(&dk->dk_key, raw_key, ci->ci_mode->keysize, - false /*is_hw_wrapped*/, ci); + ci); if (err) goto err_free_dk; memcpy(dk->dk_descriptor, ci->ci_policy.v1.master_key_descriptor, @@ -252,8 +252,23 @@ err_free_dk: static int setup_v1_file_key_direct(struct fscrypt_info *ci, const u8 *raw_master_key) { + const struct fscrypt_mode *mode = ci->ci_mode; struct fscrypt_direct_key *dk; + if (!fscrypt_mode_supports_direct_key(mode)) { + fscrypt_warn(ci->ci_inode, + "Direct key mode not allowed with %s", + mode->friendly_name); + return -EINVAL; + } + + if (ci->ci_policy.v1.contents_encryption_mode != + ci->ci_policy.v1.filenames_encryption_mode) { + fscrypt_warn(ci->ci_inode, + "Direct key mode not allowed with different contents and filenames modes"); + return -EINVAL; + } + dk = fscrypt_get_direct_key(ci, raw_master_key); if (IS_ERR(dk)) return 
PTR_ERR(dk); @@ -282,7 +297,7 @@ static int setup_v1_file_key_derived(struct fscrypt_info *ci, if (err) goto out; - err = fscrypt_set_per_file_enc_key(ci, derived_key); + err = fscrypt_set_derived_key(ci, derived_key); out: kzfree(derived_key); return err; diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 10ccf945020c..96f528071bed 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -29,43 +29,6 @@ bool fscrypt_policies_equal(const union fscrypt_policy *policy1, return !memcmp(policy1, policy2, fscrypt_policy_size(policy1)); } -static bool fscrypt_valid_enc_modes(u32 contents_mode, u32 filenames_mode) -{ - if (contents_mode == FSCRYPT_MODE_AES_256_XTS && - filenames_mode == FSCRYPT_MODE_AES_256_CTS) - return true; - - if (contents_mode == FSCRYPT_MODE_AES_128_CBC && - filenames_mode == FSCRYPT_MODE_AES_128_CTS) - return true; - - if (contents_mode == FSCRYPT_MODE_ADIANTUM && - filenames_mode == FSCRYPT_MODE_ADIANTUM) - return true; - - return false; -} - -static bool supported_direct_key_modes(const struct inode *inode, - u32 contents_mode, u32 filenames_mode) -{ - const struct fscrypt_mode *mode; - - if (contents_mode != filenames_mode) { - fscrypt_warn(inode, - "Direct key flag not allowed with different contents and filenames modes"); - return false; - } - mode = &fscrypt_modes[contents_mode]; - - if (mode->ivsize < offsetofend(union fscrypt_iv, nonce)) { - fscrypt_warn(inode, "Direct key flag not allowed with %s", - mode->friendly_name); - return false; - } - return true; -} - static bool supported_iv_ino_lblk_64_policy( const struct fscrypt_policy_v2 *policy, const struct inode *inode) @@ -100,82 +63,13 @@ static bool supported_iv_ino_lblk_64_policy( return true; } -static bool fscrypt_supported_v1_policy(const struct fscrypt_policy_v1 *policy, - const struct inode *inode) -{ - if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, - policy->filenames_encryption_mode)) { - fscrypt_warn(inode, - "Unsupported encryption modes (contents 
%d, filenames %d)", - policy->contents_encryption_mode, - policy->filenames_encryption_mode); - return false; - } - - if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK | - FSCRYPT_POLICY_FLAG_DIRECT_KEY)) { - fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)", - policy->flags); - return false; - } - - if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) && - !supported_direct_key_modes(inode, policy->contents_encryption_mode, - policy->filenames_encryption_mode)) - return false; - - if (IS_CASEFOLDED(inode)) { - /* With v1, there's no way to derive dirhash keys. */ - fscrypt_warn(inode, - "v1 policies can't be used on casefolded directories"); - return false; - } - - return true; -} - -static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy, - const struct inode *inode) -{ - if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, - policy->filenames_encryption_mode)) { - fscrypt_warn(inode, - "Unsupported encryption modes (contents %d, filenames %d)", - policy->contents_encryption_mode, - policy->filenames_encryption_mode); - return false; - } - - if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) { - fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)", - policy->flags); - return false; - } - - if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) && - !supported_direct_key_modes(inode, policy->contents_encryption_mode, - policy->filenames_encryption_mode)) - return false; - - if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) && - !supported_iv_ino_lblk_64_policy(policy, inode)) - return false; - - if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) { - fscrypt_warn(inode, "Reserved bits set in encryption policy"); - return false; - } - - return true; -} - /** * fscrypt_supported_policy - check whether an encryption policy is supported * * Given an encryption policy, check whether all its encryption modes and other - * settings are supported by this kernel on the given inode. 
(But we don't - * currently don't check for crypto API support here, so attempting to use an - * algorithm not configured into the crypto API will still fail later.) + * settings are supported by this kernel. (But we don't currently don't check + * for crypto API support here, so attempting to use an algorithm not configured + * into the crypto API will still fail later.) * * Return: %true if supported, else %false */ @@ -183,10 +77,60 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u, const struct inode *inode) { switch (policy_u->version) { - case FSCRYPT_POLICY_V1: - return fscrypt_supported_v1_policy(&policy_u->v1, inode); - case FSCRYPT_POLICY_V2: - return fscrypt_supported_v2_policy(&policy_u->v2, inode); + case FSCRYPT_POLICY_V1: { + const struct fscrypt_policy_v1 *policy = &policy_u->v1; + + if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, + policy->filenames_encryption_mode)) { + fscrypt_warn(inode, + "Unsupported encryption modes (contents %d, filenames %d)", + policy->contents_encryption_mode, + policy->filenames_encryption_mode); + return false; + } + + if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK | + FSCRYPT_POLICY_FLAG_DIRECT_KEY)) { + fscrypt_warn(inode, + "Unsupported encryption flags (0x%02x)", + policy->flags); + return false; + } + + return true; + } + case FSCRYPT_POLICY_V2: { + const struct fscrypt_policy_v2 *policy = &policy_u->v2; + + if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, + policy->filenames_encryption_mode)) { + fscrypt_warn(inode, + "Unsupported encryption modes (contents %d, filenames %d)", + policy->contents_encryption_mode, + policy->filenames_encryption_mode); + return false; + } + + if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) { + fscrypt_warn(inode, + "Unsupported encryption flags (0x%02x)", + policy->flags); + return false; + } + + if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) && + !supported_iv_ino_lblk_64_policy(policy, inode)) + return false; + + if 
(memchr_inv(policy->__reserved, 0, + sizeof(policy->__reserved))) { + fscrypt_warn(inode, + "Reserved bits set in encryption policy"); + return false; + } + + return true; + } } return false; } @@ -258,7 +202,7 @@ int fscrypt_policy_from_context(union fscrypt_policy *policy_u, { memset(policy_u, 0, sizeof(*policy_u)); - if (!fscrypt_context_is_valid(ctx_u, ctx_size)) + if (ctx_size <= 0 || ctx_size != fscrypt_context_size(ctx_u)) return -EINVAL; switch (ctx_u->version) { @@ -481,25 +425,6 @@ int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *uarg) } EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_policy_ex); -/* FS_IOC_GET_ENCRYPTION_NONCE: retrieve file's encryption nonce for testing */ -int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg) -{ - struct inode *inode = file_inode(filp); - union fscrypt_context ctx; - int ret; - - ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); - if (ret < 0) - return ret; - if (!fscrypt_context_is_valid(&ctx, ret)) - return -EINVAL; - if (copy_to_user(arg, fscrypt_context_nonce(&ctx), - FS_KEY_DERIVATION_NONCE_SIZE)) - return -EFAULT; - return 0; -} -EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_nonce); - /** * fscrypt_has_permitted_context() - is a file's encryption policy permitted * within its directory? diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 332ebb130fee..ac2a73c00bfa 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -37,7 +37,6 @@ config EXT4_FS select CRC16 select CRYPTO select CRYPTO_CRC32C - select FS_ENCRYPTION_ALGS if FS_ENCRYPTION help This is the next generation of the ext3 filesystem. 
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index ee766e3bed8b..e4d13c6ac931 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -117,7 +117,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) if (IS_ENCRYPTED(inode)) { err = fscrypt_get_encryption_info(inode); - if (err) + if (err && err != -ENOKEY) return err; } @@ -664,3 +664,10 @@ const struct file_operations ext4_dir_operations = { .open = ext4_dir_open, .release = ext4_release_dir, }; + +#ifdef CONFIG_UNICODE +const struct dentry_operations ext4_dentry_ops = { + .d_hash = generic_ci_d_hash, + .d_compare = generic_ci_d_compare, +}; +#endif diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 3e5ca2107998..e7c7a6737a46 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -1100,11 +1100,6 @@ resizefs_out: return -EOPNOTSUPP; return fscrypt_ioctl_get_key_status(filp, (void __user *)arg); - case FS_IOC_GET_ENCRYPTION_NONCE: - if (!ext4_has_feature_encrypt(sb)) - return -EOPNOTSUPP; - return fscrypt_ioctl_get_nonce(filp, (void __user *)arg); - case EXT4_IOC_FSGETXATTR: { struct fsxattr fa; @@ -1248,7 +1243,6 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case FS_IOC_REMOVE_ENCRYPTION_KEY: case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: case FS_IOC_GET_ENCRYPTION_KEY_STATUS: - case FS_IOC_GET_ENCRYPTION_NONCE: case EXT4_IOC_SHUTDOWN: case FS_IOC_GETFSMAP: case FS_IOC_ENABLE_VERITY: diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index b134add5a5ad..0e6a7cb9e9cf 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1608,7 +1608,6 @@ static struct buffer_head *ext4_lookup_entry(struct inode *dir, struct buffer_head *bh; err = ext4_fname_prepare_lookup(dir, dentry, &fname); - generic_set_encrypted_ci_d_ops(dir, dentry); if (err == -ENOENT) return NULL; if (err) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 8731f6935136..8f6ee92e51db 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4492,6 +4492,11 @@ no_journal: goto failed_mount4; } +#ifdef CONFIG_UNICODE 
+ if (sb->s_encoding) + sb->s_d_op = &ext4_dentry_ops; +#endif + sb->s_root = d_make_root(root); if (!sb->s_root) { ext4_msg(sb, KERN_ERR, "get root dentry failed"); diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig index 1940a6574b66..675af7cd29d3 100644 --- a/fs/f2fs/Kconfig +++ b/fs/f2fs/Kconfig @@ -5,7 +5,6 @@ config F2FS_FS select CRYPTO select CRYPTO_CRC32 select F2FS_FS_XATTR if FS_ENCRYPTION - select FS_ENCRYPTION_ALGS if FS_ENCRYPTION help F2FS is based on Log-structured File System (LFS), which supports versatile "flash-friendly" features. The design has been focused on diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 7fd0b08d7518..0898fff69259 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -108,52 +108,34 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir, * Test whether a case-insensitive directory entry matches the filename * being searched for. * - * Only called for encrypted names if the key is available. - * * Returns: 0 if the directory entry matches, more than 0 if it * doesn't match or less than zero on error. 
*/ -static int f2fs_ci_compare(const struct inode *parent, const struct qstr *name, - u8 *de_name, size_t de_name_len, bool quick) +int f2fs_ci_compare(const struct inode *parent, const struct qstr *name, + const struct qstr *entry, bool quick) { const struct super_block *sb = parent->i_sb; const struct unicode_map *um = sb->s_encoding; - struct fscrypt_str decrypted_name = FSTR_INIT(NULL, de_name_len); - struct qstr entry = QSTR_INIT(de_name, de_name_len); int ret; - if (IS_ENCRYPTED(parent)) { - const struct fscrypt_str encrypted_name = - FSTR_INIT(de_name, de_name_len); - - decrypted_name.name = kmalloc(de_name_len, GFP_KERNEL); - if (!decrypted_name.name) - return -ENOMEM; - ret = fscrypt_fname_disk_to_usr(parent, 0, 0, &encrypted_name, - &decrypted_name); - if (ret < 0) - goto out; - entry.name = decrypted_name.name; - entry.len = decrypted_name.len; - } - if (quick) - ret = utf8_strncasecmp_folded(um, name, &entry); + ret = utf8_strncasecmp_folded(um, name, entry); else - ret = utf8_strncasecmp(um, name, &entry); + ret = utf8_strncasecmp(um, name, entry); + if (ret < 0) { /* Handle invalid character sequence as either an error * or as an opaque byte sequence. 
*/ if (sb_has_enc_strict_mode(sb)) - ret = -EINVAL; - else if (name->len != entry.len) - ret = 1; - else - ret = !!memcmp(name->name, entry.name, entry.len); + return -EINVAL; + + if (name->len != entry->len) + return 1; + + return !!memcmp(name->name, entry->name, name->len); } -out: - kfree(decrypted_name.name); + return ret; } @@ -191,24 +173,24 @@ static inline bool f2fs_match_name(struct f2fs_dentry_ptr *d, { #ifdef CONFIG_UNICODE struct inode *parent = d->inode; - u8 *name; - int len; + struct super_block *sb = parent->i_sb; + struct qstr entry; #endif if (de->hash_code != namehash) return false; #ifdef CONFIG_UNICODE - name = d->filename[bit_pos]; - len = le16_to_cpu(de->name_len); + entry.name = d->filename[bit_pos]; + entry.len = de->name_len; - if (needs_casefold(parent)) { + if (sb->s_encoding && IS_CASEFOLDED(parent)) { if (cf_str->name) { struct qstr cf = {.name = cf_str->name, .len = cf_str->len}; - return !f2fs_ci_compare(parent, &cf, name, len, true); + return !f2fs_ci_compare(parent, &cf, &entry, true); } - return !f2fs_ci_compare(parent, fname->usr_fname, name, len, + return !f2fs_ci_compare(parent, fname->usr_fname, &entry, false); } #endif @@ -632,13 +614,13 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, const struct qstr *orig_name, - f2fs_hash_t dentry_hash, struct inode *inode, nid_t ino, umode_t mode) { unsigned int bit_pos; unsigned int level; unsigned int current_depth; unsigned long bidx, block; + f2fs_hash_t dentry_hash; unsigned int nbucket, nblock; struct page *dentry_page = NULL; struct f2fs_dentry_block *dentry_blk = NULL; @@ -648,6 +630,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, level = 0; slots = GET_DENTRY_SLOTS(new_name->len); + dentry_hash = f2fs_dentry_hash(dir, new_name, NULL); current_depth = F2FS_I(dir)->i_current_depth; if (F2FS_I(dir)->chash == dentry_hash) { @@ -733,19 +716,17 @@ 
int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname, struct inode *inode, nid_t ino, umode_t mode) { struct qstr new_name; - f2fs_hash_t dentry_hash; int err = -EAGAIN; new_name.name = fname_name(fname); new_name.len = fname_len(fname); if (f2fs_has_inline_dentry(dir)) - err = f2fs_add_inline_entry(dir, &new_name, fname, + err = f2fs_add_inline_entry(dir, &new_name, fname->usr_fname, inode, ino, mode); - dentry_hash = f2fs_dentry_hash(dir, &new_name, fname); if (err == -EAGAIN) err = f2fs_add_regular_entry(dir, &new_name, fname->usr_fname, - dentry_hash, inode, ino, mode); + inode, ino, mode); f2fs_update_time(F2FS_I_SB(dir), REQ_TIME); return err; @@ -1018,7 +999,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) if (IS_ENCRYPTED(inode)) { err = fscrypt_get_encryption_info(inode); - if (err) + if (err && err != -ENOKEY) goto out; err = fscrypt_fname_alloc_buffer(inode, F2FS_NAME_LEN, &fstr); @@ -1094,3 +1075,10 @@ const struct file_operations f2fs_dir_operations = { .compat_ioctl = f2fs_compat_ioctl, #endif }; + +#ifdef CONFIG_UNICODE +const struct dentry_operations f2fs_dentry_ops = { + .d_hash = generic_ci_d_hash, + .d_compare = generic_ci_d_compare, +}; +#endif diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index a3529e3e7286..4a365cf7f068 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -3137,6 +3137,11 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, bool hot, bool set); struct dentry *f2fs_get_parent(struct dentry *child); +extern int f2fs_ci_compare(const struct inode *parent, + const struct qstr *name, + const struct qstr *entry, + bool quick); + /* * dir.c */ @@ -3170,7 +3175,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, const struct qstr *name, f2fs_hash_t name_hash, unsigned int bit_pos); int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, - const struct qstr *orig_name, f2fs_hash_t dentry_hash, + const struct qstr *orig_name, 
struct inode *inode, nid_t ino, umode_t mode); int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname, struct inode *inode, nid_t ino, umode_t mode); @@ -3203,7 +3208,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); * hash.c */ f2fs_hash_t f2fs_dentry_hash(const struct inode *dir, - const struct qstr *name_info, const struct fscrypt_name *fname); + const struct qstr *name_info, struct fscrypt_name *fname); /* * node.c @@ -3683,6 +3688,9 @@ static inline void update_sit_info(struct f2fs_sb_info *sbi) {} #endif extern const struct file_operations f2fs_dir_operations; +#ifdef CONFIG_UNICODE +extern const struct dentry_operations f2fs_dentry_ops; +#endif extern const struct file_operations f2fs_file_operations; extern const struct inode_operations f2fs_file_inode_operations; extern const struct address_space_operations f2fs_dblock_aops; @@ -3713,7 +3721,7 @@ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, struct page *ipage); int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, - const struct fscrypt_name *fname, + const struct qstr *orig_name, struct inode *inode, nid_t ino, umode_t mode); void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page, struct inode *dir, diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 311a36cba330..b0c432c0fbb2 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -2444,14 +2444,6 @@ static int f2fs_ioc_get_encryption_key_status(struct file *filp, return fscrypt_ioctl_get_key_status(filp, (void __user *)arg); } -static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg) -{ - if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) - return -EOPNOTSUPP; - - return fscrypt_ioctl_get_nonce(filp, (void __user *)arg); -} - static int f2fs_ioc_gc(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); @@ -3419,8 +3411,6 @@ long f2fs_ioctl(struct file 
*filp, unsigned int cmd, unsigned long arg) return f2fs_ioc_remove_encryption_key_all_users(filp, arg); case FS_IOC_GET_ENCRYPTION_KEY_STATUS: return f2fs_ioc_get_encryption_key_status(filp, arg); - case FS_IOC_GET_ENCRYPTION_NONCE: - return f2fs_ioc_get_encryption_nonce(filp, arg); case F2FS_IOC_GARBAGE_COLLECT: return f2fs_ioc_gc(filp, arg); case F2FS_IOC_GARBAGE_COLLECT_RANGE: @@ -3600,7 +3590,6 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case FS_IOC_REMOVE_ENCRYPTION_KEY: case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: case FS_IOC_GET_ENCRYPTION_KEY_STATUS: - case FS_IOC_GET_ENCRYPTION_NONCE: case F2FS_IOC_GARBAGE_COLLECT: case F2FS_IOC_GARBAGE_COLLECT_RANGE: case F2FS_IOC_WRITE_CHECKPOINT: diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c index 8f7ee4362312..28acb24e7a7a 100644 --- a/fs/f2fs/hash.c +++ b/fs/f2fs/hash.c @@ -68,9 +68,8 @@ static void str2hashbuf(const unsigned char *msg, size_t len, *buf++ = pad; } -static f2fs_hash_t __f2fs_dentry_hash(const struct inode *dir, - const struct qstr *name_info, - const struct fscrypt_name *fname) +static f2fs_hash_t __f2fs_dentry_hash(const struct qstr *name_info, + struct fscrypt_name *fname) { __u32 hash; f2fs_hash_t f2fs_hash; @@ -80,17 +79,12 @@ static f2fs_hash_t __f2fs_dentry_hash(const struct inode *dir, size_t len = name_info->len; /* encrypted bigname case */ - if (fname && fname->is_ciphertext_name) + if (fname && !fname->disk_name.name) return cpu_to_le32(fname->hash); if (is_dot_dotdot(name_info)) return 0; - if (IS_CASEFOLDED(dir) && IS_ENCRYPTED(dir)) { - f2fs_hash = cpu_to_le32(fscrypt_fname_siphash(dir, name_info)); - return f2fs_hash; - } - /* Initialize the default seed for the hash checksum functions */ buf[0] = 0x67452301; buf[1] = 0xefcdab89; @@ -112,7 +106,7 @@ static f2fs_hash_t __f2fs_dentry_hash(const struct inode *dir, } f2fs_hash_t f2fs_dentry_hash(const struct inode *dir, - const struct qstr *name_info, const struct fscrypt_name *fname) + const struct qstr 
*name_info, struct fscrypt_name *fname) { #ifdef CONFIG_UNICODE struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); @@ -120,30 +114,27 @@ f2fs_hash_t f2fs_dentry_hash(const struct inode *dir, int r, dlen; unsigned char *buff; struct qstr folded; - const struct qstr *name = fname ? fname->usr_fname : name_info; if (!name_info->len || !IS_CASEFOLDED(dir)) goto opaque_seq; - if (IS_ENCRYPTED(dir) && !fscrypt_has_encryption_key(dir)) - goto opaque_seq; - buff = f2fs_kzalloc(sbi, sizeof(char) * PATH_MAX, GFP_KERNEL); if (!buff) return -ENOMEM; - dlen = utf8_casefold(um, name, buff, PATH_MAX); + + dlen = utf8_casefold(um, name_info, buff, PATH_MAX); if (dlen < 0) { kvfree(buff); goto opaque_seq; } folded.name = buff; folded.len = dlen; - r = __f2fs_dentry_hash(dir, &folded, fname); + r = __f2fs_dentry_hash(&folded, fname); kvfree(buff); return r; opaque_seq: #endif - return __f2fs_dentry_hash(dir, name_info, fname); + return __f2fs_dentry_hash(name_info, fname); } diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index b01e0ac34f8c..f8c0f6eec6ae 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -482,8 +482,8 @@ static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry) ino = le32_to_cpu(de->ino); fake_mode = f2fs_get_de_type(de) << S_SHIFT; - err = f2fs_add_regular_entry(dir, &new_name, NULL, - de->hash_code, NULL, ino, fake_mode); + err = f2fs_add_regular_entry(dir, &new_name, NULL, NULL, + ino, fake_mode); if (err) goto punch_dentry_pages; @@ -595,7 +595,7 @@ out: } int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, - const struct fscrypt_name *fname, + const struct qstr *orig_name, struct inode *inode, nid_t ino, umode_t mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); @@ -606,7 +606,6 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, struct f2fs_dentry_ptr d; int slots = GET_DENTRY_SLOTS(new_name->len); struct page *page = NULL; - const struct qstr *orig_name = fname->usr_fname; int err = 0; ipage = 
f2fs_get_node_page(sbi, dir->i_ino); @@ -637,7 +636,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, f2fs_wait_on_page_writeback(ipage, NODE, true, true); - name_hash = f2fs_dentry_hash(dir, new_name, fname); + name_hash = f2fs_dentry_hash(dir, new_name, NULL); f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos); set_page_dirty(ipage); diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index a8959c64bf3a..23d6cccdb4c1 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -492,7 +492,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, } err = fscrypt_prepare_lookup(dir, dentry, &fname); - generic_set_encrypted_ci_d_ops(dir, dentry); if (err == -ENOENT) goto out_splice; if (err) diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index fa0a4ae4cf96..de737389ba94 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -3331,6 +3331,12 @@ static int f2fs_setup_casefold(struct f2fs_sb_info *sbi) struct unicode_map *encoding; __u16 encoding_flags; + if (f2fs_sb_has_encrypt(sbi)) { + f2fs_err(sbi, + "Can't mount with encoding and encryption"); + return -EINVAL; + } + if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info, &encoding_flags)) { f2fs_err(sbi, @@ -3353,6 +3359,7 @@ static int f2fs_setup_casefold(struct f2fs_sb_info *sbi) sbi->sb->s_encoding = encoding; sbi->sb->s_encoding_flags = encoding_flags; + sbi->sb->s_d_op = &f2fs_dentry_ops; } #else if (f2fs_sb_has_casefold(sbi)) { diff --git a/fs/inode.c b/fs/inode.c index 8c25e0df7a0e..4e30a37ef712 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -2167,7 +2166,7 @@ int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags, !capable(CAP_LINUX_IMMUTABLE)) return -EPERM; - return fscrypt_prepare_setflags(inode, oldflags, flags); + return 0; } EXPORT_SYMBOL(vfs_ioc_setflags_prepare); diff --git a/fs/libfs.c b/fs/libfs.c index 4f2ac9ac0c9a..f66eb521d4f8 100644 --- 
a/fs/libfs.c +++ b/fs/libfs.c @@ -1281,54 +1281,4 @@ err: return ret; } EXPORT_SYMBOL(generic_ci_d_hash); - -static const struct dentry_operations generic_ci_dentry_ops = { - .d_hash = generic_ci_d_hash, - .d_compare = generic_ci_d_compare, -}; #endif - -#ifdef CONFIG_FS_ENCRYPTION -static const struct dentry_operations generic_encrypted_dentry_ops = { - .d_revalidate = fscrypt_d_revalidate, -}; -#endif - -#if IS_ENABLED(CONFIG_UNICODE) && IS_ENABLED(CONFIG_FS_ENCRYPTION) -static const struct dentry_operations generic_encrypted_ci_dentry_ops = { - .d_hash = generic_ci_d_hash, - .d_compare = generic_ci_d_compare, - .d_revalidate = fscrypt_d_revalidate, -}; -#endif - -/** - * generic_set_encrypted_ci_d_ops - helper for setting d_ops for given dentry - * @dir: parent of dentry whose ops to set - * @dentry: detnry to set ops on - * - * This function sets the dentry ops for the given dentry to handle both - * casefolding and encryption of the dentry name. - */ -void generic_set_encrypted_ci_d_ops(struct inode *dir, struct dentry *dentry) -{ -#ifdef CONFIG_FS_ENCRYPTION - if (dentry->d_flags & DCACHE_ENCRYPTED_NAME) { -#ifdef CONFIG_UNICODE - if (dir->i_sb->s_encoding) { - d_set_d_op(dentry, &generic_encrypted_ci_dentry_ops); - return; - } -#endif - d_set_d_op(dentry, &generic_encrypted_dentry_ops); - return; - } -#endif -#ifdef CONFIG_UNICODE - if (dir->i_sb->s_encoding) { - d_set_d_op(dentry, &generic_ci_dentry_ops); - return; - } -#endif -} -EXPORT_SYMBOL(generic_set_encrypted_ci_d_ops); diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig index fe221d7d99d6..dfc6fdf019d7 100644 --- a/fs/ubifs/Kconfig +++ b/fs/ubifs/Kconfig @@ -7,7 +7,6 @@ config UBIFS_FS select CRYPTO if UBIFS_FS_ZLIB select CRYPTO_LZO if UBIFS_FS_LZO select CRYPTO_DEFLATE if UBIFS_FS_ZLIB - select FS_ENCRYPTION_ALGS if FS_ENCRYPTION depends on MTD_UBI help UBIFS is a file system for flash devices which works on top of UBI. 
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 7d5c2cf95353..26ac11d0eb4b 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -208,7 +208,6 @@ static int dbg_check_name(const struct ubifs_info *c, return 0; } -static void ubifs_set_d_ops(struct inode *dir, struct dentry *dentry); static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { @@ -222,7 +221,6 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry, dbg_gen("'%pd' in dir ino %lu", dentry, dir->i_ino); err = fscrypt_prepare_lookup(dir, dentry, &nm); - ubifs_set_d_ops(dir, dentry); if (err == -ENOENT) return d_splice_alias(NULL, dentry); if (err) @@ -539,7 +537,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx) if (encrypted) { err = fscrypt_get_encryption_info(dir); - if (err) + if (err && err != -ENOKEY) return err; err = fscrypt_fname_alloc_buffer(dir, UBIFS_MAX_NLEN, &fstr); @@ -1686,19 +1684,3 @@ const struct file_operations ubifs_dir_operations = { .compat_ioctl = ubifs_compat_ioctl, #endif }; - -#ifdef CONFIG_FS_ENCRYPTION -static const struct dentry_operations ubifs_encrypted_dentry_ops = { - .d_revalidate = fscrypt_d_revalidate, -}; -#endif - -static void ubifs_set_d_ops(struct inode *dir, struct dentry *dentry) -{ -#ifdef CONFIG_FS_ENCRYPTION - if (dentry->d_flags & DCACHE_ENCRYPTED_NAME) { - d_set_d_op(dentry, &ubifs_encrypted_dentry_ops); - return; - } -#endif -} diff --git a/include/linux/bio-crypt-ctx.h b/include/linux/bio-crypt-ctx.h index d10c5ad5e07e..12b46ece9c55 100644 --- a/include/linux/bio-crypt-ctx.h +++ b/include/linux/bio-crypt-ctx.h @@ -33,8 +33,6 @@ enum blk_crypto_mode_num { * @data_unit_size_bits: log2 of data_unit_size * @size: size of this key in bytes (determined by @crypto_mode) * @hash: hash of this key, for keyslot manager use only - * @is_hw_wrapped: @raw points to a wrapped key to be used by an inline - * encryption hardware that accepts wrapped keys. 
* @raw: the raw bytes of this key. Only the first @size bytes are used. * * A blk_crypto_key is immutable once created, and many bios can reference it at @@ -46,7 +44,6 @@ struct blk_crypto_key { unsigned int data_unit_size_bits; unsigned int size; unsigned int hash; - bool is_hw_wrapped; u8 raw[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE]; }; diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h index 7dc478a8c3ed..485cee0b92dd 100644 --- a/include/linux/blk-crypto.h +++ b/include/linux/blk-crypto.h @@ -18,15 +18,9 @@ bool blk_crypto_endio(struct bio *bio); int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key, unsigned int raw_key_size, - bool is_hw_wrapped, enum blk_crypto_mode_num crypto_mode, unsigned int data_unit_size); -int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode, - unsigned int data_unit_size, - bool is_hw_wrapped_key, - struct request_queue *q); - int blk_crypto_evict_key(struct request_queue *q, const struct blk_crypto_key *key); @@ -46,10 +40,22 @@ static inline bool blk_crypto_endio(struct bio *bio) #ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK +int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num, + unsigned int data_unit_size, + struct request_queue *q); + int blk_crypto_fallback_init(void); #else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */ +static inline int +blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num, + unsigned int data_unit_size, + struct request_queue *q) +{ + return 0; +} + static inline int blk_crypto_fallback_init(void) { return 0; diff --git a/include/linux/fs.h b/include/linux/fs.h index ae71a1faca40..1d8a53a6211a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3217,8 +3217,6 @@ static inline bool needs_casefold(const struct inode *dir) return 0; } #endif -extern void generic_set_encrypted_ci_d_ops(struct inode *dir, - struct dentry *dentry); #ifdef CONFIG_MIGRATION extern int buffer_migrate_page(struct address_space *, diff --git 
a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 9f791a4b4ad3..3a2971075432 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -77,21 +77,6 @@ static inline bool fscrypt_has_encryption_key(const struct inode *inode) return READ_ONCE(inode->i_crypt_info) != NULL; } -/** - * fscrypt_needs_contents_encryption() - check whether an inode needs - * contents encryption - * - * Return: %true iff the inode is an encrypted regular file and the kernel was - * built with fscrypt support. - * - * If you need to know whether the encrypt bit is set even when the kernel was - * built without fscrypt support, you must use IS_ENCRYPTED() directly instead. - */ -static inline bool fscrypt_needs_contents_encryption(const struct inode *inode) -{ - return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); -} - static inline bool fscrypt_dummy_context_enabled(struct inode *inode) { return inode->i_sb->s_cop->dummy_context && @@ -139,13 +124,11 @@ static inline struct page *fscrypt_pagecache_page(struct page *bounce_page) } extern void fscrypt_free_bounce_page(struct page *bounce_page); -extern int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags); /* policy.c */ extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); extern int fscrypt_ioctl_get_policy(struct file *, void __user *); extern int fscrypt_ioctl_get_policy_ex(struct file *, void __user *); -extern int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg); extern int fscrypt_has_permitted_context(struct inode *, struct inode *); extern int fscrypt_inherit_context(struct inode *, struct inode *, void *, bool); @@ -177,14 +160,82 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname) extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, struct fscrypt_str *); extern void fscrypt_fname_free_buffer(struct fscrypt_str *); -extern int fscrypt_fname_disk_to_usr(const struct inode *inode, - u32 hash, u32 minor_hash, - const struct fscrypt_str 
*iname, - struct fscrypt_str *oname); -extern bool fscrypt_match_name(const struct fscrypt_name *fname, - const u8 *de_name, u32 de_name_len); -extern u64 fscrypt_fname_siphash(const struct inode *dir, - const struct qstr *name); +extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, + const struct fscrypt_str *, struct fscrypt_str *); + +#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32 + +/* Extracts the second-to-last ciphertext block; see explanation below */ +#define FSCRYPT_FNAME_DIGEST(name, len) \ + ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \ + FS_CRYPTO_BLOCK_SIZE)) + +#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE + +/** + * fscrypt_digested_name - alternate identifier for an on-disk filename + * + * When userspace lists an encrypted directory without access to the key, + * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE + * bytes are shown in this abbreviated form (base64-encoded) rather than as the + * full ciphertext (base64-encoded). This is necessary to allow supporting + * filenames up to NAME_MAX bytes, since base64 encoding expands the length. + * + * To make it possible for filesystems to still find the correct directory entry + * despite not knowing the full on-disk name, we encode any filesystem-specific + * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups, + * followed by the second-to-last ciphertext block of the filename. Due to the + * use of the CBC-CTS encryption mode, the second-to-last ciphertext block + * depends on the full plaintext. (Note that ciphertext stealing causes the + * last two blocks to appear "flipped".) This makes accidental collisions very + * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they + * share the same filesystem-specific hashes. 
+ * + * However, this scheme isn't immune to intentional collisions, which can be + * created by anyone able to create arbitrary plaintext filenames and view them + * without the key. Making the "digest" be a real cryptographic hash like + * SHA-256 over the full ciphertext would prevent this, although it would be + * less efficient and harder to implement, especially since the filesystem would + * need to calculate it for each directory entry examined during a search. + */ +struct fscrypt_digested_name { + u32 hash; + u32 minor_hash; + u8 digest[FSCRYPT_FNAME_DIGEST_SIZE]; +}; + +/** + * fscrypt_match_name() - test whether the given name matches a directory entry + * @fname: the name being searched for + * @de_name: the name from the directory entry + * @de_name_len: the length of @de_name in bytes + * + * Normally @fname->disk_name will be set, and in that case we simply compare + * that to the name stored in the directory entry. The only exception is that + * if we don't have the key for an encrypted directory and a filename in it is + * very long, then we won't have the full disk_name and we'll instead need to + * match against the fscrypt_digested_name. + * + * Return: %true if the name matches, otherwise %false. 
+ */ +static inline bool fscrypt_match_name(const struct fscrypt_name *fname, + const u8 *de_name, u32 de_name_len) +{ + if (unlikely(!fname->disk_name.name)) { + const struct fscrypt_digested_name *n = + (const void *)fname->crypto_buf.name; + if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_')) + return false; + if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) + return false; + return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len), + n->digest, FSCRYPT_FNAME_DIGEST_SIZE); + } + + if (de_name_len != fname->disk_name.len) + return false; + return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); +} /* bio.c */ extern void fscrypt_decrypt_bio(struct bio *); @@ -202,8 +253,6 @@ extern int __fscrypt_prepare_rename(struct inode *old_dir, unsigned int flags); extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, struct fscrypt_name *fname); -extern int fscrypt_prepare_setflags(struct inode *inode, - unsigned int oldflags, unsigned int flags); extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, unsigned int max_len, struct fscrypt_str *disk_link); @@ -220,11 +269,6 @@ static inline bool fscrypt_has_encryption_key(const struct inode *inode) return false; } -static inline bool fscrypt_needs_contents_encryption(const struct inode *inode) -{ - return false; -} - static inline bool fscrypt_dummy_context_enabled(struct inode *inode) { return false; @@ -304,11 +348,6 @@ static inline int fscrypt_ioctl_get_policy_ex(struct file *filp, return -EOPNOTSUPP; } -static inline int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg) -{ - return -EOPNOTSUPP; -} - static inline int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) { @@ -413,7 +452,7 @@ static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str) return; } -static inline int fscrypt_fname_disk_to_usr(const struct inode *inode, +static inline int fscrypt_fname_disk_to_usr(struct inode *inode, u32 hash, u32 
minor_hash, const struct fscrypt_str *iname, struct fscrypt_str *oname) @@ -430,13 +469,6 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname, return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); } -static inline u64 fscrypt_fname_siphash(const struct inode *dir, - const struct qstr *name) -{ - WARN_ON_ONCE(1); - return 0; -} - /* bio.c */ static inline void fscrypt_decrypt_bio(struct bio *bio) { @@ -479,13 +511,6 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir, return -EOPNOTSUPP; } -static inline int fscrypt_prepare_setflags(struct inode *inode, - unsigned int oldflags, - unsigned int flags) -{ - return 0; -} - static inline int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, unsigned int max_len, @@ -678,9 +703,8 @@ static inline int fscrypt_prepare_rename(struct inode *old_dir, * filenames are presented in encrypted form. Therefore, we'll try to set up * the directory's encryption key, but even without it the lookup can continue. * - * After calling this function, a filesystem should ensure that it's dentry - * operations contain fscrypt_d_revalidate if DCACHE_ENCRYPTED_NAME was set, - * so that the dentry can be invalidated if the key is later added. + * This also installs a custom ->d_revalidate() method which will invalidate the + * dentry if it was created without the key and the key is later added. * * Return: 0 on success; -ENOENT if key is unavailable but the filename isn't a * correctly formed encoded ciphertext name, so a negative dentry should be diff --git a/include/linux/keyslot-manager.h b/include/linux/keyslot-manager.h index f022bd6d2497..6d32a031218e 100644 --- a/include/linux/keyslot-manager.h +++ b/include/linux/keyslot-manager.h @@ -8,15 +8,6 @@ #include -/* Inline crypto feature bits. Must set at least one. 
*/ -enum { - /* Support for standard software-specified keys */ - BLK_CRYPTO_FEATURE_STANDARD_KEYS = BIT(0), - - /* Support for hardware-wrapped keys */ - BLK_CRYPTO_FEATURE_WRAPPED_KEYS = BIT(1), -}; - #ifdef CONFIG_BLK_INLINE_ENCRYPTION struct keyslot_manager; @@ -52,7 +43,6 @@ struct keyslot_mgmt_ll_ops { struct keyslot_manager *keyslot_manager_create(unsigned int num_slots, const struct keyslot_mgmt_ll_ops *ksm_ops, - unsigned int features, const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], void *ll_priv_data); @@ -65,8 +55,7 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot); bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, enum blk_crypto_mode_num crypto_mode, - unsigned int data_unit_size, - bool is_hw_wrapped_key); + unsigned int data_unit_size); int keyslot_manager_evict_key(struct keyslot_manager *ksm, const struct blk_crypto_key *key); @@ -79,7 +68,6 @@ void keyslot_manager_destroy(struct keyslot_manager *ksm); struct keyslot_manager *keyslot_manager_create_passthrough( const struct keyslot_mgmt_ll_ops *ksm_ops, - unsigned int features, const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], void *ll_priv_data); diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index 1b580ac60f98..1b9cdb7a5c8f 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -8,7 +8,6 @@ #ifndef _UAPI_LINUX_FSCRYPT_H #define _UAPI_LINUX_FSCRYPT_H -#include #include /* Encryption policy flags */ @@ -167,7 +166,6 @@ struct fscrypt_get_key_status_arg { #define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg) #define FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS _IOWR('f', 25, struct fscrypt_remove_key_arg) #define FS_IOC_GET_ENCRYPTION_KEY_STATUS _IOWR('f', 26, struct fscrypt_get_key_status_arg) -#define FS_IOC_GET_ENCRYPTION_NONCE _IOR('f', 27, __u8[16]) /**********************************************************************/ From 
bab82400adc9e8f4cd21f01dd9ef957e01938fea Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 19:54:58 +0530 Subject: [PATCH 133/141] Revert "Integrate the new file encryption framework" This reverts commit 88205c5d9959b8ca4fb63bed60d0775b663b94c0. Signed-off-by: UtsavBalar1231 --- Documentation/block/00-INDEX | 2 - Documentation/block/index.rst | 26 - Documentation/block/inline-encryption.rst | 183 ------ arch/arm64/configs/cuttlefish_defconfig | 3 - arch/x86/configs/x86_64_cuttlefish_defconfig | 3 - block/Kconfig | 17 - block/Makefile | 3 - block/bio-crypt-ctx.c | 142 ---- block/bio.c | 23 +- block/blk-core.c | 11 +- block/blk-crypto-fallback.c | 650 ------------------- block/blk-crypto-internal.h | 58 -- block/blk-crypto.c | 251 ------- block/blk-merge.c | 11 - block/keyslot-manager.c | 560 ---------------- drivers/md/Kconfig | 21 - drivers/md/Makefile | 1 - drivers/md/dm-bow.c | 1 - drivers/md/dm-default-key.c | 403 ------------ drivers/md/dm-linear.c | 1 - drivers/md/dm-table.c | 52 -- drivers/md/dm.c | 100 +-- drivers/scsi/ufs/Kconfig | 9 - drivers/scsi/ufs/Makefile | 4 +- drivers/scsi/ufs/ufs-qcom.c | 6 - drivers/scsi/ufs/ufshcd-crypto.c | 499 -------------- drivers/scsi/ufs/ufshcd-crypto.h | 167 ----- drivers/scsi/ufs/ufshcd.c | 67 +- drivers/scsi/ufs/ufshcd.h | 59 -- drivers/scsi/ufs/ufshci.h | 56 -- fs/buffer.c | 3 - fs/crypto/Kconfig | 6 - fs/crypto/Makefile | 1 - fs/crypto/bio.c | 28 +- fs/crypto/crypto.c | 2 +- fs/crypto/fname.c | 4 +- fs/crypto/fscrypt_private.h | 155 +---- fs/crypto/inline_crypt.c | 353 ---------- fs/crypto/keyring.c | 61 +- fs/crypto/keysetup.c | 169 ++--- fs/crypto/keysetup_v1.c | 17 +- fs/direct-io.c | 5 - fs/ext4/ext4.h | 1 - fs/ext4/inode.c | 16 +- fs/ext4/page-io.c | 6 +- fs/ext4/readpage.c | 11 +- fs/ext4/super.c | 13 - fs/f2fs/data.c | 71 +- fs/f2fs/f2fs.h | 11 +- fs/f2fs/super.c | 41 -- fs/iomap.c | 6 - include/linux/bio-crypt-ctx.h | 228 ------- include/linux/bio.h | 1 - include/linux/blk-crypto.h | 66 -- 
include/linux/blk_types.h | 9 - include/linux/blkdev.h | 6 - include/linux/device-mapper.h | 6 - include/linux/fscrypt.h | 72 -- include/linux/keyslot-manager.h | 84 --- include/uapi/linux/fscrypt.h | 2 - 60 files changed, 145 insertions(+), 4698 deletions(-) delete mode 100644 Documentation/block/index.rst delete mode 100644 Documentation/block/inline-encryption.rst delete mode 100644 block/bio-crypt-ctx.c delete mode 100644 block/blk-crypto-fallback.c delete mode 100644 block/blk-crypto-internal.h delete mode 100644 block/blk-crypto.c delete mode 100644 block/keyslot-manager.c delete mode 100644 drivers/md/dm-default-key.c delete mode 100644 drivers/scsi/ufs/ufshcd-crypto.c delete mode 100644 drivers/scsi/ufs/ufshcd-crypto.h delete mode 100644 fs/crypto/inline_crypt.c delete mode 100644 include/linux/bio-crypt-ctx.h delete mode 100644 include/linux/blk-crypto.h delete mode 100644 include/linux/keyslot-manager.h diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX index 1c63f2cba97e..f8614b3d49f9 100644 --- a/Documentation/block/00-INDEX +++ b/Documentation/block/00-INDEX @@ -16,8 +16,6 @@ data-integrity.txt - Block data integrity deadline-iosched.txt - Deadline IO scheduler tunables -inline-encryption.rst - - Blk-crypto internals and inline encryption ioprio.txt - Block io priorities (in CFQ scheduler) pr.txt diff --git a/Documentation/block/index.rst b/Documentation/block/index.rst deleted file mode 100644 index 026addfc69bc..000000000000 --- a/Documentation/block/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. SPDX-License-Identifier: GPL-2.0 - -===== -Block -===== - -.. 
toctree:: - :maxdepth: 1 - - bfq-iosched - biodoc - biovecs - capability - cmdline-partition - data-integrity - deadline-iosched - inline-encryption - ioprio - kyber-iosched - null_blk - pr - queue-sysfs - request - stat - switching-sched - writeback_cache_control diff --git a/Documentation/block/inline-encryption.rst b/Documentation/block/inline-encryption.rst deleted file mode 100644 index 330106b23c09..000000000000 --- a/Documentation/block/inline-encryption.rst +++ /dev/null @@ -1,183 +0,0 @@ -.. SPDX-License-Identifier: GPL-2.0 - -================= -Inline Encryption -================= - -Objective -========= - -We want to support inline encryption (IE) in the kernel. -To allow for testing, we also want a crypto API fallback when actual -IE hardware is absent. We also want IE to work with layered devices -like dm and loopback (i.e. we want to be able to use the IE hardware -of the underlying devices if present, or else fall back to crypto API -en/decryption). - - -Constraints and notes -===================== - -- IE hardware have a limited number of "keyslots" that can be programmed - with an encryption context (key, algorithm, data unit size, etc.) at any time. - One can specify a keyslot in a data request made to the device, and the - device will en/decrypt the data using the encryption context programmed into - that specified keyslot. When possible, we want to make multiple requests with - the same encryption context share the same keyslot. - -- We need a way for filesystems to specify an encryption context to use for - en/decrypting a struct bio, and a device driver (like UFS) needs to be able - to use that encryption context when it processes the bio. - -- We need a way for device drivers to expose their capabilities in a unified - way to the upper layers. 
- - -Design -====== - -We add a struct bio_crypt_ctx to struct bio that can represent an -encryption context, because we need to be able to pass this encryption -context from the FS layer to the device driver to act upon. - -While IE hardware works on the notion of keyslots, the FS layer has no -knowledge of keyslots - it simply wants to specify an encryption context to -use while en/decrypting a bio. - -We introduce a keyslot manager (KSM) that handles the translation from -encryption contexts specified by the FS to keyslots on the IE hardware. -This KSM also serves as the way IE hardware can expose their capabilities to -upper layers. The generic mode of operation is: each device driver that wants -to support IE will construct a KSM and set it up in its struct request_queue. -Upper layers that want to use IE on this device can then use this KSM in -the device's struct request_queue to translate an encryption context into -a keyslot. The presence of the KSM in the request queue shall be used to mean -that the device supports IE. - -On the device driver end of the interface, the device driver needs to tell the -KSM how to actually manipulate the IE hardware in the device to do things like -programming the crypto key into the IE hardware into a particular keyslot. All -this is achieved through the :c:type:`struct keyslot_mgmt_ll_ops` that the -device driver passes to the KSM when creating it. - -It uses refcounts to track which keyslots are idle (either they have no -encryption context programmed, or there are no in-flight struct bios -referencing that keyslot). When a new encryption context needs a keyslot, it -tries to find a keyslot that has already been programmed with the same -encryption context, and if there is no such keyslot, it evicts the least -recently used idle keyslot and programs the new encryption context into that -one. If no idle keyslots are available, then the caller will sleep until there -is at least one. 
- - -Blk-crypto -========== - -The above is sufficient for simple cases, but does not work if there is a -need for a crypto API fallback, or if we are want to use IE with layered -devices. To these ends, we introduce blk-crypto. Blk-crypto allows us to -present a unified view of encryption to the FS (so FS only needs to specify -an encryption context and not worry about keyslots at all), and blk-crypto -can decide whether to delegate the en/decryption to IE hardware or to the -crypto API. Blk-crypto maintains an internal KSM that serves as the crypto -API fallback. - -Blk-crypto needs to ensure that the encryption context is programmed into the -"correct" keyslot manager for IE. If a bio is submitted to a layered device -that eventually passes the bio down to a device that really does support IE, we -want the encryption context to be programmed into a keyslot for the KSM of the -device with IE support. However, blk-crypto does not know a priori whether a -particular device is the final device in the layering structure for a bio or -not. So in the case that a particular device does not support IE, since it is -possibly the final destination device for the bio, if the bio requires -encryption (i.e. the bio is doing a write operation), blk-crypto must fallback -to the crypto API *before* sending the bio to the device. - -Blk-crypto ensures that: - -- The bio's encryption context is programmed into a keyslot in the KSM of the - request queue that the bio is being submitted to (or the crypto API fallback - KSM if the request queue doesn't have a KSM), and that the ``bc_ksm`` - in the ``bi_crypt_context`` is set to this KSM - -- That the bio has its own individual reference to the keyslot in this KSM. - Once the bio passes through blk-crypto, its encryption context is programmed - in some KSM. 
The "its own individual reference to the keyslot" ensures that - keyslots can be released by each bio independently of other bios while - ensuring that the bio has a valid reference to the keyslot when, for e.g., the - crypto API fallback KSM in blk-crypto performs crypto on the device's behalf. - The individual references are ensured by increasing the refcount for the - keyslot in the ``bc_ksm`` when a bio with a programmed encryption - context is cloned. - - -What blk-crypto does on bio submission --------------------------------------- - -**Case 1:** blk-crypto is given a bio with only an encryption context that hasn't -been programmed into any keyslot in any KSM (for e.g. a bio from the FS). - In this case, blk-crypto will program the encryption context into the KSM of the - request queue the bio is being submitted to (and if this KSM does not exist, - then it will program it into blk-crypto's internal KSM for crypto API - fallback). The KSM that this encryption context was programmed into is stored - as the ``bc_ksm`` in the bio's ``bi_crypt_context``. - -**Case 2:** blk-crypto is given a bio whose encryption context has already been -programmed into a keyslot in the *crypto API fallback* KSM. - In this case, blk-crypto does nothing; it treats the bio as not having - specified an encryption context. Note that we cannot do here what we will do - in Case 3 because we would have already encrypted the bio via the crypto API - by this point. - -**Case 3:** blk-crypto is given a bio whose encryption context has already been -programmed into a keyslot in some KSM (that is *not* the crypto API fallback -KSM). - In this case, blk-crypto first releases that keyslot from that KSM and then - treats the bio as in Case 1. - -This way, when a device driver is processing a bio, it can be sure that -the bio's encryption context has been programmed into some KSM (either the -device driver's request queue's KSM, or blk-crypto's crypto API fallback KSM). 
-It then simply needs to check if the bio's ``bc_ksm`` is the device's -request queue's KSM. If so, then it should proceed with IE. If not, it should -simply do nothing with respect to crypto, because some other KSM (perhaps the -blk-crypto crypto API fallback KSM) is handling the en/decryption. - -Blk-crypto will release the keyslot that is being held by the bio (and also -decrypt it if the bio is using the crypto API fallback KSM) once -``bio_remaining_done`` returns true for the bio. - - -Layered Devices -=============== - -Layered devices that wish to support IE need to create their own keyslot -manager for their request queue, and expose whatever functionality they choose. -When a layered device wants to pass a bio to another layer (either by -resubmitting the same bio, or by submitting a clone), it doesn't need to do -anything special because the bio (or the clone) will once again pass through -blk-crypto, which will work as described in Case 3. If a layered device wants -for some reason to do the IO by itself instead of passing it on to a child -device, but it also chose to expose IE capabilities by setting up a KSM in its -request queue, it is then responsible for en/decrypting the data itself. In -such cases, the device can choose to call the blk-crypto function -``blk_crypto_fallback_to_kernel_crypto_api`` (TODO: Not yet implemented), which will -cause the en/decryption to be done via the crypto API fallback. - - -Future Optimizations for layered devices -======================================== - -Creating a keyslot manager for the layered device uses up memory for each -keyslot, and in general, a layered device (like dm-linear) merely passes the -request on to a "child" device, so the keyslots in the layered device itself -might be completely unused. 
We can instead define a new type of KSM; the -"passthrough KSM", that layered devices can use to let blk-crypto know that -this layered device *will* pass the bio to some child device (and hence -through blk-crypto again, at which point blk-crypto can program the encryption -context, instead of programming it into the layered device's KSM). Again, if -the device "lies" and decides to do the IO itself instead of passing it on to -a child device, it is responsible for doing the en/decryption (and can choose -to call ``blk_crypto_fallback_to_kernel_crypto_api``). Another use case for the -"passthrough KSM" is for IE devices that want to manage their own keyslots/do -not have a limited number of keyslots. diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig index 56e652620541..ba7cfa923d70 100644 --- a/arch/arm64/configs/cuttlefish_defconfig +++ b/arch/arm64/configs/cuttlefish_defconfig @@ -47,8 +47,6 @@ CONFIG_REFCOUNT_FULL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y -CONFIG_BLK_INLINE_ENCRYPTION=y -CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PCI=y CONFIG_PCI_HOST_GENERIC=y CONFIG_PREEMPT=y @@ -433,7 +431,6 @@ CONFIG_EXT4_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y -CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_FS_VERITY=y CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y # CONFIG_DNOTIFY is not set diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig index 5a03ff9b45db..a7f60e3d5dde 100644 --- a/arch/x86/configs/x86_64_cuttlefish_defconfig +++ b/arch/x86/configs/x86_64_cuttlefish_defconfig @@ -46,8 +46,6 @@ CONFIG_REFCOUNT_FULL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y -CONFIG_BLK_INLINE_ENCRYPTION=y -CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y CONFIG_SMP=y CONFIG_HYPERVISOR_GUEST=y @@ -451,7 +449,6 @@ CONFIG_EXT4_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y 
CONFIG_F2FS_FS_ENCRYPTION=y -CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y CONFIG_FS_VERITY=y CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y CONFIG_QUOTA=y diff --git a/block/Kconfig b/block/Kconfig index 4d9bcb951d83..28ec55752b68 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -184,23 +184,6 @@ config BLK_SED_OPAL Enabling this option enables users to setup/unlock/lock Locking ranges for SED devices using the Opal protocol. -config BLK_INLINE_ENCRYPTION - bool "Enable inline encryption support in block layer" - help - Build the blk-crypto subsystem. Enabling this lets the - block layer handle encryption, so users can take - advantage of inline encryption hardware if present. - -config BLK_INLINE_ENCRYPTION_FALLBACK - bool "Enable crypto API fallback for blk-crypto" - depends on BLK_INLINE_ENCRYPTION - select CRYPTO - select CRYPTO_BLKCIPHER - help - Enabling this lets the block layer handle inline encryption - by falling back to the kernel crypto API when inline - encryption hardware is not present. - menu "Partition Types" source "block/partitions/Kconfig" diff --git a/block/Makefile b/block/Makefile index ab14055d8222..6a56303b9925 100644 --- a/block/Makefile +++ b/block/Makefile @@ -35,6 +35,3 @@ obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o obj-$(CONFIG_BLK_WBT) += blk-wbt.o obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o -obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += keyslot-manager.o bio-crypt-ctx.o \ - blk-crypto.o -obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o \ No newline at end of file diff --git a/block/bio-crypt-ctx.c b/block/bio-crypt-ctx.c deleted file mode 100644 index 75008b2afea2..000000000000 --- a/block/bio-crypt-ctx.c +++ /dev/null @@ -1,142 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright 2019 Google LLC - */ - -#include -#include -#include -#include -#include - -#include "blk-crypto-internal.h" - -static int num_prealloc_crypt_ctxs = 128; - -module_param(num_prealloc_crypt_ctxs, int, 0444); 
-MODULE_PARM_DESC(num_prealloc_crypt_ctxs, - "Number of bio crypto contexts to preallocate"); - -static struct kmem_cache *bio_crypt_ctx_cache; -static mempool_t *bio_crypt_ctx_pool; - -int __init bio_crypt_ctx_init(void) -{ - size_t i; - - bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0); - if (!bio_crypt_ctx_cache) - return -ENOMEM; - - bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs, - bio_crypt_ctx_cache); - if (!bio_crypt_ctx_pool) - return -ENOMEM; - - /* This is assumed in various places. */ - BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0); - - /* Sanity check that no algorithm exceeds the defined limits. */ - for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) { - BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE); - BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE); - } - - return 0; -} - -struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask) -{ - return mempool_alloc(bio_crypt_ctx_pool, gfp_mask); -} -EXPORT_SYMBOL_GPL(bio_crypt_alloc_ctx); - -void bio_crypt_free_ctx(struct bio *bio) -{ - mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool); - bio->bi_crypt_context = NULL; -} - -void bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask) -{ - const struct bio_crypt_ctx *src_bc = src->bi_crypt_context; - - bio_clone_skip_dm_default_key(dst, src); - - /* - * If a bio is fallback_crypted, then it will be decrypted when - * bio_endio is called. As we only want the data to be decrypted once, - * copies of the bio must not have have a crypt context. 
- */ - if (!src_bc || bio_crypt_fallback_crypted(src_bc)) - return; - - dst->bi_crypt_context = bio_crypt_alloc_ctx(gfp_mask); - *dst->bi_crypt_context = *src_bc; - - if (src_bc->bc_keyslot >= 0) - keyslot_manager_get_slot(src_bc->bc_ksm, src_bc->bc_keyslot); -} -EXPORT_SYMBOL_GPL(bio_crypt_clone); - -bool bio_crypt_should_process(struct request *rq) -{ - struct bio *bio = rq->bio; - - if (!bio || !bio->bi_crypt_context) - return false; - - return rq->q->ksm == bio->bi_crypt_context->bc_ksm; -} -EXPORT_SYMBOL_GPL(bio_crypt_should_process); - -/* - * Checks that two bio crypt contexts are compatible - i.e. that - * they are mergeable except for data_unit_num continuity. - */ -bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2) -{ - struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context; - struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context; - - if (!bc1) - return !bc2; - return bc2 && bc1->bc_key == bc2->bc_key; -} - -/* - * Checks that two bio crypt contexts are compatible, and also - * that their data_unit_nums are continuous (and can hence be merged) - * in the order b_1 followed by b_2. 
- */ -bool bio_crypt_ctx_mergeable(struct bio *b_1, unsigned int b1_bytes, - struct bio *b_2) -{ - struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context; - struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context; - - if (!bio_crypt_ctx_compatible(b_1, b_2)) - return false; - - return !bc1 || bio_crypt_dun_is_contiguous(bc1, b1_bytes, bc2->bc_dun); -} - -void bio_crypt_ctx_release_keyslot(struct bio_crypt_ctx *bc) -{ - keyslot_manager_put_slot(bc->bc_ksm, bc->bc_keyslot); - bc->bc_ksm = NULL; - bc->bc_keyslot = -1; -} - -int bio_crypt_ctx_acquire_keyslot(struct bio_crypt_ctx *bc, - struct keyslot_manager *ksm) -{ - int slot = keyslot_manager_get_slot_for_key(ksm, bc->bc_key); - - if (slot < 0) - return slot; - - bc->bc_keyslot = slot; - bc->bc_ksm = ksm; - return 0; -} diff --git a/block/bio.c b/block/bio.c index 6ef2e22d2bf3..a3c4fd9ec478 100644 --- a/block/bio.c +++ b/block/bio.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include "blk.h" @@ -244,8 +243,6 @@ fallback: void bio_uninit(struct bio *bio) { bio_disassociate_task(bio); - - bio_crypt_free_ctx(bio); } EXPORT_SYMBOL(bio_uninit); @@ -631,12 +628,15 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs) __bio_clone_fast(b, bio); - bio_crypt_clone(b, bio, gfp_mask); + if (bio_integrity(bio)) { + int ret; - if (bio_integrity(bio) && - bio_integrity_clone(b, bio, gfp_mask) < 0) { - bio_put(b); - return NULL; + ret = bio_integrity_clone(b, bio, gfp_mask); + + if (ret < 0) { + bio_put(b); + return NULL; + } } return b; @@ -704,8 +704,6 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, break; } - bio_crypt_clone(bio, bio_src, gfp_mask); - if (bio_integrity(bio_src)) { int ret; @@ -1037,7 +1035,6 @@ void bio_advance(struct bio *bio, unsigned bytes) if (bio_integrity(bio)) bio_integrity_advance(bio, bytes); - bio_crypt_advance(bio, bytes); bio_advance_iter(bio, &bio->bi_iter, bytes); } EXPORT_SYMBOL(bio_advance); @@ -1895,10 +1892,6 @@ void bio_endio(struct 
bio *bio) again: if (!bio_remaining_done(bio)) return; - - if (!blk_crypto_endio(bio)) - return; - if (!bio_integrity_endio(bio)) return; diff --git a/block/blk-core.c b/block/blk-core.c index e90d2e3644c0..52490014818f 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -35,7 +35,6 @@ #include #include #include -#include #define CREATE_TRACE_POINTS #include @@ -2285,9 +2284,7 @@ blk_qc_t generic_make_request(struct bio *bio) /* Create a fresh bio_list for all subordinate requests */ bio_list_on_stack[1] = bio_list_on_stack[0]; bio_list_init(&bio_list_on_stack[0]); - - if (!blk_crypto_submit_bio(&bio)) - ret = q->make_request_fn(q, bio); + ret = q->make_request_fn(q, bio); /* sort new bios into those for a lower level * and those for the same level @@ -3731,12 +3728,6 @@ int __init blk_dev_init(void) blk_debugfs_root = debugfs_create_dir("block", NULL); #endif - if (bio_crypt_ctx_init() < 0) - panic("Failed to allocate mem for bio crypt ctxs\n"); - - if (blk_crypto_fallback_init() < 0) - panic("Failed to init blk-crypto-fallback\n"); - return 0; } diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c deleted file mode 100644 index cce3317cba80..000000000000 --- a/block/blk-crypto-fallback.c +++ /dev/null @@ -1,650 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright 2019 Google LLC - */ - -/* - * Refer to Documentation/block/inline-encryption.rst for detailed explanation. 
- */ - -#define pr_fmt(fmt) "blk-crypto-fallback: " fmt - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "blk-crypto-internal.h" - -static unsigned int num_prealloc_bounce_pg = 32; -module_param(num_prealloc_bounce_pg, uint, 0); -MODULE_PARM_DESC(num_prealloc_bounce_pg, - "Number of preallocated bounce pages for the blk-crypto crypto API fallback"); - -static unsigned int blk_crypto_num_keyslots = 100; -module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0); -MODULE_PARM_DESC(num_keyslots, - "Number of keyslots for the blk-crypto crypto API fallback"); - -static unsigned int num_prealloc_fallback_crypt_ctxs = 128; -module_param(num_prealloc_fallback_crypt_ctxs, uint, 0); -MODULE_PARM_DESC(num_prealloc_crypt_fallback_ctxs, - "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback"); - -struct bio_fallback_crypt_ctx { - struct bio_crypt_ctx crypt_ctx; - /* - * Copy of the bvec_iter when this bio was submitted. - * We only want to en/decrypt the part of the bio as described by the - * bvec_iter upon submission because bio might be split before being - * resubmitted - */ - struct bvec_iter crypt_iter; - u64 fallback_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; -}; - -/* The following few vars are only used during the crypto API fallback */ -static struct kmem_cache *bio_fallback_crypt_ctx_cache; -static mempool_t *bio_fallback_crypt_ctx_pool; - -/* - * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate - * all of a mode's tfms when that mode starts being used. Since each mode may - * need all the keyslots at some point, each mode needs its own tfm for each - * keyslot; thus, a keyslot may contain tfms for multiple modes. 
However, to - * match the behavior of real inline encryption hardware (which only supports a - * single encryption context per keyslot), we only allow one tfm per keyslot to - * be used at a time - the rest of the unused tfms have their keys cleared. - */ -static DEFINE_MUTEX(tfms_init_lock); -static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX]; - -struct blk_crypto_decrypt_work { - struct work_struct work; - struct bio *bio; -}; - -static struct blk_crypto_keyslot { - struct crypto_skcipher *tfm; - enum blk_crypto_mode_num crypto_mode; - struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX]; -} *blk_crypto_keyslots; - -/* The following few vars are only used during the crypto API fallback */ -static struct keyslot_manager *blk_crypto_ksm; -static struct workqueue_struct *blk_crypto_wq; -static mempool_t *blk_crypto_bounce_page_pool; -static struct kmem_cache *blk_crypto_decrypt_work_cache; - -bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc) -{ - return bc && bc->bc_ksm == blk_crypto_ksm; -} - -/* - * This is the key we set when evicting a keyslot. This *should* be the all 0's - * key, but AES-XTS rejects that key, so we use some random bytes instead. 
- */ -static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE]; - -static void blk_crypto_evict_keyslot(unsigned int slot) -{ - struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot]; - enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode; - int err; - - WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID); - - /* Clear the key in the skcipher */ - err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key, - blk_crypto_modes[crypto_mode].keysize); - WARN_ON(err); - slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID; -} - -static int blk_crypto_keyslot_program(struct keyslot_manager *ksm, - const struct blk_crypto_key *key, - unsigned int slot) -{ - struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot]; - const enum blk_crypto_mode_num crypto_mode = key->crypto_mode; - int err; - - if (crypto_mode != slotp->crypto_mode && - slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID) { - blk_crypto_evict_keyslot(slot); - } - - if (!slotp->tfms[crypto_mode]) - return -ENOMEM; - slotp->crypto_mode = crypto_mode; - err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw, - key->size); - if (err) { - blk_crypto_evict_keyslot(slot); - return err; - } - return 0; -} - -static int blk_crypto_keyslot_evict(struct keyslot_manager *ksm, - const struct blk_crypto_key *key, - unsigned int slot) -{ - blk_crypto_evict_keyslot(slot); - return 0; -} - -/* - * The crypto API fallback KSM ops - only used for a bio when it specifies a - * blk_crypto_mode for which we failed to get a keyslot in the device's inline - * encryption hardware (which probably means the device doesn't have inline - * encryption hardware that supports that crypto mode). 
- */ -static const struct keyslot_mgmt_ll_ops blk_crypto_ksm_ll_ops = { - .keyslot_program = blk_crypto_keyslot_program, - .keyslot_evict = blk_crypto_keyslot_evict, -}; - -static void blk_crypto_encrypt_endio(struct bio *enc_bio) -{ - struct bio *src_bio = enc_bio->bi_private; - int i; - - for (i = 0; i < enc_bio->bi_vcnt; i++) - mempool_free(enc_bio->bi_io_vec[i].bv_page, - blk_crypto_bounce_page_pool); - - src_bio->bi_status = enc_bio->bi_status; - - bio_put(enc_bio); - bio_endio(src_bio); -} - -static struct bio *blk_crypto_clone_bio(struct bio *bio_src) -{ - struct bvec_iter iter; - struct bio_vec bv; - struct bio *bio; - - bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL); - if (!bio) - return NULL; - bio->bi_disk = bio_src->bi_disk; - bio->bi_opf = bio_src->bi_opf; - bio->bi_ioprio = bio_src->bi_ioprio; - bio->bi_write_hint = bio_src->bi_write_hint; - bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; - bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; - - bio_for_each_segment(bv, bio_src, iter) - bio->bi_io_vec[bio->bi_vcnt++] = bv; - - if (bio_integrity(bio_src) && - bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0) { - bio_put(bio); - return NULL; - } - - bio_clone_blkcg_association(bio, bio_src); - - bio_clone_skip_dm_default_key(bio, bio_src); - - return bio; -} - -static int blk_crypto_alloc_cipher_req(struct bio *src_bio, - struct skcipher_request **ciph_req_ret, - struct crypto_wait *wait) -{ - struct skcipher_request *ciph_req; - const struct blk_crypto_keyslot *slotp; - - slotp = &blk_crypto_keyslots[src_bio->bi_crypt_context->bc_keyslot]; - ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode], - GFP_NOIO); - if (!ciph_req) { - src_bio->bi_status = BLK_STS_RESOURCE; - return -ENOMEM; - } - - skcipher_request_set_callback(ciph_req, - CRYPTO_TFM_REQ_MAY_BACKLOG | - CRYPTO_TFM_REQ_MAY_SLEEP, - crypto_req_done, wait); - *ciph_req_ret = ciph_req; - return 0; -} - -static int blk_crypto_split_bio_if_needed(struct bio **bio_ptr) 
-{ - struct bio *bio = *bio_ptr; - unsigned int i = 0; - unsigned int num_sectors = 0; - struct bio_vec bv; - struct bvec_iter iter; - - bio_for_each_segment(bv, bio, iter) { - num_sectors += bv.bv_len >> SECTOR_SHIFT; - if (++i == BIO_MAX_PAGES) - break; - } - if (num_sectors < bio_sectors(bio)) { - struct bio *split_bio; - - split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL); - if (!split_bio) { - bio->bi_status = BLK_STS_RESOURCE; - return -ENOMEM; - } - bio_chain(split_bio, bio); - generic_make_request(bio); - *bio_ptr = split_bio; - } - return 0; -} - -union blk_crypto_iv { - __le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; - u8 bytes[BLK_CRYPTO_MAX_IV_SIZE]; -}; - -static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], - union blk_crypto_iv *iv) -{ - int i; - - for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) - iv->dun[i] = cpu_to_le64(dun[i]); -} - -/* - * The crypto API fallback's encryption routine. - * Allocate a bounce bio for encryption, encrypt the input bio using crypto API, - * and replace *bio_ptr with the bounce bio. May split input bio if it's too - * large. 
- */ -static int blk_crypto_encrypt_bio(struct bio **bio_ptr) -{ - struct bio *src_bio; - struct skcipher_request *ciph_req = NULL; - DECLARE_CRYPTO_WAIT(wait); - u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; - union blk_crypto_iv iv; - struct scatterlist src, dst; - struct bio *enc_bio; - unsigned int i, j; - int data_unit_size; - struct bio_crypt_ctx *bc; - int err = 0; - - /* Split the bio if it's too big for single page bvec */ - err = blk_crypto_split_bio_if_needed(bio_ptr); - if (err) - return err; - - src_bio = *bio_ptr; - bc = src_bio->bi_crypt_context; - data_unit_size = bc->bc_key->data_unit_size; - - /* Allocate bounce bio for encryption */ - enc_bio = blk_crypto_clone_bio(src_bio); - if (!enc_bio) { - src_bio->bi_status = BLK_STS_RESOURCE; - return -ENOMEM; - } - - /* - * Use the crypto API fallback keyslot manager to get a crypto_skcipher - * for the algorithm and key specified for this bio. - */ - err = bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm); - if (err) { - src_bio->bi_status = BLK_STS_IOERR; - goto out_put_enc_bio; - } - - /* and then allocate an skcipher_request for it */ - err = blk_crypto_alloc_cipher_req(src_bio, &ciph_req, &wait); - if (err) - goto out_release_keyslot; - - memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun)); - sg_init_table(&src, 1); - sg_init_table(&dst, 1); - - skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size, - iv.bytes); - - /* Encrypt each page in the bounce bio */ - for (i = 0; i < enc_bio->bi_vcnt; i++) { - struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i]; - struct page *plaintext_page = enc_bvec->bv_page; - struct page *ciphertext_page = - mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO); - - enc_bvec->bv_page = ciphertext_page; - - if (!ciphertext_page) { - src_bio->bi_status = BLK_STS_RESOURCE; - err = -ENOMEM; - goto out_free_bounce_pages; - } - - sg_set_page(&src, plaintext_page, data_unit_size, - enc_bvec->bv_offset); - sg_set_page(&dst, ciphertext_page, data_unit_size, - enc_bvec->bv_offset); - 
- /* Encrypt each data unit in this page */ - for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) { - blk_crypto_dun_to_iv(curr_dun, &iv); - err = crypto_wait_req(crypto_skcipher_encrypt(ciph_req), - &wait); - if (err) { - i++; - src_bio->bi_status = BLK_STS_RESOURCE; - goto out_free_bounce_pages; - } - bio_crypt_dun_increment(curr_dun, 1); - src.offset += data_unit_size; - dst.offset += data_unit_size; - } - } - - enc_bio->bi_private = src_bio; - enc_bio->bi_end_io = blk_crypto_encrypt_endio; - *bio_ptr = enc_bio; - - enc_bio = NULL; - err = 0; - goto out_free_ciph_req; - -out_free_bounce_pages: - while (i > 0) - mempool_free(enc_bio->bi_io_vec[--i].bv_page, - blk_crypto_bounce_page_pool); -out_free_ciph_req: - skcipher_request_free(ciph_req); -out_release_keyslot: - bio_crypt_ctx_release_keyslot(bc); -out_put_enc_bio: - if (enc_bio) - bio_put(enc_bio); - - return err; -} - -static void blk_crypto_free_fallback_crypt_ctx(struct bio *bio) -{ - mempool_free(container_of(bio->bi_crypt_context, - struct bio_fallback_crypt_ctx, - crypt_ctx), - bio_fallback_crypt_ctx_pool); - bio->bi_crypt_context = NULL; -} - -/* - * The crypto API fallback's main decryption routine. - * Decrypts input bio in place. 
- */ -static void blk_crypto_decrypt_bio(struct work_struct *work) -{ - struct blk_crypto_decrypt_work *decrypt_work = - container_of(work, struct blk_crypto_decrypt_work, work); - struct bio *bio = decrypt_work->bio; - struct skcipher_request *ciph_req = NULL; - DECLARE_CRYPTO_WAIT(wait); - struct bio_vec bv; - struct bvec_iter iter; - u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; - union blk_crypto_iv iv; - struct scatterlist sg; - struct bio_crypt_ctx *bc = bio->bi_crypt_context; - struct bio_fallback_crypt_ctx *f_ctx = - container_of(bc, struct bio_fallback_crypt_ctx, crypt_ctx); - const int data_unit_size = bc->bc_key->data_unit_size; - unsigned int i; - int err; - - /* - * Use the crypto API fallback keyslot manager to get a crypto_skcipher - * for the algorithm and key specified for this bio. - */ - if (bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm)) { - bio->bi_status = BLK_STS_RESOURCE; - goto out_no_keyslot; - } - - /* and then allocate an skcipher_request for it */ - err = blk_crypto_alloc_cipher_req(bio, &ciph_req, &wait); - if (err) - goto out; - - memcpy(curr_dun, f_ctx->fallback_dun, sizeof(curr_dun)); - sg_init_table(&sg, 1); - skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size, - iv.bytes); - - /* Decrypt each segment in the bio */ - __bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) { - struct page *page = bv.bv_page; - - sg_set_page(&sg, page, data_unit_size, bv.bv_offset); - - /* Decrypt each data unit in the segment */ - for (i = 0; i < bv.bv_len; i += data_unit_size) { - blk_crypto_dun_to_iv(curr_dun, &iv); - if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req), - &wait)) { - bio->bi_status = BLK_STS_IOERR; - goto out; - } - bio_crypt_dun_increment(curr_dun, 1); - sg.offset += data_unit_size; - } - } - -out: - skcipher_request_free(ciph_req); - bio_crypt_ctx_release_keyslot(bc); -out_no_keyslot: - kmem_cache_free(blk_crypto_decrypt_work_cache, decrypt_work); - blk_crypto_free_fallback_crypt_ctx(bio); - bio_endio(bio); -} - -/* 
- * Queue bio for decryption. - * Returns true iff bio was queued for decryption. - */ -bool blk_crypto_queue_decrypt_bio(struct bio *bio) -{ - struct blk_crypto_decrypt_work *decrypt_work; - - /* If there was an IO error, don't queue for decrypt. */ - if (bio->bi_status) - goto out; - - decrypt_work = kmem_cache_zalloc(blk_crypto_decrypt_work_cache, - GFP_ATOMIC); - if (!decrypt_work) { - bio->bi_status = BLK_STS_RESOURCE; - goto out; - } - - INIT_WORK(&decrypt_work->work, blk_crypto_decrypt_bio); - decrypt_work->bio = bio; - queue_work(blk_crypto_wq, &decrypt_work->work); - - return true; -out: - blk_crypto_free_fallback_crypt_ctx(bio); - return false; -} - -/** - * blk_crypto_start_using_mode() - Start using a crypto algorithm on a device - * @mode_num: the blk_crypto_mode we want to allocate ciphers for. - * @data_unit_size: the data unit size that will be used - * @q: the request queue for the device - * - * Upper layers must call this function to ensure that a the crypto API fallback - * has transforms for this algorithm, if they become necessary. - * - * Return: 0 on success and -err on error. - */ -int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num, - unsigned int data_unit_size, - struct request_queue *q) -{ - struct blk_crypto_keyslot *slotp; - unsigned int i; - int err = 0; - - /* - * Fast path - * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num] - * for each i are visible before we try to access them. - */ - if (likely(smp_load_acquire(&tfms_inited[mode_num]))) - return 0; - - /* - * If the keyslot manager of the request queue supports this - * crypto mode, then we don't need to allocate this mode. 
- */ - if (keyslot_manager_crypto_mode_supported(q->ksm, mode_num, - data_unit_size)) - return 0; - - mutex_lock(&tfms_init_lock); - if (likely(tfms_inited[mode_num])) - goto out; - - for (i = 0; i < blk_crypto_num_keyslots; i++) { - slotp = &blk_crypto_keyslots[i]; - slotp->tfms[mode_num] = crypto_alloc_skcipher( - blk_crypto_modes[mode_num].cipher_str, - 0, 0); - if (IS_ERR(slotp->tfms[mode_num])) { - err = PTR_ERR(slotp->tfms[mode_num]); - slotp->tfms[mode_num] = NULL; - goto out_free_tfms; - } - - crypto_skcipher_set_flags(slotp->tfms[mode_num], - CRYPTO_TFM_REQ_WEAK_KEY); - } - - /* - * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num] - * for each i are visible before we set tfms_inited[mode_num]. - */ - smp_store_release(&tfms_inited[mode_num], true); - goto out; - -out_free_tfms: - for (i = 0; i < blk_crypto_num_keyslots; i++) { - slotp = &blk_crypto_keyslots[i]; - crypto_free_skcipher(slotp->tfms[mode_num]); - slotp->tfms[mode_num] = NULL; - } -out: - mutex_unlock(&tfms_init_lock); - return err; -} -EXPORT_SYMBOL_GPL(blk_crypto_start_using_mode); - -int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key) -{ - return keyslot_manager_evict_key(blk_crypto_ksm, key); -} - -int blk_crypto_fallback_submit_bio(struct bio **bio_ptr) -{ - struct bio *bio = *bio_ptr; - struct bio_crypt_ctx *bc = bio->bi_crypt_context; - struct bio_fallback_crypt_ctx *f_ctx; - - if (!tfms_inited[bc->bc_key->crypto_mode]) { - bio->bi_status = BLK_STS_IOERR; - return -EIO; - } - - if (bio_data_dir(bio) == WRITE) - return blk_crypto_encrypt_bio(bio_ptr); - - /* - * Mark bio as fallback crypted and replace the bio_crypt_ctx with - * another one contained in a bio_fallback_crypt_ctx, so that the - * fallback has space to store the info it needs for decryption. 
- */ - bc->bc_ksm = blk_crypto_ksm; - f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO); - f_ctx->crypt_ctx = *bc; - memcpy(f_ctx->fallback_dun, bc->bc_dun, sizeof(f_ctx->fallback_dun)); - f_ctx->crypt_iter = bio->bi_iter; - - bio_crypt_free_ctx(bio); - bio->bi_crypt_context = &f_ctx->crypt_ctx; - - return 0; -} - -int __init blk_crypto_fallback_init(void) -{ - int i; - unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX]; - - prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE); - - /* All blk-crypto modes have a crypto API fallback. */ - for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) - crypto_mode_supported[i] = 0xFFFFFFFF; - crypto_mode_supported[BLK_ENCRYPTION_MODE_INVALID] = 0; - - blk_crypto_ksm = keyslot_manager_create(blk_crypto_num_keyslots, - &blk_crypto_ksm_ll_ops, - crypto_mode_supported, NULL); - if (!blk_crypto_ksm) - return -ENOMEM; - - blk_crypto_wq = alloc_workqueue("blk_crypto_wq", - WQ_UNBOUND | WQ_HIGHPRI | - WQ_MEM_RECLAIM, num_online_cpus()); - if (!blk_crypto_wq) - return -ENOMEM; - - blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots, - sizeof(blk_crypto_keyslots[0]), - GFP_KERNEL); - if (!blk_crypto_keyslots) - return -ENOMEM; - - blk_crypto_bounce_page_pool = - mempool_create_page_pool(num_prealloc_bounce_pg, 0); - if (!blk_crypto_bounce_page_pool) - return -ENOMEM; - - blk_crypto_decrypt_work_cache = KMEM_CACHE(blk_crypto_decrypt_work, - SLAB_RECLAIM_ACCOUNT); - if (!blk_crypto_decrypt_work_cache) - return -ENOMEM; - - bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0); - if (!bio_fallback_crypt_ctx_cache) - return -ENOMEM; - - bio_fallback_crypt_ctx_pool = - mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs, - bio_fallback_crypt_ctx_cache); - if (!bio_fallback_crypt_ctx_pool) - return -ENOMEM; - - return 0; -} diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h deleted file mode 100644 index 40d826b743da..000000000000 --- a/block/blk-crypto-internal.h +++ /dev/null @@ -1,58 
+0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright 2019 Google LLC - */ - -#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H -#define __LINUX_BLK_CRYPTO_INTERNAL_H - -#include - -/* Represents a crypto mode supported by blk-crypto */ -struct blk_crypto_mode { - const char *cipher_str; /* crypto API name (for fallback case) */ - unsigned int keysize; /* key size in bytes */ - unsigned int ivsize; /* iv size in bytes */ -}; - -extern const struct blk_crypto_mode blk_crypto_modes[]; - -#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK - -int blk_crypto_fallback_submit_bio(struct bio **bio_ptr); - -bool blk_crypto_queue_decrypt_bio(struct bio *bio); - -int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key); - -bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc); - -#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */ - -static inline bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc) -{ - return false; -} - -static inline int blk_crypto_fallback_submit_bio(struct bio **bio_ptr) -{ - pr_warn_once("crypto API fallback disabled; failing request\n"); - (*bio_ptr)->bi_status = BLK_STS_NOTSUPP; - return -EIO; -} - -static inline bool blk_crypto_queue_decrypt_bio(struct bio *bio) -{ - WARN_ON(1); - return false; -} - -static inline int -blk_crypto_fallback_evict_key(const struct blk_crypto_key *key) -{ - return 0; -} - -#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */ - -#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */ diff --git a/block/blk-crypto.c b/block/blk-crypto.c deleted file mode 100644 index a8de0d9680e0..000000000000 --- a/block/blk-crypto.c +++ /dev/null @@ -1,251 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright 2019 Google LLC - */ - -/* - * Refer to Documentation/block/inline-encryption.rst for detailed explanation. 
- */ - -#define pr_fmt(fmt) "blk-crypto: " fmt - -#include -#include -#include -#include -#include - -#include "blk-crypto-internal.h" - -const struct blk_crypto_mode blk_crypto_modes[] = { - [BLK_ENCRYPTION_MODE_AES_256_XTS] = { - .cipher_str = "xts(aes)", - .keysize = 64, - .ivsize = 16, - }, - [BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = { - .cipher_str = "essiv(cbc(aes),sha256)", - .keysize = 16, - .ivsize = 16, - }, - [BLK_ENCRYPTION_MODE_ADIANTUM] = { - .cipher_str = "adiantum(xchacha12,aes)", - .keysize = 32, - .ivsize = 32, - }, -}; - -/* Check that all I/O segments are data unit aligned */ -static int bio_crypt_check_alignment(struct bio *bio) -{ - const unsigned int data_unit_size = - bio->bi_crypt_context->bc_key->data_unit_size; - struct bvec_iter iter; - struct bio_vec bv; - - bio_for_each_segment(bv, bio, iter) { - if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size)) - return -EIO; - } - return 0; -} - -/** - * blk_crypto_submit_bio - handle submitting bio for inline encryption - * - * @bio_ptr: pointer to original bio pointer - * - * If the bio doesn't have inline encryption enabled or the submitter already - * specified a keyslot for the target device, do nothing. Else, a raw key must - * have been provided, so acquire a device keyslot for it if supported. Else, - * use the crypto API fallback. - * - * When the crypto API fallback is used for encryption, blk-crypto may choose to - * split the bio into 2 - the first one that will continue to be processed and - * the second one that will be resubmitted via generic_make_request. - * A bounce bio will be allocated to encrypt the contents of the aforementioned - * "first one", and *bio_ptr will be updated to this bounce bio. - * - * Return: 0 if bio submission should continue; nonzero if bio_endio() was - * already called so bio submission should abort. 
- */ -int blk_crypto_submit_bio(struct bio **bio_ptr) -{ - struct bio *bio = *bio_ptr; - struct request_queue *q; - struct bio_crypt_ctx *bc = bio->bi_crypt_context; - int err; - - if (!bc || !bio_has_data(bio)) - return 0; - - /* - * When a read bio is marked for fallback decryption, its bi_iter is - * saved so that when we decrypt the bio later, we know what part of it - * was marked for fallback decryption (when the bio is passed down after - * blk_crypto_submit bio, it may be split or advanced so we cannot rely - * on the bi_iter while decrypting in blk_crypto_endio) - */ - if (bio_crypt_fallback_crypted(bc)) - return 0; - - err = bio_crypt_check_alignment(bio); - if (err) { - bio->bi_status = BLK_STS_IOERR; - goto out; - } - - q = bio->bi_disk->queue; - - if (bc->bc_ksm) { - /* Key already programmed into device? */ - if (q->ksm == bc->bc_ksm) - return 0; - - /* Nope, release the existing keyslot. */ - bio_crypt_ctx_release_keyslot(bc); - } - - /* Get device keyslot if supported */ - if (keyslot_manager_crypto_mode_supported(q->ksm, - bc->bc_key->crypto_mode, - bc->bc_key->data_unit_size)) { - err = bio_crypt_ctx_acquire_keyslot(bc, q->ksm); - if (!err) - return 0; - - pr_warn_once("Failed to acquire keyslot for %s (err=%d). Falling back to crypto API.\n", - bio->bi_disk->disk_name, err); - } - - /* Fallback to crypto API */ - err = blk_crypto_fallback_submit_bio(bio_ptr); - if (err) - goto out; - - return 0; -out: - bio_endio(*bio_ptr); - return err; -} - -/** - * blk_crypto_endio - clean up bio w.r.t inline encryption during bio_endio - * - * @bio: the bio to clean up - * - * If blk_crypto_submit_bio decided to fallback to crypto API for this bio, - * we queue the bio for decryption into a workqueue and return false, - * and call bio_endio(bio) at a later time (after the bio has been decrypted). - * - * If the bio is not to be decrypted by the crypto API, this function releases - * the reference to the keyslot that blk_crypto_submit_bio got. 
- * - * Return: true if bio_endio should continue; false otherwise (bio_endio will - * be called again when bio has been decrypted). - */ -bool blk_crypto_endio(struct bio *bio) -{ - struct bio_crypt_ctx *bc = bio->bi_crypt_context; - - if (!bc) - return true; - - if (bio_crypt_fallback_crypted(bc)) { - /* - * The only bios who's crypto is handled by the blk-crypto - * fallback when they reach here are those with - * bio_data_dir(bio) == READ, since WRITE bios that are - * encrypted by the crypto API fallback are handled by - * blk_crypto_encrypt_endio(). - */ - return !blk_crypto_queue_decrypt_bio(bio); - } - - if (bc->bc_keyslot >= 0) - bio_crypt_ctx_release_keyslot(bc); - - return true; -} - -/** - * blk_crypto_init_key() - Prepare a key for use with blk-crypto - * @blk_key: Pointer to the blk_crypto_key to initialize. - * @raw_key: Pointer to the raw key. - * @raw_key_size: Size of raw key. Must be at least the required size for the - * chosen @crypto_mode; see blk_crypto_modes[]. (It's allowed - * to be longer than the mode's actual key size, in order to - * support inline encryption hardware that accepts wrapped keys.) - * @crypto_mode: identifier for the encryption algorithm to use - * @data_unit_size: the data unit size to use for en/decryption - * - * Return: The blk_crypto_key that was prepared, or an ERR_PTR() on error. When - * done using the key, it must be freed with blk_crypto_free_key(). 
- */ -int blk_crypto_init_key(struct blk_crypto_key *blk_key, - const u8 *raw_key, unsigned int raw_key_size, - enum blk_crypto_mode_num crypto_mode, - unsigned int data_unit_size) -{ - const struct blk_crypto_mode *mode; - static siphash_key_t hash_key; - - memset(blk_key, 0, sizeof(*blk_key)); - - if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes)) - return -EINVAL; - - BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE); - - mode = &blk_crypto_modes[crypto_mode]; - if (raw_key_size < mode->keysize || - raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE) - return -EINVAL; - - if (!is_power_of_2(data_unit_size)) - return -EINVAL; - - blk_key->crypto_mode = crypto_mode; - blk_key->data_unit_size = data_unit_size; - blk_key->data_unit_size_bits = ilog2(data_unit_size); - blk_key->size = raw_key_size; - memcpy(blk_key->raw, raw_key, raw_key_size); - - /* - * The keyslot manager uses the SipHash of the key to implement O(1) key - * lookups while avoiding leaking information about the keys. It's - * precomputed here so that it only needs to be computed once per key. - */ - get_random_once(&hash_key, sizeof(hash_key)); - blk_key->hash = siphash(raw_key, raw_key_size, &hash_key); - - return 0; -} -EXPORT_SYMBOL_GPL(blk_crypto_init_key); - -/** - * blk_crypto_evict_key() - Evict a key from any inline encryption hardware - * it may have been programmed into - * @q: The request queue who's keyslot manager this key might have been - * programmed into - * @key: The key to evict - * - * Upper layers (filesystems) should call this function to ensure that a key - * is evicted from hardware that it might have been programmed into. This - * will call keyslot_manager_evict_key on the queue's keyslot manager, if one - * exists, and supports the crypto algorithm with the specified data unit size. - * Otherwise, it will evict the key from the blk-crypto-fallback's ksm. - * - * Return: 0 on success, -err on error. 
- */ -int blk_crypto_evict_key(struct request_queue *q, - const struct blk_crypto_key *key) -{ - if (q->ksm && - keyslot_manager_crypto_mode_supported(q->ksm, key->crypto_mode, - key->data_unit_size)) - return keyslot_manager_evict_key(q->ksm, key); - - return blk_crypto_fallback_evict_key(key); -} -EXPORT_SYMBOL_GPL(blk_crypto_evict_key); diff --git a/block/blk-merge.c b/block/blk-merge.c index 9e322d62d9f4..de29a4054666 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -514,8 +514,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, req_set_nomerge(q, req); return 0; } - if (!bio_crypt_ctx_mergeable(req->bio, blk_rq_bytes(req), bio)) - return 0; if (!bio_flagged(req->biotail, BIO_SEG_VALID)) blk_recount_segments(q, req->biotail); if (!bio_flagged(bio, BIO_SEG_VALID)) @@ -538,8 +536,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req, req_set_nomerge(q, req); return 0; } - if (!bio_crypt_ctx_mergeable(bio, bio->bi_iter.bi_size, req->bio)) - return 0; if (!bio_flagged(bio, BIO_SEG_VALID)) blk_recount_segments(q, bio); if (!bio_flagged(req->bio, BIO_SEG_VALID)) @@ -616,9 +612,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, if (blk_integrity_merge_rq(q, req, next) == false) return 0; - if (!bio_crypt_ctx_mergeable(req->bio, blk_rq_bytes(req), next->bio)) - return 0; - /* Merge is OK... 
*/ req->nr_phys_segments = total_phys_segments; return 1; @@ -840,10 +833,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) if (rq->write_hint != bio->bi_write_hint) return false; - /* Only merge if the crypt contexts are compatible */ - if (!bio_crypt_ctx_compatible(bio, rq->bio)) - return false; - return true; } diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c deleted file mode 100644 index 7e42813c9de0..000000000000 --- a/block/keyslot-manager.c +++ /dev/null @@ -1,560 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright 2019 Google LLC - */ - -/** - * DOC: The Keyslot Manager - * - * Many devices with inline encryption support have a limited number of "slots" - * into which encryption contexts may be programmed, and requests can be tagged - * with a slot number to specify the key to use for en/decryption. - * - * As the number of slots are limited, and programming keys is expensive on - * many inline encryption hardware, we don't want to program the same key into - * multiple slots - if multiple requests are using the same key, we want to - * program just one slot with that key and use that slot for all requests. - * - * The keyslot manager manages these keyslots appropriately, and also acts as - * an abstraction between the inline encryption hardware and the upper layers. - * - * Lower layer devices will set up a keyslot manager in their request queue - * and tell it how to perform device specific operations like programming/ - * evicting keys from keyslots. - * - * Upper layers will call keyslot_manager_get_slot_for_key() to program a - * key into some slot in the inline encryption hardware. 
- */ -#include -#include -#include -#include -#include -#include -#include - -struct keyslot { - atomic_t slot_refs; - struct list_head idle_slot_node; - struct hlist_node hash_node; - struct blk_crypto_key key; -}; - -struct keyslot_manager { - unsigned int num_slots; - struct keyslot_mgmt_ll_ops ksm_ll_ops; - unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX]; - void *ll_priv_data; - - /* Protects programming and evicting keys from the device */ - struct rw_semaphore lock; - - /* List of idle slots, with least recently used slot at front */ - wait_queue_head_t idle_slots_wait_queue; - struct list_head idle_slots; - spinlock_t idle_slots_lock; - - /* - * Hash table which maps key hashes to keyslots, so that we can find a - * key's keyslot in O(1) time rather than O(num_slots). Protected by - * 'lock'. A cryptographic hash function is used so that timing attacks - * can't leak information about the raw keys. - */ - struct hlist_head *slot_hashtable; - unsigned int slot_hashtable_size; - - /* Per-keyslot data */ - struct keyslot slots[]; -}; - -static inline bool keyslot_manager_is_passthrough(struct keyslot_manager *ksm) -{ - return ksm->num_slots == 0; -} - -/** - * keyslot_manager_create() - Create a keyslot manager - * @num_slots: The number of key slots to manage. - * @ksm_ll_ops: The struct keyslot_mgmt_ll_ops for the device that this keyslot - * manager will use to perform operations like programming and - * evicting keys. - * @crypto_mode_supported: Array of size BLK_ENCRYPTION_MODE_MAX of - * bitmasks that represents whether a crypto mode - * and data unit size are supported. The i'th bit - * of crypto_mode_supported[crypto_mode] is set iff - * a data unit size of (1 << i) is supported. We - * only support data unit sizes that are powers of - * 2. - * @ll_priv_data: Private data passed as is to the functions in ksm_ll_ops. - * - * Allocate memory for and initialize a keyslot manager. Called by e.g. 
- * storage drivers to set up a keyslot manager in their request_queue. - * - * Context: May sleep - * Return: Pointer to constructed keyslot manager or NULL on error. - */ -struct keyslot_manager *keyslot_manager_create(unsigned int num_slots, - const struct keyslot_mgmt_ll_ops *ksm_ll_ops, - const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], - void *ll_priv_data) -{ - struct keyslot_manager *ksm; - unsigned int slot; - unsigned int i; - - if (num_slots == 0) - return NULL; - - /* Check that all ops are specified */ - if (ksm_ll_ops->keyslot_program == NULL || - ksm_ll_ops->keyslot_evict == NULL) - return NULL; - - ksm = kvzalloc(struct_size(ksm, slots, num_slots), GFP_KERNEL); - if (!ksm) - return NULL; - - ksm->num_slots = num_slots; - ksm->ksm_ll_ops = *ksm_ll_ops; - memcpy(ksm->crypto_mode_supported, crypto_mode_supported, - sizeof(ksm->crypto_mode_supported)); - ksm->ll_priv_data = ll_priv_data; - - init_rwsem(&ksm->lock); - - init_waitqueue_head(&ksm->idle_slots_wait_queue); - INIT_LIST_HEAD(&ksm->idle_slots); - - for (slot = 0; slot < num_slots; slot++) { - list_add_tail(&ksm->slots[slot].idle_slot_node, - &ksm->idle_slots); - } - - spin_lock_init(&ksm->idle_slots_lock); - - ksm->slot_hashtable_size = roundup_pow_of_two(num_slots); - ksm->slot_hashtable = kvmalloc_array(ksm->slot_hashtable_size, - sizeof(ksm->slot_hashtable[0]), - GFP_KERNEL); - if (!ksm->slot_hashtable) - goto err_free_ksm; - for (i = 0; i < ksm->slot_hashtable_size; i++) - INIT_HLIST_HEAD(&ksm->slot_hashtable[i]); - - return ksm; - -err_free_ksm: - keyslot_manager_destroy(ksm); - return NULL; -} -EXPORT_SYMBOL_GPL(keyslot_manager_create); - -static inline struct hlist_head * -hash_bucket_for_key(struct keyslot_manager *ksm, - const struct blk_crypto_key *key) -{ - return &ksm->slot_hashtable[key->hash & (ksm->slot_hashtable_size - 1)]; -} - -static void remove_slot_from_lru_list(struct keyslot_manager *ksm, int slot) -{ - unsigned long flags; - - 
spin_lock_irqsave(&ksm->idle_slots_lock, flags); - list_del(&ksm->slots[slot].idle_slot_node); - spin_unlock_irqrestore(&ksm->idle_slots_lock, flags); -} - -static int find_keyslot(struct keyslot_manager *ksm, - const struct blk_crypto_key *key) -{ - const struct hlist_head *head = hash_bucket_for_key(ksm, key); - const struct keyslot *slotp; - - hlist_for_each_entry(slotp, head, hash_node) { - if (slotp->key.hash == key->hash && - slotp->key.crypto_mode == key->crypto_mode && - slotp->key.size == key->size && - slotp->key.data_unit_size == key->data_unit_size && - !crypto_memneq(slotp->key.raw, key->raw, key->size)) - return slotp - ksm->slots; - } - return -ENOKEY; -} - -static int find_and_grab_keyslot(struct keyslot_manager *ksm, - const struct blk_crypto_key *key) -{ - int slot; - - slot = find_keyslot(ksm, key); - if (slot < 0) - return slot; - if (atomic_inc_return(&ksm->slots[slot].slot_refs) == 1) { - /* Took first reference to this slot; remove it from LRU list */ - remove_slot_from_lru_list(ksm, slot); - } - return slot; -} - -/** - * keyslot_manager_get_slot_for_key() - Program a key into a keyslot. - * @ksm: The keyslot manager to program the key into. - * @key: Pointer to the key object to program, including the raw key, crypto - * mode, and data unit size. - * - * Get a keyslot that's been programmed with the specified key. If one already - * exists, return it with incremented refcount. Otherwise, wait for a keyslot - * to become idle and program it. - * - * Context: Process context. Takes and releases ksm->lock. - * Return: The keyslot on success, else a -errno value. 
- */ -int keyslot_manager_get_slot_for_key(struct keyslot_manager *ksm, - const struct blk_crypto_key *key) -{ - int slot; - int err; - struct keyslot *idle_slot; - - if (keyslot_manager_is_passthrough(ksm)) - return 0; - - down_read(&ksm->lock); - slot = find_and_grab_keyslot(ksm, key); - up_read(&ksm->lock); - if (slot != -ENOKEY) - return slot; - - for (;;) { - down_write(&ksm->lock); - slot = find_and_grab_keyslot(ksm, key); - if (slot != -ENOKEY) { - up_write(&ksm->lock); - return slot; - } - - /* - * If we're here, that means there wasn't a slot that was - * already programmed with the key. So try to program it. - */ - if (!list_empty(&ksm->idle_slots)) - break; - - up_write(&ksm->lock); - wait_event(ksm->idle_slots_wait_queue, - !list_empty(&ksm->idle_slots)); - } - - idle_slot = list_first_entry(&ksm->idle_slots, struct keyslot, - idle_slot_node); - slot = idle_slot - ksm->slots; - - err = ksm->ksm_ll_ops.keyslot_program(ksm, key, slot); - if (err) { - wake_up(&ksm->idle_slots_wait_queue); - up_write(&ksm->lock); - return err; - } - - /* Move this slot to the hash list for the new key. */ - if (idle_slot->key.crypto_mode != BLK_ENCRYPTION_MODE_INVALID) - hlist_del(&idle_slot->hash_node); - hlist_add_head(&idle_slot->hash_node, hash_bucket_for_key(ksm, key)); - - atomic_set(&idle_slot->slot_refs, 1); - idle_slot->key = *key; - - remove_slot_from_lru_list(ksm, slot); - - up_write(&ksm->lock); - return slot; -} - -/** - * keyslot_manager_get_slot() - Increment the refcount on the specified slot. - * @ksm: The keyslot manager that we want to modify. - * @slot: The slot to increment the refcount of. - * - * This function assumes that there is already an active reference to that slot - * and simply increments the refcount. This is useful when cloning a bio that - * already has a reference to a keyslot, and we want the cloned bio to also have - * its own reference. - * - * Context: Any context. 
- */ -void keyslot_manager_get_slot(struct keyslot_manager *ksm, unsigned int slot) -{ - if (keyslot_manager_is_passthrough(ksm)) - return; - - if (WARN_ON(slot >= ksm->num_slots)) - return; - - WARN_ON(atomic_inc_return(&ksm->slots[slot].slot_refs) < 2); -} - -/** - * keyslot_manager_put_slot() - Release a reference to a slot - * @ksm: The keyslot manager to release the reference from. - * @slot: The slot to release the reference from. - * - * Context: Any context. - */ -void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot) -{ - unsigned long flags; - - if (keyslot_manager_is_passthrough(ksm)) - return; - - if (WARN_ON(slot >= ksm->num_slots)) - return; - - if (atomic_dec_and_lock_irqsave(&ksm->slots[slot].slot_refs, - &ksm->idle_slots_lock, flags)) { - list_add_tail(&ksm->slots[slot].idle_slot_node, - &ksm->idle_slots); - spin_unlock_irqrestore(&ksm->idle_slots_lock, flags); - wake_up(&ksm->idle_slots_wait_queue); - } -} - -/** - * keyslot_manager_crypto_mode_supported() - Find out if a crypto_mode/data - * unit size combination is supported - * by a ksm. - * @ksm: The keyslot manager to check - * @crypto_mode: The crypto mode to check for. - * @data_unit_size: The data_unit_size for the mode. - * - * Calls and returns the result of the crypto_mode_supported function specified - * by the ksm. - * - * Context: Process context. - * Return: Whether or not this ksm supports the specified crypto_mode/ - * data_unit_size combo. - */ -bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, - enum blk_crypto_mode_num crypto_mode, - unsigned int data_unit_size) -{ - if (!ksm) - return false; - if (WARN_ON(crypto_mode >= BLK_ENCRYPTION_MODE_MAX)) - return false; - if (WARN_ON(!is_power_of_2(data_unit_size))) - return false; - return ksm->crypto_mode_supported[crypto_mode] & data_unit_size; -} - -/** - * keyslot_manager_evict_key() - Evict a key from the lower layer device. 
- * @ksm: The keyslot manager to evict from - * @key: The key to evict - * - * Find the keyslot that the specified key was programmed into, and evict that - * slot from the lower layer device if that slot is not currently in use. - * - * Context: Process context. Takes and releases ksm->lock. - * Return: 0 on success, -EBUSY if the key is still in use, or another - * -errno value on other error. - */ -int keyslot_manager_evict_key(struct keyslot_manager *ksm, - const struct blk_crypto_key *key) -{ - int slot; - int err; - struct keyslot *slotp; - - if (keyslot_manager_is_passthrough(ksm)) { - if (ksm->ksm_ll_ops.keyslot_evict) { - down_write(&ksm->lock); - err = ksm->ksm_ll_ops.keyslot_evict(ksm, key, -1); - up_write(&ksm->lock); - return err; - } - return 0; - } - - down_write(&ksm->lock); - slot = find_keyslot(ksm, key); - if (slot < 0) { - err = slot; - goto out_unlock; - } - slotp = &ksm->slots[slot]; - - if (atomic_read(&slotp->slot_refs) != 0) { - err = -EBUSY; - goto out_unlock; - } - err = ksm->ksm_ll_ops.keyslot_evict(ksm, key, slot); - if (err) - goto out_unlock; - - hlist_del(&slotp->hash_node); - memzero_explicit(&slotp->key, sizeof(slotp->key)); - err = 0; -out_unlock: - up_write(&ksm->lock); - return err; -} - -/** - * keyslot_manager_reprogram_all_keys() - Re-program all keyslots. - * @ksm: The keyslot manager - * - * Re-program all keyslots that are supposed to have a key programmed. This is - * intended only for use by drivers for hardware that loses its keys on reset. - * - * Context: Process context. Takes and releases ksm->lock. 
- */ -void keyslot_manager_reprogram_all_keys(struct keyslot_manager *ksm) -{ - unsigned int slot; - - if (WARN_ON(keyslot_manager_is_passthrough(ksm))) - return; - - down_write(&ksm->lock); - for (slot = 0; slot < ksm->num_slots; slot++) { - const struct keyslot *slotp = &ksm->slots[slot]; - int err; - - if (slotp->key.crypto_mode == BLK_ENCRYPTION_MODE_INVALID) - continue; - - err = ksm->ksm_ll_ops.keyslot_program(ksm, &slotp->key, slot); - WARN_ON(err); - } - up_write(&ksm->lock); -} -EXPORT_SYMBOL_GPL(keyslot_manager_reprogram_all_keys); - -/** - * keyslot_manager_private() - return the private data stored with ksm - * @ksm: The keyslot manager - * - * Returns the private data passed to the ksm when it was created. - */ -void *keyslot_manager_private(struct keyslot_manager *ksm) -{ - return ksm->ll_priv_data; -} -EXPORT_SYMBOL_GPL(keyslot_manager_private); - -void keyslot_manager_destroy(struct keyslot_manager *ksm) -{ - if (ksm) { - kvfree(ksm->slot_hashtable); - memzero_explicit(ksm, struct_size(ksm, slots, ksm->num_slots)); - kvfree(ksm); - } -} -EXPORT_SYMBOL_GPL(keyslot_manager_destroy); - -/** - * keyslot_manager_create_passthrough() - Create a passthrough keyslot manager - * @ksm_ll_ops: The struct keyslot_mgmt_ll_ops - * @crypto_mode_supported: Bitmasks for supported encryption modes - * @ll_priv_data: Private data passed as is to the functions in ksm_ll_ops. - * - * Allocate memory for and initialize a passthrough keyslot manager. - * Called by e.g. storage drivers to set up a keyslot manager in their - * request_queue, when the storage driver wants to manage its keys by itself. - * This is useful for inline encryption hardware that don't have a small fixed - * number of keyslots, and for layered devices. - * - * See keyslot_manager_create() for more details about the parameters. - * - * Context: This function may sleep - * Return: Pointer to constructed keyslot manager or NULL on error. 
- */ -struct keyslot_manager *keyslot_manager_create_passthrough( - const struct keyslot_mgmt_ll_ops *ksm_ll_ops, - const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], - void *ll_priv_data) -{ - struct keyslot_manager *ksm; - - ksm = kzalloc(sizeof(*ksm), GFP_KERNEL); - if (!ksm) - return NULL; - - ksm->ksm_ll_ops = *ksm_ll_ops; - memcpy(ksm->crypto_mode_supported, crypto_mode_supported, - sizeof(ksm->crypto_mode_supported)); - ksm->ll_priv_data = ll_priv_data; - - init_rwsem(&ksm->lock); - - return ksm; -} -EXPORT_SYMBOL_GPL(keyslot_manager_create_passthrough); - -/** - * keyslot_manager_intersect_modes() - restrict supported modes by child device - * @parent: The keyslot manager for parent device - * @child: The keyslot manager for child device, or NULL - * - * Clear any crypto mode support bits in @parent that aren't set in @child. - * If @child is NULL, then all parent bits are cleared. - * - * Only use this when setting up the keyslot manager for a layered device, - * before it's been exposed yet. - */ -void keyslot_manager_intersect_modes(struct keyslot_manager *parent, - const struct keyslot_manager *child) -{ - if (child) { - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(child->crypto_mode_supported); i++) { - parent->crypto_mode_supported[i] &= - child->crypto_mode_supported[i]; - } - } else { - memset(parent->crypto_mode_supported, 0, - sizeof(parent->crypto_mode_supported)); - } -} -EXPORT_SYMBOL_GPL(keyslot_manager_intersect_modes); - -/** - * keyslot_manager_derive_raw_secret() - Derive software secret from wrapped key - * @ksm: The keyslot manager - * @wrapped_key: The wrapped key - * @wrapped_key_size: Size of the wrapped key in bytes - * @secret: (output) the software secret - * @secret_size: (output) the number of secret bytes to derive - * - * Given a hardware-wrapped key, ask the hardware to derive a secret which - * software can use for cryptographic tasks other than inline encryption. 
The - * derived secret is guaranteed to be cryptographically isolated from the key - * with which any inline encryption with this wrapped key would actually be - * done. I.e., both will be derived from the unwrapped key. - * - * Return: 0 on success, -EOPNOTSUPP if hardware-wrapped keys are unsupported, - * or another -errno code. - */ -int keyslot_manager_derive_raw_secret(struct keyslot_manager *ksm, - const u8 *wrapped_key, - unsigned int wrapped_key_size, - u8 *secret, unsigned int secret_size) -{ - int err; - - down_write(&ksm->lock); - if (ksm->ksm_ll_ops.derive_raw_secret) { - err = ksm->ksm_ll_ops.derive_raw_secret(ksm, wrapped_key, - wrapped_key_size, - secret, secret_size); - } else { - err = -EOPNOTSUPP; - } - up_write(&ksm->lock); - - return err; -} -EXPORT_SYMBOL_GPL(keyslot_manager_derive_raw_secret); diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index f3d7db1cc828..747edadb39ae 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -286,27 +286,6 @@ config DM_CRYPT If unsure, say N. -config DM_DEFAULT_KEY - tristate "Default-key target support" - depends on BLK_DEV_DM - depends on BLK_INLINE_ENCRYPTION - # dm-default-key doesn't require -o inlinecrypt, but it does currently - # rely on the inline encryption hooks being built into the kernel. - depends on FS_ENCRYPTION_INLINE_CRYPT - help - This device-mapper target allows you to create a device that - assigns a default encryption key to bios that aren't for the - contents of an encrypted file. - - This ensures that all blocks on-disk will be encrypted with - some key, without the performance hit of file contents being - encrypted twice when fscrypt (File-Based Encryption) is used. - - It is only appropriate to use dm-default-key when key - configuration is tightly controlled, like it is in Android, - such that all fscrypt keys are at least as hard to compromise - as the default key. 
- config DM_SNAPSHOT tristate "Snapshot target" depends on BLK_DEV_DM diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 1a03ebd1cee7..27962abad668 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -43,7 +43,6 @@ obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o obj-$(CONFIG_DM_BUFIO) += dm-bufio.o obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o obj-$(CONFIG_DM_CRYPT) += dm-crypt.o -obj-$(CONFIG_DM_DEFAULT_KEY) += dm-default-key.o obj-$(CONFIG_DM_DELAY) += dm-delay.o obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o diff --git a/drivers/md/dm-bow.c b/drivers/md/dm-bow.c index 96ddba82ed24..47289850c445 100644 --- a/drivers/md/dm-bow.c +++ b/drivers/md/dm-bow.c @@ -789,7 +789,6 @@ static int dm_bow_ctr(struct dm_target *ti, unsigned int argc, char **argv) rb_insert_color(&br->node, &bc->ranges); ti->discards_supported = true; - ti->may_passthrough_inline_crypto = true; return 0; diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c deleted file mode 100644 index 43a30c076aa6..000000000000 --- a/drivers/md/dm-default-key.c +++ /dev/null @@ -1,403 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2017 Google, Inc. - */ - -#include -#include -#include - -#define DM_MSG_PREFIX "default-key" - -#define DM_DEFAULT_KEY_MAX_KEY_SIZE 64 - -#define SECTOR_SIZE (1 << SECTOR_SHIFT) - -static const struct dm_default_key_cipher { - const char *name; - enum blk_crypto_mode_num mode_num; - int key_size; -} dm_default_key_ciphers[] = { - { - .name = "aes-xts-plain64", - .mode_num = BLK_ENCRYPTION_MODE_AES_256_XTS, - .key_size = 64, - }, { - .name = "xchacha12,aes-adiantum-plain64", - .mode_num = BLK_ENCRYPTION_MODE_ADIANTUM, - .key_size = 32, - }, -}; - -/** - * struct dm_default_c - private data of a default-key target - * @dev: the underlying device - * @start: starting sector of the range of @dev which this target actually maps. - * For this purpose a "sector" is 512 bytes. 
- * @cipher_string: the name of the encryption algorithm being used - * @iv_offset: starting offset for IVs. IVs are generated as if the target were - * preceded by @iv_offset 512-byte sectors. - * @sector_size: crypto sector size in bytes (usually 4096) - * @sector_bits: log2(sector_size) - * @key: the encryption key to use - */ -struct default_key_c { - struct dm_dev *dev; - sector_t start; - const char *cipher_string; - u64 iv_offset; - unsigned int sector_size; - unsigned int sector_bits; - struct blk_crypto_key key; -}; - -static const struct dm_default_key_cipher * -lookup_cipher(const char *cipher_string) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(dm_default_key_ciphers); i++) { - if (strcmp(cipher_string, dm_default_key_ciphers[i].name) == 0) - return &dm_default_key_ciphers[i]; - } - return NULL; -} - -static void default_key_dtr(struct dm_target *ti) -{ - struct default_key_c *dkc = ti->private; - int err; - - if (dkc->dev) { - err = blk_crypto_evict_key(dkc->dev->bdev->bd_queue, &dkc->key); - if (err && err != -ENOKEY) - DMWARN("Failed to evict crypto key: %d", err); - dm_put_device(ti, dkc->dev); - } - kzfree(dkc->cipher_string); - kzfree(dkc); -} - -static int default_key_ctr_optional(struct dm_target *ti, - unsigned int argc, char **argv) -{ - struct default_key_c *dkc = ti->private; - struct dm_arg_set as; - static const struct dm_arg _args[] = { - {0, 3, "Invalid number of feature args"}, - }; - unsigned int opt_params; - const char *opt_string; - bool iv_large_sectors = false; - char dummy; - int err; - - as.argc = argc; - as.argv = argv; - - err = dm_read_arg_group(_args, &as, &opt_params, &ti->error); - if (err) - return err; - - while (opt_params--) { - opt_string = dm_shift_arg(&as); - if (!opt_string) { - ti->error = "Not enough feature arguments"; - return -EINVAL; - } - if (!strcmp(opt_string, "allow_discards")) { - ti->num_discard_bios = 1; - } else if (sscanf(opt_string, "sector_size:%u%c", - &dkc->sector_size, &dummy) == 1) { - if 
(dkc->sector_size < SECTOR_SIZE || - dkc->sector_size > 4096 || - !is_power_of_2(dkc->sector_size)) { - ti->error = "Invalid sector_size"; - return -EINVAL; - } - } else if (!strcmp(opt_string, "iv_large_sectors")) { - iv_large_sectors = true; - } else { - ti->error = "Invalid feature arguments"; - return -EINVAL; - } - } - - /* dm-default-key doesn't implement iv_large_sectors=false. */ - if (dkc->sector_size != SECTOR_SIZE && !iv_large_sectors) { - ti->error = "iv_large_sectors must be specified"; - return -EINVAL; - } - - return 0; -} - -/* - * Construct a default-key mapping: - * - * - * This syntax matches dm-crypt's, but lots of unneeded functionality has been - * removed. Also, dm-default-key requires that the "iv_large_sectors" option be - * given whenever a non-default sector size is used. - */ -static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) -{ - struct default_key_c *dkc; - const struct dm_default_key_cipher *cipher; - u8 raw_key[DM_DEFAULT_KEY_MAX_KEY_SIZE]; - unsigned long long tmpll; - char dummy; - int err; - - if (argc < 5) { - ti->error = "Not enough arguments"; - return -EINVAL; - } - - dkc = kzalloc(sizeof(*dkc), GFP_KERNEL); - if (!dkc) { - ti->error = "Out of memory"; - return -ENOMEM; - } - ti->private = dkc; - - /* */ - dkc->cipher_string = kstrdup(argv[0], GFP_KERNEL); - if (!dkc->cipher_string) { - ti->error = "Out of memory"; - err = -ENOMEM; - goto bad; - } - cipher = lookup_cipher(dkc->cipher_string); - if (!cipher) { - ti->error = "Unsupported cipher"; - err = -EINVAL; - goto bad; - } - - /* */ - if (strlen(argv[1]) != 2 * cipher->key_size) { - ti->error = "Incorrect key size for cipher"; - err = -EINVAL; - goto bad; - } - if (hex2bin(raw_key, argv[1], cipher->key_size) != 0) { - ti->error = "Malformed key string"; - err = -EINVAL; - goto bad; - } - - /* */ - if (sscanf(argv[2], "%llu%c", &dkc->iv_offset, &dummy) != 1) { - ti->error = "Invalid iv_offset sector"; - err = -EINVAL; - goto bad; - } - - /* */ 
- err = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), - &dkc->dev); - if (err) { - ti->error = "Device lookup failed"; - goto bad; - } - - /* */ - if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || - tmpll != (sector_t)tmpll) { - ti->error = "Invalid start sector"; - err = -EINVAL; - goto bad; - } - dkc->start = tmpll; - - /* optional arguments */ - dkc->sector_size = SECTOR_SIZE; - if (argc > 5) { - err = default_key_ctr_optional(ti, argc - 5, &argv[5]); - if (err) - goto bad; - } - dkc->sector_bits = ilog2(dkc->sector_size); - if (ti->len & ((dkc->sector_size >> SECTOR_SHIFT) - 1)) { - ti->error = "Device size is not a multiple of sector_size"; - err = -EINVAL; - goto bad; - } - - err = blk_crypto_init_key(&dkc->key, raw_key, cipher->key_size, - cipher->mode_num, dkc->sector_size); - if (err) { - ti->error = "Error initializing blk-crypto key"; - goto bad; - } - - err = blk_crypto_start_using_mode(cipher->mode_num, dkc->sector_size, - dkc->dev->bdev->bd_queue); - if (err) { - ti->error = "Error starting to use blk-crypto"; - goto bad; - } - - ti->num_flush_bios = 1; - - ti->may_passthrough_inline_crypto = true; - - err = 0; - goto out; - -bad: - default_key_dtr(ti); -out: - memzero_explicit(raw_key, sizeof(raw_key)); - return err; -} - -static int default_key_map(struct dm_target *ti, struct bio *bio) -{ - const struct default_key_c *dkc = ti->private; - sector_t sector_in_target; - u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { 0 }; - - bio_set_dev(bio, dkc->dev->bdev); - - /* - * If the bio is a device-level request which doesn't target a specific - * sector, there's nothing more to do. - */ - if (bio_sectors(bio) == 0) - return DM_MAPIO_REMAPPED; - - /* Map the bio's sector to the underlying device. (512-byte sectors) */ - sector_in_target = dm_target_offset(ti, bio->bi_iter.bi_sector); - bio->bi_iter.bi_sector = dkc->start + sector_in_target; - - /* - * If the bio should skip dm-default-key (i.e. 
if it's for an encrypted - * file's contents), or if it doesn't have any data (e.g. if it's a - * DISCARD request), there's nothing more to do. - */ - if (bio_should_skip_dm_default_key(bio) || !bio_has_data(bio)) - return DM_MAPIO_REMAPPED; - - /* - * Else, dm-default-key needs to set this bio's encryption context. - * It must not already have one. - */ - if (WARN_ON_ONCE(bio_has_crypt_ctx(bio))) - return DM_MAPIO_KILL; - - /* Calculate the DUN and enforce data-unit (crypto sector) alignment. */ - dun[0] = dkc->iv_offset + sector_in_target; /* 512-byte sectors */ - if (dun[0] & ((dkc->sector_size >> SECTOR_SHIFT) - 1)) - return DM_MAPIO_KILL; - dun[0] >>= dkc->sector_bits - SECTOR_SHIFT; /* crypto sectors */ - - bio_crypt_set_ctx(bio, &dkc->key, dun, GFP_NOIO); - - return DM_MAPIO_REMAPPED; -} - -static void default_key_status(struct dm_target *ti, status_type_t type, - unsigned int status_flags, char *result, - unsigned int maxlen) -{ - const struct default_key_c *dkc = ti->private; - unsigned int sz = 0; - int num_feature_args = 0; - - switch (type) { - case STATUSTYPE_INFO: - result[0] = '\0'; - break; - - case STATUSTYPE_TABLE: - /* Omit the key for now. */ - DMEMIT("%s - %llu %s %llu", dkc->cipher_string, dkc->iv_offset, - dkc->dev->name, (unsigned long long)dkc->start); - - num_feature_args += !!ti->num_discard_bios; - if (dkc->sector_size != SECTOR_SIZE) - num_feature_args += 2; - if (num_feature_args != 0) { - DMEMIT(" %d", num_feature_args); - if (ti->num_discard_bios) - DMEMIT(" allow_discards"); - if (dkc->sector_size != SECTOR_SIZE) { - DMEMIT(" sector_size:%u", dkc->sector_size); - DMEMIT(" iv_large_sectors"); - } - } - break; - } -} - -static int default_key_prepare_ioctl(struct dm_target *ti, - struct block_device **bdev, - fmode_t *mode) -{ - const struct default_key_c *dkc = ti->private; - const struct dm_dev *dev = dkc->dev; - - *bdev = dev->bdev; - - /* Only pass ioctls through if the device sizes match exactly. 
*/ - if (dkc->start != 0 || - ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) - return 1; - return 0; -} - -static int default_key_iterate_devices(struct dm_target *ti, - iterate_devices_callout_fn fn, - void *data) -{ - const struct default_key_c *dkc = ti->private; - - return fn(ti, dkc->dev, dkc->start, ti->len, data); -} - -static void default_key_io_hints(struct dm_target *ti, - struct queue_limits *limits) -{ - const struct default_key_c *dkc = ti->private; - const unsigned int sector_size = dkc->sector_size; - - limits->logical_block_size = - max_t(unsigned short, limits->logical_block_size, sector_size); - limits->physical_block_size = - max_t(unsigned int, limits->physical_block_size, sector_size); - limits->io_min = max_t(unsigned int, limits->io_min, sector_size); -} - -static struct target_type default_key_target = { - .name = "default-key", - .version = {2, 0, 0}, - .module = THIS_MODULE, - .ctr = default_key_ctr, - .dtr = default_key_dtr, - .map = default_key_map, - .status = default_key_status, - .prepare_ioctl = default_key_prepare_ioctl, - .iterate_devices = default_key_iterate_devices, - .io_hints = default_key_io_hints, -}; - -static int __init dm_default_key_init(void) -{ - return dm_register_target(&default_key_target); -} - -static void __exit dm_default_key_exit(void) -{ - dm_unregister_target(&default_key_target); -} - -module_init(dm_default_key_init); -module_exit(dm_default_key_exit); - -MODULE_AUTHOR("Paul Lawrence "); -MODULE_AUTHOR("Paul Crowley "); -MODULE_AUTHOR("Eric Biggers "); -MODULE_DESCRIPTION(DM_NAME " target for encrypting filesystem metadata"); -MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index d1fbf3d8b4cc..c06517031592 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -61,7 +61,6 @@ int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->num_discard_bios = 1; ti->num_write_same_bios = 1; ti->num_write_zeroes_bios = 1; - 
ti->may_passthrough_inline_crypto = true; ti->private = lc; return 0; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index f96075563620..852350e3cfe7 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -22,8 +22,6 @@ #include #include #include -#include -#include #define DM_MSG_PREFIX "table" @@ -1599,54 +1597,6 @@ static void dm_table_verify_integrity(struct dm_table *t) } } -#ifdef CONFIG_BLK_INLINE_ENCRYPTION -static int device_intersect_crypto_modes(struct dm_target *ti, - struct dm_dev *dev, sector_t start, - sector_t len, void *data) -{ - struct keyslot_manager *parent = data; - struct keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm; - - keyslot_manager_intersect_modes(parent, child); - return 0; -} - -/* - * Update the inline crypto modes supported by 'q->ksm' to be the intersection - * of the modes supported by all targets in the table. - * - * For any mode to be supported at all, all targets must have explicitly - * declared that they can pass through inline crypto support. For a particular - * mode to be supported, all underlying devices must also support it. - * - * Assume that 'q->ksm' initially declares all modes to be supported. 
- */ -static void dm_calculate_supported_crypto_modes(struct dm_table *t, - struct request_queue *q) -{ - struct dm_target *ti; - unsigned int i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); - - if (!ti->may_passthrough_inline_crypto) { - keyslot_manager_intersect_modes(q->ksm, NULL); - return; - } - if (!ti->type->iterate_devices) - continue; - ti->type->iterate_devices(ti, device_intersect_crypto_modes, - q->ksm); - } -} -#else /* CONFIG_BLK_INLINE_ENCRYPTION */ -static inline void dm_calculate_supported_crypto_modes(struct dm_table *t, - struct request_queue *q) -{ -} -#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ - static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { @@ -1921,8 +1871,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, dm_table_verify_integrity(t); - dm_calculate_supported_crypto_modes(t, q); - /* * Some devices don't use blk_integrity but still want stable pages * because they do their own checksumming. 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 0189f70e87a0..02ba6849f89d 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -24,8 +24,6 @@ #include #include #include -#include -#include #define DM_MSG_PREFIX "core" @@ -1251,10 +1249,9 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio, __bio_clone_fast(clone, bio); - bio_crypt_clone(clone, bio, GFP_NOIO); - if (unlikely(bio_integrity(bio) != NULL)) { int r; + if (unlikely(!dm_target_has_integrity(tio->ti->type) && !dm_target_passes_integrity(tio->ti->type))) { DMWARN("%s: the target %s doesn't support integrity data.", @@ -1664,8 +1661,6 @@ void dm_init_normal_md_queue(struct mapped_device *md) md->queue->backing_dev_info->congested_fn = dm_any_congested; } -static void dm_destroy_inline_encryption(struct request_queue *q); - static void cleanup_mapped_device(struct mapped_device *md) { if (md->wq) @@ -1690,10 +1685,8 @@ static void cleanup_mapped_device(struct mapped_device *md) put_disk(md->disk); } - if (md->queue) { - dm_destroy_inline_encryption(md->queue); + if (md->queue) blk_cleanup_queue(md->queue); - } cleanup_srcu_struct(&md->io_barrier); @@ -2042,89 +2035,6 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md) } EXPORT_SYMBOL_GPL(dm_get_queue_limits); -#ifdef CONFIG_BLK_INLINE_ENCRYPTION -struct dm_keyslot_evict_args { - const struct blk_crypto_key *key; - int err; -}; - -static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) -{ - struct dm_keyslot_evict_args *args = data; - int err; - - err = blk_crypto_evict_key(dev->bdev->bd_queue, args->key); - if (!args->err) - args->err = err; - /* Always try to evict the key from all devices. */ - return 0; -} - -/* - * When an inline encryption key is evicted from a device-mapper device, evict - * it from all the underlying devices. 
- */ -static int dm_keyslot_evict(struct keyslot_manager *ksm, - const struct blk_crypto_key *key, unsigned int slot) -{ - struct mapped_device *md = keyslot_manager_private(ksm); - struct dm_keyslot_evict_args args = { key }; - struct dm_table *t; - int srcu_idx; - int i; - struct dm_target *ti; - - t = dm_get_live_table(md, &srcu_idx); - if (!t) - return 0; - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); - if (!ti->type->iterate_devices) - continue; - ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args); - } - dm_put_live_table(md, srcu_idx); - return args.err; -} - -static struct keyslot_mgmt_ll_ops dm_ksm_ll_ops = { - .keyslot_evict = dm_keyslot_evict, -}; - -static int dm_init_inline_encryption(struct mapped_device *md) -{ - unsigned int mode_masks[BLK_ENCRYPTION_MODE_MAX]; - - /* - * Start out with all crypto mode support bits set. Any unsupported - * bits will be cleared later when calculating the device restrictions. - */ - memset(mode_masks, 0xFF, sizeof(mode_masks)); - - md->queue->ksm = keyslot_manager_create_passthrough(&dm_ksm_ll_ops, - mode_masks, md); - if (!md->queue->ksm) - return -ENOMEM; - return 0; -} - -static void dm_destroy_inline_encryption(struct request_queue *q) -{ - keyslot_manager_destroy(q->ksm); - q->ksm = NULL; -} -#else /* CONFIG_BLK_INLINE_ENCRYPTION */ -static inline int dm_init_inline_encryption(struct mapped_device *md) -{ - return 0; -} - -static inline void dm_destroy_inline_encryption(struct request_queue *q) -{ -} -#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ - /* * Setup the DM device's queue based on md's type */ @@ -2163,12 +2073,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) break; } - r = dm_init_inline_encryption(md); - if (r) { - DMERR("Cannot initialize inline encryption"); - return r; - } - return 0; } diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index e63ed53620d7..8d4ef369aa15 100644 --- a/drivers/scsi/ufs/Kconfig +++ 
b/drivers/scsi/ufs/Kconfig @@ -122,12 +122,3 @@ config SCSI_UFSHCD_CMD_LOGGING Select this if you want above mentioned debug information captured. If unsure, say N. - -config SCSI_UFS_CRYPTO - bool "UFS Crypto Engine Support" - depends on SCSI_UFSHCD && BLK_INLINE_ENCRYPTION - help - Enable Crypto Engine Support in UFS. - Enabling this makes it possible for the kernel to use the crypto - capabilities of the UFS device (if present) to perform crypto - operations on data being transferred to/from the device. diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 93a2e1a10335..bf374ee1f6e2 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -3,10 +3,8 @@ obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o -obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o -ufshcd-core-y := ufshcd.o +obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o obj-$(CONFIG_SCSI_UFS_TEST) += ufs_test.o obj-$(CONFIG_DEBUG_FS) += ufs-debugfs.o ufs-qcom-debugfs.o -ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index c93a6f5048d4..ff66f7c5893a 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1452,12 +1452,6 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) if (host->disable_lpm) hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; - /* - * Inline crypto is currently broken with ufs-qcom at least because the - * device tree doesn't include the crypto registers. There are likely - * to be other issues that will need to be addressed too. 
- */ - //hba->quirks |= UFSHCD_QUIRK_BROKEN_CRYPTO; } static void ufs_qcom_set_caps(struct ufs_hba *hba) diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c deleted file mode 100644 index 276b49ad13be..000000000000 --- a/drivers/scsi/ufs/ufshcd-crypto.c +++ /dev/null @@ -1,499 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright 2019 Google LLC - */ - -#include -#include "ufshcd.h" -#include "ufshcd-crypto.h" - -static bool ufshcd_cap_idx_valid(struct ufs_hba *hba, unsigned int cap_idx) -{ - return cap_idx < hba->crypto_capabilities.num_crypto_cap; -} - -static u8 get_data_unit_size_mask(unsigned int data_unit_size) -{ - if (data_unit_size < 512 || data_unit_size > 65536 || - !is_power_of_2(data_unit_size)) - return 0; - - return data_unit_size / 512; -} - -static size_t get_keysize_bytes(enum ufs_crypto_key_size size) -{ - switch (size) { - case UFS_CRYPTO_KEY_SIZE_128: - return 16; - case UFS_CRYPTO_KEY_SIZE_192: - return 24; - case UFS_CRYPTO_KEY_SIZE_256: - return 32; - case UFS_CRYPTO_KEY_SIZE_512: - return 64; - default: - return 0; - } -} - -int ufshcd_crypto_cap_find(struct ufs_hba *hba, - enum blk_crypto_mode_num crypto_mode, - unsigned int data_unit_size) -{ - enum ufs_crypto_alg ufs_alg; - u8 data_unit_mask; - int cap_idx; - enum ufs_crypto_key_size ufs_key_size; - union ufs_crypto_cap_entry *ccap_array = hba->crypto_cap_array; - - if (!ufshcd_hba_is_crypto_supported(hba)) - return -EINVAL; - - switch (crypto_mode) { - case BLK_ENCRYPTION_MODE_AES_256_XTS: - ufs_alg = UFS_CRYPTO_ALG_AES_XTS; - ufs_key_size = UFS_CRYPTO_KEY_SIZE_256; - break; - default: - return -EINVAL; - } - - data_unit_mask = get_data_unit_size_mask(data_unit_size); - - for (cap_idx = 0; cap_idx < hba->crypto_capabilities.num_crypto_cap; - cap_idx++) { - if (ccap_array[cap_idx].algorithm_id == ufs_alg && - (ccap_array[cap_idx].sdus_mask & data_unit_mask) && - ccap_array[cap_idx].key_size == ufs_key_size) - return cap_idx; - } - - return -EINVAL; 
-} -EXPORT_SYMBOL(ufshcd_crypto_cap_find); - -/** - * ufshcd_crypto_cfg_entry_write_key - Write a key into a crypto_cfg_entry - * - * Writes the key with the appropriate format - for AES_XTS, - * the first half of the key is copied as is, the second half is - * copied with an offset halfway into the cfg->crypto_key array. - * For the other supported crypto algs, the key is just copied. - * - * @cfg: The crypto config to write to - * @key: The key to write - * @cap: The crypto capability (which specifies the crypto alg and key size) - * - * Returns 0 on success, or -EINVAL - */ -static int ufshcd_crypto_cfg_entry_write_key(union ufs_crypto_cfg_entry *cfg, - const u8 *key, - union ufs_crypto_cap_entry cap) -{ - size_t key_size_bytes = get_keysize_bytes(cap.key_size); - - if (key_size_bytes == 0) - return -EINVAL; - - switch (cap.algorithm_id) { - case UFS_CRYPTO_ALG_AES_XTS: - key_size_bytes *= 2; - if (key_size_bytes > UFS_CRYPTO_KEY_MAX_SIZE) - return -EINVAL; - - memcpy(cfg->crypto_key, key, key_size_bytes/2); - memcpy(cfg->crypto_key + UFS_CRYPTO_KEY_MAX_SIZE/2, - key + key_size_bytes/2, key_size_bytes/2); - return 0; - case UFS_CRYPTO_ALG_BITLOCKER_AES_CBC: - /* fall through */ - case UFS_CRYPTO_ALG_AES_ECB: - /* fall through */ - case UFS_CRYPTO_ALG_ESSIV_AES_CBC: - memcpy(cfg->crypto_key, key, key_size_bytes); - return 0; - } - - return -EINVAL; -} - -static int ufshcd_program_key(struct ufs_hba *hba, - const union ufs_crypto_cfg_entry *cfg, int slot) -{ - int i; - u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg); - int err; - - pm_runtime_get_sync(hba->dev); - ufshcd_hold(hba, false); - - if (hba->vops->program_key) { - err = hba->vops->program_key(hba, cfg, slot); - goto out; - } - - /* Clear the dword 16 */ - ufshcd_writel(hba, 0, slot_offset + 16 * sizeof(cfg->reg_val[0])); - /* Ensure that CFGE is cleared before programming the key */ - wmb(); - for (i = 0; i < 16; i++) { - ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[i]), - slot_offset + i 
* sizeof(cfg->reg_val[0])); - /* Spec says each dword in key must be written sequentially */ - wmb(); - } - /* Write dword 17 */ - ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[17]), - slot_offset + 17 * sizeof(cfg->reg_val[0])); - /* Dword 16 must be written last */ - wmb(); - /* Write dword 16 */ - ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[16]), - slot_offset + 16 * sizeof(cfg->reg_val[0])); - wmb(); - err = 0; -out: - ufshcd_release(hba); - pm_runtime_put_sync(hba->dev); - return err; -} - -static void ufshcd_clear_keyslot(struct ufs_hba *hba, int slot) -{ - union ufs_crypto_cfg_entry cfg = { 0 }; - int err; - - err = ufshcd_program_key(hba, &cfg, slot); - WARN_ON_ONCE(err); -} - -/* Clear all keyslots at driver init time */ -static void ufshcd_clear_all_keyslots(struct ufs_hba *hba) -{ - int slot; - - for (slot = 0; slot < ufshcd_num_keyslots(hba); slot++) - ufshcd_clear_keyslot(hba, slot); -} - -static int ufshcd_crypto_keyslot_program(struct keyslot_manager *ksm, - const struct blk_crypto_key *key, - unsigned int slot) -{ - struct ufs_hba *hba = keyslot_manager_private(ksm); - int err = 0; - u8 data_unit_mask; - union ufs_crypto_cfg_entry cfg; - int cap_idx; - - cap_idx = ufshcd_crypto_cap_find(hba, key->crypto_mode, - key->data_unit_size); - - if (!ufshcd_is_crypto_enabled(hba) || - !ufshcd_keyslot_valid(hba, slot) || - !ufshcd_cap_idx_valid(hba, cap_idx)) - return -EINVAL; - - data_unit_mask = get_data_unit_size_mask(key->data_unit_size); - - if (!(data_unit_mask & hba->crypto_cap_array[cap_idx].sdus_mask)) - return -EINVAL; - - memset(&cfg, 0, sizeof(cfg)); - cfg.data_unit_size = data_unit_mask; - cfg.crypto_cap_idx = cap_idx; - cfg.config_enable |= UFS_CRYPTO_CONFIGURATION_ENABLE; - - err = ufshcd_crypto_cfg_entry_write_key(&cfg, key->raw, - hba->crypto_cap_array[cap_idx]); - if (err) - return err; - - err = ufshcd_program_key(hba, &cfg, slot); - - memzero_explicit(&cfg, sizeof(cfg)); - - return err; -} - -static int ufshcd_crypto_keyslot_evict(struct 
keyslot_manager *ksm, - const struct blk_crypto_key *key, - unsigned int slot) -{ - struct ufs_hba *hba = keyslot_manager_private(ksm); - - if (!ufshcd_is_crypto_enabled(hba) || - !ufshcd_keyslot_valid(hba, slot)) - return -EINVAL; - - /* - * Clear the crypto cfg on the device. Clearing CFGE - * might not be sufficient, so just clear the entire cfg. - */ - ufshcd_clear_keyslot(hba, slot); - - return 0; -} - -/* Functions implementing UFSHCI v2.1 specification behaviour */ -void ufshcd_crypto_enable_spec(struct ufs_hba *hba) -{ - if (!ufshcd_hba_is_crypto_supported(hba)) - return; - - hba->caps |= UFSHCD_CAP_CRYPTO; - - /* Reset might clear all keys, so reprogram all the keys. */ - keyslot_manager_reprogram_all_keys(hba->ksm); -} -EXPORT_SYMBOL_GPL(ufshcd_crypto_enable_spec); - -void ufshcd_crypto_disable_spec(struct ufs_hba *hba) -{ - hba->caps &= ~UFSHCD_CAP_CRYPTO; -} -EXPORT_SYMBOL_GPL(ufshcd_crypto_disable_spec); - -static const struct keyslot_mgmt_ll_ops ufshcd_ksm_ops = { - .keyslot_program = ufshcd_crypto_keyslot_program, - .keyslot_evict = ufshcd_crypto_keyslot_evict, -}; - -enum blk_crypto_mode_num ufshcd_blk_crypto_mode_num_for_alg_dusize( - enum ufs_crypto_alg ufs_crypto_alg, - enum ufs_crypto_key_size key_size) -{ - /* - * This is currently the only mode that UFS and blk-crypto both support. - */ - if (ufs_crypto_alg == UFS_CRYPTO_ALG_AES_XTS && - key_size == UFS_CRYPTO_KEY_SIZE_256) - return BLK_ENCRYPTION_MODE_AES_256_XTS; - - return BLK_ENCRYPTION_MODE_INVALID; -} - -/** - * ufshcd_hba_init_crypto - Read crypto capabilities, init crypto fields in hba - * @hba: Per adapter instance - * - * Return: 0 if crypto was initialized or is not supported, else a -errno value. 
- */ -int ufshcd_hba_init_crypto_spec(struct ufs_hba *hba, - const struct keyslot_mgmt_ll_ops *ksm_ops) -{ - int cap_idx = 0; - int err = 0; - unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX]; - enum blk_crypto_mode_num blk_mode_num; - - /* Default to disabling crypto */ - hba->caps &= ~UFSHCD_CAP_CRYPTO; - - /* Return 0 if crypto support isn't present */ - if (!(hba->capabilities & MASK_CRYPTO_SUPPORT) || - (hba->quirks & UFSHCD_QUIRK_BROKEN_CRYPTO)) - goto out; - - /* - * Crypto Capabilities should never be 0, because the - * config_array_ptr > 04h. So we use a 0 value to indicate that - * crypto init failed, and can't be enabled. - */ - hba->crypto_capabilities.reg_val = - cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP)); - hba->crypto_cfg_register = - (u32)hba->crypto_capabilities.config_array_ptr * 0x100; - hba->crypto_cap_array = - devm_kcalloc(hba->dev, - hba->crypto_capabilities.num_crypto_cap, - sizeof(hba->crypto_cap_array[0]), - GFP_KERNEL); - if (!hba->crypto_cap_array) { - err = -ENOMEM; - goto out; - } - - memset(crypto_modes_supported, 0, sizeof(crypto_modes_supported)); - /* - * Store all the capabilities now so that we don't need to repeatedly - * access the device each time we want to know its capabilities - */ - for (cap_idx = 0; cap_idx < hba->crypto_capabilities.num_crypto_cap; - cap_idx++) { - hba->crypto_cap_array[cap_idx].reg_val = - cpu_to_le32(ufshcd_readl(hba, - REG_UFS_CRYPTOCAP + - cap_idx * sizeof(__le32))); - blk_mode_num = ufshcd_blk_crypto_mode_num_for_alg_dusize( - hba->crypto_cap_array[cap_idx].algorithm_id, - hba->crypto_cap_array[cap_idx].key_size); - if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID) - continue; - crypto_modes_supported[blk_mode_num] |= - hba->crypto_cap_array[cap_idx].sdus_mask * 512; - } - - ufshcd_clear_all_keyslots(hba); - - hba->ksm = keyslot_manager_create(ufshcd_num_keyslots(hba), ksm_ops, - crypto_modes_supported, hba); - - if (!hba->ksm) { - err = -ENOMEM; - goto out_free_caps; - } - - return 0; 
- -out_free_caps: - devm_kfree(hba->dev, hba->crypto_cap_array); -out: - /* Indicate that init failed by setting crypto_capabilities to 0 */ - hba->crypto_capabilities.reg_val = 0; - return err; -} -EXPORT_SYMBOL_GPL(ufshcd_hba_init_crypto_spec); - -void ufshcd_crypto_setup_rq_keyslot_manager_spec(struct ufs_hba *hba, - struct request_queue *q) -{ - if (!ufshcd_hba_is_crypto_supported(hba) || !q) - return; - - q->ksm = hba->ksm; -} -EXPORT_SYMBOL_GPL(ufshcd_crypto_setup_rq_keyslot_manager_spec); - -void ufshcd_crypto_destroy_rq_keyslot_manager_spec(struct ufs_hba *hba, - struct request_queue *q) -{ - keyslot_manager_destroy(hba->ksm); -} -EXPORT_SYMBOL_GPL(ufshcd_crypto_destroy_rq_keyslot_manager_spec); - -int ufshcd_prepare_lrbp_crypto_spec(struct ufs_hba *hba, - struct scsi_cmnd *cmd, - struct ufshcd_lrb *lrbp) -{ - struct bio_crypt_ctx *bc; - - if (!bio_crypt_should_process(cmd->request)) { - lrbp->crypto_enable = false; - return 0; - } - bc = cmd->request->bio->bi_crypt_context; - - if (WARN_ON(!ufshcd_is_crypto_enabled(hba))) { - /* - * Upper layer asked us to do inline encryption - * but that isn't enabled, so we fail this request. 
- */ - return -EINVAL; - } - if (!ufshcd_keyslot_valid(hba, bc->bc_keyslot)) - return -EINVAL; - - lrbp->crypto_enable = true; - lrbp->crypto_key_slot = bc->bc_keyslot; - lrbp->data_unit_num = bc->bc_dun[0]; - - return 0; -} -EXPORT_SYMBOL_GPL(ufshcd_prepare_lrbp_crypto_spec); - -/* Crypto Variant Ops Support */ - -void ufshcd_crypto_enable(struct ufs_hba *hba) -{ - if (hba->crypto_vops && hba->crypto_vops->enable) - return hba->crypto_vops->enable(hba); - - return ufshcd_crypto_enable_spec(hba); -} - -void ufshcd_crypto_disable(struct ufs_hba *hba) -{ - if (hba->crypto_vops && hba->crypto_vops->disable) - return hba->crypto_vops->disable(hba); - - return ufshcd_crypto_disable_spec(hba); -} - -int ufshcd_hba_init_crypto(struct ufs_hba *hba) -{ - if (hba->crypto_vops && hba->crypto_vops->hba_init_crypto) - return hba->crypto_vops->hba_init_crypto(hba, - &ufshcd_ksm_ops); - - return ufshcd_hba_init_crypto_spec(hba, &ufshcd_ksm_ops); -} - -void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba, - struct request_queue *q) -{ - if (hba->crypto_vops && hba->crypto_vops->setup_rq_keyslot_manager) - return hba->crypto_vops->setup_rq_keyslot_manager(hba, q); - - return ufshcd_crypto_setup_rq_keyslot_manager_spec(hba, q); -} - -void ufshcd_crypto_destroy_rq_keyslot_manager(struct ufs_hba *hba, - struct request_queue *q) -{ - if (hba->crypto_vops && hba->crypto_vops->destroy_rq_keyslot_manager) - return hba->crypto_vops->destroy_rq_keyslot_manager(hba, q); - - return ufshcd_crypto_destroy_rq_keyslot_manager_spec(hba, q); -} - -int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba, - struct scsi_cmnd *cmd, - struct ufshcd_lrb *lrbp) -{ - if (hba->crypto_vops && hba->crypto_vops->prepare_lrbp_crypto) - return hba->crypto_vops->prepare_lrbp_crypto(hba, cmd, lrbp); - - return ufshcd_prepare_lrbp_crypto_spec(hba, cmd, lrbp); -} - -int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba, - struct scsi_cmnd *cmd, - struct ufshcd_lrb *lrbp) -{ - if (hba->crypto_vops && 
hba->crypto_vops->complete_lrbp_crypto) - return hba->crypto_vops->complete_lrbp_crypto(hba, cmd, lrbp); - - return 0; -} - -void ufshcd_crypto_debug(struct ufs_hba *hba) -{ - if (hba->crypto_vops && hba->crypto_vops->debug) - hba->crypto_vops->debug(hba); -} - -int ufshcd_crypto_suspend(struct ufs_hba *hba, - enum ufs_pm_op pm_op) -{ - if (hba->crypto_vops && hba->crypto_vops->suspend) - return hba->crypto_vops->suspend(hba, pm_op); - - return 0; -} - -int ufshcd_crypto_resume(struct ufs_hba *hba, - enum ufs_pm_op pm_op) -{ - if (hba->crypto_vops && hba->crypto_vops->resume) - return hba->crypto_vops->resume(hba, pm_op); - - return 0; -} - -void ufshcd_crypto_set_vops(struct ufs_hba *hba, - struct ufs_hba_crypto_variant_ops *crypto_vops) -{ - hba->crypto_vops = crypto_vops; -} diff --git a/drivers/scsi/ufs/ufshcd-crypto.h b/drivers/scsi/ufs/ufshcd-crypto.h deleted file mode 100644 index 95f37c9f7672..000000000000 --- a/drivers/scsi/ufs/ufshcd-crypto.h +++ /dev/null @@ -1,167 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright 2019 Google LLC - */ - -#ifndef _UFSHCD_CRYPTO_H -#define _UFSHCD_CRYPTO_H - -#ifdef CONFIG_SCSI_UFS_CRYPTO -#include -#include "ufshcd.h" -#include "ufshci.h" - -static inline int ufshcd_num_keyslots(struct ufs_hba *hba) -{ - return hba->crypto_capabilities.config_count + 1; -} - -static inline bool ufshcd_keyslot_valid(struct ufs_hba *hba, unsigned int slot) -{ - /* - * The actual number of configurations supported is (CFGC+1), so slot - * numbers range from 0 to config_count inclusive. 
- */ - return slot < ufshcd_num_keyslots(hba); -} - -static inline bool ufshcd_hba_is_crypto_supported(struct ufs_hba *hba) -{ - return hba->crypto_capabilities.reg_val != 0; -} - -static inline bool ufshcd_is_crypto_enabled(struct ufs_hba *hba) -{ - return hba->caps & UFSHCD_CAP_CRYPTO; -} - -/* Functions implementing UFSHCI v2.1 specification behaviour */ -int ufshcd_crypto_cap_find(struct ufs_hba *hba, - enum blk_crypto_mode_num crypto_mode, - unsigned int data_unit_size); - -int ufshcd_prepare_lrbp_crypto_spec(struct ufs_hba *hba, - struct scsi_cmnd *cmd, - struct ufshcd_lrb *lrbp); - -void ufshcd_crypto_enable_spec(struct ufs_hba *hba); - -void ufshcd_crypto_disable_spec(struct ufs_hba *hba); - -struct keyslot_mgmt_ll_ops; -int ufshcd_hba_init_crypto_spec(struct ufs_hba *hba, - const struct keyslot_mgmt_ll_ops *ksm_ops); - -void ufshcd_crypto_setup_rq_keyslot_manager_spec(struct ufs_hba *hba, - struct request_queue *q); - -void ufshcd_crypto_destroy_rq_keyslot_manager_spec(struct ufs_hba *hba, - struct request_queue *q); - -static inline bool ufshcd_lrbp_crypto_enabled(struct ufshcd_lrb *lrbp) -{ - return lrbp->crypto_enable; -} - -/* Crypto Variant Ops Support */ -void ufshcd_crypto_enable(struct ufs_hba *hba); - -void ufshcd_crypto_disable(struct ufs_hba *hba); - -int ufshcd_hba_init_crypto(struct ufs_hba *hba); - -void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba, - struct request_queue *q); - -void ufshcd_crypto_destroy_rq_keyslot_manager(struct ufs_hba *hba, - struct request_queue *q); - -int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba, - struct scsi_cmnd *cmd, - struct ufshcd_lrb *lrbp); - -int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba, - struct scsi_cmnd *cmd, - struct ufshcd_lrb *lrbp); - -void ufshcd_crypto_debug(struct ufs_hba *hba); - -int ufshcd_crypto_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op); - -int ufshcd_crypto_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op); - -void ufshcd_crypto_set_vops(struct ufs_hba *hba, 
- struct ufs_hba_crypto_variant_ops *crypto_vops); - -#else /* CONFIG_SCSI_UFS_CRYPTO */ - -static inline bool ufshcd_keyslot_valid(struct ufs_hba *hba, - unsigned int slot) -{ - return false; -} - -static inline bool ufshcd_hba_is_crypto_supported(struct ufs_hba *hba) -{ - return false; -} - -static inline bool ufshcd_is_crypto_enabled(struct ufs_hba *hba) -{ - return false; -} - -static inline void ufshcd_crypto_enable(struct ufs_hba *hba) { } - -static inline void ufshcd_crypto_disable(struct ufs_hba *hba) { } - -static inline int ufshcd_hba_init_crypto(struct ufs_hba *hba) -{ - return 0; -} - -static inline void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba, - struct request_queue *q) { } - -static inline void ufshcd_crypto_destroy_rq_keyslot_manager(struct ufs_hba *hba, - struct request_queue *q) { } - -static inline int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba, - struct scsi_cmnd *cmd, - struct ufshcd_lrb *lrbp) -{ - return 0; -} - -static inline bool ufshcd_lrbp_crypto_enabled(struct ufshcd_lrb *lrbp) -{ - return false; -} - -static inline int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba, - struct scsi_cmnd *cmd, - struct ufshcd_lrb *lrbp) -{ - return 0; -} - -static inline void ufshcd_crypto_debug(struct ufs_hba *hba) { } - -static inline int ufshcd_crypto_suspend(struct ufs_hba *hba, - enum ufs_pm_op pm_op) -{ - return 0; -} - -static inline int ufshcd_crypto_resume(struct ufs_hba *hba, - enum ufs_pm_op pm_op) -{ - return 0; -} - -static inline void ufshcd_crypto_set_vops(struct ufs_hba *hba, - struct ufs_hba_crypto_variant_ops *crypto_vops) { } - -#endif /* CONFIG_SCSI_UFS_CRYPTO */ - -#endif /* _UFSHCD_CRYPTO_H */ diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 55f2f1645c1f..d125c70bfe72 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -197,7 +197,6 @@ static void ufshcd_update_uic_error_cnt(struct ufs_hba *hba, u32 reg, int type) break; } } -#include "ufshcd-crypto.h" #define 
CREATE_TRACE_POINTS #include @@ -919,8 +918,6 @@ static inline void __ufshcd_print_host_regs(struct ufs_hba *hba, bool no_sleep) static void ufshcd_print_host_regs(struct ufs_hba *hba) { __ufshcd_print_host_regs(hba, false); - - ufshcd_crypto_debug(hba); } static @@ -1412,11 +1409,6 @@ static inline void ufshcd_hba_start(struct ufs_hba *hba) { u32 val = CONTROLLER_ENABLE; - if (ufshcd_hba_is_crypto_supported(hba)) { - ufshcd_crypto_enable(hba); - val |= CRYPTO_GENERAL_ENABLE; - } - ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE); } @@ -3399,23 +3391,9 @@ static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, dword_0 |= UTP_REQ_DESC_INT_CMD; /* Transfer request descriptor header fields */ - if (ufshcd_lrbp_crypto_enabled(lrbp)) { -#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO) - dword_0 |= UTP_REQ_DESC_CRYPTO_ENABLE_CMD; - dword_0 |= lrbp->crypto_key_slot; - req_desc->header.dword_1 = - cpu_to_le32(lower_32_bits(lrbp->data_unit_num)); - req_desc->header.dword_3 = - cpu_to_le32(upper_32_bits(lrbp->data_unit_num)); -#endif /* CONFIG_SCSI_UFS_CRYPTO */ - } else { - /* dword_1 and dword_3 are reserved, hence they are set to 0 */ - req_desc->header.dword_1 = 0; - req_desc->header.dword_3 = 0; - } - req_desc->header.dword_0 = cpu_to_le32(dword_0); - + /* dword_1 is reserved, hence it is set to 0 */ + req_desc->header.dword_1 = 0; /* * assigning invalid value for command status. Controller * updates OCS on command completion, with the command @@ -3423,6 +3401,8 @@ static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, */ req_desc->header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS); + /* dword_3 is reserved, hence it is set to 0 */ + req_desc->header.dword_3 = 0; req_desc->prd_table_length = 0; @@ -3800,13 +3780,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) lrbp->task_tag = tag; lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? 
true : false; - - err = ufshcd_prepare_lrbp_crypto(hba, cmd, lrbp); - if (err) { - lrbp->cmd = NULL; - clear_bit_unlock(tag, &hba->lrb_in_use); - goto out; - } lrbp->req_abort_skip = false; err = ufshcd_comp_scsi_upiu(hba, lrbp); @@ -3870,9 +3843,6 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, lrbp->task_tag = tag; lrbp->lun = 0; /* device management cmd is not specific to any LUN */ lrbp->intr_cmd = true; /* No interrupt aggregation */ -#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO) - lrbp->crypto_enable = false; /* No crypto operations */ -#endif hba->dev_cmd.type = cmd_type; return ufshcd_comp_devman_upiu(hba, lrbp); @@ -5726,8 +5696,6 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep) { int err; - ufshcd_crypto_disable(hba); - ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, CONTROLLER_ENABLE, CONTROLLER_DISABLE, @@ -6099,8 +6067,8 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) */ static int ufshcd_slave_configure(struct scsi_device *sdev) { - struct ufs_hba *hba = shost_priv(sdev->host); struct request_queue *q = sdev->request_queue; + struct ufs_hba *hba = shost_priv(sdev->host); blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX); @@ -6112,7 +6080,6 @@ static int ufshcd_slave_configure(struct scsi_device *sdev) sdev->autosuspend_delay = UFSHCD_AUTO_SUSPEND_DELAY_MS; sdev->use_rpm_auto = 1; - ufshcd_crypto_setup_rq_keyslot_manager(hba, q); return 0; } @@ -6124,7 +6091,6 @@ static int ufshcd_slave_configure(struct scsi_device *sdev) static void ufshcd_slave_destroy(struct scsi_device *sdev) { struct ufs_hba *hba; - struct request_queue *q = sdev->request_queue; hba = shost_priv(sdev->host); /* Drop the reference as it won't be needed anymore */ @@ -6135,8 +6101,6 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev) hba->sdev_ufs_device = NULL; 
spin_unlock_irqrestore(hba->host->host_lock, flags); } - - ufshcd_crypto_destroy_rq_keyslot_manager(hba, q); } /** @@ -6412,7 +6376,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, clear_bit_unlock(index, &hba->lrb_in_use); lrbp->complete_time_stamp = ktime_get(); update_req_stats(hba, lrbp); - ufshcd_complete_lrbp_crypto(hba, cmd, lrbp); /* Mark completed command as NULL in LRB */ lrbp->cmd = NULL; hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL; @@ -10142,10 +10105,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) req_link_state = UIC_LINK_OFF_STATE; } - ret = ufshcd_crypto_suspend(hba, pm_op); - if (ret) - goto out; - /* * If we can't transition into any of the low power modes * just gate the clocks. @@ -10266,7 +10225,6 @@ enable_gating: hba->hibern8_on_idle.is_suspended = false; hba->clk_gating.is_suspended = false; ufshcd_release_all(hba); - ufshcd_crypto_resume(hba, pm_op); out: hba->pm_op_in_progress = 0; @@ -10290,11 +10248,9 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) { int ret; enum uic_link_state old_link_state; - enum ufs_dev_pwr_mode old_pwr_mode; hba->pm_op_in_progress = 1; old_link_state = hba->uic_link_state; - old_pwr_mode = hba->curr_dev_pwr_mode; ufshcd_hba_vreg_set_hpm(hba); /* Make sure clocks are enabled before accessing controller */ @@ -10371,10 +10327,6 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) goto set_old_link_state; } - ret = ufshcd_crypto_resume(hba, pm_op); - if (ret) - goto set_old_dev_pwr_mode; - if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) ufshcd_enable_auto_bkops(hba); else @@ -10395,9 +10347,6 @@ skip_dev_ops: ufshcd_release_all(hba); goto out; -set_old_dev_pwr_mode: - if (old_pwr_mode != hba->curr_dev_pwr_mode) - ufshcd_set_dev_pwr_mode(hba, old_pwr_mode); set_old_link_state: ufshcd_link_state_transition(hba, old_link_state, 0); if (ufshcd_is_link_hibern8(hba) && @@ -11228,12 +11177,6 @@ int ufshcd_init(struct ufs_hba *hba, void 
__iomem *mmio_base, unsigned int irq) if (hba->force_g4) hba->reinit_g4_rate_A = true; - /* Init crypto */ - err = ufshcd_hba_init_crypto(hba); - if (err) { - dev_err(hba->dev, "crypto setup failed\n"); - goto out_remove_scsi_host; - } /* Host controller enable */ err = ufshcd_hba_enable(hba); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 790e2be33995..a51cc94ad603 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -197,9 +197,6 @@ struct ufs_pm_lvl_states { * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation) * @issue_time_stamp: time stamp for debug purposes * @complete_time_stamp: time stamp for statistics - * @crypto_enable: whether or not the request needs inline crypto operations - * @crypto_key_slot: the key slot to use for inline crypto - * @data_unit_num: the data unit number for the first block for inline crypto * @req_abort_skip: skip request abort task flag */ struct ufshcd_lrb { @@ -224,11 +221,6 @@ struct ufshcd_lrb { bool intr_cmd; ktime_t issue_time_stamp; ktime_t complete_time_stamp; -#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO) - bool crypto_enable; - u8 crypto_key_slot; - u64 data_unit_num; -#endif /* CONFIG_SCSI_UFS_CRYPTO */ bool req_abort_skip; }; @@ -310,8 +302,6 @@ struct ufs_pwr_mode_info { struct ufs_pa_layer_attr info; }; -union ufs_crypto_cfg_entry; - /** * struct ufs_hba_variant_ops - variant specific callbacks * @init: called when the driver is initialized @@ -342,7 +332,6 @@ union ufs_crypto_cfg_entry; * scale down * @set_bus_vote: called to vote for the required bus bandwidth * @phy_initialization: used to initialize phys - * @program_key: program an inline encryption key into a keyslot */ struct ufs_hba_variant_ops { int (*init)(struct ufs_hba *); @@ -378,8 +367,6 @@ struct ufs_hba_variant_ops { void (*add_debugfs)(struct ufs_hba *hba, struct dentry *root); void (*remove_debugfs)(struct ufs_hba *hba); #endif - int (*program_key)(struct ufs_hba *hba, - const union 
ufs_crypto_cfg_entry *cfg, int slot); }; /** @@ -401,28 +388,6 @@ struct ufs_hba_variant { struct ufs_hba_pm_qos_variant_ops *pm_qos_vops; }; -struct keyslot_mgmt_ll_ops; -struct ufs_hba_crypto_variant_ops { - void (*setup_rq_keyslot_manager)(struct ufs_hba *hba, - struct request_queue *q); - void (*destroy_rq_keyslot_manager)(struct ufs_hba *hba, - struct request_queue *q); - int (*hba_init_crypto)(struct ufs_hba *hba, - const struct keyslot_mgmt_ll_ops *ksm_ops); - void (*enable)(struct ufs_hba *hba); - void (*disable)(struct ufs_hba *hba); - int (*suspend)(struct ufs_hba *hba, enum ufs_pm_op pm_op); - int (*resume)(struct ufs_hba *hba, enum ufs_pm_op pm_op); - int (*debug)(struct ufs_hba *hba); - int (*prepare_lrbp_crypto)(struct ufs_hba *hba, - struct scsi_cmnd *cmd, - struct ufshcd_lrb *lrbp); - int (*complete_lrbp_crypto)(struct ufs_hba *hba, - struct scsi_cmnd *cmd, - struct ufshcd_lrb *lrbp); - void *priv; -}; - /* clock gating state */ enum clk_gating_state { CLKS_OFF, @@ -784,10 +749,6 @@ enum ufshcd_card_state { * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for * device is known or not. * @scsi_block_reqs_cnt: reference counting for scsi block requests - * @crypto_capabilities: Content of crypto capabilities register (0x100) - * @crypto_cap_array: Array of crypto capabilities - * @crypto_cfg_register: Start of the crypto cfg array - * @ksm: the keyslot manager tied to this hba */ struct ufs_hba { void __iomem *mmio_base; @@ -833,7 +794,6 @@ struct ufs_hba { struct ufs_hba_variant *var; void *priv; size_t sg_entry_size; - const struct ufs_hba_crypto_variant_ops *crypto_vops; unsigned int irq; bool is_irq_enabled; bool crash_on_err; @@ -923,12 +883,6 @@ struct ufs_hba { /* Auto hibern8 support is broken */ #define UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 UFS_BIT(15) - /* - * This quirk needs to be enabled if the host controller advertises - * inline encryption support but it doesn't work correctly. 
- */ - #define UFSHCD_QUIRK_BROKEN_CRYPTO UFS_BIT(16) - unsigned int quirks; /* Deviations from standard UFSHCI spec. */ wait_queue_head_t tm_wq; @@ -1041,11 +995,6 @@ struct ufs_hba { * in hibern8 then enable this cap. */ #define UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8 (1 << 7) - /* - * This capability allows the host controller driver to use the - * inline crypto engine, if it is present - */ -#define UFSHCD_CAP_CRYPTO (1 << 8) struct devfreq *devfreq; struct ufs_clk_scaling clk_scaling; @@ -1078,14 +1027,6 @@ struct ufs_hba { bool force_g4; /* distinguish between resume and restore */ bool restore; - -#ifdef CONFIG_SCSI_UFS_CRYPTO - /* crypto */ - union ufs_crypto_capabilities crypto_capabilities; - union ufs_crypto_cap_entry *crypto_cap_array; - u32 crypto_cfg_register; - struct keyslot_manager *ksm; -#endif /* CONFIG_SCSI_UFS_CRYPTO */ }; static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba) diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 764662fc685d..91f852764932 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -347,61 +347,6 @@ enum { INTERRUPT_MASK_ALL_VER_21 = 0x71FFF, }; -/* CCAP - Crypto Capability 100h */ -union ufs_crypto_capabilities { - __le32 reg_val; - struct { - u8 num_crypto_cap; - u8 config_count; - u8 reserved; - u8 config_array_ptr; - }; -}; - -enum ufs_crypto_key_size { - UFS_CRYPTO_KEY_SIZE_INVALID = 0x0, - UFS_CRYPTO_KEY_SIZE_128 = 0x1, - UFS_CRYPTO_KEY_SIZE_192 = 0x2, - UFS_CRYPTO_KEY_SIZE_256 = 0x3, - UFS_CRYPTO_KEY_SIZE_512 = 0x4, -}; - -enum ufs_crypto_alg { - UFS_CRYPTO_ALG_AES_XTS = 0x0, - UFS_CRYPTO_ALG_BITLOCKER_AES_CBC = 0x1, - UFS_CRYPTO_ALG_AES_ECB = 0x2, - UFS_CRYPTO_ALG_ESSIV_AES_CBC = 0x3, -}; - -/* x-CRYPTOCAP - Crypto Capability X */ -union ufs_crypto_cap_entry { - __le32 reg_val; - struct { - u8 algorithm_id; - u8 sdus_mask; /* Supported data unit size mask */ - u8 key_size; - u8 reserved; - }; -}; - -#define UFS_CRYPTO_CONFIGURATION_ENABLE (1 << 7) -#define 
UFS_CRYPTO_KEY_MAX_SIZE 64 -/* x-CRYPTOCFG - Crypto Configuration X */ -union ufs_crypto_cfg_entry { - __le32 reg_val[32]; - struct { - u8 crypto_key[UFS_CRYPTO_KEY_MAX_SIZE]; - u8 data_unit_size; - u8 crypto_cap_idx; - u8 reserved_1; - u8 config_enable; - u8 reserved_multi_host; - u8 reserved_2; - u8 vsb[2]; - u8 reserved_3[56]; - }; -}; - /* * Request Descriptor Definitions */ @@ -423,7 +368,6 @@ enum { UTP_NATIVE_UFS_COMMAND = 0x10000000, UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000, UTP_REQ_DESC_INT_CMD = 0x01000000, - UTP_REQ_DESC_CRYPTO_ENABLE_CMD = 0x00800000, }; /* UTP Transfer Request Data Direction (DD) */ diff --git a/fs/buffer.c b/fs/buffer.c index 5c85f4ef66bd..758b6056ad91 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -46,7 +46,6 @@ #include #include #include -#include static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, @@ -3173,8 +3172,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, */ bio = bio_alloc(GFP_NOIO, 1); - fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); - if (wbc) { wbc_init_bio(wbc, bio); wbc_account_io(wbc, bh->b_page, bh->b_size); diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig index 0701bb90f99c..4bc66f2c571e 100644 --- a/fs/crypto/Kconfig +++ b/fs/crypto/Kconfig @@ -15,9 +15,3 @@ config FS_ENCRYPTION efficient since it avoids caching the encrypted and decrypted pages in the page cache. Currently Ext4, F2FS and UBIFS make use of this feature. - -config FS_ENCRYPTION_INLINE_CRYPT - bool "Enable fscrypt to use inline crypto" - depends on FS_ENCRYPTION && BLK_INLINE_ENCRYPTION - help - Enable fscrypt to use inline encryption hardware if available. 
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile index 1a6b0774f3ff..0a78543f6cec 100644 --- a/fs/crypto/Makefile +++ b/fs/crypto/Makefile @@ -10,4 +10,3 @@ fscrypto-y := crypto.o \ policy.o fscrypto-$(CONFIG_BLOCK) += bio.o -fscrypto-$(CONFIG_FS_ENCRYPTION_INLINE_CRYPT) += inline_crypt.o diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index 9601e4bfc004..699bb4d426f2 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c @@ -46,35 +46,26 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, { const unsigned int blockbits = inode->i_blkbits; const unsigned int blocksize = 1 << blockbits; - const bool inlinecrypt = fscrypt_inode_uses_inline_crypto(inode); struct page *ciphertext_page; struct bio *bio; int ret, err = 0; - if (inlinecrypt) { - ciphertext_page = ZERO_PAGE(0); - } else { - ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT); - if (!ciphertext_page) - return -ENOMEM; - } + ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT); + if (!ciphertext_page) + return -ENOMEM; while (len--) { - if (!inlinecrypt) { - err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk, - ZERO_PAGE(0), ciphertext_page, - blocksize, 0, GFP_NOFS); - if (err) - goto errout; - } + err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk, + ZERO_PAGE(0), ciphertext_page, + blocksize, 0, GFP_NOFS); + if (err) + goto errout; bio = bio_alloc(GFP_NOWAIT, 1); if (!bio) { err = -ENOMEM; goto errout; } - fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOIO); - bio_set_dev(bio, inode->i_sb->s_bdev); bio->bi_iter.bi_sector = pblk << (blockbits - 9); bio_set_op_attrs(bio, REQ_OP_WRITE, 0); @@ -96,8 +87,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, } err = 0; errout: - if (!inlinecrypt) - fscrypt_free_bounce_page(ciphertext_page); + fscrypt_free_bounce_page(ciphertext_page); return err; } EXPORT_SYMBOL(fscrypt_zeroout_range); diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 41b4fe15b4b6..6e6f39ea18a7 100644 --- a/fs/crypto/crypto.c +++ 
b/fs/crypto/crypto.c @@ -96,7 +96,7 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw, DECLARE_CRYPTO_WAIT(wait); struct scatterlist dst, src; struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_skcipher *tfm = ci->ci_key.tfm; + struct crypto_skcipher *tfm = ci->ci_ctfm; int res = 0; if (WARN_ON_ONCE(len <= 0)) diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 3aafddaab703..3da3707c10e3 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -40,7 +40,7 @@ int fname_encrypt(struct inode *inode, const struct qstr *iname, struct skcipher_request *req = NULL; DECLARE_CRYPTO_WAIT(wait); struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_skcipher *tfm = ci->ci_key.tfm; + struct crypto_skcipher *tfm = ci->ci_ctfm; union fscrypt_iv iv; struct scatterlist sg; int res; @@ -93,7 +93,7 @@ static int fname_decrypt(struct inode *inode, DECLARE_CRYPTO_WAIT(wait); struct scatterlist src_sg, dst_sg; struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_skcipher *tfm = ci->ci_key.tfm; + struct crypto_skcipher *tfm = ci->ci_ctfm; union fscrypt_iv iv; int res; diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 739d8a9d24f5..94da6bad5f19 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -13,14 +13,12 @@ #include #include -#include #define CONST_STRLEN(str) (sizeof(str) - 1) #define FS_KEY_DERIVATION_NONCE_SIZE 16 #define FSCRYPT_MIN_KEY_SIZE 16 -#define FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE 128 #define FSCRYPT_CONTEXT_V1 1 #define FSCRYPT_CONTEXT_V2 2 @@ -153,20 +151,6 @@ struct fscrypt_symlink_data { char encrypted_path[1]; } __packed; -/** - * struct fscrypt_prepared_key - a key prepared for actual encryption/decryption - * @tfm: crypto API transform object - * @blk_key: key for blk-crypto - * - * Normally only one of the fields will be non-NULL. 
- */ -struct fscrypt_prepared_key { - struct crypto_skcipher *tfm; -#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT - struct fscrypt_blk_crypto_key *blk_key; -#endif -}; - /* * fscrypt_info - the "encryption key" for an inode * @@ -175,20 +159,15 @@ struct fscrypt_prepared_key { * inode is evicted. */ struct fscrypt_info { - /* The key in a form prepared for actual encryption/decryption */ - struct fscrypt_prepared_key ci_key; + /* The actual crypto transform used for encryption and decryption */ + u8 ci_data_mode; + u8 ci_filename_mode; + u8 ci_flags; + struct crypto_skcipher *ci_ctfm; /* True if the key should be freed when this fscrypt_info is freed */ bool ci_owns_key; -#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT - /* - * True if this inode will use inline encryption (blk-crypto) instead of - * the traditional filesystem-layer encryption. - */ - bool ci_inlinecrypt; -#endif - /* * Encryption mode used for this inode. It corresponds to either the * contents or filenames encryption mode, depending on the inode type. @@ -213,7 +192,7 @@ struct fscrypt_info { /* * If non-NULL, then encryption is done using the master key directly - * and ci_key will equal ci_direct_key->dk_key. + * and ci_ctfm will equal ci_direct_key->dk_ctfm. 
*/ struct fscrypt_direct_key *ci_direct_key; @@ -278,7 +257,6 @@ union fscrypt_iv { u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; }; u8 raw[FSCRYPT_MAX_IV_SIZE]; - __le64 dun[FSCRYPT_MAX_IV_SIZE / sizeof(__le64)]; }; void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, @@ -318,94 +296,6 @@ extern int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context, extern void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf); -/* inline_crypt.c */ -#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT -extern void fscrypt_select_encryption_impl(struct fscrypt_info *ci); - -static inline bool -fscrypt_using_inline_encryption(const struct fscrypt_info *ci) -{ - return ci->ci_inlinecrypt; -} - -extern int fscrypt_prepare_inline_crypt_key( - struct fscrypt_prepared_key *prep_key, - const u8 *raw_key, - unsigned int raw_key_size, - const struct fscrypt_info *ci); - -extern void fscrypt_destroy_inline_crypt_key( - struct fscrypt_prepared_key *prep_key); - -extern int fscrypt_derive_raw_secret(struct super_block *sb, - const u8 *wrapped_key, - unsigned int wrapped_key_size, - u8 *raw_secret, - unsigned int raw_secret_size); - -/* - * Check whether the crypto transform or blk-crypto key has been allocated in - * @prep_key, depending on which encryption implementation the file will use. - */ -static inline bool -fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, - const struct fscrypt_info *ci) -{ - /* - * The READ_ONCE() here pairs with the smp_store_release() in - * fscrypt_prepare_key(). (This only matters for the per-mode keys, - * which are shared by multiple inodes.) 
- */ - if (fscrypt_using_inline_encryption(ci)) - return READ_ONCE(prep_key->blk_key) != NULL; - return READ_ONCE(prep_key->tfm) != NULL; -} - -#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ - -static inline void fscrypt_select_encryption_impl(struct fscrypt_info *ci) -{ -} - -static inline bool fscrypt_using_inline_encryption( - const struct fscrypt_info *ci) -{ - return false; -} - -static inline int -fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, - const u8 *raw_key, unsigned int raw_key_size, - const struct fscrypt_info *ci) -{ - WARN_ON(1); - return -EOPNOTSUPP; -} - -static inline void -fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key) -{ -} - -static inline int fscrypt_derive_raw_secret(struct super_block *sb, - const u8 *wrapped_key, - unsigned int wrapped_key_size, - u8 *raw_secret, - unsigned int raw_secret_size) -{ - fscrypt_warn(NULL, - "kernel built without support for hardware-wrapped keys"); - return -EOPNOTSUPP; -} - -static inline bool -fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, - const struct fscrypt_info *ci) -{ - return READ_ONCE(prep_key->tfm) != NULL; -} -#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ - /* keyring.c */ /* @@ -422,15 +312,8 @@ struct fscrypt_master_key_secret { /* Size of the raw key in bytes. Set even if ->raw isn't set. */ u32 size; - /* True if the key in ->raw is a hardware-wrapped key. */ - bool is_hw_wrapped; - - /* - * For v1 policy keys: the raw key. Wiped for v2 policy keys, unless - * ->is_hw_wrapped is true, in which case this contains the wrapped key - * rather than the key with which 'hkdf' was keyed. - */ - u8 raw[FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE]; + /* For v1 policy keys: the raw key. Wiped for v2 policy keys. 
*/ + u8 raw[FSCRYPT_MAX_KEY_SIZE]; } __randomize_layout; @@ -502,11 +385,14 @@ struct fscrypt_master_key { struct list_head mk_decrypted_inodes; spinlock_t mk_decrypted_inodes_lock; - /* Per-mode keys for DIRECT_KEY policies, allocated on-demand */ - struct fscrypt_prepared_key mk_direct_keys[__FSCRYPT_MODE_MAX + 1]; + /* Crypto API transforms for DIRECT_KEY policies, allocated on-demand */ + struct crypto_skcipher *mk_direct_tfms[__FSCRYPT_MODE_MAX + 1]; - /* Per-mode keys for IV_INO_LBLK_64 policies, allocated on-demand */ - struct fscrypt_prepared_key mk_iv_ino_lblk_64_keys[__FSCRYPT_MODE_MAX + 1]; + /* + * Crypto API transforms for filesystem-layer implementation of + * IV_INO_LBLK_64 policies, allocated on-demand. + */ + struct crypto_skcipher *mk_iv_ino_lblk_64_tfms[__FSCRYPT_MODE_MAX + 1]; } __randomize_layout; @@ -563,22 +449,17 @@ struct fscrypt_mode { int keysize; int ivsize; int logged_impl_name; - enum blk_crypto_mode_num blk_crypto_mode; }; -extern struct fscrypt_mode fscrypt_modes[]; - static inline bool fscrypt_mode_supports_direct_key(const struct fscrypt_mode *mode) { return mode->ivsize >= offsetofend(union fscrypt_iv, nonce); } -extern int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, - const u8 *raw_key, unsigned int raw_key_size, - const struct fscrypt_info *ci); - -extern void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key); +extern struct crypto_skcipher * +fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key, + const struct inode *inode); extern int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key); diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c deleted file mode 100644 index 92c471d3db73..000000000000 --- a/fs/crypto/inline_crypt.c +++ /dev/null @@ -1,353 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Inline encryption support for fscrypt - * - * Copyright 2019 Google LLC - */ - -/* - * With "inline encryption", the block layer handles the 
decryption/encryption - * as part of the bio, instead of the filesystem doing the crypto itself via - * crypto API. See Documentation/block/inline-encryption.rst. fscrypt still - * provides the key and IV to use. - */ - -#include -#include -#include -#include -#include - -#include "fscrypt_private.h" - -struct fscrypt_blk_crypto_key { - struct blk_crypto_key base; - int num_devs; - struct request_queue *devs[]; -}; - -/* Enable inline encryption for this file if supported. */ -void fscrypt_select_encryption_impl(struct fscrypt_info *ci) -{ - const struct inode *inode = ci->ci_inode; - struct super_block *sb = inode->i_sb; - - /* The file must need contents encryption, not filenames encryption */ - if (!S_ISREG(inode->i_mode)) - return; - - /* blk-crypto must implement the needed encryption algorithm */ - if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID) - return; - - /* The filesystem must be mounted with -o inlinecrypt */ - if (!sb->s_cop->inline_crypt_enabled || - !sb->s_cop->inline_crypt_enabled(sb)) - return; - - ci->ci_inlinecrypt = true; -} - -int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, - const u8 *raw_key, - unsigned int raw_key_size, - const struct fscrypt_info *ci) -{ - const struct inode *inode = ci->ci_inode; - struct super_block *sb = inode->i_sb; - enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; - int num_devs = 1; - int queue_refs = 0; - struct fscrypt_blk_crypto_key *blk_key; - int err; - int i; - - if (sb->s_cop->get_num_devices) - num_devs = sb->s_cop->get_num_devices(sb); - if (WARN_ON(num_devs < 1)) - return -EINVAL; - - blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_NOFS); - if (!blk_key) - return -ENOMEM; - - blk_key->num_devs = num_devs; - if (num_devs == 1) - blk_key->devs[0] = bdev_get_queue(sb->s_bdev); - else - sb->s_cop->get_devices(sb, blk_key->devs); - - BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE > - BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE); - - err = 
blk_crypto_init_key(&blk_key->base, raw_key, raw_key_size, - crypto_mode, sb->s_blocksize); - if (err) { - fscrypt_err(inode, "error %d initializing blk-crypto key", err); - goto fail; - } - - /* - * We have to start using blk-crypto on all the filesystem's devices. - * We also have to save all the request_queue's for later so that the - * key can be evicted from them. This is needed because some keys - * aren't destroyed until after the filesystem was already unmounted - * (namely, the per-mode keys in struct fscrypt_master_key). - */ - for (i = 0; i < num_devs; i++) { - if (!blk_get_queue(blk_key->devs[i])) { - fscrypt_err(inode, "couldn't get request_queue"); - err = -EAGAIN; - goto fail; - } - queue_refs++; - - err = blk_crypto_start_using_mode(crypto_mode, sb->s_blocksize, - blk_key->devs[i]); - if (err) { - fscrypt_err(inode, - "error %d starting to use blk-crypto", err); - goto fail; - } - } - /* - * Pairs with READ_ONCE() in fscrypt_is_key_prepared(). (Only matters - * for the per-mode keys, which are shared by multiple inodes.) 
- */ - smp_store_release(&prep_key->blk_key, blk_key); - return 0; - -fail: - for (i = 0; i < queue_refs; i++) - blk_put_queue(blk_key->devs[i]); - kzfree(blk_key); - return err; -} - -void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key) -{ - struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key; - int i; - - if (blk_key) { - for (i = 0; i < blk_key->num_devs; i++) { - blk_crypto_evict_key(blk_key->devs[i], &blk_key->base); - blk_put_queue(blk_key->devs[i]); - } - kzfree(blk_key); - } -} - -int fscrypt_derive_raw_secret(struct super_block *sb, - const u8 *wrapped_key, - unsigned int wrapped_key_size, - u8 *raw_secret, unsigned int raw_secret_size) -{ - struct request_queue *q; - - q = sb->s_bdev->bd_queue; - if (!q->ksm) - return -EOPNOTSUPP; - - return keyslot_manager_derive_raw_secret(q->ksm, - wrapped_key, wrapped_key_size, - raw_secret, raw_secret_size); -} - -/** - * fscrypt_inode_uses_inline_crypto - test whether an inode uses inline - * encryption - * @inode: an inode - * - * Return: true if the inode requires file contents encryption and if the - * encryption should be done in the block layer via blk-crypto rather - * than in the filesystem layer. - */ -bool fscrypt_inode_uses_inline_crypto(const struct inode *inode) -{ - return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && - inode->i_crypt_info->ci_inlinecrypt; -} -EXPORT_SYMBOL_GPL(fscrypt_inode_uses_inline_crypto); - -/** - * fscrypt_inode_uses_fs_layer_crypto - test whether an inode uses fs-layer - * encryption - * @inode: an inode - * - * Return: true if the inode requires file contents encryption and if the - * encryption should be done in the filesystem layer rather than in the - * block layer via blk-crypto. 
- */ -bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode) -{ - return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && - !inode->i_crypt_info->ci_inlinecrypt; -} -EXPORT_SYMBOL_GPL(fscrypt_inode_uses_fs_layer_crypto); - -static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num, - u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]) -{ - union fscrypt_iv iv; - int i; - - fscrypt_generate_iv(&iv, lblk_num, ci); - - BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE); - memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE); - for (i = 0; i < ci->ci_mode->ivsize/sizeof(dun[0]); i++) - dun[i] = le64_to_cpu(iv.dun[i]); -} - -/** - * fscrypt_set_bio_crypt_ctx - prepare a file contents bio for inline encryption - * @bio: a bio which will eventually be submitted to the file - * @inode: the file's inode - * @first_lblk: the first file logical block number in the I/O - * @gfp_mask: memory allocation flags - these must be a waiting mask so that - * bio_crypt_set_ctx can't fail. - * - * If the contents of the file should be encrypted (or decrypted) with inline - * encryption, then assign the appropriate encryption context to the bio. - * - * Normally the bio should be newly allocated (i.e. no pages added yet), as - * otherwise fscrypt_mergeable_bio() won't work as intended. - * - * The encryption context will be freed automatically when the bio is freed. - * - * This function also handles setting bi_skip_dm_default_key when needed. 
- */ -void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, - u64 first_lblk, gfp_t gfp_mask) -{ - const struct fscrypt_info *ci = inode->i_crypt_info; - u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; - - if (fscrypt_inode_should_skip_dm_default_key(inode)) - bio_set_skip_dm_default_key(bio); - - if (!fscrypt_inode_uses_inline_crypto(inode)) - return; - - fscrypt_generate_dun(ci, first_lblk, dun); - bio_crypt_set_ctx(bio, &ci->ci_key.blk_key->base, dun, gfp_mask); -} -EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx); - -/* Extract the inode and logical block number from a buffer_head. */ -static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh, - const struct inode **inode_ret, - u64 *lblk_num_ret) -{ - struct page *page = bh->b_page; - const struct address_space *mapping; - const struct inode *inode; - - /* - * The ext4 journal (jbd2) can submit a buffer_head it directly created - * for a non-pagecache page. fscrypt doesn't care about these. - */ - mapping = page_mapping(page); - if (!mapping) - return false; - inode = mapping->host; - - *inode_ret = inode; - *lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) + - (bh_offset(bh) >> inode->i_blkbits); - return true; -} - -/** - * fscrypt_set_bio_crypt_ctx_bh - prepare a file contents bio for inline - * encryption - * @bio: a bio which will eventually be submitted to the file - * @first_bh: the first buffer_head for which I/O will be submitted - * @gfp_mask: memory allocation flags - * - * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead - * of an inode and block number directly. 
- */ -void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio, - const struct buffer_head *first_bh, - gfp_t gfp_mask) -{ - const struct inode *inode; - u64 first_lblk; - - if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk)) - fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask); -} -EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh); - -/** - * fscrypt_mergeable_bio - test whether data can be added to a bio - * @bio: the bio being built up - * @inode: the inode for the next part of the I/O - * @next_lblk: the next file logical block number in the I/O - * - * When building a bio which may contain data which should undergo inline - * encryption (or decryption) via fscrypt, filesystems should call this function - * to ensure that the resulting bio contains only logically contiguous data. - * This will return false if the next part of the I/O cannot be merged with the - * bio because either the encryption key would be different or the encryption - * data unit numbers would be discontiguous. - * - * fscrypt_set_bio_crypt_ctx() must have already been called on the bio. - * - * This function also returns false if the next part of the I/O would need to - * have a different value for the bi_skip_dm_default_key flag. - * - * Return: true iff the I/O is mergeable - */ -bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, - u64 next_lblk) -{ - const struct bio_crypt_ctx *bc = bio->bi_crypt_context; - u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; - - if (!!bc != fscrypt_inode_uses_inline_crypto(inode)) - return false; - if (bio_should_skip_dm_default_key(bio) != - fscrypt_inode_should_skip_dm_default_key(inode)) - return false; - if (!bc) - return true; - - /* - * Comparing the key pointers is good enough, as all I/O for each key - * uses the same pointer. I.e., there's currently no need to support - * merging requests where the keys are the same but the pointers differ. 
- */ - if (bc->bc_key != &inode->i_crypt_info->ci_key.blk_key->base) - return false; - - fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun); - return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun); -} -EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio); - -/** - * fscrypt_mergeable_bio_bh - test whether data can be added to a bio - * @bio: the bio being built up - * @next_bh: the next buffer_head for which I/O will be submitted - * - * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of - * an inode and block number directly. - * - * Return: true iff the I/O is mergeable - */ -bool fscrypt_mergeable_bio_bh(struct bio *bio, - const struct buffer_head *next_bh) -{ - const struct inode *inode; - u64 next_lblk; - - if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk)) - return !bio->bi_crypt_context && - !bio_should_skip_dm_default_key(bio); - - return fscrypt_mergeable_bio(bio, inode, next_lblk); -} -EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh); diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index 40ea4bc1059d..687f76590761 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -44,8 +44,8 @@ static void free_master_key(struct fscrypt_master_key *mk) wipe_master_key_secret(&mk->mk_secret); for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) { - fscrypt_destroy_prepared_key(&mk->mk_direct_keys[i]); - fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_64_keys[i]); + crypto_free_skcipher(mk->mk_direct_tfms[i]); + crypto_free_skcipher(mk->mk_iv_ino_lblk_64_tfms[i]); } key_put(mk->mk_users); @@ -469,10 +469,8 @@ static int fscrypt_provisioning_key_preparse(struct key_preparsed_payload *prep) { const struct fscrypt_provisioning_key_payload *payload = prep->data; - BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE < FSCRYPT_MAX_KEY_SIZE); - if (prep->datalen < sizeof(*payload) + FSCRYPT_MIN_KEY_SIZE || - prep->datalen > sizeof(*payload) + FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE) + prep->datalen > sizeof(*payload) + FSCRYPT_MAX_KEY_SIZE) 
return -EINVAL; if (payload->type != FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && @@ -569,8 +567,6 @@ out_put: key_ref_put(ref); return err; } -/* Size of software "secret" derived from hardware-wrapped key */ -#define RAW_SECRET_SIZE 32 /* * Add a master encryption key to the filesystem, causing all files which were @@ -602,9 +598,6 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) struct fscrypt_add_key_arg __user *uarg = _uarg; struct fscrypt_add_key_arg arg; struct fscrypt_master_key_secret secret; - u8 _kdf_key[RAW_SECRET_SIZE]; - u8 *kdf_key; - unsigned int kdf_key_size; int err; if (copy_from_user(&arg, uarg, sizeof(arg))) @@ -616,9 +609,6 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; - BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE < - FSCRYPT_MAX_KEY_SIZE); - memset(&secret, 0, sizeof(secret)); if (arg.key_id) { @@ -627,20 +617,16 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) err = get_keyring_key(arg.key_id, arg.key_spec.type, &secret); if (err) goto out_wipe_secret; - err = -EINVAL; - if (!(arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) && - secret.size > FSCRYPT_MAX_KEY_SIZE) - goto out_wipe_secret; } else { if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE || - arg.raw_size > - ((arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) ? 
- FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE : FSCRYPT_MAX_KEY_SIZE)) + arg.raw_size > FSCRYPT_MAX_KEY_SIZE) return -EINVAL; + secret.size = arg.raw_size; err = -EFAULT; - if (copy_from_user(secret.raw, uarg->raw, secret.size)) + if (copy_from_user(secret.raw, uarg->raw, secret.size)) { goto out_wipe_secret; + } } switch (arg.key_spec.type) { @@ -653,37 +639,18 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) err = -EACCES; if (!capable(CAP_SYS_ADMIN)) goto out_wipe_secret; - - err = -EINVAL; - if (arg.__flags) - goto out_wipe_secret; break; case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: - err = -EINVAL; - if (arg.__flags & ~__FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) - goto out_wipe_secret; - if (arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) { - kdf_key = _kdf_key; - kdf_key_size = RAW_SECRET_SIZE; - err = fscrypt_derive_raw_secret(sb, secret.raw, - secret.size, - kdf_key, kdf_key_size); - if (err) - goto out_wipe_secret; - secret.is_hw_wrapped = true; - } else { - kdf_key = secret.raw; - kdf_key_size = secret.size; - } - err = fscrypt_init_hkdf(&secret.hkdf, kdf_key, kdf_key_size); - /* - * Now that the HKDF context is initialized, the raw HKDF - * key is no longer needed. - */ - memzero_explicit(kdf_key, kdf_key_size); + err = fscrypt_init_hkdf(&secret.hkdf, secret.raw, secret.size); if (err) goto out_wipe_secret; + /* + * Now that the HKDF context is initialized, the raw key is no + * longer needed. + */ + memzero_explicit(secret.raw, secret.size); + /* Calculate the key identifier and return it to userspace. 
*/ err = fscrypt_hkdf_expand(&secret.hkdf, HKDF_CONTEXT_KEY_IDENTIFIER, diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index b51fc41395e0..0380ae882441 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -13,13 +13,12 @@ #include "fscrypt_private.h" -struct fscrypt_mode fscrypt_modes[] = { +static struct fscrypt_mode available_modes[] = { [FSCRYPT_MODE_AES_256_XTS] = { .friendly_name = "AES-256-XTS", .cipher_str = "xts(aes)", .keysize = 64, .ivsize = 16, - .blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS, }, [FSCRYPT_MODE_AES_256_CTS] = { .friendly_name = "AES-256-CTS-CBC", @@ -32,7 +31,6 @@ struct fscrypt_mode fscrypt_modes[] = { .cipher_str = "essiv(cbc(aes),sha256)", .keysize = 16, .ivsize = 16, - .blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV, }, [FSCRYPT_MODE_AES_128_CTS] = { .friendly_name = "AES-128-CTS-CBC", @@ -45,7 +43,6 @@ struct fscrypt_mode fscrypt_modes[] = { .cipher_str = "adiantum(xchacha12,aes)", .keysize = 32, .ivsize = 32, - .blk_crypto_mode = BLK_ENCRYPTION_MODE_ADIANTUM, }, [FSCRYPT_MODE_PRIVATE] = { .friendly_name = "ICE", @@ -59,10 +56,10 @@ select_encryption_mode(const union fscrypt_policy *policy, const struct inode *inode) { if (S_ISREG(inode->i_mode)) - return &fscrypt_modes[fscrypt_policy_contents_mode(policy)]; + return &available_modes[fscrypt_policy_contents_mode(policy)]; if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) - return &fscrypt_modes[fscrypt_policy_fnames_mode(policy)]; + return &available_modes[fscrypt_policy_fnames_mode(policy)]; WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n", inode->i_ino, (inode->i_mode & S_IFMT)); @@ -70,9 +67,9 @@ select_encryption_mode(const union fscrypt_policy *policy, } /* Create a symmetric cipher object for the given encryption mode and key */ -static struct crypto_skcipher * -fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key, - const struct inode *inode) +struct 
crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode, + const u8 *raw_key, + const struct inode *inode) { struct crypto_skcipher *tfm; int err; @@ -112,61 +109,30 @@ err_free_tfm: return ERR_PTR(err); } -/* - * Prepare the crypto transform object or blk-crypto key in @prep_key, given the - * raw key, encryption mode, and flag indicating which encryption implementation - * (fs-layer or blk-crypto) will be used. - */ -int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, - const u8 *raw_key, unsigned int raw_key_size, - const struct fscrypt_info *ci) -{ - struct crypto_skcipher *tfm; - - if (fscrypt_using_inline_encryption(ci)) - return fscrypt_prepare_inline_crypt_key(prep_key, - raw_key, raw_key_size, ci); - - if (WARN_ON(raw_key_size != ci->ci_mode->keysize)) - return -EINVAL; - - tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - /* - * Pairs with READ_ONCE() in fscrypt_is_key_prepared(). (Only matters - * for the per-mode keys, which are shared by multiple inodes.) - */ - smp_store_release(&prep_key->tfm, tfm); - return 0; -} - -/* Destroy a crypto transform object and/or blk-crypto key. 
*/ -void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key) -{ - crypto_free_skcipher(prep_key->tfm); - fscrypt_destroy_inline_crypt_key(prep_key); -} - /* Given the per-file key, set up the file's crypto transform object */ int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key) { + struct crypto_skcipher *tfm; + + tfm = fscrypt_allocate_skcipher(ci->ci_mode, derived_key, ci->ci_inode); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + ci->ci_ctfm = tfm; ci->ci_owns_key = true; - return fscrypt_prepare_key(&ci->ci_key, derived_key, - ci->ci_mode->keysize, ci); + return 0; } static int setup_per_mode_key(struct fscrypt_info *ci, struct fscrypt_master_key *mk, - struct fscrypt_prepared_key *keys, + struct crypto_skcipher **tfms, u8 hkdf_context, bool include_fs_uuid) { - static DEFINE_MUTEX(mode_key_setup_mutex); const struct inode *inode = ci->ci_inode; const struct super_block *sb = inode->i_sb; struct fscrypt_mode *mode = ci->ci_mode; - const u8 mode_num = mode - fscrypt_modes; - struct fscrypt_prepared_key *prep_key; + u8 mode_num = mode - available_modes; + struct crypto_skcipher *tfm, *prev_tfm; u8 mode_key[FSCRYPT_MAX_KEY_SIZE]; u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)]; unsigned int hkdf_infolen = 0; @@ -175,65 +141,39 @@ static int setup_per_mode_key(struct fscrypt_info *ci, if (WARN_ON(mode_num > __FSCRYPT_MODE_MAX)) return -EINVAL; - prep_key = &keys[mode_num]; - if (fscrypt_is_key_prepared(prep_key, ci)) { - ci->ci_key = *prep_key; - return 0; + /* pairs with cmpxchg() below */ + tfm = READ_ONCE(tfms[mode_num]); + if (likely(tfm != NULL)) + goto done; + + BUILD_BUG_ON(sizeof(mode_num) != 1); + BUILD_BUG_ON(sizeof(sb->s_uuid) != 16); + BUILD_BUG_ON(sizeof(hkdf_info) != 17); + hkdf_info[hkdf_infolen++] = mode_num; + if (include_fs_uuid) { + memcpy(&hkdf_info[hkdf_infolen], &sb->s_uuid, + sizeof(sb->s_uuid)); + hkdf_infolen += sizeof(sb->s_uuid); } + err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, + hkdf_context, 
hkdf_info, hkdf_infolen, + mode_key, mode->keysize); + if (err) + return err; + tfm = fscrypt_allocate_skcipher(mode, mode_key, inode); + memzero_explicit(mode_key, mode->keysize); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); - mutex_lock(&mode_key_setup_mutex); - - if (fscrypt_is_key_prepared(prep_key, ci)) - goto done_unlock; - - if (mk->mk_secret.is_hw_wrapped && S_ISREG(inode->i_mode)) { - int i; - - if (!fscrypt_using_inline_encryption(ci)) { - fscrypt_warn(ci->ci_inode, - "Hardware-wrapped keys require inline encryption (-o inlinecrypt)"); - err = -EINVAL; - goto out_unlock; - } - for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) { - if (fscrypt_is_key_prepared(&keys[i], ci)) { - fscrypt_warn(ci->ci_inode, - "Each hardware-wrapped key can only be used with one encryption mode"); - err = -EINVAL; - goto out_unlock; - } - } - err = fscrypt_prepare_key(prep_key, mk->mk_secret.raw, - mk->mk_secret.size, ci); - if (err) - goto out_unlock; - } else { - BUILD_BUG_ON(sizeof(mode_num) != 1); - BUILD_BUG_ON(sizeof(sb->s_uuid) != 16); - BUILD_BUG_ON(sizeof(hkdf_info) != 17); - hkdf_info[hkdf_infolen++] = mode_num; - if (include_fs_uuid) { - memcpy(&hkdf_info[hkdf_infolen], &sb->s_uuid, - sizeof(sb->s_uuid)); - hkdf_infolen += sizeof(sb->s_uuid); - } - err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, - hkdf_context, hkdf_info, hkdf_infolen, - mode_key, mode->keysize); - if (err) - goto out_unlock; - err = fscrypt_prepare_key(prep_key, mode_key, mode->keysize, - ci); - memzero_explicit(mode_key, mode->keysize); - if (err) - goto out_unlock; + /* pairs with READ_ONCE() above */ + prev_tfm = cmpxchg(&tfms[mode_num], NULL, tfm); + if (prev_tfm != NULL) { + crypto_free_skcipher(tfm); + tfm = prev_tfm; } -done_unlock: - ci->ci_key = *prep_key; - err = 0; -out_unlock: - mutex_unlock(&mode_key_setup_mutex); - return err; +done: + ci->ci_ctfm = tfm; + return 0; } static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, @@ -242,13 +182,6 @@ static int fscrypt_setup_v2_file_key(struct 
fscrypt_info *ci, u8 derived_key[FSCRYPT_MAX_KEY_SIZE]; int err; - if (mk->mk_secret.is_hw_wrapped && - !(ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)) { - fscrypt_warn(ci->ci_inode, - "Hardware-wrapped keys are only supported with IV_INO_LBLK_64 policies"); - return -EINVAL; - } - if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { /* * DIRECT_KEY: instead of deriving per-file keys, the per-file @@ -264,7 +197,7 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, ci->ci_mode->friendly_name); return -EINVAL; } - return setup_per_mode_key(ci, mk, mk->mk_direct_keys, + return setup_per_mode_key(ci, mk, mk->mk_direct_tfms, HKDF_CONTEXT_DIRECT_KEY, false); } else if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) { @@ -274,7 +207,7 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, * the IVs. This format is optimized for use with inline * encryption hardware compliant with the UFS or eMMC standards. */ - return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_keys, + return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_tfms, HKDF_CONTEXT_IV_INO_LBLK_64_KEY, true); } @@ -309,8 +242,6 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, struct fscrypt_key_specifier mk_spec; int err; - fscrypt_select_encryption_impl(ci); - switch (ci->ci_policy.version) { case FSCRYPT_POLICY_V1: mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR; @@ -403,7 +334,7 @@ static void put_crypt_info(struct fscrypt_info *ci) if (ci->ci_direct_key) fscrypt_put_direct_key(ci->ci_direct_key); else if (ci->ci_owns_key) - fscrypt_destroy_prepared_key(&ci->ci_key); + crypto_free_skcipher(ci->ci_ctfm); key = ci->ci_master_key; if (key) { diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index 47591c54dc3d..454fb03fc30e 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -146,7 +146,7 @@ struct fscrypt_direct_key { struct hlist_node dk_node; refcount_t dk_refcount; const struct fscrypt_mode 
*dk_mode; - struct fscrypt_prepared_key dk_key; + struct crypto_skcipher *dk_ctfm; u8 dk_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; u8 dk_raw[FSCRYPT_MAX_KEY_SIZE]; }; @@ -154,7 +154,7 @@ struct fscrypt_direct_key { static void free_direct_key(struct fscrypt_direct_key *dk) { if (dk) { - fscrypt_destroy_prepared_key(&dk->dk_key); + crypto_free_skcipher(dk->dk_ctfm); kzfree(dk); } } @@ -199,8 +199,6 @@ find_or_insert_direct_key(struct fscrypt_direct_key *to_insert, continue; if (ci->ci_mode != dk->dk_mode) continue; - if (!fscrypt_is_key_prepared(&dk->dk_key, ci)) - continue; if (crypto_memneq(raw_key, dk->dk_raw, ci->ci_mode->keysize)) continue; /* using existing tfm with same (descriptor, mode, raw_key) */ @@ -233,10 +231,13 @@ fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key) return ERR_PTR(-ENOMEM); refcount_set(&dk->dk_refcount, 1); dk->dk_mode = ci->ci_mode; - err = fscrypt_prepare_key(&dk->dk_key, raw_key, ci->ci_mode->keysize, - ci); - if (err) + dk->dk_ctfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, + ci->ci_inode); + if (IS_ERR(dk->dk_ctfm)) { + err = PTR_ERR(dk->dk_ctfm); + dk->dk_ctfm = NULL; goto err_free_dk; + } memcpy(dk->dk_descriptor, ci->ci_policy.v1.master_key_descriptor, FSCRYPT_KEY_DESCRIPTOR_SIZE); memcpy(dk->dk_raw, raw_key, ci->ci_mode->keysize); @@ -273,7 +274,7 @@ static int setup_v1_file_key_direct(struct fscrypt_info *ci, if (IS_ERR(dk)) return PTR_ERR(dk); ci->ci_direct_key = dk; - ci->ci_key = dk->dk_key; + ci->ci_ctfm = dk->dk_ctfm; return 0; } diff --git a/fs/direct-io.c b/fs/direct-io.c index 729c59213d2e..30bf22c989de 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -432,7 +431,6 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, sector_t first_sector, int nr_vecs) { struct bio *bio; - struct inode *inode = dio->inode; /* * bio_alloc() is guaranteed to return a bio when called with @@ -440,9 +438,6 @@ 
dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, */ bio = bio_alloc(GFP_KERNEL, nr_vecs); - fscrypt_set_bio_crypt_ctx(bio, inode, - sdio->cur_page_fs_offset >> inode->i_blkbits, - GFP_KERNEL); bio_set_dev(bio, bdev); bio->bi_iter.bi_sector = first_sector; bio_set_op_attrs(bio, dio->op, dio->op_flags); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index a2d6e8f0eb97..6c129067c07e 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1155,7 +1155,6 @@ struct ext4_inode_info { #define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable support for dio read nolocking */ #define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ -#define EXT4_MOUNT_INLINECRYPT 0x4000000 /* Inline encryption support */ #define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */ #define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */ #define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */ diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 096e4cc053dc..c134c701a034 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1236,7 +1236,8 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, (block_start < from || block_end > to)) { ll_rw_block(REQ_OP_READ, 0, 1, &bh); *wait_bh++ = bh; - decrypt = fscrypt_inode_uses_fs_layer_crypto(inode); + decrypt = IS_ENCRYPTED(inode) && + S_ISREG(inode->i_mode); } } /* @@ -3855,12 +3856,10 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter) ssize_t ret; int rw = iov_iter_rw(iter); - if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode)) { - if (!fscrypt_inode_uses_inline_crypto(inode) || - !IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), - i_blocksize(inode))) - return 0; - } +#ifdef CONFIG_FS_ENCRYPTION + if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) + return 0; +#endif if (fsverity_active(inode)) return 0; @@ -4068,7 +4067,8 @@ static int 
__ext4_block_zero_page_range(handle_t *handle, /* Uhhuh. Read error. Complain and punt. */ if (!buffer_uptodate(bh)) goto unlock; - if (fscrypt_inode_uses_fs_layer_crypto(inode)) { + if (S_ISREG(inode->i_mode) && + IS_ENCRYPTED(inode)) { /* We expect the key to be set. */ BUG_ON(!fscrypt_has_encryption_key(inode)); BUG_ON(blocksize != PAGE_SIZE); diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 6481742d666d..a6ec98d494b8 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -366,7 +366,6 @@ static int io_submit_init_bio(struct ext4_io_submit *io, bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); if (!bio) return -ENOMEM; - fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); wbc_init_bio(io->io_wbc, bio); bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio_set_dev(bio, bh->b_bdev); @@ -384,8 +383,7 @@ static int io_submit_add_bh(struct ext4_io_submit *io, { int ret; - if (io->io_bio && (bh->b_blocknr != io->io_next_block || - !fscrypt_mergeable_bio_bh(io->io_bio, bh))) { + if (io->io_bio && bh->b_blocknr != io->io_next_block) { submit_and_retry: ext4_io_submit(io); } @@ -471,7 +469,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, bh = head = page_buffers(page); - if (fscrypt_inode_uses_fs_layer_crypto(inode) && nr_to_submit) { + if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && nr_to_submit) { gfp_t gfp_flags = GFP_NOFS; /* diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index e4f684a71002..aefcd712df85 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -198,7 +198,7 @@ static struct bio_post_read_ctx *get_bio_post_read_ctx(struct inode *inode, unsigned int post_read_steps = 0; struct bio_post_read_ctx *ctx = NULL; - if (fscrypt_inode_uses_fs_layer_crypto(inode)) + if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) post_read_steps |= 1 << STEP_DECRYPT; if (ext4_need_verity(inode, first_idx)) @@ -259,7 +259,6 @@ int ext4_mpage_readpages(struct address_space *mapping, const unsigned blkbits = inode->i_blkbits; const unsigned 
blocks_per_page = PAGE_SIZE >> blkbits; const unsigned blocksize = 1 << blkbits; - sector_t next_block; sector_t block_in_file; sector_t last_block; sector_t last_block_in_file; @@ -291,8 +290,7 @@ int ext4_mpage_readpages(struct address_space *mapping, if (page_has_buffers(page)) goto confused; - block_in_file = next_block = - (sector_t)page->index << (PAGE_SHIFT - blkbits); + block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); last_block = block_in_file + nr_pages * blocks_per_page; last_block_in_file = (ext4_readpage_limit(inode) + blocksize - 1) >> blkbits; @@ -392,8 +390,7 @@ int ext4_mpage_readpages(struct address_space *mapping, * This page will go to BIO. Do we need to send this * BIO off first? */ - if (bio && (last_block_in_bio != blocks[0] - 1 || - !fscrypt_mergeable_bio(bio, inode, next_block))) { + if (bio && (last_block_in_bio != blocks[0] - 1)) { submit_and_realloc: ext4_submit_bio_read(bio); bio = NULL; @@ -405,8 +402,6 @@ int ext4_mpage_readpages(struct address_space *mapping, min_t(int, nr_pages, BIO_MAX_PAGES)); if (!bio) goto set_error_page; - fscrypt_set_bio_crypt_ctx(bio, inode, next_block, - GFP_KERNEL); ctx = get_bio_post_read_ctx(inode, bio, page->index); if (IS_ERR(ctx)) { bio_put(bio); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 8f6ee92e51db..c0e405bdab46 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1300,11 +1300,6 @@ static void ext4_get_ino_and_lblk_bits(struct super_block *sb, *lblk_bits_ret = 8 * sizeof(ext4_lblk_t); } -static bool ext4_inline_crypt_enabled(struct super_block *sb) -{ - return test_opt(sb, INLINECRYPT); -} - static const struct fscrypt_operations ext4_cryptops = { .key_prefix = "ext4:", .get_context = ext4_get_context, @@ -1314,7 +1309,6 @@ static const struct fscrypt_operations ext4_cryptops = { .max_namelen = EXT4_NAME_LEN, .has_stable_inodes = ext4_has_stable_inodes, .get_ino_and_lblk_bits = ext4_get_ino_and_lblk_bits, - .inline_crypt_enabled = ext4_inline_crypt_enabled, }; #endif @@ 
-1410,7 +1404,6 @@ enum { Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit, Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption, - Opt_inlinecrypt, Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, @@ -1504,7 +1497,6 @@ static const match_table_t tokens = { {Opt_noinit_itable, "noinit_itable"}, {Opt_max_dir_size_kb, "max_dir_size_kb=%u"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, - {Opt_inlinecrypt, "inlinecrypt"}, {Opt_nombcache, "nombcache"}, {Opt_nombcache, "no_mbcache"}, /* for backward compatibility */ {Opt_removed, "check=none"}, /* mount option from ext2/3 */ @@ -1714,11 +1706,6 @@ static const struct mount_opts { {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT}, {Opt_max_dir_size_kb, 0, MOPT_GTE0}, {Opt_test_dummy_encryption, 0, MOPT_GTE0}, -#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT - {Opt_inlinecrypt, EXT4_MOUNT_INLINECRYPT, MOPT_SET}, -#else - {Opt_inlinecrypt, EXT4_MOUNT_INLINECRYPT, MOPT_NOSUPPORT}, -#endif {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET}, {Opt_err, 0, 0} }; diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 19abef392da3..2d4c259624b3 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -467,37 +467,6 @@ static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages) return bio; } -static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, - pgoff_t first_idx, - const struct f2fs_io_info *fio, - gfp_t gfp_mask) -{ - /* - * The f2fs garbage collector sets ->encrypted_page when it wants to - * read/write raw data without encryption. 
- */ - if (!fio || !fio->encrypted_page) - fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask); - else if (fscrypt_inode_should_skip_dm_default_key(inode)) - bio_set_skip_dm_default_key(bio); -} - -static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode, - pgoff_t next_idx, - const struct f2fs_io_info *fio) -{ - /* - * The f2fs garbage collector sets ->encrypted_page when it wants to - * read/write raw data without encryption. - */ - if (fio && fio->encrypted_page) - return !bio_has_crypt_ctx(bio) && - (bio_should_skip_dm_default_key(bio) == - fscrypt_inode_should_skip_dm_default_key(inode)); - - return fscrypt_mergeable_bio(bio, inode, next_idx); -} - static inline void __submit_bio(struct f2fs_sb_info *sbi, struct bio *bio, enum page_type type) { @@ -743,9 +712,6 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) /* Allocate a new bio */ bio = __bio_alloc(fio, 1); - f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host, - fio->page->index, fio, GFP_NOIO); - if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { bio_put(bio); return -EFAULT; @@ -929,6 +895,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio) struct bio *bio = *fio->bio; struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page; + struct inode *inode; if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, __is_meta_io(fio) ? 
META_GENERIC : DATA_GENERIC)) @@ -937,17 +904,14 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio) trace_f2fs_submit_page_bio(page, fio); f2fs_trace_ios(fio, 0); - if (bio && (!page_is_mergeable(fio->sbi, bio, *fio->last_block, - fio->new_blkaddr) || - !f2fs_crypt_mergeable_bio(bio, fio->page->mapping->host, - fio->page->index, fio))) + inode = fio->page->mapping->host; + + if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block, + fio->new_blkaddr)) f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL); alloc_new: if (!bio) { bio = __bio_alloc(fio, BIO_MAX_PAGES); - f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host, - fio->page->index, fio, - GFP_NOIO); bio_set_op_attrs(bio, fio->op, fio->op_flags); add_bio_entry(fio->sbi, bio, page, fio->temp); } else { @@ -1003,11 +967,8 @@ next: inc_page_count(sbi, WB_DATA_TYPE(bio_page)); - if (io->bio && - (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio, - fio->new_blkaddr) || - !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host, - fio->page->index, fio))) + if (io->bio && !io_is_mergeable(sbi, io->bio, io, fio, + io->last_block_in_bio, fio->new_blkaddr)) __submit_merged_bio(io); alloc_new: if (io->bio == NULL) { @@ -1019,9 +980,6 @@ alloc_new: goto skip; } io->bio = __bio_alloc(fio, BIO_MAX_PAGES); - f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host, - fio->page->index, fio, - GFP_NOIO); io->fio = *fio; } @@ -1066,14 +1024,11 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, for_write); if (!bio) return ERR_PTR(-ENOMEM); - - f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS); - f2fs_target_device(sbi, blkaddr, bio); bio->bi_end_io = f2fs_read_end_io; bio_set_op_attrs(bio, REQ_OP_READ, op_flag); - if (fscrypt_inode_uses_fs_layer_crypto(inode)) + if (f2fs_encrypted_file(inode)) post_read_steps |= 1 << STEP_DECRYPT; if (f2fs_compressed_file(inode)) post_read_steps |= 1 << STEP_DECOMPRESS; @@ -2099,9 +2054,8 @@ zero_out: * This page will go to BIO. 
Do we need to send this * BIO off first? */ - if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio, - *last_block_in_bio, block_nr) || - !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) { + if (bio && !page_is_mergeable(F2FS_I_SB(inode), bio, + *last_block_in_bio, block_nr)) { submit_and_realloc: __f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA); bio = NULL; @@ -2452,9 +2406,6 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio) /* wait for GCed page writeback via META_MAPPING */ f2fs_wait_on_block_writeback(inode, fio->old_blkaddr); - if (fscrypt_inode_uses_inline_crypto(inode)) - return 0; - retry_encrypt: fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0, gfp_flags); @@ -2628,7 +2579,7 @@ got_it: f2fs_unlock_op(fio->sbi); err = f2fs_inplace_write_data(fio); if (err) { - if (fscrypt_inode_uses_fs_layer_crypto(inode)) + if (f2fs_encrypted_file(inode)) fscrypt_finalize_bounce_page(&fio->encrypted_page); if (PageWriteback(page)) end_page_writeback(page); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 4a365cf7f068..1e2c9a59393a 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -139,9 +139,6 @@ struct f2fs_mount_info { int fs_mode; /* fs mode: LFS or ADAPTIVE */ int bggc_mode; /* bggc mode: off, on or sync */ bool test_dummy_encryption; /* test dummy encryption */ -#ifdef CONFIG_FS_ENCRYPTION - bool inlinecrypt; /* inline encryption enabled */ -#endif block_t unusable_cap; /* Amount of space allowed to be * unusable when disabling checkpoint */ @@ -4038,13 +4035,7 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int rw = iov_iter_rw(iter); - if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && f2fs_encrypted_file(inode)) { - if (!fscrypt_inode_uses_inline_crypto(inode) || - !IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), - F2FS_BLKSIZE)) - return true; - } - if (fsverity_active(inode)) + if (f2fs_encrypted_file(inode)) return true; if (f2fs_is_multi_device(sbi)) return 
true; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index de737389ba94..ecef87cee77d 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -137,7 +137,6 @@ enum { Opt_alloc, Opt_fsync, Opt_test_dummy_encryption, - Opt_inlinecrypt, Opt_checkpoint_disable, Opt_checkpoint_disable_cap, Opt_checkpoint_disable_cap_perc, @@ -203,7 +202,6 @@ static match_table_t f2fs_tokens = { {Opt_alloc, "alloc_mode=%s"}, {Opt_fsync, "fsync_mode=%s"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, - {Opt_inlinecrypt, "inlinecrypt"}, {Opt_checkpoint_disable, "checkpoint=disable"}, {Opt_checkpoint_disable_cap, "checkpoint=disable:%u"}, {Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"}, @@ -789,13 +787,6 @@ static int parse_options(struct super_block *sb, char *options) f2fs_info(sbi, "Test dummy encryption mode enabled"); #else f2fs_info(sbi, "Test dummy encryption mount option ignored"); -#endif - break; - case Opt_inlinecrypt: -#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT - F2FS_OPTION(sbi).inlinecrypt = true; -#else - f2fs_info(sbi, "inline encryption not supported"); #endif break; case Opt_checkpoint_disable_cap_perc: @@ -1583,8 +1574,6 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) #ifdef CONFIG_FS_ENCRYPTION if (F2FS_OPTION(sbi).test_dummy_encryption) seq_puts(seq, ",test_dummy_encryption"); - if (F2FS_OPTION(sbi).inlinecrypt) - seq_puts(seq, ",inlinecrypt"); #endif if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT) @@ -1615,9 +1604,6 @@ static void default_options(struct f2fs_sb_info *sbi) F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; F2FS_OPTION(sbi).test_dummy_encryption = false; -#ifdef CONFIG_FS_ENCRYPTION - F2FS_OPTION(sbi).inlinecrypt = false; -#endif F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID); F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID); F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4; @@ -2470,30 +2456,6 @@ static void 
f2fs_get_ino_and_lblk_bits(struct super_block *sb, *lblk_bits_ret = 8 * sizeof(block_t); } -static bool f2fs_inline_crypt_enabled(struct super_block *sb) -{ - return F2FS_OPTION(F2FS_SB(sb)).inlinecrypt; -} - -static int f2fs_get_num_devices(struct super_block *sb) -{ - struct f2fs_sb_info *sbi = F2FS_SB(sb); - - if (f2fs_is_multi_device(sbi)) - return sbi->s_ndevs; - return 1; -} - -static void f2fs_get_devices(struct super_block *sb, - struct request_queue **devs) -{ - struct f2fs_sb_info *sbi = F2FS_SB(sb); - int i; - - for (i = 0; i < sbi->s_ndevs; i++) - devs[i] = bdev_get_queue(FDEV(i).bdev); -} - static const struct fscrypt_operations f2fs_cryptops = { .key_prefix = "f2fs:", .get_context = f2fs_get_context, @@ -2503,9 +2465,6 @@ static const struct fscrypt_operations f2fs_cryptops = { .max_namelen = F2FS_NAME_LEN, .has_stable_inodes = f2fs_has_stable_inodes, .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits, - .inline_crypt_enabled = f2fs_inline_crypt_enabled, - .get_num_devices = f2fs_get_num_devices, - .get_devices = f2fs_get_devices, }; #endif diff --git a/fs/iomap.c b/fs/iomap.c index 1e573a59ea71..3f5b1655cfce 100644 --- a/fs/iomap.c +++ b/fs/iomap.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -826,13 +825,10 @@ static blk_qc_t iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos, unsigned len) { - struct inode *inode = file_inode(dio->iocb->ki_filp); struct page *page = ZERO_PAGE(0); struct bio *bio; bio = bio_alloc(GFP_KERNEL, 1); - fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits, - GFP_KERNEL); bio_set_dev(bio, iomap->bdev); bio->bi_iter.bi_sector = iomap->blkno + ((pos - iomap->offset) >> 9); @@ -912,8 +908,6 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length, return 0; bio = bio_alloc(GFP_KERNEL, nr_pages); - fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits, - GFP_KERNEL); bio_set_dev(bio, iomap->bdev); bio->bi_iter.bi_sector = iomap->blkno + ((pos 
- iomap->offset) >> 9); diff --git a/include/linux/bio-crypt-ctx.h b/include/linux/bio-crypt-ctx.h deleted file mode 100644 index 12b46ece9c55..000000000000 --- a/include/linux/bio-crypt-ctx.h +++ /dev/null @@ -1,228 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright 2019 Google LLC - */ -#ifndef __LINUX_BIO_CRYPT_CTX_H -#define __LINUX_BIO_CRYPT_CTX_H - -#include - -enum blk_crypto_mode_num { - BLK_ENCRYPTION_MODE_INVALID, - BLK_ENCRYPTION_MODE_AES_256_XTS, - BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV, - BLK_ENCRYPTION_MODE_ADIANTUM, - BLK_ENCRYPTION_MODE_MAX, -}; - -#ifdef CONFIG_BLOCK -#include - -#ifdef CONFIG_BLK_INLINE_ENCRYPTION - -#define BLK_CRYPTO_MAX_KEY_SIZE 64 -#define BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE 128 - -/** - * struct blk_crypto_key - an inline encryption key - * @crypto_mode: encryption algorithm this key is for - * @data_unit_size: the data unit size for all encryption/decryptions with this - * key. This is the size in bytes of each individual plaintext and - * ciphertext. This is always a power of 2. It might be e.g. the - * filesystem block size or the disk sector size. - * @data_unit_size_bits: log2 of data_unit_size - * @size: size of this key in bytes (determined by @crypto_mode) - * @hash: hash of this key, for keyslot manager use only - * @raw: the raw bytes of this key. Only the first @size bytes are used. - * - * A blk_crypto_key is immutable once created, and many bios can reference it at - * the same time. It must not be freed until all bios using it have completed. 
- */ -struct blk_crypto_key { - enum blk_crypto_mode_num crypto_mode; - unsigned int data_unit_size; - unsigned int data_unit_size_bits; - unsigned int size; - unsigned int hash; - u8 raw[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE]; -}; - -#define BLK_CRYPTO_MAX_IV_SIZE 32 -#define BLK_CRYPTO_DUN_ARRAY_SIZE (BLK_CRYPTO_MAX_IV_SIZE/sizeof(u64)) - -/** - * struct bio_crypt_ctx - an inline encryption context - * @bc_key: the key, algorithm, and data unit size to use - * @bc_keyslot: the keyslot that has been assigned for this key in @bc_ksm, - * or -1 if no keyslot has been assigned yet. - * @bc_dun: the data unit number (starting IV) to use - * @bc_ksm: the keyslot manager into which the key has been programmed with - * @bc_keyslot, or NULL if this key hasn't yet been programmed. - * - * A bio_crypt_ctx specifies that the contents of the bio will be encrypted (for - * write requests) or decrypted (for read requests) inline by the storage device - * or controller, or by the crypto API fallback. - */ -struct bio_crypt_ctx { - const struct blk_crypto_key *bc_key; - int bc_keyslot; - - /* Data unit number */ - u64 bc_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; - - /* - * The keyslot manager where the key has been programmed - * with keyslot. 
- */ - struct keyslot_manager *bc_ksm; -}; - -int bio_crypt_ctx_init(void); - -struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask); - -void bio_crypt_free_ctx(struct bio *bio); - -static inline bool bio_has_crypt_ctx(struct bio *bio) -{ - return bio->bi_crypt_context; -} - -void bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask); - -static inline void bio_crypt_set_ctx(struct bio *bio, - const struct blk_crypto_key *key, - u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], - gfp_t gfp_mask) -{ - struct bio_crypt_ctx *bc = bio_crypt_alloc_ctx(gfp_mask); - - bc->bc_key = key; - memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun)); - bc->bc_ksm = NULL; - bc->bc_keyslot = -1; - - bio->bi_crypt_context = bc; -} - -void bio_crypt_ctx_release_keyslot(struct bio_crypt_ctx *bc); - -int bio_crypt_ctx_acquire_keyslot(struct bio_crypt_ctx *bc, - struct keyslot_manager *ksm); - -struct request; -bool bio_crypt_should_process(struct request *rq); - -static inline bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc, - unsigned int bytes, - u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]) -{ - int i = 0; - unsigned int inc = bytes >> bc->bc_key->data_unit_size_bits; - - while (i < BLK_CRYPTO_DUN_ARRAY_SIZE) { - if (bc->bc_dun[i] + inc != next_dun[i]) - return false; - inc = ((bc->bc_dun[i] + inc) < inc); - i++; - } - - return true; -} - - -static inline void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], - unsigned int inc) -{ - int i = 0; - - while (inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE) { - dun[i] += inc; - inc = (dun[i] < inc); - i++; - } -} - -static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes) -{ - struct bio_crypt_ctx *bc = bio->bi_crypt_context; - - if (!bc) - return; - - bio_crypt_dun_increment(bc->bc_dun, - bytes >> bc->bc_key->data_unit_size_bits); -} - -bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2); - -bool bio_crypt_ctx_mergeable(struct bio *b_1, unsigned int b1_bytes, - struct bio *b_2); - -#else /* 
CONFIG_BLK_INLINE_ENCRYPTION */ -static inline int bio_crypt_ctx_init(void) -{ - return 0; -} - -static inline bool bio_has_crypt_ctx(struct bio *bio) -{ - return false; -} - -static inline void bio_crypt_clone(struct bio *dst, struct bio *src, - gfp_t gfp_mask) { } - -static inline void bio_crypt_free_ctx(struct bio *bio) { } - -static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes) { } - -static inline bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2) -{ - return true; -} - -static inline bool bio_crypt_ctx_mergeable(struct bio *b_1, - unsigned int b1_bytes, - struct bio *b_2) -{ - return true; -} - -#endif /* CONFIG_BLK_INLINE_ENCRYPTION */ - -#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY) -static inline void bio_set_skip_dm_default_key(struct bio *bio) -{ - bio->bi_skip_dm_default_key = true; -} - -static inline bool bio_should_skip_dm_default_key(const struct bio *bio) -{ - return bio->bi_skip_dm_default_key; -} - -static inline void bio_clone_skip_dm_default_key(struct bio *dst, - const struct bio *src) -{ - dst->bi_skip_dm_default_key = src->bi_skip_dm_default_key; -} -#else /* CONFIG_DM_DEFAULT_KEY */ -static inline void bio_set_skip_dm_default_key(struct bio *bio) -{ -} - -static inline bool bio_should_skip_dm_default_key(const struct bio *bio) -{ - return false; -} - -static inline void bio_clone_skip_dm_default_key(struct bio *dst, - const struct bio *src) -{ -} -#endif /* !CONFIG_DM_DEFAULT_KEY */ - -#endif /* CONFIG_BLOCK */ - -#endif /* __LINUX_BIO_CRYPT_CTX_H */ diff --git a/include/linux/bio.h b/include/linux/bio.h index 2e08e3731376..e260f000b9ac 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -22,7 +22,6 @@ #include #include #include -#include #ifdef CONFIG_BLOCK diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h deleted file mode 100644 index 485cee0b92dd..000000000000 --- a/include/linux/blk-crypto.h +++ /dev/null @@ -1,66 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright 
2019 Google LLC - */ - -#ifndef __LINUX_BLK_CRYPTO_H -#define __LINUX_BLK_CRYPTO_H - -#include - -#define SECTOR_SHIFT 9 - -#ifdef CONFIG_BLK_INLINE_ENCRYPTION - -int blk_crypto_submit_bio(struct bio **bio_ptr); - -bool blk_crypto_endio(struct bio *bio); - -int blk_crypto_init_key(struct blk_crypto_key *blk_key, - const u8 *raw_key, unsigned int raw_key_size, - enum blk_crypto_mode_num crypto_mode, - unsigned int data_unit_size); - -int blk_crypto_evict_key(struct request_queue *q, - const struct blk_crypto_key *key); - -#else /* CONFIG_BLK_INLINE_ENCRYPTION */ - -static inline int blk_crypto_submit_bio(struct bio **bio_ptr) -{ - return 0; -} - -static inline bool blk_crypto_endio(struct bio *bio) -{ - return true; -} - -#endif /* CONFIG_BLK_INLINE_ENCRYPTION */ - -#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK - -int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num, - unsigned int data_unit_size, - struct request_queue *q); - -int blk_crypto_fallback_init(void); - -#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */ - -static inline int -blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num, - unsigned int data_unit_size, - struct request_queue *q) -{ - return 0; -} - -static inline int blk_crypto_fallback_init(void) -{ - return 0; -} - -#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */ - -#endif /* __LINUX_BLK_CRYPTO_H */ diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 41b2e8a10fdb..415811f0b24a 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -17,7 +17,6 @@ struct block_device; struct io_context; struct cgroup_subsys_state; typedef void (bio_end_io_t) (struct bio *); -struct bio_crypt_ctx; /* * Block error status values. See block/blk-core:blk_errors for the details. 
@@ -96,14 +95,6 @@ struct bio { struct blk_issue_stat bi_issue_stat; #endif #endif - -#ifdef CONFIG_BLK_INLINE_ENCRYPTION - struct bio_crypt_ctx *bi_crypt_context; -#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY) - bool bi_skip_dm_default_key; -#endif -#endif - union { #if defined(CONFIG_BLK_DEV_INTEGRITY) struct bio_integrity_payload *bi_integrity; /* data integrity */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 19c98d619b87..835a3cf3b47b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -43,7 +43,6 @@ struct pr_ops; struct rq_wb; struct blk_queue_stats; struct blk_stat_callback; -struct keyslot_manager; #define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ @@ -546,11 +545,6 @@ struct request_queue { */ unsigned int request_fn_active; -#ifdef CONFIG_BLK_INLINE_ENCRYPTION - /* Inline crypto capabilities */ - struct keyslot_manager *ksm; -#endif - unsigned int rq_timeout; int poll_nsec; diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 52834cd273b4..be0eb0118992 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -321,12 +321,6 @@ struct dm_target { * on max_io_len boundary. */ bool split_discard_bios:1; - - /* - * Set if inline crypto capabilities from this target's underlying - * device(s) can be exposed via the device-mapper device. 
- */ - bool may_passthrough_inline_crypto:1; }; /* Each target can link one of these into the table */ diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 3a2971075432..5977a6ced502 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -65,10 +65,6 @@ struct fscrypt_operations { bool (*has_stable_inodes)(struct super_block *sb); void (*get_ino_and_lblk_bits)(struct super_block *sb, int *ino_bits_ret, int *lblk_bits_ret); - bool (*inline_crypt_enabled)(struct super_block *sb); - int (*get_num_devices)(struct super_block *sb); - void (*get_devices)(struct super_block *sb, - struct request_queue **devs); }; static inline bool fscrypt_has_encryption_key(const struct inode *inode) @@ -537,74 +533,6 @@ static inline const char *fscrypt_get_symlink(struct inode *inode, } #endif /* !CONFIG_FS_ENCRYPTION */ -/* inline_crypt.c */ -#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT -extern bool fscrypt_inode_uses_inline_crypto(const struct inode *inode); - -extern bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode); - -extern void fscrypt_set_bio_crypt_ctx(struct bio *bio, - const struct inode *inode, - u64 first_lblk, gfp_t gfp_mask); - -extern void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio, - const struct buffer_head *first_bh, - gfp_t gfp_mask); - -extern bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, - u64 next_lblk); - -extern bool fscrypt_mergeable_bio_bh(struct bio *bio, - const struct buffer_head *next_bh); - -#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ -static inline bool fscrypt_inode_uses_inline_crypto(const struct inode *inode) -{ - return false; -} - -static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode) -{ - return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); -} - -static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio, - const struct inode *inode, - u64 first_lblk, gfp_t gfp_mask) { } - -static inline void fscrypt_set_bio_crypt_ctx_bh( - struct bio *bio, - 
const struct buffer_head *first_bh, - gfp_t gfp_mask) { } - -static inline bool fscrypt_mergeable_bio(struct bio *bio, - const struct inode *inode, - u64 next_lblk) -{ - return true; -} - -static inline bool fscrypt_mergeable_bio_bh(struct bio *bio, - const struct buffer_head *next_bh) -{ - return true; -} -#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ - -#if IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_DM_DEFAULT_KEY) -static inline bool -fscrypt_inode_should_skip_dm_default_key(const struct inode *inode) -{ - return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); -} -#else -static inline bool -fscrypt_inode_should_skip_dm_default_key(const struct inode *inode) -{ - return false; -} -#endif - /** * fscrypt_require_key - require an inode's encryption key * @inode: the inode we need the key for diff --git a/include/linux/keyslot-manager.h b/include/linux/keyslot-manager.h deleted file mode 100644 index 6d32a031218e..000000000000 --- a/include/linux/keyslot-manager.h +++ /dev/null @@ -1,84 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright 2019 Google LLC - */ - -#ifndef __LINUX_KEYSLOT_MANAGER_H -#define __LINUX_KEYSLOT_MANAGER_H - -#include - -#ifdef CONFIG_BLK_INLINE_ENCRYPTION - -struct keyslot_manager; - -/** - * struct keyslot_mgmt_ll_ops - functions to manage keyslots in hardware - * @keyslot_program: Program the specified key into the specified slot in the - * inline encryption hardware. - * @keyslot_evict: Evict key from the specified keyslot in the hardware. - * The key is provided so that e.g. dm layers can evict - * keys from the devices that they map over. - * Returns 0 on success, -errno otherwise. - * @derive_raw_secret: (Optional) Derive a software secret from a - * hardware-wrapped key. Returns 0 on success, -EOPNOTSUPP - * if unsupported on the hardware, or another -errno code. 
- * - * This structure should be provided by storage device drivers when they set up - * a keyslot manager - this structure holds the function ptrs that the keyslot - * manager will use to manipulate keyslots in the hardware. - */ -struct keyslot_mgmt_ll_ops { - int (*keyslot_program)(struct keyslot_manager *ksm, - const struct blk_crypto_key *key, - unsigned int slot); - int (*keyslot_evict)(struct keyslot_manager *ksm, - const struct blk_crypto_key *key, - unsigned int slot); - int (*derive_raw_secret)(struct keyslot_manager *ksm, - const u8 *wrapped_key, - unsigned int wrapped_key_size, - u8 *secret, unsigned int secret_size); -}; - -struct keyslot_manager *keyslot_manager_create(unsigned int num_slots, - const struct keyslot_mgmt_ll_ops *ksm_ops, - const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], - void *ll_priv_data); - -int keyslot_manager_get_slot_for_key(struct keyslot_manager *ksm, - const struct blk_crypto_key *key); - -void keyslot_manager_get_slot(struct keyslot_manager *ksm, unsigned int slot); - -void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot); - -bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, - enum blk_crypto_mode_num crypto_mode, - unsigned int data_unit_size); - -int keyslot_manager_evict_key(struct keyslot_manager *ksm, - const struct blk_crypto_key *key); - -void keyslot_manager_reprogram_all_keys(struct keyslot_manager *ksm); - -void *keyslot_manager_private(struct keyslot_manager *ksm); - -void keyslot_manager_destroy(struct keyslot_manager *ksm); - -struct keyslot_manager *keyslot_manager_create_passthrough( - const struct keyslot_mgmt_ll_ops *ksm_ops, - const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], - void *ll_priv_data); - -void keyslot_manager_intersect_modes(struct keyslot_manager *parent, - const struct keyslot_manager *child); - -int keyslot_manager_derive_raw_secret(struct keyslot_manager *ksm, - const u8 *wrapped_key, - unsigned int 
wrapped_key_size, - u8 *secret, unsigned int secret_size); - -#endif /* CONFIG_BLK_INLINE_ENCRYPTION */ - -#endif /* __LINUX_KEYSLOT_MANAGER_H */ diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index 1b9cdb7a5c8f..393b5ae4e7d4 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -126,8 +126,6 @@ struct fscrypt_add_key_arg { __u32 raw_size; __u32 key_id; __u32 __reserved[7]; - /* N.B.: "temporary" flag, not reserved upstream */ -#define __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED 0x00000001 __u32 __flags; __u8 raw[]; }; From 265eb99b18255b16f926d78232c56296c38cb808 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 19:54:59 +0530 Subject: [PATCH 134/141] Revert "Remove Per File Key based hardware crypto framework" This reverts commit 7a42f09a94c614152495010bf18bc2d83ea3365a. Signed-off-by: UtsavBalar1231 --- .../configs/vendor/sdm429-bg-perf_defconfig | 5 + arch/arm/configs/vendor/sdm429-bg_defconfig | 5 + .../arm/configs/vendor/trinket-perf_defconfig | 5 + arch/arm/configs/vendor/trinket_defconfig | 5 + .../arm64/configs/vendor/atoll-perf_defconfig | 7 + arch/arm64/configs/vendor/atoll_defconfig | 7 + .../configs/vendor/gen3auto-capture_defconfig | 5 + .../configs/vendor/gen3auto-perf_defconfig | 5 + arch/arm64/configs/vendor/gen3auto_defconfig | 5 + .../configs/vendor/qcs403-perf_defconfig | 2 + arch/arm64/configs/vendor/qcs403_defconfig | 2 + .../configs/vendor/qcs405-perf_defconfig | 2 + arch/arm64/configs/vendor/qcs405_defconfig | 2 + .../vendor/qcs610-minimal-perf_defconfig | 6 + .../vendor/qti-quin-gvm-perf_defconfig | 5 + .../configs/vendor/qti-quin-gvm_defconfig | 5 + .../vendor/sa2150p-nand-perf_defconfig | 2 + .../configs/vendor/sa2150p-nand_defconfig | 2 + .../configs/vendor/sa2150p-perf_defconfig | 2 + arch/arm64/configs/vendor/sa2150p_defconfig | 2 + .../configs/vendor/sa8155-perf_defconfig | 5 + arch/arm64/configs/vendor/sa8155_defconfig | 5 + .../configs/vendor/sdm660-perf_defconfig | 7 + 
arch/arm64/configs/vendor/sdm660_defconfig | 7 + arch/arm64/configs/vendor/sdmshrike_defconfig | 5 + .../vendor/sdmsteppe-auto-perf_defconfig | 6 + .../configs/vendor/sdmsteppe-auto_defconfig | 6 + .../configs/vendor/sdmsteppe-perf_defconfig | 7 + arch/arm64/configs/vendor/sdmsteppe_defconfig | 7 + .../configs/vendor/sm8150-perf_defconfig | 7 + arch/arm64/configs/vendor/sm8150_defconfig | 7 + .../configs/vendor/trinket-perf_defconfig | 5 + arch/arm64/configs/vendor/trinket_defconfig | 5 + block/bio.c | 15 +- block/blk-core.c | 19 +- block/blk-merge.c | 27 +- block/elevator.c | 8 +- drivers/block/virtio_blk.c | 2 - drivers/crypto/Kconfig | 4 + drivers/crypto/msm/ice.c | 8 +- drivers/md/Kconfig | 18 + drivers/md/Makefile | 1 + drivers/md/dm-crypt.c | 17 +- drivers/md/dm-default-key.c | 306 ++++++ drivers/md/dm-table.c | 15 + drivers/misc/qseecom.c | 14 + drivers/mmc/core/queue.c | 4 + drivers/mmc/host/Kconfig | 11 + drivers/mmc/host/Makefile | 1 + drivers/mmc/host/cmdq_hci.c | 87 +- drivers/mmc/host/cmdq_hci.h | 12 +- drivers/mmc/host/sdhci-msm-ice.c | 587 +++++++++++ drivers/mmc/host/sdhci-msm-ice.h | 173 ++++ drivers/mmc/host/sdhci-msm.c | 157 ++- drivers/mmc/host/sdhci-msm.h | 11 +- drivers/mmc/host/sdhci.c | 130 +++ drivers/mmc/host/sdhci.h | 9 + drivers/scsi/scsi_lib.c | 2 + drivers/scsi/ufs/Kconfig | 13 + drivers/scsi/ufs/Makefile | 1 + drivers/scsi/ufs/ufs-qcom-ice.c | 777 ++++++++++++++ drivers/scsi/ufs/ufs-qcom-ice.h | 137 +++ drivers/scsi/ufs/ufs-qcom.c | 181 +++- drivers/scsi/ufs/ufs-qcom.h | 25 +- drivers/scsi/ufs/ufshcd.c | 81 +- drivers/scsi/ufs/ufshcd.h | 83 ++ fs/crypto/Makefile | 4 + fs/crypto/bio.c | 14 +- fs/crypto/fscrypt_ice.c | 190 ++++ fs/crypto/fscrypt_ice.h | 99 ++ fs/crypto/fscrypt_private.h | 9 +- fs/crypto/keysetup.c | 14 +- fs/crypto/keysetup_v1.c | 19 +- fs/direct-io.c | 41 + fs/ext4/Kconfig | 8 +- fs/ext4/ext4.h | 3 + fs/ext4/inode.c | 33 +- fs/ext4/move_extent.c | 11 +- fs/ext4/page-io.c | 37 +- fs/ext4/readpage.c | 3 +- fs/f2fs/data.c 
| 65 +- fs/f2fs/f2fs.h | 13 +- fs/namei.c | 10 + include/linux/bio.h | 8 + include/linux/blk_types.h | 19 +- include/linux/blkdev.h | 9 + include/linux/bvec.h | 3 + include/linux/fs.h | 2 + include/linux/fscrypt.h | 31 + include/linux/lsm_hooks.h | 3 + include/linux/mmc/core.h | 3 +- include/linux/pfk.h | 79 ++ include/linux/security.h | 10 + include/scsi/scsi_host.h | 3 + security/Kconfig | 4 + security/Makefile | 2 + security/pfe/Kconfig | 50 + security/pfe/Makefile | 15 + security/pfe/pfk.c | 570 +++++++++++ security/pfe/pfk_ext4.c | 212 ++++ security/pfe/pfk_ext4.h | 37 + security/pfe/pfk_f2fs.c | 200 ++++ security/pfe/pfk_f2fs.h | 37 + security/pfe/pfk_ice.c | 216 ++++ security/pfe/pfk_ice.h | 34 + security/pfe/pfk_internal.h | 34 + security/pfe/pfk_kc.c | 951 ++++++++++++++++++ security/pfe/pfk_kc.h | 34 + security/security.c | 8 + security/selinux/include/objsec.h | 7 +- 110 files changed, 6142 insertions(+), 108 deletions(-) create mode 100644 drivers/md/dm-default-key.c create mode 100644 drivers/mmc/host/sdhci-msm-ice.c create mode 100644 drivers/mmc/host/sdhci-msm-ice.h create mode 100644 drivers/scsi/ufs/ufs-qcom-ice.c create mode 100644 drivers/scsi/ufs/ufs-qcom-ice.h create mode 100644 fs/crypto/fscrypt_ice.c create mode 100644 fs/crypto/fscrypt_ice.h create mode 100644 include/linux/pfk.h create mode 100644 security/pfe/Kconfig create mode 100644 security/pfe/Makefile create mode 100644 security/pfe/pfk.c create mode 100644 security/pfe/pfk_ext4.c create mode 100644 security/pfe/pfk_ext4.h create mode 100644 security/pfe/pfk_f2fs.c create mode 100644 security/pfe/pfk_f2fs.h create mode 100644 security/pfe/pfk_ice.c create mode 100644 security/pfe/pfk_ice.h create mode 100644 security/pfe/pfk_internal.h create mode 100644 security/pfe/pfk_kc.c create mode 100644 security/pfe/pfk_kc.h diff --git a/arch/arm/configs/vendor/sdm429-bg-perf_defconfig b/arch/arm/configs/vendor/sdm429-bg-perf_defconfig index 7157e39b767d..0177cd8bf01f 100644 --- 
a/arch/arm/configs/vendor/sdm429-bg-perf_defconfig +++ b/arch/arm/configs/vendor/sdm429-bg-perf_defconfig @@ -266,9 +266,11 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -492,6 +494,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_RTC_CLASS=y @@ -650,6 +653,7 @@ CONFIG_CORESIGHT_DUMMY=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -663,3 +667,4 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y diff --git a/arch/arm/configs/vendor/sdm429-bg_defconfig b/arch/arm/configs/vendor/sdm429-bg_defconfig index c7a200f921aa..978012ae819b 100644 --- a/arch/arm/configs/vendor/sdm429-bg_defconfig +++ b/arch/arm/configs/vendor/sdm429-bg_defconfig @@ -272,10 +272,12 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -507,6 +509,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_EDAC=y @@ -726,6 +729,7 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y 
CONFIG_HARDENED_USERCOPY=y @@ -740,5 +744,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_CRC8=y CONFIG_XZ_DEC=y diff --git a/arch/arm/configs/vendor/trinket-perf_defconfig b/arch/arm/configs/vendor/trinket-perf_defconfig index d50f0e7098f1..bb27b3b2c7f9 100644 --- a/arch/arm/configs/vendor/trinket-perf_defconfig +++ b/arch/arm/configs/vendor/trinket-perf_defconfig @@ -265,9 +265,11 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -489,6 +491,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QPNP_HAPTICS=y @@ -649,6 +652,7 @@ CONFIG_CORESIGHT_DUMMY=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -662,3 +666,4 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y diff --git a/arch/arm/configs/vendor/trinket_defconfig b/arch/arm/configs/vendor/trinket_defconfig index c941cc9f033c..815167c2471b 100644 --- a/arch/arm/configs/vendor/trinket_defconfig +++ b/arch/arm/configs/vendor/trinket_defconfig @@ -273,10 +273,12 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -503,6 +505,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y 
CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QPNP_HAPTICS=y @@ -729,6 +732,7 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -743,4 +747,5 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_XZ_DEC=y diff --git a/arch/arm64/configs/vendor/atoll-perf_defconfig b/arch/arm64/configs/vendor/atoll-perf_defconfig index 8703fffe9ba7..2b5babeda540 100644 --- a/arch/arm64/configs/vendor/atoll-perf_defconfig +++ b/arch/arm64/configs/vendor/atoll-perf_defconfig @@ -280,9 +280,11 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -507,6 +509,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -674,6 +677,8 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -715,6 +720,7 @@ CONFIG_CORESIGHT_DUMMY=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -729,6 +735,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y 
CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/atoll_defconfig b/arch/arm64/configs/vendor/atoll_defconfig index 35b0bb68edfe..38e428ac723f 100644 --- a/arch/arm64/configs/vendor/atoll_defconfig +++ b/arch/arm64/configs/vendor/atoll_defconfig @@ -290,10 +290,12 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -522,6 +524,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -704,6 +707,8 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -800,6 +805,7 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -815,6 +821,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/gen3auto-capture_defconfig b/arch/arm64/configs/vendor/gen3auto-capture_defconfig index db99f7087d9e..e16a32cd060b 100644 --- a/arch/arm64/configs/vendor/gen3auto-capture_defconfig +++ b/arch/arm64/configs/vendor/gen3auto-capture_defconfig @@ -289,6 +289,7 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y 
CONFIG_MD=y CONFIG_BLK_DEV_DM=y @@ -646,6 +647,8 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -719,6 +722,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_MEMTEST=y CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -734,6 +738,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/gen3auto-perf_defconfig b/arch/arm64/configs/vendor/gen3auto-perf_defconfig index 186548b8bb69..21275ae593c8 100644 --- a/arch/arm64/configs/vendor/gen3auto-perf_defconfig +++ b/arch/arm64/configs/vendor/gen3auto-perf_defconfig @@ -280,6 +280,7 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_UEVENT=y @@ -627,6 +628,8 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -649,6 +652,7 @@ CONFIG_SCHEDSTATS=y # CONFIG_DEBUG_PREEMPT is not set CONFIG_IPC_LOGGING=y CONFIG_DEBUG_ALIGN_RODATA=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -663,6 +667,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/gen3auto_defconfig 
b/arch/arm64/configs/vendor/gen3auto_defconfig index 80c473bb6fdd..7222359693cb 100644 --- a/arch/arm64/configs/vendor/gen3auto_defconfig +++ b/arch/arm64/configs/vendor/gen3auto_defconfig @@ -291,6 +291,7 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y @@ -659,6 +660,8 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -749,6 +752,7 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -764,6 +768,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/qcs403-perf_defconfig b/arch/arm64/configs/vendor/qcs403-perf_defconfig index c30b99e11678..bf405ee3c748 100644 --- a/arch/arm64/configs/vendor/qcs403-perf_defconfig +++ b/arch/arm64/configs/vendor/qcs403-perf_defconfig @@ -408,6 +408,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -549,4 +550,5 @@ CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_STACK_HASH_ORDER_SHIFT=12 diff --git a/arch/arm64/configs/vendor/qcs403_defconfig b/arch/arm64/configs/vendor/qcs403_defconfig index 4d1ebdb00b84..5cc12640652c 100644 --- a/arch/arm64/configs/vendor/qcs403_defconfig +++ 
b/arch/arm64/configs/vendor/qcs403_defconfig @@ -422,6 +422,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -599,3 +600,4 @@ CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y diff --git a/arch/arm64/configs/vendor/qcs405-perf_defconfig b/arch/arm64/configs/vendor/qcs405-perf_defconfig index 433dcaf105f1..923e9c719145 100644 --- a/arch/arm64/configs/vendor/qcs405-perf_defconfig +++ b/arch/arm64/configs/vendor/qcs405-perf_defconfig @@ -408,6 +408,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -550,4 +551,5 @@ CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_STACK_HASH_ORDER_SHIFT=12 diff --git a/arch/arm64/configs/vendor/qcs405_defconfig b/arch/arm64/configs/vendor/qcs405_defconfig index 0dac911cb584..3814ea6bbd5f 100644 --- a/arch/arm64/configs/vendor/qcs405_defconfig +++ b/arch/arm64/configs/vendor/qcs405_defconfig @@ -421,6 +421,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -599,3 +600,4 @@ CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y diff --git a/arch/arm64/configs/vendor/qcs610-minimal-perf_defconfig b/arch/arm64/configs/vendor/qcs610-minimal-perf_defconfig index ae508a225e02..159b155d5142 100644 --- a/arch/arm64/configs/vendor/qcs610-minimal-perf_defconfig +++ b/arch/arm64/configs/vendor/qcs610-minimal-perf_defconfig @@ -265,6 +265,7 @@ CONFIG_SCSI_SCAN_ASYNC=y 
CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y @@ -425,6 +426,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -561,6 +563,8 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y @@ -595,6 +599,7 @@ CONFIG_CORESIGHT_DUMMY=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y @@ -610,6 +615,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/qti-quin-gvm-perf_defconfig b/arch/arm64/configs/vendor/qti-quin-gvm-perf_defconfig index 4368321f9748..61f2563f8f77 100644 --- a/arch/arm64/configs/vendor/qti-quin-gvm-perf_defconfig +++ b/arch/arm64/configs/vendor/qti-quin-gvm-perf_defconfig @@ -473,6 +473,8 @@ CONFIG_ANDROID_BINDER_IPC=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -495,6 +497,9 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_PANIC_TIMEOUT=-1 CONFIG_SCHEDSTATS=y # CONFIG_DEBUG_PREEMPT is not set +CONFIG_PFK=y +CONFIG_PFK_WRAPPED_KEY_SUPPORTED=y +CONFIG_PFK_VIRTUALIZED=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_SELINUX=y diff --git a/arch/arm64/configs/vendor/qti-quin-gvm_defconfig b/arch/arm64/configs/vendor/qti-quin-gvm_defconfig index 
fe39b2f6d43c..19a24df6f119 100644 --- a/arch/arm64/configs/vendor/qti-quin-gvm_defconfig +++ b/arch/arm64/configs/vendor/qti-quin-gvm_defconfig @@ -484,6 +484,8 @@ CONFIG_ANDROID_BINDER_IPC=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -539,6 +541,9 @@ CONFIG_ATOMIC64_SELFTEST=m CONFIG_TEST_USER_COPY=m CONFIG_MEMTEST=y CONFIG_PID_IN_CONTEXTIDR=y +CONFIG_PFK=y +CONFIG_PFK_WRAPPED_KEY_SUPPORTED=y +CONFIG_PFK_VIRTUALIZED=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_SELINUX=y diff --git a/arch/arm64/configs/vendor/sa2150p-nand-perf_defconfig b/arch/arm64/configs/vendor/sa2150p-nand-perf_defconfig index d78c6d646281..cd9423f0a52a 100644 --- a/arch/arm64/configs/vendor/sa2150p-nand-perf_defconfig +++ b/arch/arm64/configs/vendor/sa2150p-nand-perf_defconfig @@ -353,6 +353,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -494,4 +495,5 @@ CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_STACK_HASH_ORDER_SHIFT=12 diff --git a/arch/arm64/configs/vendor/sa2150p-nand_defconfig b/arch/arm64/configs/vendor/sa2150p-nand_defconfig index 6194d3f50c3f..77c30567355a 100644 --- a/arch/arm64/configs/vendor/sa2150p-nand_defconfig +++ b/arch/arm64/configs/vendor/sa2150p-nand_defconfig @@ -354,6 +354,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -495,4 +496,5 @@ CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_STACK_HASH_ORDER_SHIFT=12 diff --git 
a/arch/arm64/configs/vendor/sa2150p-perf_defconfig b/arch/arm64/configs/vendor/sa2150p-perf_defconfig index 3a0a2ed3cb33..e1e518583dcf 100644 --- a/arch/arm64/configs/vendor/sa2150p-perf_defconfig +++ b/arch/arm64/configs/vendor/sa2150p-perf_defconfig @@ -363,6 +363,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -503,4 +504,5 @@ CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_STACK_HASH_ORDER_SHIFT=12 diff --git a/arch/arm64/configs/vendor/sa2150p_defconfig b/arch/arm64/configs/vendor/sa2150p_defconfig index 4a65bc770fc5..5a7550fc9fdf 100644 --- a/arch/arm64/configs/vendor/sa2150p_defconfig +++ b/arch/arm64/configs/vendor/sa2150p_defconfig @@ -364,6 +364,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -504,4 +505,5 @@ CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_STACK_HASH_ORDER_SHIFT=12 diff --git a/arch/arm64/configs/vendor/sa8155-perf_defconfig b/arch/arm64/configs/vendor/sa8155-perf_defconfig index b3dc60f70e86..5032fae84d52 100644 --- a/arch/arm64/configs/vendor/sa8155-perf_defconfig +++ b/arch/arm64/configs/vendor/sa8155-perf_defconfig @@ -278,6 +278,7 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_UEVENT=y @@ -608,6 +609,8 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -630,6 +633,7 @@ 
CONFIG_SCHEDSTATS=y # CONFIG_DEBUG_PREEMPT is not set CONFIG_IPC_LOGGING=y CONFIG_DEBUG_ALIGN_RODATA=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -644,6 +648,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sa8155_defconfig b/arch/arm64/configs/vendor/sa8155_defconfig index 13d5861e2017..359ae8a3e879 100644 --- a/arch/arm64/configs/vendor/sa8155_defconfig +++ b/arch/arm64/configs/vendor/sa8155_defconfig @@ -290,6 +290,7 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y @@ -642,6 +643,8 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -728,6 +731,7 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -743,6 +747,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sdm660-perf_defconfig b/arch/arm64/configs/vendor/sdm660-perf_defconfig index 699621a225bb..072cd837bfb1 100644 --- a/arch/arm64/configs/vendor/sdm660-perf_defconfig +++ b/arch/arm64/configs/vendor/sdm660-perf_defconfig @@ -279,11 +279,13 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y 
CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_MD=y CONFIG_MD_LINEAR=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -506,6 +508,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QTI_TRI_LED=y @@ -634,6 +637,8 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -670,6 +675,7 @@ CONFIG_CORESIGHT_TPDM=y CONFIG_CORESIGHT_QPDI=y CONFIG_CORESIGHT_HWEVENT=y CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -685,6 +691,7 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y CONFIG_CRYPTO_DEV_OTA_CRYPTO=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem" CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sdm660_defconfig b/arch/arm64/configs/vendor/sdm660_defconfig index 901b1c3b70a7..695a5619c0e3 100644 --- a/arch/arm64/configs/vendor/sdm660_defconfig +++ b/arch/arm64/configs/vendor/sdm660_defconfig @@ -287,12 +287,14 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_MD=y CONFIG_MD_LINEAR=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -520,6 +522,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QTI_TRI_LED=y @@ -665,6 +668,8 @@ 
CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -759,6 +764,7 @@ CONFIG_CORESIGHT_HWEVENT=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -775,6 +781,7 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y CONFIG_CRYPTO_DEV_OTA_CRYPTO=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem" CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sdmshrike_defconfig b/arch/arm64/configs/vendor/sdmshrike_defconfig index 5325e4aeb38e..84bd6089822f 100644 --- a/arch/arm64/configs/vendor/sdmshrike_defconfig +++ b/arch/arm64/configs/vendor/sdmshrike_defconfig @@ -285,6 +285,7 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y @@ -635,6 +636,8 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -713,6 +716,7 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -726,6 +730,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sdmsteppe-auto-perf_defconfig 
b/arch/arm64/configs/vendor/sdmsteppe-auto-perf_defconfig index 541777f5b2ad..151f629ca328 100644 --- a/arch/arm64/configs/vendor/sdmsteppe-auto-perf_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe-auto-perf_defconfig @@ -280,6 +280,7 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y @@ -490,6 +491,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -639,6 +641,8 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -665,6 +669,7 @@ CONFIG_SCHEDSTATS=y # CONFIG_DEBUG_PREEMPT is not set CONFIG_IPC_LOGGING=y CONFIG_DEBUG_ALIGN_RODATA=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -679,6 +684,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sdmsteppe-auto_defconfig b/arch/arm64/configs/vendor/sdmsteppe-auto_defconfig index f806daee1e59..49733043c7b4 100644 --- a/arch/arm64/configs/vendor/sdmsteppe-auto_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe-auto_defconfig @@ -292,6 +292,7 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y @@ -512,6 +513,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y 
@@ -677,6 +679,8 @@ CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -775,6 +779,7 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -790,6 +795,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig b/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig index f06dced29a56..b36774b71729 100644 --- a/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig @@ -274,9 +274,11 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -493,6 +495,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -652,6 +655,8 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -693,6 +698,7 @@ CONFIG_CORESIGHT_DUMMY=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -707,6 +713,7 @@ 
CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sdmsteppe_defconfig b/arch/arm64/configs/vendor/sdmsteppe_defconfig index e23a67e5fa74..cdc6969fe1d4 100644 --- a/arch/arm64/configs/vendor/sdmsteppe_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe_defconfig @@ -284,10 +284,12 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -517,6 +519,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -689,6 +692,8 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -785,6 +790,7 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -800,6 +806,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sm8150-perf_defconfig b/arch/arm64/configs/vendor/sm8150-perf_defconfig index c5444dbd97af..6b8a37fc3af9 100644 --- a/arch/arm64/configs/vendor/sm8150-perf_defconfig +++ b/arch/arm64/configs/vendor/sm8150-perf_defconfig @@ -285,9 
+285,11 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -661,6 +663,8 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -703,6 +707,8 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y +CONFIG_PFK_WRAPPED_KEY_SUPPORTED=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -717,6 +723,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sm8150_defconfig b/arch/arm64/configs/vendor/sm8150_defconfig index 53fd6b98411a..1f1866b3ebec 100644 --- a/arch/arm64/configs/vendor/sm8150_defconfig +++ b/arch/arm64/configs/vendor/sm8150_defconfig @@ -297,10 +297,12 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -691,6 +693,8 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_FS_ENCRYPTION=y @@ -787,6 +791,8 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y 
+CONFIG_PFK=y +CONFIG_PFK_WRAPPED_KEY_SUPPORTED=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -802,6 +808,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/trinket-perf_defconfig b/arch/arm64/configs/vendor/trinket-perf_defconfig index 67aea57b7d55..9248468b02eb 100644 --- a/arch/arm64/configs/vendor/trinket-perf_defconfig +++ b/arch/arm64/configs/vendor/trinket-perf_defconfig @@ -279,9 +279,11 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -508,6 +510,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QPNP_HAPTICS=y @@ -680,6 +683,7 @@ CONFIG_CORESIGHT_DUMMY=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -694,6 +698,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/trinket_defconfig b/arch/arm64/configs/vendor/trinket_defconfig index 65fa34db016b..cf1f806d7c9b 100644 --- a/arch/arm64/configs/vendor/trinket_defconfig +++ b/arch/arm64/configs/vendor/trinket_defconfig @@ -289,10 +289,12 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y 
+CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y @@ -523,6 +525,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QPNP_HAPTICS=y @@ -765,6 +768,7 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -780,6 +784,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/block/bio.c b/block/bio.c index a3c4fd9ec478..ce70677b9b5e 100644 --- a/block/bio.c +++ b/block/bio.c @@ -577,6 +577,18 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio) } EXPORT_SYMBOL(bio_phys_segments); +static inline void bio_clone_crypt_key(struct bio *dst, const struct bio *src) +{ +#ifdef CONFIG_PFK + dst->bi_iter.bi_dun = src->bi_iter.bi_dun; +#ifdef CONFIG_DM_DEFAULT_KEY + dst->bi_crypt_key = src->bi_crypt_key; + dst->bi_crypt_skip = src->bi_crypt_skip; +#endif + dst->bi_dio_inode = src->bi_dio_inode; +#endif +} + /** * __bio_clone_fast - clone a bio that shares the original bio's biovec * @bio: destination bio @@ -605,7 +617,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_iter = bio_src->bi_iter; bio->bi_io_vec = bio_src->bi_io_vec; - + bio_clone_crypt_key(bio, bio_src); bio_clone_blkcg_association(bio, bio_src); } EXPORT_SYMBOL(__bio_clone_fast); @@ -714,6 +726,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, } } + bio_clone_crypt_key(bio, bio_src); 
bio_clone_blkcg_association(bio, bio_src); return bio; diff --git a/block/blk-core.c b/block/blk-core.c index 52490014818f..7ce048c9861c 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1462,6 +1462,9 @@ static struct request *blk_old_get_request(struct request_queue *q, /* q->queue_lock is unlocked at this point */ rq->__data_len = 0; rq->__sector = (sector_t) -1; +#ifdef CONFIG_PFK + rq->__dun = 0; +#endif rq->bio = rq->biotail = NULL; return rq; } @@ -1685,6 +1688,9 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req, bio->bi_next = req->bio; req->bio = bio; +#ifdef CONFIG_PFK + req->__dun = bio->bi_iter.bi_dun; +#endif req->__sector = bio->bi_iter.bi_sector; req->__data_len += bio->bi_iter.bi_size; req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); @@ -1834,6 +1840,9 @@ void blk_init_request_from_bio(struct request *req, struct bio *bio) else req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); req->write_hint = bio->bi_write_hint; +#ifdef CONFIG_PFK + req->__dun = bio->bi_iter.bi_dun; +#endif blk_rq_bio_prep(req->q, req, bio); } EXPORT_SYMBOL_GPL(blk_init_request_from_bio); @@ -2867,8 +2876,13 @@ bool blk_update_request(struct request *req, blk_status_t error, req->__data_len -= total_bytes; /* update sector only for requests with clear definition of sector */ - if (!blk_rq_is_passthrough(req)) + if (!blk_rq_is_passthrough(req)) { req->__sector += total_bytes >> 9; +#ifdef CONFIG_PFK + if (req->__dun) + req->__dun += total_bytes >> 12; +#endif + } /* mixed attributes always follow the first bio */ if (req->rq_flags & RQF_MIXED_MERGE) { @@ -3231,6 +3245,9 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src) { dst->cpu = src->cpu; dst->__sector = blk_rq_pos(src); +#ifdef CONFIG_PFK + dst->__dun = blk_rq_dun(src); +#endif dst->__data_len = blk_rq_bytes(src); if (src->rq_flags & RQF_SPECIAL_PAYLOAD) { dst->rq_flags |= RQF_SPECIAL_PAYLOAD; diff --git a/block/blk-merge.c b/block/blk-merge.c index 
de29a4054666..82dc0c1df283 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -9,7 +9,7 @@ #include #include - +#include #include "blk.h" static struct bio *blk_bio_discard_split(struct request_queue *q, @@ -509,6 +509,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, if (blk_integrity_rq(req) && integrity_req_gap_back_merge(req, bio)) return 0; + if (blk_try_merge(req, bio) != ELEVATOR_BACK_MERGE) + return 0; if (blk_rq_sectors(req) + bio_sectors(bio) > blk_rq_get_max_sectors(req, blk_rq_pos(req))) { req_set_nomerge(q, req); @@ -531,6 +533,8 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req, if (blk_integrity_rq(req) && integrity_req_gap_front_merge(req, bio)) return 0; + if (blk_try_merge(req, bio) != ELEVATOR_FRONT_MERGE) + return 0; if (blk_rq_sectors(req) + bio_sectors(bio) > blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) { req_set_nomerge(q, req); @@ -664,6 +668,11 @@ static void blk_account_io_merge(struct request *req) } } +static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt) +{ + return (!pfk_allow_merge_bio(bio, nxt)); +} + /* * For non-mq, this has to be called with the request spinlock acquired. * For mq with scheduling, the appropriate queue wide lock should be held. @@ -702,6 +711,9 @@ static struct request *attempt_merge(struct request_queue *q, if (req->write_hint != next->write_hint) return NULL; + if (crypto_not_mergeable(req->bio, next->bio)) + return 0; + /* * If we are allowed to merge, then append bio list * from next to rq and release next. 
merge_requests_fn @@ -839,11 +851,18 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) enum elv_merge blk_try_merge(struct request *rq, struct bio *bio) { if (req_op(rq) == REQ_OP_DISCARD && - queue_max_discard_segments(rq->q) > 1) + queue_max_discard_segments(rq->q) > 1) { return ELEVATOR_DISCARD_MERGE; - else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) + } else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == + bio->bi_iter.bi_sector) { + if (crypto_not_mergeable(rq->bio, bio)) + return ELEVATOR_NO_MERGE; return ELEVATOR_BACK_MERGE; - else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) + } else if (blk_rq_pos(rq) - bio_sectors(bio) == + bio->bi_iter.bi_sector) { + if (crypto_not_mergeable(bio, rq->bio)) + return ELEVATOR_NO_MERGE; return ELEVATOR_FRONT_MERGE; + } return ELEVATOR_NO_MERGE; } diff --git a/block/elevator.c b/block/elevator.c index 8320d97240be..2346c5b53b93 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -443,7 +443,7 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req, { struct elevator_queue *e = q->elevator; struct request *__rq; - + enum elv_merge ret; /* * Levels of merges: * nomerges: No merges at all attempted @@ -456,9 +456,11 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req, /* * First try one-hit cache. 
*/ - if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) { - enum elv_merge ret = blk_try_merge(q->last_merge, bio); + if (q->last_merge) { + if (!elv_bio_merge_ok(q->last_merge, bio)) + return ELEVATOR_NO_MERGE; + ret = blk_try_merge(q->last_merge, bio); if (ret != ELEVATOR_NO_MERGE) { *req = q->last_merge; return ret; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 763308990dd8..2531bfb57fdc 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -15,9 +15,7 @@ #include #include #include -#ifdef CONFIG_PFK #include -#endif #include #define PART_BITS 4 diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 26e1103e49a6..113cd787ec7c 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -771,4 +771,8 @@ config CRYPTO_DEV_ARTPEC6 To compile this driver as a module, choose M here. +if ARCH_QCOM +source drivers/crypto/msm/Kconfig +endif + endif # CRYPTO_HW diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c index 79301bbed969..fd34c0bc39f5 100644 --- a/drivers/crypto/msm/ice.c +++ b/drivers/crypto/msm/ice.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1443,9 +1443,9 @@ static void qcom_ice_debug(struct platform_device *pdev) qcom_ice_dump_test_bus(ice_dev); pr_err("%s: ICE reset start time: %llu ICE reset done time: %llu\n", - ice_dev->ice_instance_type, - (unsigned long long)ice_dev->ice_reset_start_time.tv64, - (unsigned long long)ice_dev->ice_reset_complete_time.tv64); + ice_dev->ice_instance_type, + (unsigned long long)ice_dev->ice_reset_start_time, + (unsigned long long)ice_dev->ice_reset_complete_time); if (ktime_to_us(ktime_sub(ice_dev->ice_reset_complete_time, ice_dev->ice_reset_start_time)) > 0) diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 747edadb39ae..c805d628d04d 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -286,6 +286,24 @@ config DM_CRYPT If unsure, say N. +config DM_DEFAULT_KEY + tristate "Default-key crypt target support" + depends on BLK_DEV_DM + depends on PFK + ---help--- + This (currently Android-specific) device-mapper target allows you to + create a device that assigns a default encryption key to bios that + don't already have one. This can sit between inline cryptographic + acceleration hardware and filesystems that use it. This ensures that + where the filesystem doesn't explicitly specify a key, such as for + filesystem metadata, a default key will be used instead, leaving no + sectors unencrypted. + + To compile this code as a module, choose M here: the module will be + called dm-default-key. + + If unsure, say N. 
+ config DM_SNAPSHOT tristate "Snapshot target" depends on BLK_DEV_DM diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 27962abad668..1a03ebd1cee7 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o obj-$(CONFIG_DM_BUFIO) += dm-bufio.o obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o obj-$(CONFIG_DM_CRYPT) += dm-crypt.o +obj-$(CONFIG_DM_DEFAULT_KEY) += dm-default-key.o obj-$(CONFIG_DM_DELAY) += dm-delay.o obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index cb959a0e711d..669e18f0453b 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -126,7 +126,7 @@ struct iv_tcw_private { */ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD, -}; + DM_CRYPT_ENCRYPT_OVERRIDE }; enum cipher_flags { CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cihper */ @@ -2678,6 +2678,8 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT; } else if (!strcasecmp(opt_string, "iv_large_sectors")) set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); + else if (!strcasecmp(opt_string, "allow_encrypt_override")) + set_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags); else { ti->error = "Invalid feature arguments"; return -EINVAL; @@ -2887,12 +2889,15 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) struct crypt_config *cc = ti->private; /* - * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues. + * If bio is REQ_PREFLUSH, REQ_NOENCRYPT, or REQ_OP_DISCARD, + * just bypass crypt queues. 
* - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight * - for REQ_OP_DISCARD caller must use flush if IO ordering matters */ - if (unlikely(bio->bi_opf & REQ_PREFLUSH || - bio_op(bio) == REQ_OP_DISCARD)) { + if (unlikely(bio->bi_opf & REQ_PREFLUSH) || + (unlikely(bio->bi_opf & REQ_NOENCRYPT) && + test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags)) || + bio_op(bio) == REQ_OP_DISCARD) { bio_set_dev(bio, cc->dev->bdev); if (bio_sectors(bio)) bio->bi_iter.bi_sector = cc->start + @@ -2979,6 +2984,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type, num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT); num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); + num_feature_args += test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, + &cc->flags); if (cc->on_disk_tag_size) num_feature_args++; if (num_feature_args) { @@ -2995,6 +3002,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type, DMEMIT(" sector_size:%d", cc->sector_size); if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) DMEMIT(" iv_large_sectors"); + if (test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags)) + DMEMIT(" allow_encrypt_override"); } break; diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c new file mode 100644 index 000000000000..0926bd65bd59 --- /dev/null +++ b/drivers/md/dm-default-key.c @@ -0,0 +1,306 @@ +/* + * Copyright (C) 2017 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include + +#define DM_MSG_PREFIX "default-key" +#define DEFAULT_DUN_OFFSET 1 + +struct default_key_c { + struct dm_dev *dev; + sector_t start; + struct blk_encryption_key key; + bool set_dun; + u64 dun_offset; +}; + +static void default_key_dtr(struct dm_target *ti) +{ + struct default_key_c *dkc = ti->private; + + if (dkc->dev) + dm_put_device(ti, dkc->dev); + kzfree(dkc); +} + +static int default_key_ctr_optional(struct dm_target *ti, + unsigned int argc, char **argv) +{ + struct default_key_c *dkc = ti->private; + struct dm_arg_set as = {0}; + static const struct dm_arg _args[] = { + {0, 2, "Invalid number of feature args"}, + }; + unsigned int opt_params; + const char *opt_string; + char dummy; + int ret; + + as.argc = argc; + as.argv = argv; + + ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error); + if (ret) + return ret; + + while (opt_params--) { + opt_string = dm_shift_arg(&as); + if (!opt_string) { + ti->error = "Not enough feature arguments"; + return -EINVAL; + } + + if (!strcasecmp(opt_string, "set_dun")) { + dkc->set_dun = true; + } else if (sscanf(opt_string, "dun_offset:%llu%c", + &dkc->dun_offset, &dummy) == 1) { + if (dkc->dun_offset == 0) { + ti->error = "dun_offset cannot be 0"; + return -EINVAL; + } + } else { + ti->error = "Invalid feature arguments"; + return -EINVAL; + } + } + + if (dkc->dun_offset && !dkc->set_dun) { + ti->error = "Invalid: dun_offset without set_dun"; + return -EINVAL; + } + + if (dkc->set_dun && !dkc->dun_offset) + dkc->dun_offset = DEFAULT_DUN_OFFSET; + + return 0; +} + +/* + * Construct a default-key mapping: + */ +static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + struct default_key_c *dkc; + size_t key_size; + unsigned long long tmp; + char dummy; + int err; + + if (argc < 4) { + ti->error = "Too few arguments"; + return -EINVAL; + } + + dkc = kzalloc(sizeof(*dkc), GFP_KERNEL); + if (!dkc) { + ti->error = "Out of memory"; + return -ENOMEM; + } + 
ti->private = dkc; + + if (strcmp(argv[0], "AES-256-XTS") != 0) { + ti->error = "Unsupported encryption mode"; + err = -EINVAL; + goto bad; + } + + key_size = strlen(argv[1]); + if (key_size != 2 * BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS) { + ti->error = "Unsupported key size"; + err = -EINVAL; + goto bad; + } + key_size /= 2; + + if (hex2bin(dkc->key.raw, argv[1], key_size) != 0) { + ti->error = "Malformed key string"; + err = -EINVAL; + goto bad; + } + + err = dm_get_device(ti, argv[2], dm_table_get_mode(ti->table), + &dkc->dev); + if (err) { + ti->error = "Device lookup failed"; + goto bad; + } + + if (sscanf(argv[3], "%llu%c", &tmp, &dummy) != 1) { + ti->error = "Invalid start sector"; + err = -EINVAL; + goto bad; + } + dkc->start = tmp; + + if (argc > 4) { + err = default_key_ctr_optional(ti, argc - 4, &argv[4]); + if (err) + goto bad; + } + + if (!blk_queue_inlinecrypt(bdev_get_queue(dkc->dev->bdev))) { + ti->error = "Device does not support inline encryption"; + err = -EINVAL; + goto bad; + } + + /* Pass flush requests through to the underlying device. */ + ti->num_flush_bios = 1; + + /* + * We pass discard requests through to the underlying device, although + * the discarded blocks will be zeroed, which leaks information about + * unused blocks. It's also impossible for dm-default-key to know not + * to decrypt discarded blocks, so they will not be read back as zeroes + * and we must set discard_zeroes_data_unsupported. + */ + ti->num_discard_bios = 1; + + /* + * It's unclear whether WRITE_SAME would work with inline encryption; it + * would depend on whether the hardware duplicates the data before or + * after encryption. But since the internal storage in some devices + * (MSM8998-based) doesn't claim to support WRITE_SAME anyway, we don't + * currently have a way to test it. Leave it disabled for now.
+ */ + /*ti->num_write_same_bios = 1;*/ + + return 0; + +bad: + default_key_dtr(ti); + return err; +} + +static int default_key_map(struct dm_target *ti, struct bio *bio) +{ + const struct default_key_c *dkc = ti->private; + + bio_set_dev(bio, dkc->dev->bdev); + if (bio_sectors(bio)) { + bio->bi_iter.bi_sector = dkc->start + + dm_target_offset(ti, bio->bi_iter.bi_sector); + } + + if (!bio->bi_crypt_key && !bio->bi_crypt_skip) { + bio->bi_crypt_key = &dkc->key; + + if (dkc->set_dun) + bio_dun(bio) = (dm_target_offset(ti, + bio->bi_iter.bi_sector) + >> 3) + dkc->dun_offset; + } + + return DM_MAPIO_REMAPPED; +} + +static void default_key_status(struct dm_target *ti, status_type_t type, + unsigned int status_flags, char *result, + unsigned int maxlen) +{ + const struct default_key_c *dkc = ti->private; + unsigned int sz = 0; + int num_feature_args = 0; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + + /* encryption mode */ + DMEMIT("AES-256-XTS"); + + /* reserved for key; dm-crypt shows it, but we don't for now */ + DMEMIT(" -"); + + /* name of underlying device, and the start sector in it */ + DMEMIT(" %s %llu", dkc->dev->name, + (unsigned long long)dkc->start); + + num_feature_args += dkc->set_dun; + num_feature_args += dkc->set_dun + && dkc->dun_offset != DEFAULT_DUN_OFFSET; + + if (num_feature_args) { + DMEMIT(" %d", num_feature_args); + if (dkc->set_dun) + DMEMIT(" set_dun"); + if (dkc->set_dun + && dkc->dun_offset != DEFAULT_DUN_OFFSET) + DMEMIT(" dun_offset:%llu", dkc->dun_offset); + } + + break; + } +} + +static int default_key_prepare_ioctl(struct dm_target *ti, + struct block_device **bdev, fmode_t *mode) +{ + struct default_key_c *dkc = ti->private; + struct dm_dev *dev = dkc->dev; + + *bdev = dev->bdev; + + /* + * Only pass ioctls through if the device sizes match exactly. 
+ */ + if (dkc->start || + ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) + return 1; + return 0; +} + +static int default_key_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, + void *data) +{ + struct default_key_c *dkc = ti->private; + + return fn(ti, dkc->dev, dkc->start, ti->len, data); +} + +static struct target_type default_key_target = { + .name = "default-key", + .version = {1, 1, 0}, + .module = THIS_MODULE, + .ctr = default_key_ctr, + .dtr = default_key_dtr, + .map = default_key_map, + .status = default_key_status, + .prepare_ioctl = default_key_prepare_ioctl, + .iterate_devices = default_key_iterate_devices, +}; + +static int __init dm_default_key_init(void) +{ + return dm_register_target(&default_key_target); +} + +static void __exit dm_default_key_exit(void) +{ + dm_unregister_target(&default_key_target); +} + +module_init(dm_default_key_init); +module_exit(dm_default_key_exit); + +MODULE_AUTHOR("Paul Lawrence "); +MODULE_AUTHOR("Paul Crowley "); +MODULE_AUTHOR("Eric Biggers "); +MODULE_DESCRIPTION(DM_NAME " target for encrypting filesystem metadata"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 852350e3cfe7..0f9a8087e1a0 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1689,6 +1689,16 @@ static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev, return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); } +static int queue_supports_inline_encryption(struct dm_target *ti, + struct dm_dev *dev, + sector_t start, sector_t len, + void *data) +{ + struct request_queue *q = bdev_get_queue(dev->bdev); + + return q && blk_queue_inlinecrypt(q); +} + static bool dm_table_all_devices_attribute(struct dm_table *t, iterate_devices_callout_fn func) { @@ -1869,6 +1879,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, else queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); + if (dm_table_all_devices_attribute(t, 
queue_supports_inline_encryption)) + queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q); + else + queue_flag_clear_unlocked(QUEUE_FLAG_INLINECRYPT, q); + dm_table_verify_integrity(t); /* diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index 66ed5193add4..f3c12b118f3a 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -52,6 +52,7 @@ #include #include #include "compat_qseecom.h" +#include #include #define QSEECOM_DEV "qseecom" @@ -8114,6 +8115,19 @@ static long qseecom_ioctl(struct file *file, qcom_ice_set_fde_flag(ice_data.flag); break; } + case QSEECOM_IOCTL_FBE_CLEAR_KEY: { + struct qseecom_ice_key_data_t key_data; + + ret = copy_from_user(&key_data, argp, sizeof(key_data)); + if (ret) { + pr_err("copy from user failed\n"); + return -EFAULT; + } + pfk_fbe_clear_key((const unsigned char *) key_data.key, + key_data.key_len, (const unsigned char *) + key_data.salt, key_data.salt_len); + break; + } default: pr_err("Invalid IOCTL: 0x%x\n", cmd); return -EINVAL; diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index ecc794323729..06febb56fa5f 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -216,6 +216,8 @@ void mmc_cmdq_setup_queue(struct mmc_queue *mq, struct mmc_card *card) host->max_req_size / 512)); blk_queue_max_segment_size(mq->queue, host->max_seg_size); blk_queue_max_segments(mq->queue, host->max_segs); + if (host->inlinecrypt_support) + queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, mq->queue); } static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp) @@ -479,6 +481,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, min(host->max_blk_count, host->max_req_size / 512)); blk_queue_max_segments(mq->queue, host->max_segs); blk_queue_max_segment_size(mq->queue, host->max_seg_size); + if (host->inlinecrypt_support) + queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, mq->queue); sema_init(&mq->thread_sem, 1); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig 
index 979b909704df..73e43441f4dd 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -439,6 +439,17 @@ config MMC_SDHCI_MSM If unsure, say N. +config MMC_SDHCI_MSM_ICE + bool "Qualcomm Technologies, Inc Inline Crypto Engine for SDHCI core" + depends on MMC_SDHCI_MSM && CRYPTO_DEV_QCOM_ICE + help + This selects the QTI specific additions to support Inline Crypto + Engine (ICE). ICE accelerates the crypto operations and maintains + the high SDHCI performance. + + Select this if you have ICE supported for SDHCI on QTI chipset. + If unsure, say N. + config MMC_MXC tristate "Freescale i.MX21/27/31 or MPC512x Multimedia Card support" depends on ARCH_MXC || PPC_MPC512x diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 6389e8125299..f079ab6fb055 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -86,6 +86,7 @@ obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o +obj-$(CONFIG_MMC_SDHCI_MSM_ICE) += sdhci-msm-ice.o obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c index 87c1cb7abf39..8571e3171c0c 100644 --- a/drivers/mmc/host/cmdq_hci.c +++ b/drivers/mmc/host/cmdq_hci.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, 2020 The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -376,6 +376,7 @@ static int cmdq_enable(struct mmc_host *mmc) { int err = 0; u32 cqcfg; + u32 cqcap = 0; bool dcmd_enable; struct cmdq_host *cq_host = mmc_cmdq_private(mmc); @@ -404,6 +405,24 @@ static int cmdq_enable(struct mmc_host *mmc) cqcfg = ((cq_host->caps & CMDQ_TASK_DESC_SZ_128 ? CQ_TASK_DESC_SZ : 0) | (dcmd_enable ? CQ_DCMD : 0)); + cqcap = cmdq_readl(cq_host, CQCAP); + if (cqcap & CQCAP_CS) { + /* + * In case host controller supports cryptographic operations + * then, it uses 128bit task descriptor. Upper 64 bits of task + * descriptor would be used to pass crypto specific informaton. + */ + cq_host->caps |= CMDQ_CAP_CRYPTO_SUPPORT | + CMDQ_TASK_DESC_SZ_128; + cqcfg |= CQ_ICE_ENABLE; + /* + * For SDHC v5.0 onwards, ICE 3.0 specific registers are added + * in CQ register space, due to which few CQ registers are + * shifted. Set offset_changed boolean to use updated address. + */ + cq_host->offset_changed = true; + } + cmdq_writel(cq_host, cqcfg, CQCFG); /* enable CQ_HOST */ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCFG) | CQ_ENABLE, @@ -719,6 +738,30 @@ static void cmdq_prep_dcmd_desc(struct mmc_host *mmc, upper_32_bits(*task_desc)); } +static inline +void cmdq_prep_crypto_desc(struct cmdq_host *cq_host, u64 *task_desc, + u64 ice_ctx) +{ + u64 *ice_desc = NULL; + + if (cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) { + /* + * Get the address of ice context for the given task descriptor. 
+ * ice context is present in the upper 64bits of task descriptor + * ice_conext_base_address = task_desc + 8-bytes + */ + ice_desc = (__le64 *)((u8 *)task_desc + + CQ_TASK_DESC_TASK_PARAMS_SIZE); + memset(ice_desc, 0, CQ_TASK_DESC_ICE_PARAMS_SIZE); + + /* + * Assign upper 64bits data of task descritor with ice context + */ + if (ice_ctx) + *ice_desc = cpu_to_le64(ice_ctx); + } +} + static void cmdq_pm_qos_vote(struct sdhci_host *host, struct mmc_request *mrq) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -742,6 +785,7 @@ static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq) u32 tag = mrq->cmdq_req->tag; struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc); struct sdhci_host *host = mmc_priv(mmc); + u64 ice_ctx = 0; if (!cq_host->enabled) { pr_err("%s: CMDQ host not enabled yet !!!\n", @@ -760,19 +804,31 @@ static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq) goto ring_doorbell; } + if (cq_host->ops->crypto_cfg) { + err = cq_host->ops->crypto_cfg(mmc, mrq, tag, &ice_ctx); + if (err) { + mmc->err_stats[MMC_ERR_ICE_CFG]++; + pr_err("%s: failed to configure crypto: err %d tag %d\n", + mmc_hostname(mmc), err, tag); + goto ice_err; + } + } + task_desc = (__le64 __force *)get_desc(cq_host, tag); cmdq_prep_task_desc(mrq, &data, 1, (mrq->cmdq_req->cmdq_req_flags & QBR)); *task_desc = cpu_to_le64(data); + cmdq_prep_crypto_desc(cq_host, task_desc, ice_ctx); + cmdq_log_task_desc_history(cq_host, *task_desc, false); err = cmdq_prep_tran_desc(mrq, cq_host, tag); if (err) { pr_err("%s: %s: failed to setup tx desc: %d\n", mmc_hostname(mmc), __func__, err); - goto out; + goto desc_err; } cq_host->mrq_slot[tag] = mrq; @@ -792,6 +848,20 @@ ring_doorbell: /* Commit the doorbell write immediately */ wmb(); + return err; + +desc_err: + if (cq_host->ops->crypto_cfg_end) { + err = cq_host->ops->crypto_cfg_end(mmc, mrq); + if (err) { + pr_err("%s: failed to end ice config: err %d tag %d\n", + mmc_hostname(mmc), err, tag); 
+ } + } + if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) && + cq_host->ops->crypto_cfg_reset) + cq_host->ops->crypto_cfg_reset(mmc, tag); +ice_err: if (err) cmdq_runtime_pm_put(cq_host); out: @@ -803,6 +873,7 @@ static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag) struct mmc_request *mrq; struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc); int offset = 0; + int err = 0; if (cq_host->offset_changed) offset = CQ_V5_VENDOR_CFG; @@ -817,6 +888,18 @@ static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag) cmdq_runtime_pm_put(cq_host); + if (!(mrq->cmdq_req->cmdq_req_flags & DCMD)) { + if (cq_host->ops->crypto_cfg_end) { + err = cq_host->ops->crypto_cfg_end(mmc, mrq); + if (err) { + pr_err("%s: failed to end ice config: err %d tag %d\n", + mmc_hostname(mmc), err, tag); + } + } + } + if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) && + cq_host->ops->crypto_cfg_reset) + cq_host->ops->crypto_cfg_reset(mmc, tag); mrq->done(mrq); } diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h index 0b7c38710c34..03c78d7a891c 100644 --- a/drivers/mmc/host/cmdq_hci.h +++ b/drivers/mmc/host/cmdq_hci.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -18,11 +18,13 @@ #define CQVER 0x00 /* capabilities */ #define CQCAP 0x04 +#define CQCAP_CS (1 << 28) /* configuration */ #define CQCFG 0x08 #define CQ_DCMD 0x00001000 #define CQ_TASK_DESC_SZ 0x00000100 #define CQ_ENABLE 0x00000001 +#define CQ_ICE_ENABLE 0x00000002 /* control */ #define CQCTL 0x0C @@ -151,6 +153,9 @@ #define CQ_VENDOR_CFG 0x100 #define CMDQ_SEND_STATUS_TRIGGER (1 << 31) +#define CQ_TASK_DESC_TASK_PARAMS_SIZE 8 +#define CQ_TASK_DESC_ICE_PARAMS_SIZE 8 + struct task_history { u64 task; bool is_dcmd; @@ -168,6 +173,7 @@ struct cmdq_host { u32 dcmd_slot; u32 caps; #define CMDQ_TASK_DESC_SZ_128 0x1 +#define CMDQ_CAP_CRYPTO_SUPPORT 0x2 u32 quirks; #define CMDQ_QUIRK_SHORT_TXFR_DESC_SZ 0x1 @@ -216,6 +222,10 @@ struct cmdq_host_ops { void (*enhanced_strobe_mask)(struct mmc_host *mmc, bool set); int (*reset)(struct mmc_host *mmc); void (*post_cqe_halt)(struct mmc_host *mmc); + int (*crypto_cfg)(struct mmc_host *mmc, struct mmc_request *mrq, + u32 slot, u64 *ice_ctx); + int (*crypto_cfg_end)(struct mmc_host *mmc, struct mmc_request *mrq); + void (*crypto_cfg_reset)(struct mmc_host *mmc, unsigned int slot); }; static inline void cmdq_writel(struct cmdq_host *host, u32 val, int reg) diff --git a/drivers/mmc/host/sdhci-msm-ice.c b/drivers/mmc/host/sdhci-msm-ice.c new file mode 100644 index 000000000000..317d8c3bfb0e --- /dev/null +++ b/drivers/mmc/host/sdhci-msm-ice.c @@ -0,0 +1,587 @@ +/* + * Copyright (c) 2015, 2017-2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "sdhci-msm-ice.h" + +static void sdhci_msm_ice_error_cb(void *host_ctrl, u32 error) +{ + struct sdhci_msm_host *msm_host = (struct sdhci_msm_host *)host_ctrl; + + dev_err(&msm_host->pdev->dev, "%s: Error in ice operation 0x%x", + __func__, error); + + if (msm_host->ice.state == SDHCI_MSM_ICE_STATE_ACTIVE) + msm_host->ice.state = SDHCI_MSM_ICE_STATE_DISABLED; +} + +static struct platform_device *sdhci_msm_ice_get_pdevice(struct device *dev) +{ + struct device_node *node; + struct platform_device *ice_pdev = NULL; + + node = of_parse_phandle(dev->of_node, SDHC_MSM_CRYPTO_LABEL, 0); + if (!node) { + dev_dbg(dev, "%s: sdhc-msm-crypto property not specified\n", + __func__); + goto out; + } + ice_pdev = qcom_ice_get_pdevice(node); +out: + return ice_pdev; +} + +static +struct qcom_ice_variant_ops *sdhci_msm_ice_get_vops(struct device *dev) +{ + struct qcom_ice_variant_ops *ice_vops = NULL; + struct device_node *node; + + node = of_parse_phandle(dev->of_node, SDHC_MSM_CRYPTO_LABEL, 0); + if (!node) { + dev_dbg(dev, "%s: sdhc-msm-crypto property not specified\n", + __func__); + goto out; + } + ice_vops = qcom_ice_get_variant_ops(node); + of_node_put(node); +out: + return ice_vops; +} + +static +void sdhci_msm_enable_ice_hci(struct sdhci_host *host, bool enable) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + u32 config = 0; + u32 ice_cap = 0; + + /* + * Enable the cryptographic support inside SDHC. + * This is a global config which needs to be enabled + * all the time. + * Only when it it is enabled, the ICE_HCI capability + * will get reflected in CQCAP register. 
+ */ + config = readl_relaxed(host->ioaddr + HC_VENDOR_SPECIFIC_FUNC4); + + if (enable) + config &= ~DISABLE_CRYPTO; + else + config |= DISABLE_CRYPTO; + writel_relaxed(config, host->ioaddr + HC_VENDOR_SPECIFIC_FUNC4); + + /* + * CQCAP register is in different register space from above + * ice global enable register. So a mb() is required to ensure + * above write gets completed before reading the CQCAP register. + */ + mb(); + + /* + * Check if ICE HCI capability support is present + * If present, enable it. + */ + ice_cap = readl_relaxed(msm_host->cryptoio + ICE_CQ_CAPABILITIES); + if (ice_cap & ICE_HCI_SUPPORT) { + config = readl_relaxed(msm_host->cryptoio + ICE_CQ_CONFIG); + + if (enable) + config |= CRYPTO_GENERAL_ENABLE; + else + config &= ~CRYPTO_GENERAL_ENABLE; + writel_relaxed(config, msm_host->cryptoio + ICE_CQ_CONFIG); + } +} + +int sdhci_msm_ice_get_dev(struct sdhci_host *host) +{ + struct device *sdhc_dev; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + + if (!msm_host || !msm_host->pdev) { + pr_err("%s: invalid msm_host %p or msm_host->pdev\n", + __func__, msm_host); + return -EINVAL; + } + + sdhc_dev = &msm_host->pdev->dev; + msm_host->ice.vops = sdhci_msm_ice_get_vops(sdhc_dev); + msm_host->ice.pdev = sdhci_msm_ice_get_pdevice(sdhc_dev); + + if (msm_host->ice.pdev == ERR_PTR(-EPROBE_DEFER)) { + dev_err(sdhc_dev, "%s: ICE device not probed yet\n", + __func__); + msm_host->ice.pdev = NULL; + msm_host->ice.vops = NULL; + return -EPROBE_DEFER; + } + + if (!msm_host->ice.pdev) { + dev_dbg(sdhc_dev, "%s: invalid platform device\n", __func__); + msm_host->ice.vops = NULL; + return -ENODEV; + } + if (!msm_host->ice.vops) { + dev_dbg(sdhc_dev, "%s: invalid ice vops\n", __func__); + msm_host->ice.pdev = NULL; + return -ENODEV; + } + msm_host->ice.state = SDHCI_MSM_ICE_STATE_DISABLED; + return 0; +} + +static +int sdhci_msm_ice_pltfm_init(struct sdhci_msm_host *msm_host) +{ + struct resource 
*ice_memres = NULL; + struct platform_device *pdev = msm_host->pdev; + int err = 0; + + if (!msm_host->ice_hci_support) + goto out; + /* + * ICE HCI registers are present in cmdq register space. + * So map the cmdq mem for accessing ICE HCI registers. + */ + ice_memres = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "cmdq_mem"); + if (!ice_memres) { + dev_err(&pdev->dev, "Failed to get iomem resource for ice\n"); + err = -EINVAL; + goto out; + } + msm_host->cryptoio = devm_ioremap(&pdev->dev, + ice_memres->start, + resource_size(ice_memres)); + if (!msm_host->cryptoio) { + dev_err(&pdev->dev, "Failed to remap registers\n"); + err = -ENOMEM; + } +out: + return err; +} + +int sdhci_msm_ice_init(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + int err = 0; + + if (msm_host->ice.vops->init) { + err = sdhci_msm_ice_pltfm_init(msm_host); + if (err) + goto out; + + if (msm_host->ice_hci_support) + sdhci_msm_enable_ice_hci(host, true); + + err = msm_host->ice.vops->init(msm_host->ice.pdev, + msm_host, + sdhci_msm_ice_error_cb); + if (err) { + pr_err("%s: ice init err %d\n", + mmc_hostname(host->mmc), err); + sdhci_msm_ice_print_regs(host); + if (msm_host->ice_hci_support) + sdhci_msm_enable_ice_hci(host, false); + goto out; + } + msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE; + } + +out: + return err; +} + +void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot) +{ + writel_relaxed(SDHCI_MSM_ICE_ENABLE_BYPASS, + host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot); +} + +static +int sdhci_msm_ice_get_cfg(struct sdhci_msm_host *msm_host, struct request *req, + unsigned int *bypass, short *key_index) +{ + int err = 0; + struct ice_data_setting ice_set; + + memset(&ice_set, 0, sizeof(struct ice_data_setting)); + if (msm_host->ice.vops->config_start) { + err = msm_host->ice.vops->config_start( + msm_host->ice.pdev, + req, &ice_set, false); + if (err) { + 
pr_err("%s: ice config failed %d\n", + mmc_hostname(msm_host->mmc), err); + return err; + } + } + /* if writing data command */ + if (rq_data_dir(req) == WRITE) + *bypass = ice_set.encr_bypass ? + SDHCI_MSM_ICE_ENABLE_BYPASS : + SDHCI_MSM_ICE_DISABLE_BYPASS; + /* if reading data command */ + else if (rq_data_dir(req) == READ) + *bypass = ice_set.decr_bypass ? + SDHCI_MSM_ICE_ENABLE_BYPASS : + SDHCI_MSM_ICE_DISABLE_BYPASS; + *key_index = ice_set.crypto_data.key_index; + return err; +} + +static +void sdhci_msm_ice_update_cfg(struct sdhci_host *host, u64 lba, u32 slot, + unsigned int bypass, short key_index, u32 cdu_sz) +{ + unsigned int ctrl_info_val = 0; + + /* Configure ICE index */ + ctrl_info_val = + (key_index & + MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX) + << OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX; + + /* Configure data unit size of transfer request */ + ctrl_info_val |= + (cdu_sz & + MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU) + << OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU; + + /* Configure ICE bypass mode */ + ctrl_info_val |= + (bypass & MASK_SDHCI_MSM_ICE_CTRL_INFO_BYPASS) + << OFFSET_SDHCI_MSM_ICE_CTRL_INFO_BYPASS; + + writel_relaxed((lba & 0xFFFFFFFF), + host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_1_n + 16 * slot); + writel_relaxed(((lba >> 32) & 0xFFFFFFFF), + host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n + 16 * slot); + writel_relaxed(ctrl_info_val, + host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot); + /* Ensure ICE registers are configured before issuing SDHCI request */ + mb(); +} + +static inline +void sdhci_msm_ice_hci_update_cmdq_cfg(u64 dun, unsigned int bypass, + short key_index, u64 *ice_ctx) +{ + /* + * The naming convention got changed between ICE2.0 and ICE3.0 + * registers fields. 
Below is the equivalent names for + * ICE3.0 Vs ICE2.0: + * Data Unit Number(DUN) == Logical Base address(LBA) + * Crypto Configuration index (CCI) == Key Index + * Crypto Enable (CE) == !BYPASS + */ + if (ice_ctx) + *ice_ctx = DATA_UNIT_NUM(dun) | + CRYPTO_CONFIG_INDEX(key_index) | + CRYPTO_ENABLE(!bypass); +} + +static +void sdhci_msm_ice_hci_update_noncq_cfg(struct sdhci_host *host, + u64 dun, unsigned int bypass, short key_index) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + unsigned int crypto_params = 0; + /* + * The naming convention got changed between ICE2.0 and ICE3.0 + * registers fields. Below is the equivalent names for + * ICE3.0 Vs ICE2.0: + * Data Unit Number(DUN) == Logical Base address(LBA) + * Crypto Configuration index (CCI) == Key Index + * Crypto Enable (CE) == !BYPASS + */ + /* Configure ICE bypass mode */ + crypto_params |= + ((!bypass) & MASK_SDHCI_MSM_ICE_HCI_PARAM_CE) + << OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CE; + /* Configure Crypto Configure Index (CCI) */ + crypto_params |= (key_index & + MASK_SDHCI_MSM_ICE_HCI_PARAM_CCI) + << OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CCI; + + writel_relaxed((crypto_params & 0xFFFFFFFF), + msm_host->cryptoio + ICE_NONCQ_CRYPTO_PARAMS); + + /* Update DUN */ + writel_relaxed((dun & 0xFFFFFFFF), + msm_host->cryptoio + ICE_NONCQ_CRYPTO_DUN); + /* Ensure ICE registers are configured before issuing SDHCI request */ + mb(); +} + +int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq, + u32 slot) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + int err = 0; + short key_index = 0; + u64 dun = 0; + unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS; + u32 cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_512_B; + struct request *req; + + if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) { + pr_err("%s: ice is in invalid state %d\n", + mmc_hostname(host->mmc), msm_host->ice.state); + 
return -EINVAL; + } + + WARN_ON(!mrq); + if (!mrq) + return -EINVAL; + req = mrq->req; + if (req && req->bio) { +#ifdef CONFIG_PFK + if (bio_dun(req->bio)) { + dun = bio_dun(req->bio); + cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB; + } else { + dun = req->__sector; + } +#else + dun = req->__sector; +#endif + err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index); + if (err) + return err; + pr_debug("%s: %s: slot %d bypass %d key_index %d\n", + mmc_hostname(host->mmc), + (rq_data_dir(req) == WRITE) ? "WRITE" : "READ", + slot, bypass, key_index); + } + + if (msm_host->ice_hci_support) { + /* For ICE HCI / ICE3.0 */ + sdhci_msm_ice_hci_update_noncq_cfg(host, dun, bypass, + key_index); + } else { + /* For ICE versions earlier to ICE3.0 */ + sdhci_msm_ice_update_cfg(host, dun, slot, bypass, key_index, + cdu_sz); + } + return 0; +} + +int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host, + struct mmc_request *mrq, u32 slot, u64 *ice_ctx) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + int err = 0; + short key_index = 0; + u64 dun = 0; + unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS; + struct request *req; + u32 cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_512_B; + + if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) { + pr_err("%s: ice is in invalid state %d\n", + mmc_hostname(host->mmc), msm_host->ice.state); + return -EINVAL; + } + + WARN_ON(!mrq); + if (!mrq) + return -EINVAL; + req = mrq->req; + if (req && req->bio) { +#ifdef CONFIG_PFK + if (bio_dun(req->bio)) { + dun = bio_dun(req->bio); + cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB; + } else { + dun = req->__sector; + } +#else + dun = req->__sector; +#endif + err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index); + if (err) + return err; + pr_debug("%s: %s: slot %d bypass %d key_index %d\n", + mmc_hostname(host->mmc), + (rq_data_dir(req) == WRITE) ? 
"WRITE" : "READ", + slot, bypass, key_index); + } + + if (msm_host->ice_hci_support) { + /* For ICE HCI / ICE3.0 */ + sdhci_msm_ice_hci_update_cmdq_cfg(dun, bypass, key_index, + ice_ctx); + } else { + /* For ICE versions earlier to ICE3.0 */ + sdhci_msm_ice_update_cfg(host, dun, slot, bypass, key_index, + cdu_sz); + } + return 0; +} + +int sdhci_msm_ice_cfg_end(struct sdhci_host *host, struct mmc_request *mrq) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + int err = 0; + struct request *req; + + if (!host->is_crypto_en) + return 0; + + if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) { + pr_err("%s: ice is in invalid state %d\n", + mmc_hostname(host->mmc), msm_host->ice.state); + return -EINVAL; + } + + req = mrq->req; + if (req) { + if (msm_host->ice.vops->config_end) { + err = msm_host->ice.vops->config_end(req); + if (err) { + pr_err("%s: ice config end failed %d\n", + mmc_hostname(host->mmc), err); + return err; + } + } + } + + return 0; +} + +int sdhci_msm_ice_reset(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + int err = 0; + + if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) { + pr_err("%s: ice is in invalid state before reset %d\n", + mmc_hostname(host->mmc), msm_host->ice.state); + return -EINVAL; + } + + if (msm_host->ice.vops->reset) { + err = msm_host->ice.vops->reset(msm_host->ice.pdev); + if (err) { + pr_err("%s: ice reset failed %d\n", + mmc_hostname(host->mmc), err); + sdhci_msm_ice_print_regs(host); + return err; + } + } + + /* If ICE HCI support is present then re-enable it */ + if (msm_host->ice_hci_support) + sdhci_msm_enable_ice_hci(host, true); + + if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) { + pr_err("%s: ice is in invalid state after reset %d\n", + mmc_hostname(host->mmc), msm_host->ice.state); + return -EINVAL; + } + return 0; +} + +int 
sdhci_msm_ice_resume(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + int err = 0; + + if (msm_host->ice.state != + SDHCI_MSM_ICE_STATE_SUSPENDED) { + pr_err("%s: ice is in invalid state before resume %d\n", + mmc_hostname(host->mmc), msm_host->ice.state); + return -EINVAL; + } + + if (msm_host->ice.vops->resume) { + err = msm_host->ice.vops->resume(msm_host->ice.pdev); + if (err) { + pr_err("%s: ice resume failed %d\n", + mmc_hostname(host->mmc), err); + return err; + } + } + + msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE; + return 0; +} + +int sdhci_msm_ice_suspend(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + int err = 0; + + if (msm_host->ice.state != + SDHCI_MSM_ICE_STATE_ACTIVE) { + pr_err("%s: ice is in invalid state before resume %d\n", + mmc_hostname(host->mmc), msm_host->ice.state); + return -EINVAL; + } + + if (msm_host->ice.vops->suspend) { + err = msm_host->ice.vops->suspend(msm_host->ice.pdev); + if (err) { + pr_err("%s: ice suspend failed %d\n", + mmc_hostname(host->mmc), err); + return -EINVAL; + } + } + msm_host->ice.state = SDHCI_MSM_ICE_STATE_SUSPENDED; + return 0; +} + +int sdhci_msm_ice_get_status(struct sdhci_host *host, int *ice_status) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + int stat = -EINVAL; + + if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) { + pr_err("%s: ice is in invalid state %d\n", + mmc_hostname(host->mmc), msm_host->ice.state); + return -EINVAL; + } + + if (msm_host->ice.vops->status) { + *ice_status = 0; + stat = msm_host->ice.vops->status(msm_host->ice.pdev); + if (stat < 0) { + pr_err("%s: ice get sts failed %d\n", + mmc_hostname(host->mmc), stat); + return -EINVAL; + } + *ice_status = stat; + } + return 0; +} + +void sdhci_msm_ice_print_regs(struct 
sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + + if (msm_host->ice.vops->debug) + msm_host->ice.vops->debug(msm_host->ice.pdev); +} diff --git a/drivers/mmc/host/sdhci-msm-ice.h b/drivers/mmc/host/sdhci-msm-ice.h new file mode 100644 index 000000000000..b256e285250d --- /dev/null +++ b/drivers/mmc/host/sdhci-msm-ice.h @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __SDHCI_MSM_ICE_H__ +#define __SDHCI_MSM_ICE_H__ + +#include +#include +#include +#include + +#include "sdhci-msm.h" + +#define SDHC_MSM_CRYPTO_LABEL "sdhc-msm-crypto" +/* Timeout waiting for ICE initialization, that requires TZ access */ +#define SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS 500 + +/* + * SDHCI host controller ICE registers. 
There are n [0..31] + * of each of these registers + */ +#define NUM_SDHCI_MSM_ICE_CTRL_INFO_n_REGS 32 + +#define CORE_VENDOR_SPEC_ICE_CTRL 0x300 +#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_1_n 0x304 +#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n 0x308 +#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n 0x30C + +/* ICE3.0 register which got added cmdq reg space */ +#define ICE_CQ_CAPABILITIES 0x04 +#define ICE_HCI_SUPPORT (1 << 28) +#define ICE_CQ_CONFIG 0x08 +#define CRYPTO_GENERAL_ENABLE (1 << 1) +#define ICE_NONCQ_CRYPTO_PARAMS 0x70 +#define ICE_NONCQ_CRYPTO_DUN 0x74 + +/* ICE3.0 register which got added hc reg space */ +#define HC_VENDOR_SPECIFIC_FUNC4 0x260 +#define DISABLE_CRYPTO (1 << 15) +#define HC_VENDOR_SPECIFIC_ICE_CTRL 0x800 +#define ICE_SW_RST_EN (1 << 0) + +/* SDHCI MSM ICE CTRL Info register offset */ +enum { + OFFSET_SDHCI_MSM_ICE_CTRL_INFO_BYPASS = 0, + OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX = 1, + OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU = 6, + OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CCI = 0, + OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CE = 8, +}; + +/* SDHCI MSM ICE CTRL Info register masks */ +enum { + MASK_SDHCI_MSM_ICE_CTRL_INFO_BYPASS = 0x1, + MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX = 0x1F, + MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU = 0x7, + MASK_SDHCI_MSM_ICE_HCI_PARAM_CE = 0x1, + MASK_SDHCI_MSM_ICE_HCI_PARAM_CCI = 0xff +}; + +/* SDHCI MSM ICE encryption/decryption bypass state */ +enum { + SDHCI_MSM_ICE_DISABLE_BYPASS = 0, + SDHCI_MSM_ICE_ENABLE_BYPASS = 1, +}; + +/* SDHCI MSM ICE Crypto Data Unit of target DUN of Transfer Request */ +enum { + SDHCI_MSM_ICE_TR_DATA_UNIT_512_B = 0, + SDHCI_MSM_ICE_TR_DATA_UNIT_1_KB = 1, + SDHCI_MSM_ICE_TR_DATA_UNIT_2_KB = 2, + SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB = 3, + SDHCI_MSM_ICE_TR_DATA_UNIT_8_KB = 4, + SDHCI_MSM_ICE_TR_DATA_UNIT_16_KB = 5, + SDHCI_MSM_ICE_TR_DATA_UNIT_32_KB = 6, + SDHCI_MSM_ICE_TR_DATA_UNIT_64_KB = 7, +}; + +/* SDHCI MSM ICE internal state */ +enum { + SDHCI_MSM_ICE_STATE_DISABLED = 0, + SDHCI_MSM_ICE_STATE_ACTIVE = 1, + 
SDHCI_MSM_ICE_STATE_SUSPENDED = 2, +}; + +/* crypto context fields in cmdq data command task descriptor */ +#define DATA_UNIT_NUM(x) (((u64)(x) & 0xFFFFFFFF) << 0) +#define CRYPTO_CONFIG_INDEX(x) (((u64)(x) & 0xFF) << 32) +#define CRYPTO_ENABLE(x) (((u64)(x) & 0x1) << 47) + +#ifdef CONFIG_MMC_SDHCI_MSM_ICE +int sdhci_msm_ice_get_dev(struct sdhci_host *host); +int sdhci_msm_ice_init(struct sdhci_host *host); +void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot); +int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq, + u32 slot); +int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host, + struct mmc_request *mrq, u32 slot, u64 *ice_ctx); +int sdhci_msm_ice_cfg_end(struct sdhci_host *host, struct mmc_request *mrq); +int sdhci_msm_ice_reset(struct sdhci_host *host); +int sdhci_msm_ice_resume(struct sdhci_host *host); +int sdhci_msm_ice_suspend(struct sdhci_host *host); +int sdhci_msm_ice_get_status(struct sdhci_host *host, int *ice_status); +void sdhci_msm_ice_print_regs(struct sdhci_host *host); +#else +inline int sdhci_msm_ice_get_dev(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + + if (msm_host) { + msm_host->ice.pdev = NULL; + msm_host->ice.vops = NULL; + } + return -ENODEV; +} +inline int sdhci_msm_ice_init(struct sdhci_host *host) +{ + return 0; +} + +inline void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot) +{ +} + +inline int sdhci_msm_ice_cfg(struct sdhci_host *host, + struct mmc_request *mrq, u32 slot) +{ + return 0; +} +static inline int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host, + struct mmc_request *mrq, u32 slot, u64 *ice_ctx) +{ + return 0; +} +static inline int sdhci_msm_ice_cfg_end(struct sdhci_host *host, + struct mmc_request *mrq) +{ + return 0; +} +inline int sdhci_msm_ice_reset(struct sdhci_host *host) +{ + return 0; +} +inline int sdhci_msm_ice_resume(struct sdhci_host *host) +{ + return 0; +} +inline int 
sdhci_msm_ice_suspend(struct sdhci_host *host) +{ + return 0; +} +inline int sdhci_msm_ice_get_status(struct sdhci_host *host, + int *ice_status) +{ + return 0; +} +inline void sdhci_msm_ice_print_regs(struct sdhci_host *host) +{ +} +#endif /* CONFIG_MMC_SDHCI_MSM_ICE */ +#endif /* __SDHCI_MSM_ICE_H__ */ diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index ed15b5bc8018..a6bd35e37a31 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -42,6 +42,7 @@ #include #include "sdhci-msm.h" +#include "sdhci-msm-ice.h" #include "cmdq_hci.h" #define QOS_REMOVE_DELAY_MS 10 @@ -2054,20 +2055,26 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev, } } - if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates", - &ice_clk_table, &ice_clk_table_len, 0)) { - if (ice_clk_table && ice_clk_table_len) { - if (ice_clk_table_len != 2) { - dev_err(dev, "Need max and min frequencies\n"); - goto out; - } - pdata->sup_ice_clk_table = ice_clk_table; - pdata->sup_ice_clk_cnt = ice_clk_table_len; - pdata->ice_clk_max = pdata->sup_ice_clk_table[0]; - pdata->ice_clk_min = pdata->sup_ice_clk_table[1]; - dev_dbg(dev, "ICE clock rates (Hz): max: %u min: %u\n", - pdata->ice_clk_max, pdata->ice_clk_min); + if (msm_host->ice.pdev) { + if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates", + &ice_clk_table, &ice_clk_table_len, 0)) { + dev_err(dev, "failed parsing supported ice clock rates\n"); + goto out; } + if (!ice_clk_table || !ice_clk_table_len) { + dev_err(dev, "Invalid clock table\n"); + goto out; + } + if (ice_clk_table_len != 2) { + dev_err(dev, "Need max and min frequencies in the table\n"); + goto out; + } + pdata->sup_ice_clk_table = ice_clk_table; + pdata->sup_ice_clk_cnt = ice_clk_table_len; + pdata->ice_clk_max = pdata->sup_ice_clk_table[0]; + pdata->ice_clk_min = pdata->sup_ice_clk_table[1]; + dev_dbg(dev, "supported ICE clock rates (Hz): max: %u min: %u\n", + pdata->ice_clk_max, pdata->ice_clk_min); } 
pdata->vreg_data = devm_kzalloc(dev, sizeof(struct @@ -3775,6 +3782,7 @@ void sdhci_msm_dump_vendor_regs(struct sdhci_host *host) int i, index = 0; u32 test_bus_val = 0; u32 debug_reg[MAX_TEST_BUS] = {0}; + u32 sts = 0; sdhci_msm_cache_debug_data(host); pr_info("----------- VENDOR REGISTER DUMP -----------\n"); @@ -3847,10 +3855,28 @@ void sdhci_msm_dump_vendor_regs(struct sdhci_host *host) pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, i + 3, debug_reg[i], debug_reg[i+1], debug_reg[i+2], debug_reg[i+3]); + if (host->is_crypto_en) { + sdhci_msm_ice_get_status(host, &sts); + pr_info("%s: ICE status %x\n", mmc_hostname(host->mmc), sts); + sdhci_msm_ice_print_regs(host); + } } static void sdhci_msm_reset(struct sdhci_host *host, u8 mask) { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + + /* Set ICE core to be reset in sync with SDHC core */ + if (msm_host->ice.pdev) { + if (msm_host->ice_hci_support) + writel_relaxed(1, host->ioaddr + + HC_VENDOR_SPECIFIC_ICE_CTRL); + else + writel_relaxed(1, + host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL); + } + sdhci_reset(host, mask); } @@ -4490,6 +4516,11 @@ static int sdhci_msm_notify_load(struct sdhci_host *host, enum mmc_load state) } static struct sdhci_ops sdhci_msm_ops = { + .crypto_engine_cfg = sdhci_msm_ice_cfg, + .crypto_engine_cmdq_cfg = sdhci_msm_ice_cmdq_cfg, + .crypto_engine_cfg_end = sdhci_msm_ice_cfg_end, + .crypto_cfg_reset = sdhci_msm_ice_cfg_reset, + .crypto_engine_reset = sdhci_msm_ice_reset, .set_uhs_signaling = sdhci_msm_set_uhs_signaling, .check_power_status = sdhci_msm_check_power_status, .platform_execute_tuning = sdhci_msm_execute_tuning, @@ -4615,6 +4646,7 @@ static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host, msm_host->caps_0 = caps; if ((major == 1) && (minor >= 0x6b)) { + msm_host->ice_hci_support = true; host->cdr_support = true; } @@ -4718,6 +4750,31 @@ static int sdhci_msm_probe(struct platform_device 
*pdev) msm_host->mmc = host->mmc; msm_host->pdev = pdev; + /* get the ice device vops if present */ + ret = sdhci_msm_ice_get_dev(host); + if (ret == -EPROBE_DEFER) { + /* + * SDHCI driver might be probed before ICE driver does. + * In that case we would like to return EPROBE_DEFER code + * in order to delay its probing. + */ + dev_err(&pdev->dev, "%s: required ICE device not probed yet err = %d\n", + __func__, ret); + goto pltfm_free; + + } else if (ret == -ENODEV) { + /* + * ICE device is not enabled in DTS file. No need for further + * initialization of ICE driver. + */ + dev_warn(&pdev->dev, "%s: ICE device is not enabled", + __func__); + } else if (ret) { + dev_err(&pdev->dev, "%s: sdhci_msm_ice_get_dev failed %d\n", + __func__, ret); + goto pltfm_free; + } + /* Extract platform data */ if (pdev->dev.of_node) { ret = of_alias_get_id(pdev->dev.of_node, "sdhc"); @@ -4792,24 +4849,26 @@ static int sdhci_msm_probe(struct platform_device *pdev) } } - /* Setup SDC ICE clock */ - msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk"); - if (!IS_ERR(msm_host->ice_clk)) { - /* ICE core has only one clock frequency for now */ - ret = clk_set_rate(msm_host->ice_clk, - msm_host->pdata->ice_clk_max); - if (ret) { - dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n", - ret, - msm_host->pdata->ice_clk_max); - goto bus_aggr_clk_disable; - } - ret = clk_prepare_enable(msm_host->ice_clk); - if (ret) - goto bus_aggr_clk_disable; + if (msm_host->ice.pdev) { + /* Setup SDC ICE clock */ + msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk"); + if (!IS_ERR(msm_host->ice_clk)) { + /* ICE core has only one clock frequency for now */ + ret = clk_set_rate(msm_host->ice_clk, + msm_host->pdata->ice_clk_max); + if (ret) { + dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n", + ret, + msm_host->pdata->ice_clk_max); + goto bus_aggr_clk_disable; + } + ret = clk_prepare_enable(msm_host->ice_clk); + if (ret) + goto bus_aggr_clk_disable; - msm_host->ice_clk_rate = 
- msm_host->pdata->ice_clk_max; + msm_host->ice_clk_rate = + msm_host->pdata->ice_clk_max; + } } /* Setup SDC MMC clock */ @@ -5058,6 +5117,22 @@ static int sdhci_msm_probe(struct platform_device *pdev) msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa; + /* Initialize ICE if present */ + if (msm_host->ice.pdev) { + ret = sdhci_msm_ice_init(host); + if (ret) { + dev_err(&pdev->dev, "%s: SDHCi ICE init failed (%d)\n", + mmc_hostname(host->mmc), ret); + ret = -EINVAL; + goto vreg_deinit; + } + host->is_crypto_en = true; + msm_host->mmc->inlinecrypt_support = true; + /* Packed commands cannot be encrypted/decrypted using ICE */ + msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR | + MMC_CAP2_PACKED_WR_CONTROL); + } + init_completion(&msm_host->pwr_irq_completion); if (gpio_is_valid(msm_host->pdata->status_gpio)) { @@ -5338,6 +5413,7 @@ static int sdhci_msm_runtime_suspend(struct device *dev) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = pltfm_host->priv; ktime_t start = ktime_get(); + int ret; if (host->mmc->card && mmc_card_sdio(host->mmc->card)) goto defer_disable_host_irq; @@ -5357,6 +5433,12 @@ defer_disable_host_irq: sdhci_msm_bus_cancel_work_and_set_vote(host, 0); } + if (host->is_crypto_en) { + ret = sdhci_msm_ice_suspend(host); + if (ret < 0) + pr_err("%s: failed to suspend crypto engine %d\n", + mmc_hostname(host->mmc), ret); + } trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0, ktime_to_us(ktime_sub(ktime_get(), start))); return 0; @@ -5368,6 +5450,21 @@ static int sdhci_msm_runtime_resume(struct device *dev) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = pltfm_host->priv; ktime_t start = ktime_get(); + int ret; + + if (host->is_crypto_en) { + ret = sdhci_msm_enable_controller_clock(host); + if (ret) { + pr_err("%s: Failed to enable reqd clocks\n", + mmc_hostname(host->mmc)); + goto skip_ice_resume; + } + ret = sdhci_msm_ice_resume(host); + if (ret) + pr_err("%s: 
failed to resume crypto engine %d\n", + mmc_hostname(host->mmc), ret); + } +skip_ice_resume: if (host->mmc->card && mmc_card_sdio(host->mmc->card)) goto defer_enable_host_irq; diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h index 8a52f8e9e201..66e48622498b 100644 --- a/drivers/mmc/host/sdhci-msm.h +++ b/drivers/mmc/host/sdhci-msm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -171,6 +171,12 @@ struct sdhci_msm_bus_vote { struct device_attribute max_bus_bw; }; +struct sdhci_msm_ice_data { + struct qcom_ice_variant_ops *vops; + struct platform_device *pdev; + int state; +}; + struct sdhci_msm_regs_restore { bool is_supported; bool is_valid; @@ -215,6 +221,8 @@ struct sdhci_msm_debug_data { struct sdhci_msm_host { struct platform_device *pdev; void __iomem *core_mem; /* MSM SDCC mapped address */ + void __iomem *cryptoio; /* ICE HCI mapped address */ + bool ice_hci_support; int pwr_irq; /* power irq */ struct clk *clk; /* main SD/MMC bus clock */ struct clk *pclk; /* SDHC peripheral bus clock */ @@ -248,6 +256,7 @@ struct sdhci_msm_host { bool enhanced_strobe; bool rclk_delay_fix; u32 caps_0; + struct sdhci_msm_ice_data ice; u32 ice_clk_rate; struct sdhci_msm_pm_qos_group *pm_qos; int pm_qos_prev_cpu; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index efd37a9d94f7..90ff537636b1 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1834,6 +1834,50 @@ static int sdhci_get_tuning_cmd(struct sdhci_host *host) return MMC_SEND_TUNING_BLOCK; } +static int sdhci_crypto_cfg(struct sdhci_host *host, struct mmc_request *mrq, + u32 slot) +{ + int err = 0; + + if (host->mmc->inlinecrypt_reset_needed && + host->ops->crypto_engine_reset) { + err = 
host->ops->crypto_engine_reset(host); + if (err) { + pr_err("%s: crypto reset failed\n", + mmc_hostname(host->mmc)); + goto out; + } + host->mmc->inlinecrypt_reset_needed = false; + } + + if (host->ops->crypto_engine_cfg) { + err = host->ops->crypto_engine_cfg(host, mrq, slot); + if (err) { + pr_err("%s: failed to configure crypto\n", + mmc_hostname(host->mmc)); + goto out; + } + } +out: + return err; +} + +static int sdhci_crypto_cfg_end(struct sdhci_host *host, + struct mmc_request *mrq) +{ + int err = 0; + + if (host->ops->crypto_engine_cfg_end) { + err = host->ops->crypto_engine_cfg_end(host, mrq); + if (err) { + pr_err("%s: failed to configure crypto\n", + mmc_hostname(host->mmc)); + return err; + } + } + return 0; +} + static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct sdhci_host *host; @@ -1900,6 +1944,13 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) sdhci_get_tuning_cmd(host)); } + if (host->is_crypto_en) { + spin_unlock_irqrestore(&host->lock, flags); + if (sdhci_crypto_cfg(host, mrq, 0)) + goto end_req; + spin_lock_irqsave(&host->lock, flags); + } + if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) sdhci_send_command(host, mrq->sbc); else @@ -1909,6 +1960,11 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) mmiowb(); spin_unlock_irqrestore(&host->lock, flags); return; +end_req: + mrq->cmd->error = -EIO; + if (mrq->data) + mrq->data->error = -EIO; + mmc_request_done(host->mmc, mrq); } void sdhci_set_bus_width(struct sdhci_host *host, int width) @@ -2953,6 +3009,7 @@ static bool sdhci_request_done(struct sdhci_host *host) mmiowb(); spin_unlock_irqrestore(&host->lock, flags); + sdhci_crypto_cfg_end(host, mrq); mmc_request_done(host->mmc, mrq); return false; @@ -4030,6 +4087,59 @@ static void sdhci_cmdq_post_cqe_halt(struct mmc_host *mmc) SDHCI_INT_RESPONSE, SDHCI_INT_ENABLE); sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS); } +static int 
sdhci_cmdq_crypto_cfg(struct mmc_host *mmc, + struct mmc_request *mrq, u32 slot, u64 *ice_ctx) +{ + struct sdhci_host *host = mmc_priv(mmc); + int err = 0; + + if (!host->is_crypto_en) + return 0; + + if (mmc->inlinecrypt_reset_needed && host->ops->crypto_engine_reset) { + err = host->ops->crypto_engine_reset(host); + if (err) { + pr_err("%s: crypto reset failed\n", + mmc_hostname(host->mmc)); + goto out; + } + mmc->inlinecrypt_reset_needed = false; + } + + if (host->ops->crypto_engine_cmdq_cfg) { + err = host->ops->crypto_engine_cmdq_cfg(host, mrq, + slot, ice_ctx); + if (err) { + pr_err("%s: failed to configure crypto\n", + mmc_hostname(host->mmc)); + goto out; + } + } +out: + return err; +} + +static int sdhci_cmdq_crypto_cfg_end(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + struct sdhci_host *host = mmc_priv(mmc); + + if (!host->is_crypto_en) + return 0; + + return sdhci_crypto_cfg_end(host, mrq); +} + +static void sdhci_cmdq_crypto_cfg_reset(struct mmc_host *mmc, unsigned int slot) +{ + struct sdhci_host *host = mmc_priv(mmc); + + if (!host->is_crypto_en) + return; + + if (host->ops->crypto_cfg_reset) + host->ops->crypto_cfg_reset(host, slot); +} #else static void sdhci_cmdq_set_transfer_params(struct mmc_host *mmc) { @@ -4074,6 +4184,23 @@ static void sdhci_cmdq_clear_set_dumpregs(struct mmc_host *mmc, bool set) static void sdhci_cmdq_post_cqe_halt(struct mmc_host *mmc) { +} + +static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc, + struct mmc_request *mrq, u32 slot, u64 *ice_ctx) +{ + return 0; +} + +static int sdhci_cmdq_crypto_cfg_end(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + return 0; +} + +static void sdhci_cmdq_crypto_cfg_reset(struct mmc_host *mmc, unsigned int slot) +{ + } #endif @@ -4086,6 +4213,9 @@ static const struct cmdq_host_ops sdhci_cmdq_ops = { .enhanced_strobe_mask = sdhci_enhanced_strobe_mask, .post_cqe_halt = sdhci_cmdq_post_cqe_halt, .set_transfer_params = sdhci_cmdq_set_transfer_params, + .crypto_cfg = 
sdhci_cmdq_crypto_cfg, + .crypto_cfg_end = sdhci_cmdq_crypto_cfg_end, + .crypto_cfg_reset = sdhci_cmdq_crypto_cfg_reset, }; #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 47f05e16d685..f2cf328764d9 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -653,6 +653,7 @@ struct sdhci_host { enum sdhci_power_policy power_policy; bool sdio_irq_async_status; + bool is_crypto_en; u32 auto_cmd_err_sts; struct ratelimit_state dbg_dump_rs; @@ -694,6 +695,14 @@ struct sdhci_ops { unsigned int (*get_ro)(struct sdhci_host *host); void (*reset)(struct sdhci_host *host, u8 mask); int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode); + int (*crypto_engine_cfg)(struct sdhci_host *host, + struct mmc_request *mrq, u32 slot); + int (*crypto_engine_cmdq_cfg)(struct sdhci_host *host, + struct mmc_request *mrq, u32 slot, u64 *ice_ctx); + int (*crypto_engine_cfg_end)(struct sdhci_host *host, + struct mmc_request *mrq); + int (*crypto_engine_reset)(struct sdhci_host *host); + void (*crypto_cfg_reset)(struct sdhci_host *host, unsigned int slot); void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); void (*hw_reset)(struct sdhci_host *host); void (*adma_workaround)(struct sdhci_host *host, u32 intmask); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 2adab19052d0..e29a46abfd4f 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2172,6 +2172,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) if (!shost->use_clustering) q->limits.cluster = 0; + if (shost->inlinecrypt_support) + queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q); /* * Set a reasonable default alignment: The larger of 32-byte (dword), * which is a common minimum for HBAs, and the minimum DMA alignment, diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 8d4ef369aa15..a3c906c78a69 100644 --- a/drivers/scsi/ufs/Kconfig +++ 
b/drivers/scsi/ufs/Kconfig @@ -101,6 +101,19 @@ config SCSI_UFS_QCOM Select this if you have UFS controller on QCOM chipset. If unsure, say N. +config SCSI_UFS_QCOM_ICE + bool "QCOM specific hooks to Inline Crypto Engine for UFS driver" + depends on SCSI_UFS_QCOM && CRYPTO_DEV_QCOM_ICE + help + This selects the QCOM specific additions to support Inline Crypto + Engine (ICE). + ICE accelerates the crypto operations and maintains the high UFS + performance. + + Select this if you have ICE supported for UFS on QCOM chipset. + If unsure, say N. + + config SCSI_UFS_TEST tristate "Universal Flash Storage host controller driver unit-tests" depends on SCSI_UFSHCD && IOSCHED_TEST diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index bf374ee1f6e2..935b34a2fa0b 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o +obj-$(CONFIG_SCSI_UFS_QCOM_ICE) += ufs-qcom-ice.o obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c new file mode 100644 index 000000000000..8bb1f54455d1 --- /dev/null +++ b/drivers/scsi/ufs/ufs-qcom-ice.c @@ -0,0 +1,777 @@ +/* + * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include + +#include "ufshcd.h" +#include "ufs-qcom-ice.h" +#include "ufs-qcom-debugfs.h" + +#define UFS_QCOM_CRYPTO_LABEL "ufs-qcom-crypto" +/* Timeout waiting for ICE initialization, that requires TZ access */ +#define UFS_QCOM_ICE_COMPLETION_TIMEOUT_MS 500 + +#define UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN 0 + +static struct workqueue_struct *ice_workqueue; + +static void ufs_qcom_ice_dump_regs(struct ufs_qcom_host *qcom_host, int offset, + int len, char *prefix) +{ + print_hex_dump(KERN_ERR, prefix, + len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE, + 16, 4, qcom_host->hba->mmio_base + offset, len * 4, + false); +} + +void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host) +{ + int i; + + if (!(qcom_host->dbg_print_en & UFS_QCOM_DBG_PRINT_ICE_REGS_EN)) + return; + + ufs_qcom_ice_dump_regs(qcom_host, REG_UFS_QCOM_ICE_CFG, 1, + "REG_UFS_QCOM_ICE_CFG "); + for (i = 0; i < NUM_QCOM_ICE_CTRL_INFO_n_REGS; i++) { + pr_err("REG_UFS_QCOM_ICE_CTRL_INFO_1_%d = 0x%08X\n", i, + ufshcd_readl(qcom_host->hba, + (REG_UFS_QCOM_ICE_CTRL_INFO_1_n + 8 * i))); + + pr_err("REG_UFS_QCOM_ICE_CTRL_INFO_2_%d = 0x%08X\n", i, + ufshcd_readl(qcom_host->hba, + (REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 8 * i))); + } + + if (qcom_host->ice.pdev && qcom_host->ice.vops && + qcom_host->ice.vops->debug) + qcom_host->ice.vops->debug(qcom_host->ice.pdev); +} + +static void ufs_qcom_ice_error_cb(void *host_ctrl, u32 error) +{ + struct ufs_qcom_host *qcom_host = (struct ufs_qcom_host *)host_ctrl; + + dev_err(qcom_host->hba->dev, "%s: Error in ice operation 0x%x", + __func__, error); + + if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_ACTIVE) + qcom_host->ice.state = UFS_QCOM_ICE_STATE_DISABLED; +} + +static struct platform_device *ufs_qcom_ice_get_pdevice(struct device *ufs_dev) +{ + struct device_node *node; + struct platform_device *ice_pdev = NULL; + + node = of_parse_phandle(ufs_dev->of_node, 
UFS_QCOM_CRYPTO_LABEL, 0); + + if (!node) { + dev_err(ufs_dev, "%s: ufs-qcom-crypto property not specified\n", + __func__); + goto out; + } + + ice_pdev = qcom_ice_get_pdevice(node); +out: + return ice_pdev; +} + +static +struct qcom_ice_variant_ops *ufs_qcom_ice_get_vops(struct device *ufs_dev) +{ + struct qcom_ice_variant_ops *ice_vops = NULL; + struct device_node *node; + + node = of_parse_phandle(ufs_dev->of_node, UFS_QCOM_CRYPTO_LABEL, 0); + + if (!node) { + dev_err(ufs_dev, "%s: ufs-qcom-crypto property not specified\n", + __func__); + goto out; + } + + ice_vops = qcom_ice_get_variant_ops(node); + + if (!ice_vops) + dev_err(ufs_dev, "%s: invalid ice_vops\n", __func__); + + of_node_put(node); +out: + return ice_vops; +} + +/** + * ufs_qcom_ice_get_dev() - sets pointers to ICE data structs in UFS QCom host + * @qcom_host: Pointer to a UFS QCom internal host structure. + * + * Sets ICE platform device pointer and ICE vops structure + * corresponding to the current UFS device. + * + * Return: -EINVAL in-case of invalid input parameters: + * qcom_host, qcom_host->hba or qcom_host->hba->dev + * -ENODEV in-case ICE device is not required + * -EPROBE_DEFER in-case ICE is required and hasn't been probed yet + * 0 otherwise + */ +int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host) +{ + struct device *ufs_dev; + int err = 0; + + if (!qcom_host || !qcom_host->hba || !qcom_host->hba->dev) { + pr_err("%s: invalid qcom_host %p or qcom_host->hba or qcom_host->hba->dev\n", + __func__, qcom_host); + err = -EINVAL; + goto out; + } + + ufs_dev = qcom_host->hba->dev; + + qcom_host->ice.vops = ufs_qcom_ice_get_vops(ufs_dev); + qcom_host->ice.pdev = ufs_qcom_ice_get_pdevice(ufs_dev); + + if (qcom_host->ice.pdev == ERR_PTR(-EPROBE_DEFER)) { + dev_err(ufs_dev, "%s: ICE device not probed yet\n", + __func__); + qcom_host->ice.pdev = NULL; + qcom_host->ice.vops = NULL; + err = -EPROBE_DEFER; + goto out; + } + + if (!qcom_host->ice.pdev || !qcom_host->ice.vops) { + dev_err(ufs_dev, 
"%s: invalid platform device %p or vops %p\n", + __func__, qcom_host->ice.pdev, qcom_host->ice.vops); + qcom_host->ice.pdev = NULL; + qcom_host->ice.vops = NULL; + err = -ENODEV; + goto out; + } + + qcom_host->ice.state = UFS_QCOM_ICE_STATE_DISABLED; + +out: + return err; +} + +static void ufs_qcom_ice_cfg_work(struct work_struct *work) +{ + unsigned long flags; + struct ufs_qcom_host *qcom_host = + container_of(work, struct ufs_qcom_host, ice_cfg_work); + + if (!qcom_host->ice.vops->config_start) + return; + + spin_lock_irqsave(&qcom_host->ice_work_lock, flags); + if (!qcom_host->req_pending || + ufshcd_is_shutdown_ongoing(qcom_host->hba)) { + qcom_host->work_pending = false; + spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags); + return; + } + spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags); + + /* + * config_start is called again as previous attempt returned -EAGAIN, + * this call shall now take care of the necessary key setup. + */ + qcom_host->ice.vops->config_start(qcom_host->ice.pdev, + qcom_host->req_pending, NULL, false); + + spin_lock_irqsave(&qcom_host->ice_work_lock, flags); + qcom_host->req_pending = NULL; + qcom_host->work_pending = false; + spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags); +} + +/** + * ufs_qcom_ice_init() - initializes the ICE-UFS interface and ICE device + * @qcom_host: Pointer to a UFS QCom internal host structure. + * qcom_host, qcom_host->hba and qcom_host->hba->dev should all + * be valid pointers. + * + * Return: -EINVAL in-case of an error + * 0 otherwise + */ +int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host) +{ + struct device *ufs_dev = qcom_host->hba->dev; + int err; + + err = qcom_host->ice.vops->init(qcom_host->ice.pdev, + qcom_host, + ufs_qcom_ice_error_cb); + if (err) { + dev_err(ufs_dev, "%s: ice init failed. 
err = %d\n", + __func__, err); + goto out; + } else { + qcom_host->ice.state = UFS_QCOM_ICE_STATE_ACTIVE; + } + + qcom_host->dbg_print_en |= UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN; + if (!ice_workqueue) { + ice_workqueue = alloc_workqueue("ice-set-key", + WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0); + if (!ice_workqueue) { + dev_err(ufs_dev, "%s: workqueue allocation failed.\n", + __func__); + err = -ENOMEM; + goto out; + } + INIT_WORK(&qcom_host->ice_cfg_work, ufs_qcom_ice_cfg_work); + } + +out: + return err; +} + +static inline bool ufs_qcom_is_data_cmd(char cmd_op, bool is_write) +{ + if (is_write) { + if (cmd_op == WRITE_6 || cmd_op == WRITE_10 || + cmd_op == WRITE_16) + return true; + } else { + if (cmd_op == READ_6 || cmd_op == READ_10 || + cmd_op == READ_16) + return true; + } + + return false; +} + +int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host, + struct scsi_cmnd *cmd, u8 *cc_index, bool *enable) +{ + struct ice_data_setting ice_set; + char cmd_op = cmd->cmnd[0]; + int err; + unsigned long flags; + + if (!qcom_host->ice.pdev || !qcom_host->ice.vops) { + dev_dbg(qcom_host->hba->dev, "%s: ice device is not enabled\n", + __func__); + return 0; + } + + if (qcom_host->ice.vops->config_start) { + memset(&ice_set, 0, sizeof(ice_set)); + + spin_lock_irqsave( + &qcom_host->ice_work_lock, flags); + + err = qcom_host->ice.vops->config_start(qcom_host->ice.pdev, + cmd->request, &ice_set, true); + if (err) { + /* + * config_start() returns -EAGAIN when a key slot is + * available but still not configured. As configuration + * requires a non-atomic context, this means we should + * call the function again from the worker thread to do + * the configuration. For this request the error will + * propagate so it will be re-queued. 
+ */ + if (err == -EAGAIN) { + if (!ice_workqueue) { + spin_unlock_irqrestore( + &qcom_host->ice_work_lock, + flags); + + dev_err(qcom_host->hba->dev, + "%s: error %d workqueue NULL\n", + __func__, err); + return -EINVAL; + } + + dev_dbg(qcom_host->hba->dev, + "%s: scheduling task for ice setup\n", + __func__); + + if (!qcom_host->work_pending) { + qcom_host->req_pending = cmd->request; + + if (!queue_work(ice_workqueue, + &qcom_host->ice_cfg_work)) { + qcom_host->req_pending = NULL; + + spin_unlock_irqrestore( + &qcom_host->ice_work_lock, + flags); + + return err; + } + qcom_host->work_pending = true; + } + + } else { + if (err != -EBUSY) + dev_err(qcom_host->hba->dev, + "%s: error in ice_vops->config %d\n", + __func__, err); + } + + spin_unlock_irqrestore(&qcom_host->ice_work_lock, + flags); + + return err; + } + + spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags); + + if (ufs_qcom_is_data_cmd(cmd_op, true)) + *enable = !ice_set.encr_bypass; + else if (ufs_qcom_is_data_cmd(cmd_op, false)) + *enable = !ice_set.decr_bypass; + + if (ice_set.crypto_data.key_index >= 0) + *cc_index = (u8)ice_set.crypto_data.key_index; + } + return 0; +} + +/** + * ufs_qcom_ice_cfg_start() - starts configuring UFS's ICE registers + * for an ICE transaction + * @qcom_host: Pointer to a UFS QCom internal host structure. + * qcom_host, qcom_host->hba and qcom_host->hba->dev should all + * be valid pointers. + * @cmd: Pointer to a valid scsi command. cmd->request should also be + * a valid pointer. 
+ * + * Return: -EINVAL in-case of an error + * 0 otherwise + */ +int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host, + struct scsi_cmnd *cmd) +{ + struct device *dev = qcom_host->hba->dev; + int err = 0; + struct ice_data_setting ice_set; + unsigned int slot = 0; + sector_t lba = 0; + unsigned int ctrl_info_val = 0; + unsigned int bypass = 0; + struct request *req; + char cmd_op; + unsigned long flags; + + if (!qcom_host->ice.pdev || !qcom_host->ice.vops) { + dev_dbg(dev, "%s: ice device is not enabled\n", __func__); + goto out; + } + + if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) { + dev_err(dev, "%s: ice state (%d) is not active\n", + __func__, qcom_host->ice.state); + return -EINVAL; + } + + if (qcom_host->hw_ver.major >= 0x3) { + /* + * ICE 3.0 crypto sequences were changed, + * CTRL_INFO register no longer exists + * and doesn't need to be configured. + * The configuration is done via utrd. + */ + return 0; + } + + req = cmd->request; + if (req->bio) + lba = (req->bio->bi_iter.bi_sector) >> + UFS_QCOM_ICE_TR_DATA_UNIT_4_KB; + + slot = req->tag; + if (slot < 0 || slot > qcom_host->hba->nutrs) { + dev_err(dev, "%s: slot (%d) is out of boundaries (0...%d)\n", + __func__, slot, qcom_host->hba->nutrs); + return -EINVAL; + } + + + memset(&ice_set, 0, sizeof(ice_set)); + if (qcom_host->ice.vops->config_start) { + + spin_lock_irqsave( + &qcom_host->ice_work_lock, flags); + + err = qcom_host->ice.vops->config_start(qcom_host->ice.pdev, + req, &ice_set, true); + if (err) { + /* + * config_start() returns -EAGAIN when a key slot is + * available but still not configured. As configuration + * requires a non-atomic context, this means we should + * call the function again from the worker thread to do + * the configuration. For this request the error will + * propagate so it will be re-queued. 
+ */ + if (err == -EAGAIN) { + if (!ice_workqueue) { + spin_unlock_irqrestore( + &qcom_host->ice_work_lock, + flags); + + dev_err(qcom_host->hba->dev, + "%s: error %d workqueue NULL\n", + __func__, err); + return -EINVAL; + } + + dev_dbg(qcom_host->hba->dev, + "%s: scheduling task for ice setup\n", + __func__); + + if (!qcom_host->work_pending) { + + qcom_host->req_pending = cmd->request; + if (!queue_work(ice_workqueue, + &qcom_host->ice_cfg_work)) { + qcom_host->req_pending = NULL; + + spin_unlock_irqrestore( + &qcom_host->ice_work_lock, + flags); + + return err; + } + qcom_host->work_pending = true; + } + + } else { + if (err != -EBUSY) + dev_err(qcom_host->hba->dev, + "%s: error in ice_vops->config %d\n", + __func__, err); + } + + spin_unlock_irqrestore( + &qcom_host->ice_work_lock, flags); + + return err; + } + + spin_unlock_irqrestore( + &qcom_host->ice_work_lock, flags); + } + + cmd_op = cmd->cmnd[0]; + +#define UFS_QCOM_DIR_WRITE true +#define UFS_QCOM_DIR_READ false + /* if non data command, bypass shall be enabled */ + if (!ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_WRITE) && + !ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_READ)) + bypass = UFS_QCOM_ICE_ENABLE_BYPASS; + /* if writing data command */ + else if (ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_WRITE)) + bypass = ice_set.encr_bypass ? UFS_QCOM_ICE_ENABLE_BYPASS : + UFS_QCOM_ICE_DISABLE_BYPASS; + /* if reading data command */ + else if (ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_READ)) + bypass = ice_set.decr_bypass ? 
UFS_QCOM_ICE_ENABLE_BYPASS : + UFS_QCOM_ICE_DISABLE_BYPASS; + + + /* Configure ICE index */ + ctrl_info_val = + (ice_set.crypto_data.key_index & + MASK_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX) + << OFFSET_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX; + + /* Configure data unit size of transfer request */ + ctrl_info_val |= + UFS_QCOM_ICE_TR_DATA_UNIT_4_KB + << OFFSET_UFS_QCOM_ICE_CTRL_INFO_CDU; + + /* Configure ICE bypass mode */ + ctrl_info_val |= + (bypass & MASK_UFS_QCOM_ICE_CTRL_INFO_BYPASS) + << OFFSET_UFS_QCOM_ICE_CTRL_INFO_BYPASS; + + if (qcom_host->hw_ver.major == 0x1) { + ufshcd_writel(qcom_host->hba, lba, + (REG_UFS_QCOM_ICE_CTRL_INFO_1_n + 8 * slot)); + + ufshcd_writel(qcom_host->hba, ctrl_info_val, + (REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 8 * slot)); + } + if (qcom_host->hw_ver.major == 0x2) { + ufshcd_writel(qcom_host->hba, (lba & 0xFFFFFFFF), + (REG_UFS_QCOM_ICE_CTRL_INFO_1_n + 16 * slot)); + + ufshcd_writel(qcom_host->hba, ((lba >> 32) & 0xFFFFFFFF), + (REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 16 * slot)); + + ufshcd_writel(qcom_host->hba, ctrl_info_val, + (REG_UFS_QCOM_ICE_CTRL_INFO_3_n + 16 * slot)); + } + + /* + * Ensure UFS-ICE registers are being configured + * before next operation, otherwise UFS Host Controller might + * set get errors + */ + mb(); +out: + return err; +} + +/** + * ufs_qcom_ice_cfg_end() - finishes configuring UFS's ICE registers + * for an ICE transaction + * @qcom_host: Pointer to a UFS QCom internal host structure. + * qcom_host, qcom_host->hba and + * qcom_host->hba->dev should all + * be valid pointers. + * @cmd: Pointer to a valid scsi command. cmd->request should also be + * a valid pointer. 
+ * + * Return: -EINVAL in-case of an error + * 0 otherwise + */ +int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host, struct request *req) +{ + int err = 0; + struct device *dev = qcom_host->hba->dev; + + if (qcom_host->ice.vops->config_end) { + err = qcom_host->ice.vops->config_end(req); + if (err) { + dev_err(dev, "%s: error in ice_vops->config_end %d\n", + __func__, err); + return err; + } + } + + return 0; +} + +/** + * ufs_qcom_ice_reset() - resets UFS-ICE interface and ICE device + * @qcom_host: Pointer to a UFS QCom internal host structure. + * qcom_host, qcom_host->hba and qcom_host->hba->dev should all + * be valid pointers. + * + * Return: -EINVAL in-case of an error + * 0 otherwise + */ +int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host) +{ + struct device *dev = qcom_host->hba->dev; + int err = 0; + + if (!qcom_host->ice.pdev) { + dev_dbg(dev, "%s: ice device is not enabled\n", __func__); + goto out; + } + + if (!qcom_host->ice.vops) { + dev_err(dev, "%s: invalid ice_vops\n", __func__); + return -EINVAL; + } + + if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) + goto out; + + if (qcom_host->ice.vops->reset) { + err = qcom_host->ice.vops->reset(qcom_host->ice.pdev); + if (err) { + dev_err(dev, "%s: ice_vops->reset failed. err %d\n", + __func__, err); + goto out; + } + } + + if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) { + dev_err(qcom_host->hba->dev, + "%s: error. ice.state (%d) is not in active state\n", + __func__, qcom_host->ice.state); + err = -EINVAL; + } + +out: + return err; +} + + +/** + * ufs_qcom_ice_resume() - resumes UFS-ICE interface and ICE device from power + * collapse + * @qcom_host: Pointer to a UFS QCom internal host structure. + * qcom_host, qcom_host->hba and qcom_host->hba->dev should all + * be valid pointers. 
+ * + * Return: -EINVAL in-case of an error + * 0 otherwise + */ +int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host) +{ + struct device *dev = qcom_host->hba->dev; + int err = 0; + + if (!qcom_host->ice.pdev) { + dev_dbg(dev, "%s: ice device is not enabled\n", __func__); + goto out; + } + + if (qcom_host->ice.state != + UFS_QCOM_ICE_STATE_SUSPENDED) { + goto out; + } + + if (!qcom_host->ice.vops) { + dev_err(dev, "%s: invalid ice_vops\n", __func__); + return -EINVAL; + } + + if (qcom_host->ice.vops->resume) { + err = qcom_host->ice.vops->resume(qcom_host->ice.pdev); + if (err) { + dev_err(dev, "%s: ice_vops->resume failed. err %d\n", + __func__, err); + return err; + } + } + qcom_host->ice.state = UFS_QCOM_ICE_STATE_ACTIVE; +out: + return err; +} + +/** + * ufs_qcom_is_ice_busy() - lets the caller of the function know if + * there is any ongoing operation in ICE in workqueue context. + * @qcom_host: Pointer to a UFS QCom internal host structure. + * qcom_host should be a valid pointer. + * + * Return: 1 if ICE is busy, 0 if it is free. + * -EINVAL in case of error. + */ +int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host) +{ + if (!qcom_host) { + pr_err("%s: invalid qcom_host %pK", __func__, qcom_host); + return -EINVAL; + } + + if (qcom_host->req_pending) + return 1; + else + return 0; +} + +/** + * ufs_qcom_ice_suspend() - suspends UFS-ICE interface and ICE device + * @qcom_host: Pointer to a UFS QCom internal host structure. + * qcom_host, qcom_host->hba and qcom_host->hba->dev should all + * be valid pointers. 
+ * + * Return: -EINVAL in-case of an error + * 0 otherwise + */ +int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host) +{ + struct device *dev = qcom_host->hba->dev; + int err = 0; + + if (!qcom_host->ice.pdev) { + dev_dbg(dev, "%s: ice device is not enabled\n", __func__); + goto out; + } + + if (qcom_host->ice.vops->suspend) { + err = qcom_host->ice.vops->suspend(qcom_host->ice.pdev); + if (err) { + dev_err(qcom_host->hba->dev, + "%s: ice_vops->suspend failed. err %d\n", + __func__, err); + return -EINVAL; + } + } + + if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_ACTIVE) { + qcom_host->ice.state = UFS_QCOM_ICE_STATE_SUSPENDED; + } else if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_DISABLED) { + dev_err(qcom_host->hba->dev, + "%s: ice state is invalid: disabled\n", + __func__); + err = -EINVAL; + } + +out: + return err; +} + +/** + * ufs_qcom_ice_get_status() - returns the status of an ICE transaction + * @qcom_host: Pointer to a UFS QCom internal host structure. + * qcom_host, qcom_host->hba and qcom_host->hba->dev should all + * be valid pointers. + * @ice_status: Pointer to a valid output parameter. + * < 0 in case of ICE transaction failure. + * 0 otherwise. + * + * Return: -EINVAL in-case of an error + * 0 otherwise + */ +int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, int *ice_status) +{ + struct device *dev = NULL; + int err = 0; + int stat = -EINVAL; + + *ice_status = 0; + + dev = qcom_host->hba->dev; + if (!dev) { + err = -EINVAL; + goto out; + } + + if (!qcom_host->ice.pdev) { + dev_dbg(dev, "%s: ice device is not enabled\n", __func__); + goto out; + } + + if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) { + err = -EINVAL; + goto out; + } + + if (!qcom_host->ice.vops) { + dev_err(dev, "%s: invalid ice_vops\n", __func__); + return -EINVAL; + } + + if (qcom_host->ice.vops->status) { + stat = qcom_host->ice.vops->status(qcom_host->ice.pdev); + if (stat < 0) { + dev_err(dev, "%s: ice_vops->status failed. 
stat %d\n", + __func__, stat); + err = -EINVAL; + goto out; + } + + *ice_status = stat; + } + +out: + return err; +} diff --git a/drivers/scsi/ufs/ufs-qcom-ice.h b/drivers/scsi/ufs/ufs-qcom-ice.h new file mode 100644 index 000000000000..88ffeb35f9f3 --- /dev/null +++ b/drivers/scsi/ufs/ufs-qcom-ice.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _UFS_QCOM_ICE_H_ +#define _UFS_QCOM_ICE_H_ + +#include + +#include "ufs-qcom.h" + +/* + * UFS host controller ICE registers. 
There are n [0..31] + * of each of these registers + */ +enum { + REG_UFS_QCOM_ICE_CFG = 0x2200, + REG_UFS_QCOM_ICE_CTRL_INFO_1_n = 0x2204, + REG_UFS_QCOM_ICE_CTRL_INFO_2_n = 0x2208, + REG_UFS_QCOM_ICE_CTRL_INFO_3_n = 0x220C, +}; +#define NUM_QCOM_ICE_CTRL_INFO_n_REGS 32 + +/* UFS QCOM ICE CTRL Info register offset */ +enum { + OFFSET_UFS_QCOM_ICE_CTRL_INFO_BYPASS = 0, + OFFSET_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX = 0x1, + OFFSET_UFS_QCOM_ICE_CTRL_INFO_CDU = 0x6, +}; + +/* UFS QCOM ICE CTRL Info register masks */ +enum { + MASK_UFS_QCOM_ICE_CTRL_INFO_BYPASS = 0x1, + MASK_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX = 0x1F, + MASK_UFS_QCOM_ICE_CTRL_INFO_CDU = 0x8, +}; + +/* UFS QCOM ICE encryption/decryption bypass state */ +enum { + UFS_QCOM_ICE_DISABLE_BYPASS = 0, + UFS_QCOM_ICE_ENABLE_BYPASS = 1, +}; + +/* UFS QCOM ICE Crypto Data Unit of target DUN of Transfer Request */ +enum { + UFS_QCOM_ICE_TR_DATA_UNIT_512_B = 0, + UFS_QCOM_ICE_TR_DATA_UNIT_1_KB = 1, + UFS_QCOM_ICE_TR_DATA_UNIT_2_KB = 2, + UFS_QCOM_ICE_TR_DATA_UNIT_4_KB = 3, + UFS_QCOM_ICE_TR_DATA_UNIT_8_KB = 4, + UFS_QCOM_ICE_TR_DATA_UNIT_16_KB = 5, + UFS_QCOM_ICE_TR_DATA_UNIT_32_KB = 6, +}; + +/* UFS QCOM ICE internal state */ +enum { + UFS_QCOM_ICE_STATE_DISABLED = 0, + UFS_QCOM_ICE_STATE_ACTIVE = 1, + UFS_QCOM_ICE_STATE_SUSPENDED = 2, +}; + +#ifdef CONFIG_SCSI_UFS_QCOM_ICE +int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host); +int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host); +int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host, + struct scsi_cmnd *cmd, u8 *cc_index, bool *enable); +int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host, + struct scsi_cmnd *cmd); +int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host, + struct request *req); +int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host); +int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host); +int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host); +int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, int *ice_status); 
+void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host); +int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host); +#else +inline int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host) +{ + if (qcom_host) { + qcom_host->ice.pdev = NULL; + qcom_host->ice.vops = NULL; + } + return -ENODEV; +} +inline int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host) +{ + return 0; +} +inline int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host, + struct scsi_cmnd *cmd) +{ + return 0; +} +inline int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host, + struct request *req) +{ + return 0; +} +inline int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host) +{ + return 0; +} +inline int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host) +{ + return 0; +} +inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host) +{ + return 0; +} +inline int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, + int *ice_status) +{ + return 0; +} +inline void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host) +{ + return; +} +inline int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host) +{ + return 0; +} +#endif /* CONFIG_SCSI_UFS_QCOM_ICE */ + +#endif /* UFS_QCOM_ICE_H_ */ diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index ff66f7c5893a..195e0428cb54 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2020, Linux Foundation. All rights reserved. + * Copyright (c) 2013-2019, Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -28,6 +28,7 @@ #include "unipro.h" #include "ufs-qcom.h" #include "ufshci.h" +#include "ufs-qcom-ice.h" #include "ufs-qcom-debugfs.h" #include "ufs_quirks.h" @@ -405,6 +406,14 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, * is initialized. 
*/ err = ufs_qcom_enable_lane_clks(host); + if (!err && host->ice.pdev) { + err = ufs_qcom_ice_init(host); + if (err) { + dev_err(hba->dev, "%s: ICE init failed (%d)\n", + __func__, err); + err = -EINVAL; + } + } break; case POST_CHANGE: @@ -840,6 +849,7 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) ufs_qcom_config_vreg(hba->dev, host->vccq_parent, false); + ufs_qcom_ice_suspend(host); if (ufs_qcom_is_link_off(hba)) { /* Assert PHY soft reset */ ufs_qcom_assert_reset(hba); @@ -879,6 +889,13 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) if (err) goto out; + err = ufs_qcom_ice_resume(host); + if (err) { + dev_err(hba->dev, "%s: ufs_qcom_ice_resume failed, err = %d\n", + __func__, err); + goto out; + } + hba->is_sys_suspended = false; out: @@ -918,6 +935,119 @@ out: return ret; } +#ifdef CONFIG_SCSI_UFS_QCOM_ICE +static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct request *req; + int ret; + + if (lrbp->cmd && lrbp->cmd->request) + req = lrbp->cmd->request; + else + return 0; + + /* Use request LBA or given dun as the DUN value */ + if (req->bio) { +#ifdef CONFIG_PFK + if (bio_dun(req->bio)) { + /* dun @bio can be split, so we have to adjust offset */ + *dun = bio_dun(req->bio); + } else { + *dun = req->bio->bi_iter.bi_sector; + *dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB; + } +#else + *dun = req->bio->bi_iter.bi_sector; + *dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB; +#endif + } + ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable); + + return ret; +} + +static +int ufs_qcom_crytpo_engine_cfg_start(struct ufs_hba *hba, unsigned int task_tag) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; + int err = 0; + + if (!host->ice.pdev || + !lrbp->cmd || + (lrbp->command_type != UTP_CMD_TYPE_SCSI && + lrbp->command_type != 
UTP_CMD_TYPE_UFS_STORAGE)) + goto out; + + err = ufs_qcom_ice_cfg_start(host, lrbp->cmd); +out: + return err; +} + +static +int ufs_qcom_crytpo_engine_cfg_end(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, struct request *req) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int err = 0; + + if (!host->ice.pdev || (lrbp->command_type != UTP_CMD_TYPE_SCSI && + lrbp->command_type != UTP_CMD_TYPE_UFS_STORAGE)) + goto out; + + err = ufs_qcom_ice_cfg_end(host, req); +out: + return err; +} + +static +int ufs_qcom_crytpo_engine_reset(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int err = 0; + + if (!host->ice.pdev) + goto out; + + err = ufs_qcom_ice_reset(host); +out: + return err; +} + +static int ufs_qcom_crypto_engine_get_status(struct ufs_hba *hba, u32 *status) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + if (!status) + return -EINVAL; + + return ufs_qcom_ice_get_status(host, status); +} + +static int ufs_qcom_crypto_get_pending_req_status(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int err = 0; + + if (!host->ice.pdev) + goto out; + + err = ufs_qcom_is_ice_busy(host); +out: + return err; +} + +#else /* !CONFIG_SCSI_UFS_QCOM_ICE */ +#define ufs_qcom_crypto_req_setup NULL +#define ufs_qcom_crytpo_engine_cfg_start NULL +#define ufs_qcom_crytpo_engine_cfg_end NULL +#define ufs_qcom_crytpo_engine_reset NULL +#define ufs_qcom_crypto_engine_get_status NULL +#define ufs_qcom_crypto_get_pending_req_status NULL +#endif /* CONFIG_SCSI_UFS_QCOM_ICE */ + struct ufs_qcom_dev_params { u32 pwm_rx_gear; /* pwm rx gear to work in */ u32 pwm_tx_gear; /* pwm tx gear to work in */ @@ -1499,6 +1629,7 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, enum ufs_notify_change_status status) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int err = 0; /* * In case ufs_qcom_init() is not yet done, simply ignore. 
@@ -1517,7 +1648,14 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, if (ufshcd_is_hs_mode(&hba->pwr_info)) ufs_qcom_dev_ref_clk_ctrl(host, true); + err = ufs_qcom_ice_resume(host); + if (err) + goto out; } else if (!on && (status == PRE_CHANGE)) { + err = ufs_qcom_ice_suspend(host); + if (err) + goto out; + /* * If auto hibern8 is supported then the link will already * be in hibern8 state and the ref clock can be gated. @@ -1536,7 +1674,8 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, } } - return 0; +out: + return err; } #ifdef CONFIG_SMP /* CONFIG_SMP */ @@ -2070,9 +2209,36 @@ static int ufs_qcom_init(struct ufs_hba *hba) /* Make a two way bind between the qcom host and the hba */ host->hba = hba; + spin_lock_init(&host->ice_work_lock); ufshcd_set_variant(hba, host); + err = ufs_qcom_ice_get_dev(host); + if (err == -EPROBE_DEFER) { + /* + * UFS driver might be probed before ICE driver does. + * In that case we would like to return EPROBE_DEFER code + * in order to delay its probing. + */ + dev_err(dev, "%s: required ICE device not probed yet err = %d\n", + __func__, err); + goto out_variant_clear; + + } else if (err == -ENODEV) { + /* + * ICE device is not enabled in DTS file. No need for further + * initialization of ICE driver. 
+ */ + dev_warn(dev, "%s: ICE device is not enabled", + __func__); + } else if (err) { + dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n", + __func__, err); + goto out_variant_clear; + } else { + hba->host->inlinecrypt_support = 1; + } + host->generic_phy = devm_phy_get(dev, "ufsphy"); if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) { @@ -2646,6 +2812,7 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep) usleep_range(1000, 1100); ufs_qcom_phy_dbg_register_dump(phy); usleep_range(1000, 1100); + ufs_qcom_ice_print_regs(host); } /** @@ -2676,6 +2843,15 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = { #endif }; +static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = { + .crypto_req_setup = ufs_qcom_crypto_req_setup, + .crypto_engine_cfg_start = ufs_qcom_crytpo_engine_cfg_start, + .crypto_engine_cfg_end = ufs_qcom_crytpo_engine_cfg_end, + .crypto_engine_reset = ufs_qcom_crytpo_engine_reset, + .crypto_engine_get_status = ufs_qcom_crypto_engine_get_status, + .crypto_get_req_status = ufs_qcom_crypto_get_pending_req_status, +}; + static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = { .req_start = ufs_qcom_pm_qos_req_start, .req_end = ufs_qcom_pm_qos_req_end, @@ -2684,6 +2860,7 @@ static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = { static struct ufs_hba_variant ufs_hba_qcom_variant = { .name = "qcom", .vops = &ufs_hba_qcom_vops, + .crypto_vops = &ufs_hba_crypto_variant_ops, .pm_qos_vops = &ufs_hba_pm_qos_variant_ops, }; diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h index 34140f91437d..95304a581453 100644 --- a/drivers/scsi/ufs/ufs-qcom.h +++ b/drivers/scsi/ufs/ufs-qcom.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -236,6 +236,26 @@ struct ufs_qcom_testbus { u8 select_minor; }; +/** + * struct ufs_qcom_ice_data - ICE related information + * @vops: pointer to variant operations of ICE + * @async_done: completion for supporting ICE's driver asynchronous nature + * @pdev: pointer to the proper ICE platform device + * @state: UFS-ICE interface's internal state (see + * ufs-qcom-ice.h for possible internal states) + * @quirks: UFS-ICE interface related quirks + * @crypto_engine_err: crypto engine errors + */ +struct ufs_qcom_ice_data { + struct qcom_ice_variant_ops *vops; + struct platform_device *pdev; + int state; + + u16 quirks; + + bool crypto_engine_err; +}; + #ifdef CONFIG_DEBUG_FS struct qcom_debugfs_files { struct dentry *debugfs_root; @@ -343,6 +363,7 @@ struct ufs_qcom_host { bool disable_lpm; bool is_lane_clks_enabled; bool sec_cfg_updated; + struct ufs_qcom_ice_data ice; void __iomem *dev_ref_clk_ctrl_mmio; bool is_dev_ref_clk_enabled; @@ -357,6 +378,8 @@ struct ufs_qcom_host { u32 dbg_print_en; struct ufs_qcom_testbus testbus; + spinlock_t ice_work_lock; + struct work_struct ice_cfg_work; struct request *req_pending; struct ufs_vreg *vddp_ref_clk; struct ufs_vreg *vccq_parent; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index d125c70bfe72..f1896b29b3ac 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -1409,6 +1409,8 @@ static inline void ufshcd_hba_start(struct ufs_hba *hba) { u32 val = CONTROLLER_ENABLE; + if (ufshcd_is_crypto_supported(hba)) + val |= CRYPTO_GENERAL_ENABLE; ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE); } @@ -3358,6 +3360,41 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); } +static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp) +{ + struct utp_transfer_req_desc 
*req_desc = lrbp->utr_descriptor_ptr; + u8 cc_index = 0; + bool enable = false; + u64 dun = 0; + int ret; + + /* + * Call vendor specific code to get crypto info for this request: + * enable, crypto config. index, DUN. + * If bypass is set, don't bother setting the other fields. + */ + ret = ufshcd_vops_crypto_req_setup(hba, lrbp, &cc_index, &enable, &dun); + if (ret) { + if (ret != -EAGAIN) { + dev_err(hba->dev, + "%s: failed to setup crypto request (%d)\n", + __func__, ret); + } + + return ret; + } + + if (!enable) + goto out; + + req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE; + req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF); + req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF); +out: + return 0; +} + /** * ufshcd_prepare_req_desc_hdr() - Fills the requests header * descriptor according to request @@ -3406,6 +3443,9 @@ static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, req_desc->prd_table_length = 0; + if (ufshcd_is_crypto_supported(hba)) + return ufshcd_prepare_crypto_utrd(hba, lrbp); + return 0; } @@ -3669,7 +3709,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) err = ufshcd_get_read_lock(hba, cmd->device->lun); if (unlikely(err < 0)) { if (err == -EPERM) { - return SCSI_MLQUEUE_HOST_BUSY; + if (!ufshcd_vops_crypto_engine_get_req_status(hba)) { + set_host_byte(cmd, DID_ERROR); + cmd->scsi_done(cmd); + return 0; + } else { + return SCSI_MLQUEUE_HOST_BUSY; + } } if (err == -EAGAIN) return SCSI_MLQUEUE_HOST_BUSY; @@ -3805,6 +3851,22 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) goto out; } + err = ufshcd_vops_crypto_engine_cfg_start(hba, tag); + if (err) { + if (err != -EAGAIN) + dev_err(hba->dev, + "%s: failed to configure crypto engine %d\n", + __func__, err); + + scsi_dma_unmap(lrbp->cmd); + lrbp->cmd = NULL; + clear_bit_unlock(tag, &hba->lrb_in_use); + ufshcd_release_all(hba); + ufshcd_vops_pm_qos_req_end(hba, cmd->request, true); + + goto out; + } + /* Make 
sure descriptors are ready before ringing the doorbell */ wmb(); @@ -3820,6 +3882,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) clear_bit_unlock(tag, &hba->lrb_in_use); ufshcd_release_all(hba); ufshcd_vops_pm_qos_req_end(hba, cmd->request, true); + ufshcd_vops_crypto_engine_cfg_end(hba, lrbp, cmd->request); dev_err(hba->dev, "%s: failed sending command, %d\n", __func__, err); err = DID_ERROR; @@ -6389,6 +6452,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, */ ufshcd_vops_pm_qos_req_end(hba, cmd->request, false); + ufshcd_vops_crypto_engine_cfg_end(hba, + lrbp, cmd->request); } req = cmd->request; @@ -6471,6 +6536,8 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result) */ ufshcd_vops_pm_qos_req_end(hba, cmd->request, true); + ufshcd_vops_crypto_engine_cfg_end(hba, + lrbp, cmd->request); } /* Do not touch lrbp after scsi done */ cmd->scsi_done(cmd); @@ -7407,6 +7474,8 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) ufsdbg_error_inject_dispatcher(hba, ERR_INJECT_INTR, intr_status, &intr_status); + ufshcd_vops_crypto_engine_get_status(hba, &hba->ce_error); + hba->errors = UFSHCD_ERROR_MASK & intr_status; if (hba->errors || hba->ce_error) retval |= ufshcd_check_errors(hba); @@ -7883,6 +7952,16 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) goto out; } + if (!err) { + err = ufshcd_vops_crypto_engine_reset(hba); + if (err) { + dev_err(hba->dev, + "%s: failed to reset crypto engine %d\n", + __func__, err); + goto out; + } + } + out: if (err) dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index a51cc94ad603..e94b04baa3ef 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -369,6 +369,30 @@ struct ufs_hba_variant_ops { #endif }; +/** + * struct ufs_hba_crypto_variant_ops - variant specific crypto callbacks + * @crypto_req_setup: retreieve the 
necessary cryptographic arguments to setup + a requests's transfer descriptor. + * @crypto_engine_cfg_start: start configuring cryptographic engine + * according to tag + * parameter + * @crypto_engine_cfg_end: end configuring cryptographic engine + * according to tag parameter + * @crypto_engine_reset: perform reset to the cryptographic engine + * @crypto_engine_get_status: get errors status of the cryptographic engine + * @crypto_get_req_status: Check if crypto driver still holds request or not + */ +struct ufs_hba_crypto_variant_ops { + int (*crypto_req_setup)(struct ufs_hba *, struct ufshcd_lrb *lrbp, + u8 *cc_index, bool *enable, u64 *dun); + int (*crypto_engine_cfg_start)(struct ufs_hba *, unsigned int); + int (*crypto_engine_cfg_end)(struct ufs_hba *, struct ufshcd_lrb *, + struct request *); + int (*crypto_engine_reset)(struct ufs_hba *); + int (*crypto_engine_get_status)(struct ufs_hba *, u32 *); + int (*crypto_get_req_status)(struct ufs_hba *); +}; + /** * struct ufs_hba_pm_qos_variant_ops - variant specific PM QoS callbacks */ @@ -385,6 +409,7 @@ struct ufs_hba_variant { struct device *dev; const char *name; struct ufs_hba_variant_ops *vops; + struct ufs_hba_crypto_variant_ops *crypto_vops; struct ufs_hba_pm_qos_variant_ops *pm_qos_vops; }; @@ -1476,6 +1501,55 @@ static inline void ufshcd_vops_remove_debugfs(struct ufs_hba *hba) } #endif +static inline int ufshcd_vops_crypto_req_setup(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun) +{ + if (hba->var && hba->var->crypto_vops && + hba->var->crypto_vops->crypto_req_setup) + return hba->var->crypto_vops->crypto_req_setup(hba, lrbp, + cc_index, enable, dun); + return 0; +} + +static inline int ufshcd_vops_crypto_engine_cfg_start(struct ufs_hba *hba, + unsigned int task_tag) +{ + if (hba->var && hba->var->crypto_vops && + hba->var->crypto_vops->crypto_engine_cfg_start) + return hba->var->crypto_vops->crypto_engine_cfg_start + (hba, task_tag); + return 0; +} + +static 
inline int ufshcd_vops_crypto_engine_cfg_end(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, + struct request *req) +{ + if (hba->var && hba->var->crypto_vops && + hba->var->crypto_vops->crypto_engine_cfg_end) + return hba->var->crypto_vops->crypto_engine_cfg_end + (hba, lrbp, req); + return 0; +} + +static inline int ufshcd_vops_crypto_engine_reset(struct ufs_hba *hba) +{ + if (hba->var && hba->var->crypto_vops && + hba->var->crypto_vops->crypto_engine_reset) + return hba->var->crypto_vops->crypto_engine_reset(hba); + return 0; +} + +static inline int ufshcd_vops_crypto_engine_get_status(struct ufs_hba *hba, + u32 *status) +{ + if (hba->var && hba->var->crypto_vops && + hba->var->crypto_vops->crypto_engine_get_status) + return hba->var->crypto_vops->crypto_engine_get_status(hba, + status); + return 0; +} + static inline void ufshcd_vops_pm_qos_req_start(struct ufs_hba *hba, struct request *req) { @@ -1491,4 +1565,13 @@ static inline void ufshcd_vops_pm_qos_req_end(struct ufs_hba *hba, hba->var->pm_qos_vops->req_end(hba, req, lock); } +static inline int ufshcd_vops_crypto_engine_get_req_status(struct ufs_hba *hba) + +{ + if (hba->var && hba->var->crypto_vops && + hba->var->crypto_vops->crypto_get_req_status) + return hba->var->crypto_vops->crypto_get_req_status(hba); + return 0; +} + #endif /* End of Header */ diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile index 0a78543f6cec..f36e47a10455 100644 --- a/fs/crypto/Makefile +++ b/fs/crypto/Makefile @@ -1,11 +1,15 @@ obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o +ccflags-y += -Ifs/ext4 +ccflags-y += -Ifs/f2fs + fscrypto-y := crypto.o \ fname.o \ hkdf.o \ hooks.o \ keyring.o \ keysetup.o \ + fscrypt_ice.o \ keysetup_v1.o \ policy.o diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index 699bb4d426f2..b0033880d8c9 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c @@ -33,10 +33,14 @@ void fscrypt_decrypt_bio(struct bio *bio) bio_for_each_segment_all(bv, bio, i) { struct page *page = bv->bv_page; - int ret = 
fscrypt_decrypt_pagecache_blocks(page, - bv->bv_len, bv->bv_offset); - if (ret) - SetPageError(page); + if (fscrypt_using_hardware_encryption(page->mapping->host)) { + SetPageUptodate(page); + } else { + int ret = fscrypt_decrypt_pagecache_blocks(page, + bv->bv_len, bv->bv_offset); + if (ret) + SetPageError(page); + } } } EXPORT_SYMBOL(fscrypt_decrypt_bio); @@ -68,7 +72,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, } bio_set_dev(bio, inode->i_sb->s_bdev); bio->bi_iter.bi_sector = pblk << (blockbits - 9); - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); + bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_NOENCRYPT); ret = bio_add_page(bio, ciphertext_page, blocksize, 0); if (WARN_ON(ret != blocksize)) { /* should never happen! */ diff --git a/fs/crypto/fscrypt_ice.c b/fs/crypto/fscrypt_ice.c new file mode 100644 index 000000000000..c5b6bdf3b3eb --- /dev/null +++ b/fs/crypto/fscrypt_ice.c @@ -0,0 +1,190 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "fscrypt_ice.h" + +extern int fscrypt_get_mode_key_size(int mode); + +int fscrypt_using_hardware_encryption(const struct inode *inode) +{ + struct fscrypt_info *ci = inode->i_crypt_info; + + return S_ISREG(inode->i_mode) && ci && + (fscrypt_policy_contents_mode(&(ci->ci_policy)) == FSCRYPT_MODE_PRIVATE); +} +EXPORT_SYMBOL(fscrypt_using_hardware_encryption); + +size_t fscrypt_get_ice_encryption_key_size(const struct inode *inode) +{ + struct fscrypt_info *ci = NULL; + + if (inode) + ci = inode->i_crypt_info; + if (!ci) + return 0; + + return fscrypt_get_mode_key_size(fscrypt_policy_contents_mode(&(ci->ci_policy))) / 2; +} + +size_t fscrypt_get_ice_encryption_salt_size(const struct inode *inode) +{ + struct fscrypt_info *ci = NULL; + + if (inode) + ci = inode->i_crypt_info; + if (!ci) + return 0; + + return fscrypt_get_mode_key_size(fscrypt_policy_contents_mode(&(ci->ci_policy))) / 2; +} + +/* + * Retrieves encryption key from the inode + */ +char *fscrypt_get_ice_encryption_key(const struct inode *inode) +{ + struct fscrypt_info *ci = NULL; + + if (!inode) + return NULL; + + ci = inode->i_crypt_info; + if (!ci) + return NULL; + + return &(ci->ci_raw_key[0]); +} + +/* + * Retrieves encryption salt from the inode + */ +char *fscrypt_get_ice_encryption_salt(const struct inode *inode) +{ + struct fscrypt_info *ci = NULL; + int size = 0; + + if (!inode) + return NULL; + + ci = inode->i_crypt_info; + if (!ci) + return NULL; + + size = fscrypt_get_ice_encryption_key_size(inode); + if (!size) + return NULL; + + return &(ci->ci_raw_key[size]); +} + +/* + * returns true if the cipher mode in inode is AES XTS + */ +int fscrypt_is_aes_xts_cipher(const struct inode *inode) +{ + struct fscrypt_info *ci = inode->i_crypt_info; + + if (!ci) + return 0; + + return (fscrypt_policy_contents_mode(&(ci->ci_policy)) == FSCRYPT_MODE_PRIVATE); +} + +/* + * returns true if encryption info in both inodes is equal + */ +bool fscrypt_is_ice_encryption_info_equal(const struct 
inode *inode1, + const struct inode *inode2) +{ + char *key1 = NULL; + char *key2 = NULL; + char *salt1 = NULL; + char *salt2 = NULL; + + if (!inode1 || !inode2) + return false; + + if (inode1 == inode2) + return true; + + /* both do not belong to ice, so we don't care, they are equal + *for us + */ + if (!fscrypt_should_be_processed_by_ice(inode1) && + !fscrypt_should_be_processed_by_ice(inode2)) + return true; + + /* one belongs to ice, the other does not -> not equal */ + if (fscrypt_should_be_processed_by_ice(inode1) ^ + fscrypt_should_be_processed_by_ice(inode2)) + return false; + + key1 = fscrypt_get_ice_encryption_key(inode1); + key2 = fscrypt_get_ice_encryption_key(inode2); + salt1 = fscrypt_get_ice_encryption_salt(inode1); + salt2 = fscrypt_get_ice_encryption_salt(inode2); + + /* key and salt should not be null by this point */ + if (!key1 || !key2 || !salt1 || !salt2 || + (fscrypt_get_ice_encryption_key_size(inode1) != + fscrypt_get_ice_encryption_key_size(inode2)) || + (fscrypt_get_ice_encryption_salt_size(inode1) != + fscrypt_get_ice_encryption_salt_size(inode2))) + return false; + + if ((memcmp(key1, key2, + fscrypt_get_ice_encryption_key_size(inode1)) == 0) && + (memcmp(salt1, salt2, + fscrypt_get_ice_encryption_salt_size(inode1)) == 0)) + return true; + + return false; +} + +void fscrypt_set_ice_dun(const struct inode *inode, struct bio *bio, u64 dun) +{ + if (fscrypt_should_be_processed_by_ice(inode)) + bio->bi_iter.bi_dun = dun; +} +EXPORT_SYMBOL(fscrypt_set_ice_dun); + +void fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip) +{ +#ifdef CONFIG_DM_DEFAULT_KEY + bio->bi_crypt_skip = bi_crypt_skip; +#endif +} +EXPORT_SYMBOL(fscrypt_set_ice_skip); + +/* + * This function will be used for filesystem when deciding to merge bios. + * Basic assumption is, if inline_encryption is set, single bio has to + * guarantee consecutive LBAs as well as ino|pg->index. 
+ */ +bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted, + int bi_crypt_skip) +{ + if (!bio) + return true; + +#ifdef CONFIG_DM_DEFAULT_KEY + if (bi_crypt_skip != bio->bi_crypt_skip) + return false; +#endif + /* if both of them are not encrypted, no further check is needed */ + if (!bio_dun(bio) && !bio_encrypted) + return true; + + /* ICE allows only consecutive iv_key stream. */ + return bio_end_dun(bio) == dun; +} +EXPORT_SYMBOL(fscrypt_mergeable_bio); diff --git a/fs/crypto/fscrypt_ice.h b/fs/crypto/fscrypt_ice.h new file mode 100644 index 000000000000..d448b4289317 --- /dev/null +++ b/fs/crypto/fscrypt_ice.h @@ -0,0 +1,99 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _FSCRYPT_ICE_H +#define _FSCRYPT_ICE_H + +#include +#include "fscrypt_private.h" + +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) +static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode) +{ + if (!inode->i_sb->s_cop) + return 0; + if (!IS_ENCRYPTED((struct inode *)inode)) + return 0; + + return fscrypt_using_hardware_encryption(inode); +} + +static inline int fscrypt_is_ice_capable(const struct super_block *sb) +{ + return blk_queue_inlinecrypt(bdev_get_queue(sb->s_bdev)); +} + +int fscrypt_is_aes_xts_cipher(const struct inode *inode); + +char *fscrypt_get_ice_encryption_key(const struct inode *inode); +char *fscrypt_get_ice_encryption_salt(const struct inode *inode); + +bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1, + const struct inode *inode2); + +size_t fscrypt_get_ice_encryption_key_size(const struct inode *inode); + +size_t fscrypt_get_ice_encryption_salt_size(const struct inode *inode); + +#else +static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode) +{ + return 0; +} + +static inline int fscrypt_is_ice_capable(const struct super_block *sb) +{ + return 0; +} + +static inline char *fscrypt_get_ice_encryption_key(const struct inode *inode) +{ + return NULL; +} + +static inline char *fscrypt_get_ice_encryption_salt(const struct inode *inode) +{ + return NULL; +} + +static inline size_t fscrypt_get_ice_encryption_key_size( + const struct inode *inode) +{ + return 0; +} + +static inline size_t fscrypt_get_ice_encryption_salt_size( + const struct inode *inode) +{ + return 0; +} + +static inline int fscrypt_is_xts_cipher(const struct inode *inode) +{ + return 0; +} + +static inline bool fscrypt_is_ice_encryption_info_equal( + const struct inode *inode1, + const struct inode *inode2) +{ + return 0; +} + +static inline int fscrypt_is_aes_xts_cipher(const struct inode *inode) +{ + return 0; +} + +#endif + +#endif /* _FSCRYPT_ICE_H */ diff --git a/fs/crypto/fscrypt_private.h 
b/fs/crypto/fscrypt_private.h index 94da6bad5f19..d6134d07ccdb 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -13,6 +13,7 @@ #include #include +#include #define CONST_STRLEN(str) (sizeof(str) - 1) @@ -159,10 +160,8 @@ struct fscrypt_symlink_data { * inode is evicted. */ struct fscrypt_info { + /* The actual crypto transform used for encryption and decryption */ - u8 ci_data_mode; - u8 ci_filename_mode; - u8 ci_flags; struct crypto_skcipher *ci_ctfm; /* True if the key should be freed when this fscrypt_info is freed */ @@ -220,6 +219,10 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode, filenames_mode == FSCRYPT_MODE_AES_256_CTS) return true; + if (contents_mode == FSCRYPT_MODE_PRIVATE && + filenames_mode == FSCRYPT_MODE_AES_256_CTS) + return true; + if (contents_mode == FSCRYPT_MODE_ADIANTUM && filenames_mode == FSCRYPT_MODE_ADIANTUM) return true; diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index 0380ae882441..16413b728b2b 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -12,6 +12,7 @@ #include #include "fscrypt_private.h" +#include "fscrypt_ice.h" static struct fscrypt_mode available_modes[] = { [FSCRYPT_MODE_AES_256_XTS] = { @@ -51,6 +52,12 @@ static struct fscrypt_mode available_modes[] = { }, }; +static int fscrypt_data_encryption_mode(struct inode *inode) +{ + return fscrypt_should_be_processed_by_ice(inode) ? 
+ FSCRYPT_MODE_PRIVATE : FSCRYPT_MODE_AES_256_XTS; +} + static struct fscrypt_mode * select_encryption_mode(const union fscrypt_policy *policy, const struct inode *inode) @@ -386,7 +393,7 @@ int fscrypt_get_encryption_info(struct inode *inode) /* Fake up a context for an unencrypted directory */ memset(&ctx, 0, sizeof(ctx)); ctx.version = FSCRYPT_CONTEXT_V1; - ctx.v1.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS; + ctx.v1.contents_encryption_mode = fscrypt_data_encryption_mode(inode); ctx.v1.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS; memset(ctx.v1.master_key_descriptor, 0x42, FSCRYPT_KEY_DESCRIPTOR_SIZE); @@ -480,6 +487,11 @@ void fscrypt_put_encryption_info(struct inode *inode) } EXPORT_SYMBOL(fscrypt_put_encryption_info); +int fscrypt_get_mode_key_size(int mode) +{ + return available_modes[mode].keysize; +} + /** * fscrypt_free_inode - free an inode's fscrypt data requiring RCU delay * diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index 454fb03fc30e..b217970ef392 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -306,10 +306,25 @@ out: int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, const u8 *raw_master_key) { - if (ci->ci_policy.v1.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) + int err; + if (ci->ci_policy.v1.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { return setup_v1_file_key_direct(ci, raw_master_key); - else + } else if(S_ISREG(ci->ci_inode->i_mode) && + (fscrypt_policy_contents_mode(&(ci->ci_policy)) == FSCRYPT_MODE_PRIVATE)) { + /* Inline encryption: no key derivation required because IVs are + * assigned based on iv_sector. 
+ */ + if (ci->ci_mode->keysize != FSCRYPT_MAX_KEY_SIZE) { + err = -EINVAL; + } else { + memcpy(ci->ci_raw_key, raw_master_key, ci->ci_mode->keysize); + err = 0; + } + } + else { return setup_v1_file_key_derived(ci, raw_master_key); + } + return err; } int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci) diff --git a/fs/direct-io.c b/fs/direct-io.c index 30bf22c989de..b88a0a9a66dd 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -37,6 +37,8 @@ #include #include #include +#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION) +#include /* * How many user pages to map in one call to get_user_pages(). This determines @@ -452,6 +454,23 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, sdio->logical_offset_in_bio = sdio->cur_page_fs_offset; } +#ifdef CONFIG_PFK +static bool is_inode_filesystem_type(const struct inode *inode, + const char *fs_type) +{ + if (!inode || !fs_type) + return false; + + if (!inode->i_sb) + return false; + + if (!inode->i_sb->s_type) + return false; + + return (strcmp(inode->i_sb->s_type->name, fs_type) == 0); +} +#endif + /* * In the AIO read case we speculatively dirty the pages before starting IO. 
* During IO completion, any of these pages which happen to have been written @@ -474,7 +493,17 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) bio_set_pages_dirty(bio); dio->bio_disk = bio->bi_disk; +#ifdef CONFIG_PFK + bio->bi_dio_inode = dio->inode; +/* iv sector for security/pfe/pfk_fscrypt.c and f2fs in fs/f2fs/f2fs.h */ +#define PG_DUN_NEW(i,p) \ + (((((u64)(i)->i_ino) & 0xffffffff) << 32) | ((p) & 0xffffffff)) + + if (is_inode_filesystem_type(dio->inode, "f2fs")) + fscrypt_set_ice_dun(dio->inode, bio, PG_DUN_NEW(dio->inode, + (sdio->logical_offset_in_bio >> PAGE_SHIFT))); +#endif if (sdio->submit_io) { sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio); dio->bio_cookie = BLK_QC_T_NONE; @@ -486,6 +515,18 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) sdio->logical_offset_in_bio = 0; } +struct inode *dio_bio_get_inode(struct bio *bio) +{ + struct inode *inode = NULL; + + if (bio == NULL) + return NULL; +#ifdef CONFIG_PFK + inode = bio->bi_dio_inode; +#endif + return inode; +} + /* * Release any resources in case of a failure */ diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index ac2a73c00bfa..6eea530054d2 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -108,10 +108,16 @@ config EXT4_ENCRYPTION files config EXT4_FS_ENCRYPTION - bool + bool "Ext4 FS Encryption" default n depends on EXT4_ENCRYPTION +config EXT4_FS_ICE_ENCRYPTION + bool "Ext4 Encryption with ICE support" + default n + depends on EXT4_FS_ENCRYPTION + depends on PFK + config EXT4_DEBUG bool "EXT4 debugging support" depends on EXT4_FS diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 6c129067c07e..1578a86784a6 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -205,7 +205,10 @@ typedef struct ext4_io_end { ssize_t size; /* size of the extent */ } ext4_io_end_t; +#define EXT4_IO_ENCRYPTED 1 + struct ext4_io_submit { + unsigned int io_flags; struct writeback_control *io_wbc; struct bio *io_bio; ext4_io_end_t *io_end; diff 
--git a/fs/ext4/inode.c b/fs/ext4/inode.c index c134c701a034..2dd1114d5f6c 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1234,10 +1234,12 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh) && (block_start < from || block_end > to)) { - ll_rw_block(REQ_OP_READ, 0, 1, &bh); - *wait_bh++ = bh; decrypt = IS_ENCRYPTED(inode) && - S_ISREG(inode->i_mode); + S_ISREG(inode->i_mode) && + !fscrypt_using_hardware_encryption(inode); + ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0), + 1, &bh); + *wait_bh++ = bh; } } /* @@ -3742,9 +3744,14 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter) get_block_func = ext4_dio_get_block_unwritten_async; dio_flags = DIO_LOCKING; } - ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, - get_block_func, ext4_end_io_dio, NULL, - dio_flags); +#if defined(CONFIG_EXT4_FS_ENCRYPTION) + WARN_ON(IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) + && !fscrypt_using_hardware_encryption(inode)); +#endif + ret = __blockdev_direct_IO(iocb, inode, + inode->i_sb->s_bdev, iter, + get_block_func, + ext4_end_io_dio, NULL, dio_flags); if (ret > 0 && !overwrite && ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN)) { @@ -3856,8 +3863,9 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter) ssize_t ret; int rw = iov_iter_rw(iter); -#ifdef CONFIG_FS_ENCRYPTION - if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) +#if defined(CONFIG_FS_ENCRYPTION) + if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) + && !fscrypt_using_hardware_encryption(inode)) return 0; #endif if (fsverity_active(inode)) @@ -4020,6 +4028,7 @@ static int __ext4_block_zero_page_range(handle_t *handle, struct inode *inode = mapping->host; struct buffer_head *bh; struct page *page; + bool decrypt; int err = 0; page = find_or_create_page(mapping, from >> PAGE_SHIFT, @@ -4062,13 +4071,15 @@ static int 
__ext4_block_zero_page_range(handle_t *handle, if (!buffer_uptodate(bh)) { err = -EIO; - ll_rw_block(REQ_OP_READ, 0, 1, &bh); + decrypt = S_ISREG(inode->i_mode) && + IS_ENCRYPTED(inode) && + !fscrypt_using_hardware_encryption(inode); + ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0), 1, &bh); wait_on_buffer(bh); /* Uhhuh. Read error. Complain and punt. */ if (!buffer_uptodate(bh)) goto unlock; - if (S_ISREG(inode->i_mode) && - IS_ENCRYPTED(inode)) { + if (decrypt) { /* We expect the key to be set. */ BUG_ON(!fscrypt_has_encryption_key(inode)); BUG_ON(blocksize != PAGE_SIZE); diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index ced39d449fd0..4935c146bbc7 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -603,10 +603,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk, return -EOPNOTSUPP; } - if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) { - ext4_msg(orig_inode->i_sb, KERN_ERR, - "Online defrag not supported for encrypted files"); - return -EOPNOTSUPP; + if (!fscrypt_using_hardware_encryption(orig_inode) || + !fscrypt_using_hardware_encryption(donor_inode)) { + if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) { + ext4_msg(orig_inode->i_sb, KERN_ERR, + "Online defrag not supported for encrypted files"); + return -EOPNOTSUPP; + } } /* Protect orig and donor inodes against a truncate */ diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index a6ec98d494b8..5c32a6d30c60 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -344,6 +344,8 @@ void ext4_io_submit(struct ext4_io_submit *io) int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ? 
REQ_SYNC : 0; io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint; + if (io->io_flags & EXT4_IO_ENCRYPTED) + io_op_flags |= REQ_NOENCRYPT; bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags); submit_bio(io->io_bio); } @@ -353,6 +355,7 @@ void ext4_io_submit(struct ext4_io_submit *io) void ext4_io_submit_init(struct ext4_io_submit *io, struct writeback_control *wbc) { + io->io_flags = 0; io->io_wbc = wbc; io->io_bio = NULL; io->io_end = NULL; @@ -480,22 +483,24 @@ int ext4_bio_write_page(struct ext4_io_submit *io, if (io->io_bio) gfp_flags = GFP_NOWAIT | __GFP_NOWARN; retry_encrypt: - bounce_page = fscrypt_encrypt_pagecache_blocks(page, - PAGE_SIZE, 0, gfp_flags); - if (IS_ERR(bounce_page)) { - ret = PTR_ERR(bounce_page); - if (ret == -ENOMEM && (io->io_bio || - wbc->sync_mode == WB_SYNC_ALL)) { - gfp_flags = GFP_NOFS; - if (io->io_bio) - ext4_io_submit(io); - else - gfp_flags |= __GFP_NOFAIL; - congestion_wait(BLK_RW_ASYNC, HZ/50); - goto retry_encrypt; + if (!fscrypt_using_hardware_encryption(inode)) { + bounce_page = fscrypt_encrypt_pagecache_blocks(page, + PAGE_SIZE, 0, gfp_flags); + if (IS_ERR(bounce_page)) { + ret = PTR_ERR(bounce_page); + if (ret == -ENOMEM && (io->io_bio || + wbc->sync_mode == WB_SYNC_ALL)) { + gfp_flags = GFP_NOFS; + if (io->io_bio) + ext4_io_submit(io); + else + gfp_flags |= __GFP_NOFAIL; + congestion_wait(BLK_RW_ASYNC, HZ/50); + goto retry_encrypt; + } + bounce_page = NULL; + goto out; } - bounce_page = NULL; - goto out; } } @@ -503,6 +508,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io, do { if (!buffer_async_write(bh)) continue; + if (bounce_page) + io->io_flags |= EXT4_IO_ENCRYPTED; ret = io_submit_add_bh(io, inode, bounce_page ?: page, bh); if (ret) { /* diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index aefcd712df85..49806a7bcecf 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -412,7 +412,8 @@ int ext4_mpage_readpages(struct address_space *mapping, bio->bi_iter.bi_sector = blocks[0] << (blkbits 
- 9); bio->bi_end_io = mpage_end_io; bio->bi_private = ctx; - bio_set_op_attrs(bio, REQ_OP_READ, 0); + bio_set_op_attrs(bio, REQ_OP_READ, + ctx ? REQ_NOENCRYPT : 0); } length = first_hole << blkbits; diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 2d4c259624b3..ca538c1f5a26 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -700,6 +700,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) struct bio *bio; struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page; + struct inode *inode = fio->page->mapping->host; if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, fio->is_por ? META_POR : (__is_meta_io(fio) ? @@ -712,10 +713,15 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) /* Allocate a new bio */ bio = __bio_alloc(fio, 1); + if (f2fs_may_encrypt_bio(inode, fio)) + fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, fio->page)); + fscrypt_set_ice_skip(bio, fio->encrypted_page ? 1 : 0); + if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { bio_put(bio); return -EFAULT; } + fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0; if (fio->io_wbc && !is_read_io(fio->op)) wbc_account_io(fio->io_wbc, page, PAGE_SIZE); @@ -896,6 +902,9 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio) struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page; struct inode *inode; + bool bio_encrypted; + int bi_crypt_skip; + u64 dun; if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC)) @@ -905,14 +914,26 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio) f2fs_trace_ios(fio, 0); inode = fio->page->mapping->host; + dun = PG_DUN(inode, fio->page); + bi_crypt_skip = fio->encrypted_page ? 1 : 0; + bio_encrypted = f2fs_may_encrypt_bio(inode, fio); + fio->op_flags |= fio->encrypted_page ? 
REQ_NOENCRYPT : 0; if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block, fio->new_blkaddr)) f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL); + /* ICE support */ + if (bio && !fscrypt_mergeable_bio(bio, dun, + bio_encrypted, bi_crypt_skip)) { + f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL); + } alloc_new: if (!bio) { bio = __bio_alloc(fio, BIO_MAX_PAGES); bio_set_op_attrs(bio, fio->op, fio->op_flags); + if (bio_encrypted) + fscrypt_set_ice_dun(inode, bio, dun); + fscrypt_set_ice_skip(bio, bi_crypt_skip); add_bio_entry(fio->sbi, bio, page, fio->temp); } else { if (add_ipu_page(fio->sbi, &bio, page)) @@ -936,6 +957,10 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio) enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp; struct page *bio_page; + struct inode *inode; + bool bio_encrypted; + int bi_crypt_skip; + u64 dun; f2fs_bug_on(sbi, is_read_io(fio->op)); @@ -962,6 +987,12 @@ next: else bio_page = fio->page; + inode = fio->page->mapping->host; + dun = PG_DUN(inode, fio->page); + bi_crypt_skip = fio->encrypted_page ? 1 : 0; + bio_encrypted = f2fs_may_encrypt_bio(inode, fio); + fio->op_flags |= fio->encrypted_page ? 
REQ_NOENCRYPT : 0; + /* set submitted = true as a return value */ fio->submitted = true; @@ -970,6 +1001,11 @@ next: if (io->bio && !io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio, fio->new_blkaddr)) __submit_merged_bio(io); + + /* ICE support */ + if (!fscrypt_mergeable_bio(io->bio, dun, bio_encrypted, bi_crypt_skip)) + __submit_merged_bio(io); + alloc_new: if (io->bio == NULL) { if (F2FS_IO_ALIGNED(sbi) && @@ -980,6 +1016,9 @@ alloc_new: goto skip; } io->bio = __bio_alloc(fio, BIO_MAX_PAGES); + if (bio_encrypted) + fscrypt_set_ice_dun(inode, io->bio, dun); + fscrypt_set_ice_skip(io->bio, bi_crypt_skip); io->fio = *fio; } @@ -1026,9 +1065,13 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, return ERR_PTR(-ENOMEM); f2fs_target_device(sbi, blkaddr, bio); bio->bi_end_io = f2fs_read_end_io; - bio_set_op_attrs(bio, REQ_OP_READ, op_flag); + bio_set_op_attrs(bio, REQ_OP_READ, + (IS_ENCRYPTED(inode) ? + REQ_NOENCRYPT : + op_flag)); - if (f2fs_encrypted_file(inode)) + if (f2fs_encrypted_file(inode) && + !fscrypt_using_hardware_encryption(inode)) post_read_steps |= 1 << STEP_DECRYPT; if (f2fs_compressed_file(inode)) post_read_steps |= 1 << STEP_DECOMPRESS; @@ -1065,6 +1108,9 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page, if (IS_ERR(bio)) return PTR_ERR(bio); + if (f2fs_may_encrypt_bio(inode, NULL)) + fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, page)); + /* wait for GCed page writeback via META_MAPPING */ f2fs_wait_on_block_writeback(inode, blkaddr); @@ -1991,6 +2037,8 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page, sector_t last_block_in_file; sector_t block_nr; int ret = 0; + bool bio_encrypted; + u64 dun; block_in_file = (sector_t)page_index(page); last_block = block_in_file + nr_pages; @@ -2061,6 +2109,13 @@ submit_and_realloc: bio = NULL; } + dun = PG_DUN(inode, page); + bio_encrypted = f2fs_may_encrypt_bio(inode, NULL); + if (!fscrypt_mergeable_bio(bio, dun, 
bio_encrypted, 0)) { + __submit_bio(F2FS_I_SB(inode), bio, DATA); + bio = NULL; + } + if (bio == NULL) { bio = f2fs_grab_read_bio(inode, block_nr, nr_pages, is_readahead ? REQ_RAHEAD : 0, page->index, @@ -2070,7 +2125,10 @@ submit_and_realloc: bio = NULL; goto out; } + if (bio_encrypted) + fscrypt_set_ice_dun(inode, bio, dun); } + /* * If the page is under writeback, we need to wait for * its completion to see the correct decrypted data. @@ -2407,6 +2465,9 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio) f2fs_wait_on_block_writeback(inode, fio->old_blkaddr); retry_encrypt: + if (fscrypt_using_hardware_encryption(inode)) + return 0; + fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0, gfp_flags); if (IS_ERR(fio->encrypted_page)) { diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 1e2c9a59393a..a12a09565dc6 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -4035,7 +4035,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int rw = iov_iter_rw(iter); - if (f2fs_encrypted_file(inode)) + if (f2fs_encrypted_file(inode) && + !fscrypt_using_hardware_encryption(inode)) return true; if (f2fs_is_multi_device(sbi)) return true; @@ -4060,6 +4061,16 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, return false; } +static inline bool f2fs_may_encrypt_bio(struct inode *inode, + struct f2fs_io_info *fio) +{ + if (fio && (fio->type != DATA || fio->encrypted_page)) + return false; + + return (f2fs_encrypted_file(inode) && + fscrypt_using_hardware_encryption(inode)); +} + #ifdef CONFIG_F2FS_FAULT_INJECTION extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate, unsigned int type); diff --git a/fs/namei.c b/fs/namei.c index 1c626f56d21d..6c933d1cc941 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3039,6 +3039,11 @@ int vfs_create2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, if (error) return error; error = dir->i_op->create(dir, dentry, 
mode, want_excl); + if (error) + return error; + error = security_inode_post_create(dir, dentry, mode); + if (error) + return error; if (!error) fsnotify_create(dir, dentry); return error; @@ -3871,6 +3876,11 @@ int vfs_mknod2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, u return error; error = dir->i_op->mknod(dir, dentry, mode, dev); + if (error) + return error; + error = security_inode_post_create(dir, dentry, mode); + if (error) + return error; if (!error) fsnotify_create(dir, dentry); return error; diff --git a/include/linux/bio.h b/include/linux/bio.h index e260f000b9ac..bcdbd29052e0 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -69,6 +69,9 @@ ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len) #define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9) #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio))) +#define bio_dun(bio) ((bio)->bi_iter.bi_dun) +#define bio_duns(bio) (bio_sectors(bio) >> 3) /* 4KB unit */ +#define bio_end_dun(bio) (bio_dun(bio) + bio_duns(bio)) /* * Return the data direction, READ or WRITE. 
@@ -178,6 +181,11 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, { iter->bi_sector += bytes >> 9; +#ifdef CONFIG_PFK + if (iter->bi_dun) + iter->bi_dun += bytes >> 12; +#endif + if (bio_no_advance_iter(bio)) { iter->bi_size -= bytes; iter->bi_done += bytes; diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 415811f0b24a..d24227285a44 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -100,6 +100,13 @@ struct bio { struct bio_integrity_payload *bi_integrity; /* data integrity */ #endif }; +#ifdef CONFIG_PFK + /* Encryption key to use (NULL if none) */ + const struct blk_encryption_key *bi_crypt_key; +#endif +#ifdef CONFIG_DM_DEFAULT_KEY + int bi_crypt_skip; +#endif unsigned short bi_vcnt; /* how many bio_vec's */ @@ -114,7 +121,9 @@ struct bio { struct bio_vec *bi_io_vec; /* the actual vec list */ struct bio_set *bi_pool; - +#ifdef CONFIG_PFK + struct inode *bi_dio_inode; +#endif /* * We can inline a number of vecs at the end of the bio, to avoid * double allocations for a small number of bio_vecs. 
This member @@ -239,6 +248,13 @@ enum req_flag_bits { __REQ_URGENT, /* urgent request */ __REQ_NOWAIT, /* Don't wait if request will block */ + + /* Android specific flags */ + __REQ_NOENCRYPT, /* + * ok to not encrypt (already encrypted at fs + * level) + */ + __REQ_NR_BITS, /* stops here */ }; @@ -256,6 +272,7 @@ enum req_flag_bits { #define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) #define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) +#define REQ_NOENCRYPT (1ULL << __REQ_NOENCRYPT) #define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) #define REQ_NOWAIT (1ULL << __REQ_NOWAIT) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 835a3cf3b47b..1673d238b60f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -154,6 +154,7 @@ struct request { unsigned int __data_len; /* total data len */ int tag; sector_t __sector; /* sector cursor */ + u64 __dun; /* dun for UFS */ struct bio *bio; struct bio *biotail; @@ -652,6 +653,7 @@ struct request_queue { #define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */ #define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */ #define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */ +#define QUEUE_FLAG_INLINECRYPT 29 /* inline encryption support */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ @@ -751,6 +753,8 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) #define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) #define blk_queue_scsi_passthrough(q) \ test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags) +#define blk_queue_inlinecrypt(q) \ + test_bit(QUEUE_FLAG_INLINECRYPT, &(q)->queue_flags) #define blk_noretry_request(rq) \ ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ @@ -1031,6 +1035,11 @@ static inline sector_t blk_rq_pos(const struct request *rq) return rq->__sector; } +static inline sector_t blk_rq_dun(const struct request *rq) 
+{ + return rq->__dun; +} + static inline unsigned int blk_rq_bytes(const struct request *rq) { return rq->__data_len; diff --git a/include/linux/bvec.h b/include/linux/bvec.h index ec8a4d7af6bd..711236dba71d 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -44,6 +44,9 @@ struct bvec_iter { unsigned int bi_bvec_done; /* number of bytes completed in current bvec */ +#ifdef CONFIG_PFK + u64 bi_dun; /* DUN setting for bio */ +#endif }; /* diff --git a/include/linux/fs.h b/include/linux/fs.h index 1d8a53a6211a..236c4d59b9ae 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3071,6 +3071,8 @@ static inline void inode_dio_end(struct inode *inode) wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); } +struct inode *dio_bio_get_inode(struct bio *bio); + extern void inode_set_flags(struct inode *inode, unsigned int flags, unsigned int mask); diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 5977a6ced502..e65ce4237f52 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -20,6 +20,10 @@ #define FS_CRYPTO_BLOCK_SIZE 16 +/* iv sector for security/pfe/pfk_fscrypt.c and f2fs */ +#define PG_DUN(i, p) \ + (((((u64)(i)->i_ino) & 0xffffffff) << 32) | ((p)->index & 0xffffffff)) + struct fscrypt_info; struct fscrypt_str { @@ -741,6 +745,33 @@ static inline int fscrypt_encrypt_symlink(struct inode *inode, return 0; } +/* fscrypt_ice.c */ +#ifdef CONFIG_PFK +extern int fscrypt_using_hardware_encryption(const struct inode *inode); +extern void fscrypt_set_ice_dun(const struct inode *inode, + struct bio *bio, u64 dun); +extern void fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip); +extern bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted, + int bi_crypt_skip); +#else +static inline int fscrypt_using_hardware_encryption(const struct inode *inode) +{ + return 0; +} + +static inline void fscrypt_set_ice_dun(const struct inode *inode, + struct bio *bio, u64 dun){} + +static inline void 
fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip) +{} + +static inline bool fscrypt_mergeable_bio(struct bio *bio, + u64 dun, bool bio_encrypted, int bi_crypt_skip) +{ + return true; +} +#endif + /* If *pagep is a bounce page, free it and set *pagep to the pagecache page */ static inline void fscrypt_finalize_bounce_page(struct page **pagep) { diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 7e9f59aeadb6..f40789bd5c15 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1475,6 +1475,8 @@ union security_list_options { size_t *len); int (*inode_create)(struct inode *dir, struct dentry *dentry, umode_t mode); + int (*inode_post_create)(struct inode *dir, struct dentry *dentry, + umode_t mode); int (*inode_link)(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry); int (*inode_unlink)(struct inode *dir, struct dentry *dentry); @@ -1788,6 +1790,7 @@ struct security_hook_heads { struct list_head inode_free_security; struct list_head inode_init_security; struct list_head inode_create; + struct list_head inode_post_create; struct list_head inode_link; struct list_head inode_unlink; struct list_head inode_symlink; diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index aded5a8a733d..ee92081416f3 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -12,7 +12,6 @@ #include #include #include -#include struct mmc_data; struct mmc_request; @@ -170,8 +169,8 @@ struct mmc_request { void (*recovery_notifier)(struct mmc_request *); struct mmc_host *host; struct mmc_cmdq_req *cmdq_req; + struct request *req; - struct request *req; /* Allow other commands during this ongoing data transfer or busy wait */ bool cap_cmd_during_tfr; ktime_t io_start; diff --git a/include/linux/pfk.h b/include/linux/pfk.h new file mode 100644 index 000000000000..bba8fc2681b8 --- /dev/null +++ b/include/linux/pfk.h @@ -0,0 +1,79 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef PFK_H_ +#define PFK_H_ + +#include + +struct ice_crypto_setting; + +#ifdef CONFIG_PFK + +/* + * Default key for inline encryption. + * + * For now only AES-256-XTS is supported, so this is a fixed length. But if + * ever needed, this should be made variable-length with a 'mode' and 'size'. + * (Remember to update pfk_allow_merge_bio() when doing so!) + */ +#define BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS 64 + +struct blk_encryption_key { + u8 raw[BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS]; +}; + +int pfk_load_key_start(const struct bio *bio, + struct ice_crypto_setting *ice_setting, + bool *is_pfe, bool async); +int pfk_load_key_end(const struct bio *bio, bool *is_pfe); +int pfk_remove_key(const unsigned char *key, size_t key_size); +int pfk_fbe_clear_key(const unsigned char *key, size_t key_size, + const unsigned char *salt, size_t salt_size); +bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2); +void pfk_clear_on_reset(void); + +#else +static inline int pfk_load_key_start(const struct bio *bio, + struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async) +{ + return -ENODEV; +} + +static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe) +{ + return -ENODEV; +} + +static inline int pfk_remove_key(const unsigned char *key, size_t key_size) +{ + return -ENODEV; +} + +static inline bool pfk_allow_merge_bio(const struct bio *bio1, + const struct bio *bio2) +{ + return true; +} + +static inline int pfk_fbe_clear_key(const unsigned char *key, size_t key_size, + 
const unsigned char *salt, size_t salt_size) +{ + return -ENODEV; +} + +static inline void pfk_clear_on_reset(void) +{} + +#endif /* CONFIG_PFK */ + +#endif /* PFK_H */ diff --git a/include/linux/security.h b/include/linux/security.h index 666c75c2269c..ecd3b0dd9c12 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -31,6 +31,7 @@ #include #include #include +#include struct linux_binprm; struct cred; @@ -270,6 +271,8 @@ int security_old_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, const char **name, void **value, size_t *len); int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode); +int security_inode_post_create(struct inode *dir, struct dentry *dentry, + umode_t mode); int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry); int security_inode_unlink(struct inode *dir, struct dentry *dentry); @@ -664,6 +667,13 @@ static inline int security_inode_create(struct inode *dir, return 0; } +static inline int security_inode_post_create(struct inode *dir, + struct dentry *dentry, + umode_t mode) +{ + return 0; +} + static inline int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 203fec0bd88c..0472647a9cf7 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -666,6 +666,9 @@ struct Scsi_Host { /* The controller does not support WRITE SAME */ unsigned no_write_same:1; + /* Inline encryption support? 
*/ + unsigned inlinecrypt_support:1; + unsigned use_blk_mq:1; unsigned use_cmd_list:1; diff --git a/security/Kconfig b/security/Kconfig index 8b6c5e9528e0..daaf13e06d83 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -6,6 +6,10 @@ menu "Security options" source security/keys/Kconfig +if ARCH_QCOM +source security/pfe/Kconfig +endif + config SECURITY_DMESG_RESTRICT bool "Restrict unprivileged access to the kernel syslog" default n diff --git a/security/Makefile b/security/Makefile index 4d2d3782ddef..47bffaa3f5f8 100644 --- a/security/Makefile +++ b/security/Makefile @@ -10,6 +10,7 @@ subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor subdir-$(CONFIG_SECURITY_YAMA) += yama subdir-$(CONFIG_SECURITY_LOADPIN) += loadpin +subdir-$(CONFIG_ARCH_QCOM) += pfe # always enable default capabilities obj-y += commoncap.o @@ -26,6 +27,7 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/ obj-$(CONFIG_SECURITY_YAMA) += yama/ obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/ obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o +obj-$(CONFIG_ARCH_QCOM) += pfe/ # Object integrity file lists subdir-$(CONFIG_INTEGRITY) += integrity diff --git a/security/pfe/Kconfig b/security/pfe/Kconfig new file mode 100644 index 000000000000..923fe1cd9987 --- /dev/null +++ b/security/pfe/Kconfig @@ -0,0 +1,50 @@ +menu "Qualcomm Technologies, Inc Per File Encryption security device drivers" + depends on ARCH_QCOM + +config PFT + bool "Per-File-Tagger driver" + depends on SECURITY + default n + help + This driver is used for tagging enterprise files. + It is part of the Per-File-Encryption (PFE) feature. + The driver is tagging files when created by + registered application. + Tagged files are encrypted using the dm-req-crypt driver. + +config PFK + bool "Per-File-Key driver" + depends on SECURITY + depends on SECURITY_SELINUX + default n + help + This driver is used for storing eCryptfs information + in file node. 
+ This is part of eCryptfs hardware enhanced solution + provided by Qualcomm Technologies, Inc. + Information is used when file is encrypted later using + ICE or dm crypto engine + +config PFK_WRAPPED_KEY_SUPPORTED + bool "Per-File-Key driver with wrapped key support" + depends on SECURITY + depends on SECURITY_SELINUX + depends on QSEECOM + depends on PFK + default n + help + Adds wrapped key support in PFK driver. Instead of setting + the key directly in ICE, it unwraps the key and sets the key + in ICE. + +config PFK_VIRTUALIZED + bool "Per-File-Key driver virtualized version" + depends on SECURITY + depends on SECURITY_SELINUX + depends on QSEECOM + depends on PFK + depends on MSM_HAB + help + Makes the driver to use the hypervisor back end for ICE HW + operation virtualization instead of calling directly to TZ. +endmenu diff --git a/security/pfe/Makefile b/security/pfe/Makefile new file mode 100644 index 000000000000..c95f02a46bba --- /dev/null +++ b/security/pfe/Makefile @@ -0,0 +1,15 @@ +# +# Makefile for the MSM specific security device drivers. +# + +ccflags-y += -Isecurity/selinux -Isecurity/selinux/include +ccflags-y += -Ifs/crypto +ccflags-y += -Idrivers/misc + +obj-$(CONFIG_PFT) += pft.o +obj-$(CONFIG_PFK) += pfk.o pfk_kc.o pfk_ext4.o pfk_f2fs.o +ifdef CONFIG_PFK_VIRTUALIZED +obj-$(CONFIG_PFK_VIRTUALIZED) += pfk_ice_virt.o +else +obj-$(CONFIG_PFK) += pfk_ice.o +endif diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c new file mode 100644 index 000000000000..ae681487248c --- /dev/null +++ b/security/pfe/pfk.c @@ -0,0 +1,570 @@ +/* + * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Per-File-Key (PFK). + * + * This driver is responsible for overall management of various + * Per File Encryption variants that work on top of or as part of different + * file systems. + * + * The driver has the following purpose : + * 1) Define priorities between PFE's if more than one is enabled + * 2) Extract key information from inode + * 3) Load and manage various keys in ICE HW engine + * 4) It should be invoked from various layers in FS/BLOCK/STORAGE DRIVER + * that need to take decision on HW encryption management of the data + * Some examples: + * BLOCK LAYER: when it takes decision on whether 2 chunks can be united + * to one encryption / decryption request sent to the HW + * + * UFS DRIVER: when it need to configure ICE HW with a particular key slot + * to be used for encryption / decryption + * + * PFE variants can differ on particular way of storing the cryptographic info + * inside inode, actions to be taken upon file operations, etc., but the common + * properties are described above + * + */ + + +/* Uncomment the line below to enable debug messages */ +/* #define DEBUG 1 */ +#define pr_fmt(fmt) "pfk [%s]: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "pfk_kc.h" +#include "objsec.h" +#include "pfk_ice.h" +#include "pfk_ext4.h" +#include "pfk_f2fs.h" +#include "pfk_internal.h" + +static bool pfk_ready; + + +/* might be replaced by a table when more than one cipher is supported */ +#define PFK_SUPPORTED_KEY_SIZE 32 +#define PFK_SUPPORTED_SALT_SIZE 32 + +/* Various PFE types and function tables to support each one of them */ +enum pfe_type {EXT4_CRYPT_PFE, F2FS_CRYPT_PFE, INVALID_PFE}; + +typedef int 
(*pfk_parse_inode_type)(const struct bio *bio, + const struct inode *inode, + struct pfk_key_info *key_info, + enum ice_cryto_algo_mode *algo, + bool *is_pfe); + +typedef bool (*pfk_allow_merge_bio_type)(const struct bio *bio1, + const struct bio *bio2, const struct inode *inode1, + const struct inode *inode2); + +static const pfk_parse_inode_type pfk_parse_inode_ftable[] = { + /* EXT4_CRYPT_PFE */ &pfk_ext4_parse_inode, + /* F2FS_CRYPT_PFE */ &pfk_f2fs_parse_inode, +}; + +static const pfk_allow_merge_bio_type pfk_allow_merge_bio_ftable[] = { + /* EXT4_CRYPT_PFE */ &pfk_ext4_allow_merge_bio, + /* F2FS_CRYPT_PFE */ &pfk_f2fs_allow_merge_bio, +}; + +static void __exit pfk_exit(void) +{ + pfk_ready = false; + pfk_ext4_deinit(); + pfk_f2fs_deinit(); + pfk_kc_deinit(); +} + +static int __init pfk_init(void) +{ + + int ret = 0; + + ret = pfk_ext4_init(); + if (ret != 0) + goto fail; + + ret = pfk_f2fs_init(); + if (ret != 0) + goto fail; + + ret = pfk_kc_init(true); + if (ret != 0 && ret != -EAGAIN) { + pr_err("could init pfk key cache, error %d\n", ret); + pfk_ext4_deinit(); + pfk_f2fs_deinit(); + goto fail; + } + + pfk_ready = true; + pr_info("Driver initialized successfully\n"); + + return 0; + +fail: + pr_err("Failed to init driver\n"); + return -ENODEV; +} + +/* + * If more than one type is supported simultaneously, this function will also + * set the priority between them + */ +static enum pfe_type pfk_get_pfe_type(const struct inode *inode) +{ + if (!inode) + return INVALID_PFE; + + if (pfk_is_ext4_type(inode)) + return EXT4_CRYPT_PFE; + + if (pfk_is_f2fs_type(inode)) + return F2FS_CRYPT_PFE; + + return INVALID_PFE; +} + +/** + * inode_to_filename() - get the filename from inode pointer. + * @inode: inode pointer + * + * it is used for debug prints. + * + * Return: filename string or "unknown". 
+ */ +char *inode_to_filename(const struct inode *inode) +{ + struct dentry *dentry = NULL; + char *filename = NULL; + + if (!inode) + return "NULL"; + + if (hlist_empty(&inode->i_dentry)) + return "unknown"; + + dentry = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); + filename = dentry->d_iname; + + return filename; +} + +/** + * pfk_is_ready() - driver is initialized and ready. + * + * Return: true if the driver is ready. + */ +static inline bool pfk_is_ready(void) +{ + return pfk_ready; +} + +/** + * pfk_bio_get_inode() - get the inode from a bio. + * @bio: Pointer to BIO structure. + * + * Walk the bio struct links to get the inode. + * Please note, that in general bio may consist of several pages from + * several files, but in our case we always assume that all pages come + * from the same file, since our logic ensures it. That is why we only + * walk through the first page to look for inode. + * + * Return: pointer to the inode struct if successful, or NULL otherwise. 
+ * + */ +static struct inode *pfk_bio_get_inode(const struct bio *bio) +{ + struct address_space *mapping = NULL; + + if (!bio) + return NULL; + if (!bio_has_data((struct bio *)bio)) + return NULL; + if (!bio->bi_io_vec) + return NULL; + if (!bio->bi_io_vec->bv_page) + return NULL; + + if (PageAnon(bio->bi_io_vec->bv_page)) { + struct inode *inode; + + /* Using direct-io (O_DIRECT) without page cache */ + inode = dio_bio_get_inode((struct bio *)bio); + pr_debug("inode on direct-io, inode = 0x%pK.\n", inode); + + return inode; + } + + mapping = page_mapping(bio->bi_io_vec->bv_page); + if (!mapping) + return NULL; + + return mapping->host; +} + +/** + * pfk_key_size_to_key_type() - translate key size to key size enum + * @key_size: key size in bytes + * @key_size_type: pointer to store the output enum (can be null) + * + * return 0 in case of success, error otherwise (i.e not supported key size) + */ +int pfk_key_size_to_key_type(size_t key_size, + enum ice_crpto_key_size *key_size_type) +{ + /* + * currently only 32 bit key size is supported + * in the future, table with supported key sizes might + * be introduced + */ + + if (key_size != PFK_SUPPORTED_KEY_SIZE) { + pr_err("not supported key size %zu\n", key_size); + return -EINVAL; + } + + if (key_size_type) + *key_size_type = ICE_CRYPTO_KEY_SIZE_256; + + return 0; +} + +/* + * Retrieves filesystem type from inode's superblock + */ +bool pfe_is_inode_filesystem_type(const struct inode *inode, + const char *fs_type) +{ + if (!inode || !fs_type) + return false; + + if (!inode->i_sb) + return false; + + if (!inode->i_sb->s_type) + return false; + + return (strcmp(inode->i_sb->s_type->name, fs_type) == 0); +} + +/** + * pfk_get_key_for_bio() - get the encryption key to be used for a bio + * + * @bio: pointer to the BIO + * @key_info: pointer to the key information which will be filled in + * @algo_mode: optional pointer to the algorithm identifier which will be set + * @is_pfe: will be set to false if the BIO should 
be left unencrypted + * + * Return: 0 if a key is being used, otherwise a -errno value + */ +static int pfk_get_key_for_bio(const struct bio *bio, + struct pfk_key_info *key_info, + enum ice_cryto_algo_mode *algo_mode, + bool *is_pfe, unsigned int *data_unit) +{ + const struct inode *inode; + enum pfe_type which_pfe; + const struct blk_encryption_key *key = NULL; + char *s_type = NULL; + + inode = pfk_bio_get_inode(bio); + which_pfe = pfk_get_pfe_type(inode); + s_type = (char *)pfk_kc_get_storage_type(); + + /* + * Update dun based on storage type. + * 512 byte dun - For ext4 emmc + * 4K dun - For ext4 ufs, f2fs ufs and f2fs emmc + */ + + if (data_unit && bio) { + if (!bio_dun(bio) && !memcmp(s_type, "sdcc", strlen("sdcc"))) + *data_unit = 1 << ICE_CRYPTO_DATA_UNIT_512_B; + else + *data_unit = 1 << ICE_CRYPTO_DATA_UNIT_4_KB; + } + + if (which_pfe != INVALID_PFE) { + /* Encrypted file; override ->bi_crypt_key */ + pr_debug("parsing inode %lu with PFE type %d\n", + inode->i_ino, which_pfe); + return (*(pfk_parse_inode_ftable[which_pfe])) + (bio, inode, key_info, algo_mode, is_pfe); + } + + /* + * bio is not for an encrypted file. Use ->bi_crypt_key if it was set. + * Otherwise, don't encrypt/decrypt the bio. + */ +#ifdef CONFIG_DM_DEFAULT_KEY + key = bio->bi_crypt_key; +#endif + if (!key) { + *is_pfe = false; + return -EINVAL; + } + + /* Note: the "salt" is really just the second half of the XTS key. 
*/ + BUILD_BUG_ON(sizeof(key->raw) != + PFK_SUPPORTED_KEY_SIZE + PFK_SUPPORTED_SALT_SIZE); + key_info->key = &key->raw[0]; + key_info->key_size = PFK_SUPPORTED_KEY_SIZE; + key_info->salt = &key->raw[PFK_SUPPORTED_KEY_SIZE]; + key_info->salt_size = PFK_SUPPORTED_SALT_SIZE; + if (algo_mode) + *algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS; + return 0; +} + + +/** + * pfk_load_key_start() - loads PFE encryption key to the ICE + * Can also be invoked from non + * PFE context, in this case it + * is not relevant and is_pfe + * flag is set to false + * + * @bio: Pointer to the BIO structure + * @ice_setting: Pointer to ice setting structure that will be filled with + * ice configuration values, including the index to which the key was loaded + * @is_pfe: will be false if inode is not relevant to PFE, in such a case + * it should be treated as non PFE by the block layer + * + * Returns the index where the key is stored in encryption hw and additional + * information that will be used later for configuration of the encryption hw. 
+ * + * Must be followed by pfk_load_key_end when key is no longer used by ice + * + */ +int pfk_load_key_start(const struct bio *bio, + struct ice_crypto_setting *ice_setting, bool *is_pfe, + bool async) +{ + int ret = 0; + struct pfk_key_info key_info = {NULL, NULL, 0, 0}; + enum ice_cryto_algo_mode algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS; + enum ice_crpto_key_size key_size_type = 0; + unsigned int data_unit = 1 << ICE_CRYPTO_DATA_UNIT_512_B; + u32 key_index = 0; + + if (!is_pfe) { + pr_err("is_pfe is NULL\n"); + return -EINVAL; + } + + /* + * only a few errors below can indicate that + * this function was not invoked within PFE context, + * otherwise we will consider it PFE + */ + *is_pfe = true; + + if (!pfk_is_ready()) + return -ENODEV; + + if (!ice_setting) { + pr_err("ice setting is NULL\n"); + return -EINVAL; + } + + ret = pfk_get_key_for_bio(bio, &key_info, &algo_mode, is_pfe, + &data_unit); + + if (ret != 0) + return ret; + + ret = pfk_key_size_to_key_type(key_info.key_size, &key_size_type); + if (ret != 0) + return ret; + + ret = pfk_kc_load_key_start(key_info.key, key_info.key_size, + key_info.salt, key_info.salt_size, &key_index, async, + data_unit); + if (ret) { + if (ret != -EBUSY && ret != -EAGAIN) + pr_err("start: could not load key into pfk key cache, error %d\n", + ret); + + return ret; + } + + ice_setting->key_size = key_size_type; + ice_setting->algo_mode = algo_mode; + /* hardcoded for now */ + ice_setting->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY; + ice_setting->key_index = key_index; + + pr_debug("loaded key for file %s key_index %d\n", + inode_to_filename(pfk_bio_get_inode(bio)), key_index); + + return 0; +} + +/** + * pfk_load_key_end() - marks the PFE key as no longer used by ICE + * Can also be invoked from non + * PFE context, in this case it is not + * relevant and is_pfe flag is + * set to false + * + * @bio: Pointer to the BIO structure + * @is_pfe: Pointer to is_pfe flag, which will be true if function was invoked + * from PFE context 
+ */ +int pfk_load_key_end(const struct bio *bio, bool *is_pfe) +{ + int ret = 0; + struct pfk_key_info key_info = {NULL, NULL, 0, 0}; + + if (!is_pfe) { + pr_err("is_pfe is NULL\n"); + return -EINVAL; + } + + /* only a few errors below can indicate that + * this function was not invoked within PFE context, + * otherwise we will consider it PFE + */ + *is_pfe = true; + + if (!pfk_is_ready()) + return -ENODEV; + + ret = pfk_get_key_for_bio(bio, &key_info, NULL, is_pfe, NULL); + if (ret != 0) + return ret; + + pfk_kc_load_key_end(key_info.key, key_info.key_size, + key_info.salt, key_info.salt_size); + + pr_debug("finished using key for file %s\n", + inode_to_filename(pfk_bio_get_inode(bio))); + + return 0; +} + +/** + * pfk_allow_merge_bio() - Check if 2 BIOs can be merged. + * @bio1: Pointer to first BIO structure. + * @bio2: Pointer to second BIO structure. + * + * Prevent merging of BIOs from encrypted and non-encrypted + * files, or files encrypted with different key. + * Also prevent non encrypted and encrypted data from the same file + * to be merged (ecryptfs header if stored inside file should be non + * encrypted) + * This API is called by the file system block layer. + * + * Return: true if the BIOs allowed to be merged, false + * otherwise. 
+ */
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2)
+{
+	const struct blk_encryption_key *key1 = NULL;
+	const struct blk_encryption_key *key2 = NULL;
+	const struct inode *inode1;
+	const struct inode *inode2;
+	enum pfe_type which_pfe1;
+	enum pfe_type which_pfe2;
+
+	if (!pfk_is_ready())
+		return false;
+
+	if (!bio1 || !bio2)
+		return false;
+
+	if (bio1 == bio2)
+		return true;
+
+	/*
+	 * bi_crypt_key exists only when CONFIG_DM_DEFAULT_KEY is enabled;
+	 * read it only after bio1/bio2 are known to be non-NULL.
+	 */
+#ifdef CONFIG_DM_DEFAULT_KEY
+	key1 = bio1->bi_crypt_key;
+	key2 = bio2->bi_crypt_key;
+#endif
+
+	inode1 = pfk_bio_get_inode(bio1);
+	inode2 = pfk_bio_get_inode(bio2);
+
+	which_pfe1 = pfk_get_pfe_type(inode1);
+	which_pfe2 = pfk_get_pfe_type(inode2);
+
+	/*
+	 * If one bio is for an encrypted file and the other is for a different
+	 * type of encrypted file or for blocks that are not part of an
+	 * encrypted file, do not merge.
+	 */
+	if (which_pfe1 != which_pfe2)
+		return false;
+
+	if (which_pfe1 != INVALID_PFE) {
+		/* Both bios are for the same type of encrypted file. */
+		return (*(pfk_allow_merge_bio_ftable[which_pfe1]))(bio1, bio2,
+				inode1, inode2);
+	}
+
+	/*
+	 * Neither bio is for an encrypted file. Merge only if the default keys
+	 * are the same (or both are NULL).
+	 */
+	return key1 == key2 ||
+		(key1 && key2 &&
+		 !crypto_memneq(key1->raw, key2->raw, sizeof(key1->raw)));
+}
+
+int pfk_fbe_clear_key(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size)
+{
+	int ret = -EINVAL;
+
+	if (!key || !salt)
+		return ret;
+
+	ret = pfk_kc_remove_key_with_salt(key, key_size, salt, salt_size);
+	if (ret)
+		pr_err("Clear key error: ret value %d\n", ret);
+	return ret;
+}
+
+/**
+ * Flush key table on storage core reset. During core reset key configuration
+ * is lost in ICE.
We need to flash the cache, so that the keys will be + * reconfigured again for every subsequent transaction + */ +void pfk_clear_on_reset(void) +{ + if (!pfk_is_ready()) + return; + + pfk_kc_clear_on_reset(); +} + +module_init(pfk_init); +module_exit(pfk_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Per-File-Key driver"); diff --git a/security/pfe/pfk_ext4.c b/security/pfe/pfk_ext4.c new file mode 100644 index 000000000000..0eb122565ecc --- /dev/null +++ b/security/pfe/pfk_ext4.c @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Per-File-Key (PFK) - EXT4 + * + * This driver is used for working with EXT4 crypt extension + * + * The key information is stored in node by EXT4 when file is first opened + * and will be later accessed by Block Device Driver to actually load the key + * to encryption hw. + * + * PFK exposes API's for loading and removing keys from encryption hw + * and also API to determine whether 2 adjacent blocks can be agregated by + * Block Layer in one request to encryption hw. 
+ *
+ */
+
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+#define pr_fmt(fmt) "pfk_ext4 [%s]: " fmt, __func__
+
+#include
+#include
+#include
+#include
+
+#include "fscrypt_ice.h"
+#include "pfk_ext4.h"
+//#include "ext4_ice.h"
+
+static bool pfk_ext4_ready;
+
+/*
+ * pfk_ext4_deinit() - Deinit function, should be invoked by upper PFK layer
+ */
+void pfk_ext4_deinit(void)
+{
+	pfk_ext4_ready = false;
+}
+
+/*
+ * pfk_ext4_init() - Init function, should be invoked by upper PFK layer
+ */
+int __init pfk_ext4_init(void)
+{
+	pfk_ext4_ready = true;
+	pr_info("PFK EXT4 inited successfully\n");
+
+	return 0;
+}
+
+/**
+ * pfk_ext4_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_ext4_is_ready(void)
+{
+	return pfk_ext4_ready;
+}
+
+/**
+ * pfk_ext4_dump_inode() - dumps all interesting info about inode to the screen
+ *
+ *
+ */
+/*
+ * static void pfk_ext4_dump_inode(const struct inode* inode)
+ * {
+ *	struct ext4_crypt_info *ci = ext4_encryption_info((struct inode*)inode);
+ *
+ *	pr_debug("dumping inode with address 0x%p\n", inode);
+ *	pr_debug("S_ISREG is %d\n", S_ISREG(inode->i_mode));
+ *	pr_debug("EXT4_INODE_ENCRYPT flag is %d\n",
+ *		ext4_test_inode_flag((struct inode*)inode, EXT4_INODE_ENCRYPT));
+ *	if (ci) {
+ *		pr_debug("crypt_info address 0x%p\n", ci);
+ *		pr_debug("ci->ci_data_mode %d\n", ci->ci_data_mode);
+ *	} else {
+ *		pr_debug("crypt_info is NULL\n");
+ *	}
+ * }
+ */
+
+/**
+ * pfk_is_ext4_type() - return true if inode belongs to ICE EXT4 PFE
+ * @inode: inode pointer
+ */
+bool pfk_is_ext4_type(const struct inode *inode)
+{
+	if (!pfe_is_inode_filesystem_type(inode, "ext4"))
+		return false;
+
+	return fscrypt_should_be_processed_by_ice(inode);
+}
+
+/**
+ * pfk_ext4_parse_cipher() - parse cipher from inode to enum
+ * @inode: inode
+ * @algo: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error
otherwise (i.e not supported cipher) + */ +static int pfk_ext4_parse_cipher(const struct inode *inode, + enum ice_cryto_algo_mode *algo) +{ + /* + * currently only AES XTS algo is supported + * in the future, table with supported ciphers might + * be introduced + */ + + if (!inode) + return -EINVAL; + + if (!fscrypt_is_aes_xts_cipher(inode)) { + pr_err("ext4 alghoritm is not supported by pfk\n"); + return -EINVAL; + } + + if (algo) + *algo = ICE_CRYPTO_ALGO_MODE_AES_XTS; + + return 0; +} + + +int pfk_ext4_parse_inode(const struct bio *bio, + const struct inode *inode, + struct pfk_key_info *key_info, + enum ice_cryto_algo_mode *algo, + bool *is_pfe) +{ + int ret = 0; + + if (!is_pfe) + return -EINVAL; + + /* + * only a few errors below can indicate that + * this function was not invoked within PFE context, + * otherwise we will consider it PFE + */ + *is_pfe = true; + + if (!pfk_ext4_is_ready()) + return -ENODEV; + + if (!inode) + return -EINVAL; + + if (!key_info) + return -EINVAL; + + key_info->key = fscrypt_get_ice_encryption_key(inode); + if (!key_info->key) { + pr_err("could not parse key from ext4\n"); + return -EINVAL; + } + + key_info->key_size = fscrypt_get_ice_encryption_key_size(inode); + if (!key_info->key_size) { + pr_err("could not parse key size from ext4\n"); + return -EINVAL; + } + + key_info->salt = fscrypt_get_ice_encryption_salt(inode); + if (!key_info->salt) { + pr_err("could not parse salt from ext4\n"); + return -EINVAL; + } + + key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode); + if (!key_info->salt_size) { + pr_err("could not parse salt size from ext4\n"); + return -EINVAL; + } + + ret = pfk_ext4_parse_cipher(inode, algo); + if (ret != 0) { + pr_err("not supported cipher\n"); + return ret; + } + + return 0; +} + +bool pfk_ext4_allow_merge_bio(const struct bio *bio1, + const struct bio *bio2, const struct inode *inode1, + const struct inode *inode2) +{ + /* if there is no ext4 pfk, don't disallow merging blocks */ + if 
(!pfk_ext4_is_ready()) + return true; + + if (!inode1 || !inode2) + return false; + + return fscrypt_is_ice_encryption_info_equal(inode1, inode2); +} diff --git a/security/pfe/pfk_ext4.h b/security/pfe/pfk_ext4.h new file mode 100644 index 000000000000..c33232f35a14 --- /dev/null +++ b/security/pfe/pfk_ext4.h @@ -0,0 +1,37 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _PFK_EXT4_H_ +#define _PFK_EXT4_H_ + +#include +#include +#include +#include "pfk_internal.h" + +bool pfk_is_ext4_type(const struct inode *inode); + +int pfk_ext4_parse_inode(const struct bio *bio, + const struct inode *inode, + struct pfk_key_info *key_info, + enum ice_cryto_algo_mode *algo, + bool *is_pfe); + +bool pfk_ext4_allow_merge_bio(const struct bio *bio1, + const struct bio *bio2, const struct inode *inode1, + const struct inode *inode2); + +int __init pfk_ext4_init(void); + +void pfk_ext4_deinit(void); + +#endif /* _PFK_EXT4_H_ */ diff --git a/security/pfe/pfk_f2fs.c b/security/pfe/pfk_f2fs.c new file mode 100644 index 000000000000..8b9d515043e8 --- /dev/null +++ b/security/pfe/pfk_f2fs.c @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Key (PFK) - f2fs
+ *
+ * This driver is used for working with EXT4/F2FS crypt extension
+ *
+ * The key information is stored in node by EXT4/F2FS when file is first opened
+ * and will be later accessed by Block Device Driver to actually load the key
+ * to encryption hw.
+ *
+ * PFK exposes API's for loading and removing keys from encryption hw
+ * and also API to determine whether 2 adjacent blocks can be agregated by
+ * Block Layer in one request to encryption hw.
+ *
+ */
+
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+#define pr_fmt(fmt) "pfk_f2fs [%s]: " fmt, __func__
+
+#include
+#include
+#include
+#include
+
+#include "fscrypt_ice.h"
+#include "pfk_f2fs.h"
+
+static bool pfk_f2fs_ready;
+
+/*
+ * pfk_f2fs_deinit() - Deinit function, should be invoked by upper PFK layer
+ */
+void pfk_f2fs_deinit(void)
+{
+	pfk_f2fs_ready = false;
+}
+
+/*
+ * pfk_f2fs_init() - Init function, should be invoked by upper PFK layer
+ */
+int __init pfk_f2fs_init(void)
+{
+	pfk_f2fs_ready = true;
+	pr_info("PFK F2FS inited successfully\n");
+
+	return 0;
+}
+
+/**
+ * pfk_f2fs_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */ +static inline bool pfk_f2fs_is_ready(void) +{ + return pfk_f2fs_ready; +} + +/** + * pfk_is_f2fs_type() - return true if inode belongs to ICE F2FS PFE + * @inode: inode pointer + */ +bool pfk_is_f2fs_type(const struct inode *inode) +{ + if (!pfe_is_inode_filesystem_type(inode, "f2fs")) + return false; + + return fscrypt_should_be_processed_by_ice(inode); +} + +/** + * pfk_f2fs_parse_cipher() - parse cipher from inode to enum + * @inode: inode + * @algo: pointer to store the output enum (can be null) + * + * return 0 in case of success, error otherwise (i.e not supported cipher) + */ +static int pfk_f2fs_parse_cipher(const struct inode *inode, + enum ice_cryto_algo_mode *algo) +{ + /* + * currently only AES XTS algo is supported + * in the future, table with supported ciphers might + * be introduced + */ + if (!inode) + return -EINVAL; + + if (!fscrypt_is_aes_xts_cipher(inode)) { + pr_err("f2fs alghoritm is not supported by pfk\n"); + return -EINVAL; + } + + if (algo) + *algo = ICE_CRYPTO_ALGO_MODE_AES_XTS; + + return 0; +} + + +int pfk_f2fs_parse_inode(const struct bio *bio, + const struct inode *inode, + struct pfk_key_info *key_info, + enum ice_cryto_algo_mode *algo, + bool *is_pfe) +{ + int ret = 0; + + if (!is_pfe) + return -EINVAL; + + /* + * only a few errors below can indicate that + * this function was not invoked within PFE context, + * otherwise we will consider it PFE + */ + *is_pfe = true; + + if (!pfk_f2fs_is_ready()) + return -ENODEV; + + if (!inode) + return -EINVAL; + + if (!key_info) + return -EINVAL; + + key_info->key = fscrypt_get_ice_encryption_key(inode); + if (!key_info->key) { + pr_err("could not parse key from f2fs\n"); + return -EINVAL; + } + + key_info->key_size = fscrypt_get_ice_encryption_key_size(inode); + if (!key_info->key_size) { + pr_err("could not parse key size from f2fs\n"); + return -EINVAL; + } + + key_info->salt = fscrypt_get_ice_encryption_salt(inode); + if (!key_info->salt) { + pr_err("could not parse salt from 
f2fs\n"); + return -EINVAL; + } + + key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode); + if (!key_info->salt_size) { + pr_err("could not parse salt size from f2fs\n"); + return -EINVAL; + } + + ret = pfk_f2fs_parse_cipher(inode, algo); + if (ret != 0) { + pr_err("not supported cipher\n"); + return ret; + } + + return 0; +} + +bool pfk_f2fs_allow_merge_bio(const struct bio *bio1, + const struct bio *bio2, const struct inode *inode1, + const struct inode *inode2) +{ + bool mergeable; + + /* if there is no f2fs pfk, don't disallow merging blocks */ + if (!pfk_f2fs_is_ready()) + return true; + + if (!inode1 || !inode2) + return false; + + mergeable = fscrypt_is_ice_encryption_info_equal(inode1, inode2); + if (!mergeable) + return false; + + + /* ICE allows only consecutive iv_key stream. */ + if (!bio_dun(bio1) && !bio_dun(bio2)) + return true; + else if (!bio_dun(bio1) || !bio_dun(bio2)) + return false; + + return bio_end_dun(bio1) == bio_dun(bio2); +} diff --git a/security/pfe/pfk_f2fs.h b/security/pfe/pfk_f2fs.h new file mode 100644 index 000000000000..551d529bced6 --- /dev/null +++ b/security/pfe/pfk_f2fs.h @@ -0,0 +1,37 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _PFK_F2FS_H_ +#define _PFK_F2FS_H_ + +#include +#include +#include +#include "pfk_internal.h" + +bool pfk_is_f2fs_type(const struct inode *inode); + +int pfk_f2fs_parse_inode(const struct bio *bio, + const struct inode *inode, + struct pfk_key_info *key_info, + enum ice_cryto_algo_mode *algo, + bool *is_pfe); + +bool pfk_f2fs_allow_merge_bio(const struct bio *bio1, + const struct bio *bio2, const struct inode *inode1, + const struct inode *inode2); + +int __init pfk_f2fs_init(void); + +void pfk_f2fs_deinit(void); + +#endif /* _PFK_F2FS_H_ */ diff --git a/security/pfe/pfk_ice.c b/security/pfe/pfk_ice.c new file mode 100644 index 000000000000..b627c92aaf5d --- /dev/null +++ b/security/pfe/pfk_ice.c @@ -0,0 +1,216 @@ +/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "pfk_ice.h" + +/**********************************/ +/** global definitions **/ +/**********************************/ + +#define TZ_ES_INVALIDATE_ICE_KEY 0x3 +#define TZ_ES_CONFIG_SET_ICE_KEY 0x4 + +/* index 0 and 1 is reserved for FDE */ +#define MIN_ICE_KEY_INDEX 2 +#define NUM_ICE_SLOTS 32 +#define MAX_ICE_KEY_INDEX (NUM_ICE_SLOTS - 1) + +#define TZ_ES_CONFIG_SET_ICE_KEY_ID \ + TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, \ + TZ_ES_CONFIG_SET_ICE_KEY) + +#define TZ_ES_INVALIDATE_ICE_KEY_ID \ + TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \ + TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY) + +#define TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID \ + TZ_SYSCALL_CREATE_PARAM_ID_1( \ + TZ_SYSCALL_PARAM_TYPE_VAL) + +#define TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID \ + TZ_SYSCALL_CREATE_PARAM_ID_5( \ + TZ_SYSCALL_PARAM_TYPE_VAL, \ + TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \ + TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL) + +#define CONTEXT_SIZE 0x1000 + +#define ICE_BUFFER_SIZE 64 + +static uint8_t ice_buffer[ICE_BUFFER_SIZE]; + +enum { + ICE_CIPHER_MODE_XTS_128 = 0, + ICE_CIPHER_MODE_CBC_128 = 1, + ICE_CIPHER_MODE_XTS_256 = 3, + ICE_CIPHER_MODE_CBC_256 = 4 +}; + +static int set_key(uint32_t index, const uint8_t *key, const uint8_t *salt, + unsigned int data_unit) +{ + struct scm_desc desc = {0}; + int ret = 0; + uint32_t smc_id = 0; + char *tzbuf = (char *)ice_buffer; + uint32_t size = ICE_BUFFER_SIZE / 2; + + if (!tzbuf) { + pr_err("%s No Memory\n", __func__); + return -ENOMEM; + } + + memset(tzbuf, 0, ICE_BUFFER_SIZE); + + memcpy(ice_buffer, key, size); + memcpy(ice_buffer+size, salt, size); + + dmac_flush_range(tzbuf, tzbuf + ICE_BUFFER_SIZE); + + smc_id = TZ_ES_CONFIG_SET_ICE_KEY_ID; + + desc.arginfo = TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID; + desc.args[0] = index; + desc.args[1] = virt_to_phys(tzbuf); + desc.args[2] = 
ICE_BUFFER_SIZE;
+	desc.args[3] = ICE_CIPHER_MODE_XTS_256;
+	desc.args[4] = data_unit;
+
+	ret = scm_call2_noretry(smc_id, &desc);
+	if (ret)
+		pr_err("%s:SCM call Error: 0x%x\n", __func__, ret);
+
+	return ret;
+}
+
+static int clear_key(uint32_t index)
+{
+	struct scm_desc desc = {0};
+	int ret = 0;
+	uint32_t smc_id = 0;
+
+	smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
+
+	desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
+	desc.args[0] = index;
+
+	ret = scm_call2_noretry(smc_id, &desc);
+	if (ret)
+		pr_err("%s:SCM call Error: 0x%x\n", __func__, ret);
+	return ret;
+}
+
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+			char *storage_type, unsigned int data_unit)
+{
+	int ret = 0, ret1 = 0;
+	char *s_type = storage_type;
+
+	if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+		pr_err("%s Invalid index %d\n", __func__, index);
+		return -EINVAL;
+	}
+	if (!key || !salt) {
+		pr_err("%s Invalid key/salt\n", __func__);
+		return -EINVAL;
+	}
+
+	if (s_type == NULL) {
+		pr_err("%s Invalid Storage type\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = qcom_ice_setup_ice_hw((const char *)s_type, true);
+	if (ret) {
+		pr_err("%s: could not enable clocks: %d\n", __func__, ret);
+		goto out;
+	}
+
+	ret = set_key(index, key, salt, data_unit);
+	if (ret) {
+		pr_err("%s: Set Key Error: %d\n", __func__, ret);
+		if (ret == -EBUSY) {
+			if (qcom_ice_setup_ice_hw((const char *)s_type, false))
+				pr_err("%s: clock disable failed\n", __func__);
+			goto out;
+		}
+		/* Try to invalidate the key to keep ICE in proper state */
+		ret1 = clear_key(index);
+		if (ret1)
+			pr_err("%s: Invalidate key error: %d\n", __func__, ret1);
+	}
+
+	ret1 = qcom_ice_setup_ice_hw((const char *)s_type, false);
+	if (ret1)
+		pr_err("%s: Error %d disabling clocks\n", __func__, ret1);
+
+out:
+	return ret;
+}
+
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type)
+{
+	int ret = 0;
+
+	if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+		pr_err("%s Invalid index %d\n",
__func__, index); + return -EINVAL; + } + + if (storage_type == NULL) { + pr_err("%s Invalid Storage type\n", __func__); + return -EINVAL; + } + + ret = qcom_ice_setup_ice_hw((const char *)storage_type, true); + if (ret) { + pr_err("%s: could not enable clocks: 0x%x\n", __func__, ret); + return ret; + } + + ret = clear_key(index); + if (ret) + pr_err("%s: Invalidate key error: %d\n", __func__, ret); + + if (qcom_ice_setup_ice_hw((const char *)storage_type, false)) + pr_err("%s: could not disable clocks\n", __func__); + + return ret; +} + +int qti_pfk_ice_get_info(uint32_t *min_slot_index, uint32_t *total_num_slots, + bool async) +{ + + if (!min_slot_index || !total_num_slots) { + pr_err("%s Null input\n", __func__); + return -EINVAL; + } + + *min_slot_index = MIN_ICE_KEY_INDEX; + *total_num_slots = NUM_ICE_SLOTS - MIN_ICE_KEY_INDEX; + + return 0; +} diff --git a/security/pfe/pfk_ice.h b/security/pfe/pfk_ice.h new file mode 100644 index 000000000000..bc919744e7a2 --- /dev/null +++ b/security/pfe/pfk_ice.h @@ -0,0 +1,34 @@ +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef PFK_ICE_H_ +#define PFK_ICE_H_ + +/* + * PFK ICE + * + * ICE keys configuration through scm calls. 
+ * + */ + +#include + +int pfk_ice_init(void); +int pfk_ice_deinit(void); + +int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt, + char *storage_type, unsigned int data_unit); +int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type); +int qti_pfk_ice_get_info(uint32_t *min_slot_index, uint32_t *total_num_slots, + bool async); + +#endif /* PFK_ICE_H_ */ diff --git a/security/pfe/pfk_internal.h b/security/pfe/pfk_internal.h new file mode 100644 index 000000000000..3214327b8bcd --- /dev/null +++ b/security/pfe/pfk_internal.h @@ -0,0 +1,34 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _PFK_INTERNAL_H_ +#define _PFK_INTERNAL_H_ + +#include +#include + +struct pfk_key_info { + const unsigned char *key; + const unsigned char *salt; + size_t key_size; + size_t salt_size; +}; + +int pfk_key_size_to_key_type(size_t key_size, + enum ice_crpto_key_size *key_size_type); + +bool pfe_is_inode_filesystem_type(const struct inode *inode, + const char *fs_type); + +char *inode_to_filename(const struct inode *inode); + +#endif /* _PFK_INTERNAL_H_ */ diff --git a/security/pfe/pfk_kc.c b/security/pfe/pfk_kc.c new file mode 100644 index 000000000000..c07c82c5260d --- /dev/null +++ b/security/pfe/pfk_kc.c @@ -0,0 +1,951 @@ +/* + * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * PFK Key Cache + * + * Key Cache used internally in PFK. + * The purpose of the cache is to save access time to QSEE when loading keys. + * Currently the cache is the same size as the total number of keys that can + * be loaded to ICE. Since this number is relatively small, the algorithms for + * cache eviction are simple, linear and based on last usage timestamp, i.e + * the node that will be evicted is the one with the oldest timestamp. + * Empty entries always have the oldest timestamp. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pfk_kc.h" +#include "pfk_ice.h" + +/** currently the only supported key and salt sizes */ +#define PFK_KC_KEY_SIZE 32 +#define PFK_KC_SALT_SIZE 32 + +/** Table size limitations */ +#define PFK_KC_MAX_TABLE_SIZE (32) +#define PFK_KC_MIN_TABLE_SIZE (1) + +/** The maximum key and salt size */ +#define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE +#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE +#define PFK_UFS "ufs" + +static DEFINE_SPINLOCK(kc_lock); +static unsigned long flags; +static bool kc_ready; +static char *s_type = "sdcc"; + + +/** Actual table size */ +static uint32_t kc_table_size; + +/** + * enum pfk_kc_entry_state - state of the entry inside kc table + * + * @FREE: entry is free + * @ACTIVE_ICE_PRELOAD: entry is actively used by ICE engine + and cannot be used by others. 
SCM call + to load key to ICE is pending to be performed + * @ACTIVE_ICE_LOADED: entry is actively used by ICE engine and + cannot be used by others. SCM call to load the + key to ICE was successfully executed and key is + now loaded + * @INACTIVE_INVALIDATING: entry is being invalidated during file close + and cannot be used by others until invalidation + is complete + * @INACTIVE: entry's key is already loaded, but is not + currently being used. It can be re-used for + optimization and to avoid SCM call cost or + it can be taken by another key if there are + no FREE entries + * @SCM_ERROR: error occurred while scm call was performed to + load the key to ICE + */ +enum pfk_kc_entry_state { + FREE, + ACTIVE_ICE_PRELOAD, + ACTIVE_ICE_LOADED, + INACTIVE_INVALIDATING, + INACTIVE, + SCM_ERROR +}; + +struct kc_entry { + unsigned char key[PFK_MAX_KEY_SIZE]; + size_t key_size; + + unsigned char salt[PFK_MAX_SALT_SIZE]; + size_t salt_size; + + u64 time_stamp; + u32 key_index; + + struct task_struct *thread_pending; + + enum pfk_kc_entry_state state; + + /* ref count for the number of requests in the HW queue for this key */ + int loaded_ref_cnt; + int scm_error; +}; + +static struct kc_entry kc_table[PFK_KC_MAX_TABLE_SIZE]; + + + +static inline void kc_spin_lock(void) +{ + spin_lock_irqsave(&kc_lock, flags); +} + +static inline void kc_spin_unlock(void) +{ + spin_unlock_irqrestore(&kc_lock, flags); +} +/** + * kc_is_ready() - driver is initialized and ready. + * + * Return: true if the key cache is ready. + */ +static inline bool kc_is_ready(void) +{ + bool res; + + kc_spin_lock(); + res = kc_ready; + kc_spin_unlock(); + return res; +} +/** + * pfk_kc_get_storage_type() - return the hardware storage type. + * + * Return: storage type queried during bootup. 
+ */ +const char *pfk_kc_get_storage_type(void) +{ + return s_type; +} + +/** + * kc_entry_is_available() - checks whether the entry is available + * + * Return true if it is , false otherwise or if invalid + * Should be invoked under spinlock + */ +static bool kc_entry_is_available(const struct kc_entry *entry) +{ + if (!entry) + return false; + + return (entry->state == FREE || entry->state == INACTIVE); +} + +/** + * kc_entry_wait_till_available() - waits till entry is available + * + * Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted + * by signal + * + * Should be invoked under spinlock + */ +static int kc_entry_wait_till_available(struct kc_entry *entry) +{ + int res = 0; + + while (!kc_entry_is_available(entry)) { + set_current_state(TASK_INTERRUPTIBLE); + if (signal_pending(current)) { + res = -ERESTARTSYS; + break; + } + /* assuming only one thread can try to invalidate + * the same entry + */ + entry->thread_pending = current; + kc_spin_unlock(); + schedule(); + kc_spin_lock(); + } + set_current_state(TASK_RUNNING); + + return res; +} + +/** + * kc_entry_start_invalidating() - moves entry to state + * INACTIVE_INVALIDATING + * If entry is in use, waits till + * it gets available + * @entry: pointer to entry + * + * Return 0 in case of success, otherwise error + * Should be invoked under spinlock + */ +static int kc_entry_start_invalidating(struct kc_entry *entry) +{ + int res; + + res = kc_entry_wait_till_available(entry); + if (res) + return res; + + entry->state = INACTIVE_INVALIDATING; + + return 0; +} + +/** + * kc_entry_finish_invalidating() - moves entry to state FREE + * wakes up all the tasks waiting + * on it + * + * @entry: pointer to entry + * + * Return 0 in case of success, otherwise error + * Should be invoked under spinlock + */ +static void kc_entry_finish_invalidating(struct kc_entry *entry) +{ + if (!entry) + return; + + if (entry->state != INACTIVE_INVALIDATING) + return; + + entry->state = FREE; +} + +/** + * 
kc_min_entry() - compare two entries to find one with minimal time + * @a: ptr to the first entry. If NULL the other entry will be returned + * @b: pointer to the second entry + * + * Return the entry which timestamp is the minimal, or b if a is NULL + */ +static inline struct kc_entry *kc_min_entry(struct kc_entry *a, + struct kc_entry *b) +{ + if (!a) + return b; + + if (time_before64(b->time_stamp, a->time_stamp)) + return b; + + return a; +} + +/** + * kc_entry_at_index() - return entry at specific index + * @index: index of entry to be accessed + * + * Return entry + * Should be invoked under spinlock + */ +static struct kc_entry *kc_entry_at_index(int index) +{ + return &(kc_table[index]); +} + +/** + * kc_find_key_at_index() - find kc entry starting at specific index + * @key: key to look for + * @key_size: the key size + * @salt: salt to look for + * @salt_size: the salt size + * @sarting_index: index to start search with, if entry found, updated with + * index of that entry + * + * Return entry or NULL in case of error + * Should be invoked under spinlock + */ +static struct kc_entry *kc_find_key_at_index(const unsigned char *key, + size_t key_size, const unsigned char *salt, size_t salt_size, + int *starting_index) +{ + struct kc_entry *entry = NULL; + int i = 0; + + for (i = *starting_index; i < kc_table_size; i++) { + entry = kc_entry_at_index(i); + + if (salt != NULL) { + if (entry->salt_size != salt_size) + continue; + + if (memcmp(entry->salt, salt, salt_size) != 0) + continue; + } + + if (entry->key_size != key_size) + continue; + + if (memcmp(entry->key, key, key_size) == 0) { + *starting_index = i; + return entry; + } + } + + return NULL; +} + +/** + * kc_find_key() - find kc entry + * @key: key to look for + * @key_size: the key size + * @salt: salt to look for + * @salt_size: the salt size + * + * Return entry or NULL in case of error + * Should be invoked under spinlock + */ +static struct kc_entry *kc_find_key(const unsigned char *key, size_t 
key_size, + const unsigned char *salt, size_t salt_size) +{ + int index = 0; + + return kc_find_key_at_index(key, key_size, salt, salt_size, &index); +} + +/** + * kc_find_oldest_entry_non_locked() - finds the entry with minimal timestamp + * that is not locked + * + * Returns entry with minimal timestamp. Empty entries have timestamp + * of 0, therefore they are returned first. + * If all the entries are locked, will return NULL + * Should be invoked under spin lock + */ +static struct kc_entry *kc_find_oldest_entry_non_locked(void) +{ + struct kc_entry *curr_min_entry = NULL; + struct kc_entry *entry = NULL; + int i = 0; + + for (i = 0; i < kc_table_size; i++) { + entry = kc_entry_at_index(i); + + if (entry->state == FREE) + return entry; + + if (entry->state == INACTIVE) + curr_min_entry = kc_min_entry(curr_min_entry, entry); + } + + return curr_min_entry; +} + +/** + * kc_update_timestamp() - updates timestamp of entry to current + * + * @entry: entry to update + * + */ +static void kc_update_timestamp(struct kc_entry *entry) +{ + if (!entry) + return; + + entry->time_stamp = get_jiffies_64(); +} + +/** + * kc_clear_entry() - clear the key from entry and mark entry not in use + * + * @entry: pointer to entry + * + * Should be invoked under spinlock + */ +static void kc_clear_entry(struct kc_entry *entry) +{ + if (!entry) + return; + + memset(entry->key, 0, entry->key_size); + memset(entry->salt, 0, entry->salt_size); + + entry->key_size = 0; + entry->salt_size = 0; + + entry->time_stamp = 0; + entry->scm_error = 0; + + entry->state = FREE; + + entry->loaded_ref_cnt = 0; + entry->thread_pending = NULL; +} + +/** + * kc_update_entry() - replaces the key in given entry and + * loads the new key to ICE + * + * @entry: entry to replace key in + * @key: key + * @key_size: key_size + * @salt: salt + * @salt_size: salt_size + * @data_unit: dun size + * + * The previous key is securely released and wiped, the new one is loaded + * to ICE. 
+ * Should be invoked under spinlock + */ +static int kc_update_entry(struct kc_entry *entry, const unsigned char *key, + size_t key_size, const unsigned char *salt, size_t salt_size, + unsigned int data_unit) +{ + int ret; + kc_clear_entry(entry); + + memcpy(entry->key, key, key_size); + entry->key_size = key_size; + + memcpy(entry->salt, salt, salt_size); + entry->salt_size = salt_size; + + /* Mark entry as no longer free before releasing the lock */ + entry->state = ACTIVE_ICE_PRELOAD; + kc_spin_unlock(); + ret = qti_pfk_ice_set_key(entry->key_index, entry->key, + entry->salt, s_type, data_unit); + kc_spin_lock(); + return ret; +} + +/** + * pfk_kc_init() - init function + * + * Return 0 in case of success, error otherwise + */ +int pfk_kc_init(bool async) +{ + int ret = 0; + struct kc_entry *entry = NULL; + uint32_t i = 0, num_ice_slots = 0, kc_starting_index = 0; + + if (kc_is_ready()) + return 0; + + ret = qti_pfk_ice_get_info(&kc_starting_index, &num_ice_slots, async); + if (ret) { + pr_err("qti_pfk_ice_get_info failed ret = %d\n", ret); + return ret; + } + if (num_ice_slots > PFK_KC_MAX_TABLE_SIZE || + num_ice_slots < PFK_KC_MIN_TABLE_SIZE) { + pr_err("Received ICE num slots = %u not in [%u,%u]\n", + num_ice_slots, PFK_KC_MAX_TABLE_SIZE, + PFK_KC_MIN_TABLE_SIZE); + return -E2BIG; + } + + kc_spin_lock(); + if (!kc_ready) { + kc_table_size = num_ice_slots; + for (i = 0; i < kc_table_size; i++) { + entry = kc_entry_at_index(i); + entry->key_index = kc_starting_index + i; + } + kc_ready = true; + } + kc_spin_unlock(); + + return ret; +} + +/** + * pfk_kc_denit() - deinit function + * + * Return 0 in case of success, error otherwise + */ +int pfk_kc_deinit(void) +{ + int res = pfk_kc_clear(); + kc_spin_lock(); + kc_ready = false; + kc_spin_unlock(); + kc_table_size = 0; + + return res; +} + +/** + * pfk_kc_load_key_start() - retrieve the key from cache or add it if + * it's not there and return the ICE hw key index in @key_index. 
+ * @key: pointer to the key + * @key_size: the size of the key + * @salt: pointer to the salt + * @salt_size: the size of the salt + * @key_index: the pointer to key_index where the output will be stored + * @async: whether scm calls are allowed in the caller context + * + * If key is present in cache, than the key_index will be retrieved from cache. + * If it is not present, the oldest entry from kc table will be evicted, + * the key will be loaded to ICE via QSEE to the index that is the evicted + * entry number and stored in cache. + * Entry that is going to be used is marked as being used, it will mark + * as not being used when ICE finishes using it and pfk_kc_load_key_end + * will be invoked. + * As QSEE calls can only be done from a non-atomic context, when @async flag + * is set to 'false', it specifies that it is ok to make the calls in the + * current context. Otherwise, when @async is set, the caller should retry the + * call again from a different context, and -EAGAIN error will be returned. 
+ * + * Return 0 in case of success, error otherwise + */ +int pfk_kc_load_key_start(const unsigned char *key, size_t key_size, + const unsigned char *salt, size_t salt_size, u32 *key_index, + bool async, unsigned int data_unit) +{ + int ret = 0; + struct kc_entry *entry = NULL; + bool entry_exists = false; + + ret = pfk_kc_init(async); + if (ret) + return ret; + + if (!key || !salt || !key_index) { + pr_err("%s key/salt/key_index NULL\n", __func__); + return -EINVAL; + } + + if (key_size != PFK_KC_KEY_SIZE) { + pr_err("unsupported key size %zu\n", key_size); + return -EINVAL; + } + + if (salt_size != PFK_KC_SALT_SIZE) { + pr_err("unsupported salt size %zu\n", salt_size); + return -EINVAL; + } + + kc_spin_lock(); + + entry = kc_find_key(key, key_size, salt, salt_size); + if (!entry) { + if (async) { + pr_debug("%s task will populate entry\n", __func__); + kc_spin_unlock(); + return -EAGAIN; + } + + entry = kc_find_oldest_entry_non_locked(); + if (!entry) { + /* could not find a single non locked entry, + * return EBUSY to upper layers so that the + * request will be rescheduled + */ + kc_spin_unlock(); + return -EBUSY; + } + } else { + entry_exists = true; + } + + pr_debug("entry with index %d is in state %d\n", + entry->key_index, entry->state); + + switch (entry->state) { + case (INACTIVE): + if (entry_exists) { + kc_update_timestamp(entry); + entry->state = ACTIVE_ICE_LOADED; + + if (!strcmp(s_type, (char *)PFK_UFS)) { + if (async) + entry->loaded_ref_cnt++; + } else { + entry->loaded_ref_cnt++; + } + break; + } + case (FREE): + ret = kc_update_entry(entry, key, key_size, salt, salt_size, + data_unit); + if (ret) { + entry->state = SCM_ERROR; + entry->scm_error = ret; + pr_err("%s: key load error (%d)\n", __func__, ret); + } else { + kc_update_timestamp(entry); + entry->state = ACTIVE_ICE_LOADED; + + /* + * In case of UFS only increase ref cnt for async calls, + * sync calls from within work thread do not pass + * requests further to HW + */ + if 
(!strcmp(s_type, (char *)PFK_UFS)) { + if (async) + entry->loaded_ref_cnt++; + } else { + entry->loaded_ref_cnt++; + } + } + break; + case (ACTIVE_ICE_PRELOAD): + case (INACTIVE_INVALIDATING): + ret = -EAGAIN; + break; + case (ACTIVE_ICE_LOADED): + kc_update_timestamp(entry); + + if (!strcmp(s_type, (char *)PFK_UFS)) { + if (async) + entry->loaded_ref_cnt++; + } else { + entry->loaded_ref_cnt++; + } + break; + case(SCM_ERROR): + ret = entry->scm_error; + kc_clear_entry(entry); + entry->state = FREE; + break; + default: + pr_err("invalid state %d for entry with key index %d\n", + entry->state, entry->key_index); + ret = -EINVAL; + } + + *key_index = entry->key_index; + kc_spin_unlock(); + + return ret; +} + +/** + * pfk_kc_load_key_end() - finish the process of key loading that was started + * by pfk_kc_load_key_start + * by marking the entry as not + * being in use + * @key: pointer to the key + * @key_size: the size of the key + * @salt: pointer to the salt + * @salt_size: the size of the salt + * + */ +void pfk_kc_load_key_end(const unsigned char *key, size_t key_size, + const unsigned char *salt, size_t salt_size) +{ + struct kc_entry *entry = NULL; + struct task_struct *tmp_pending = NULL; + int ref_cnt = 0; + + if (!kc_is_ready()) + return; + + if (!key || !salt) + return; + + if (key_size != PFK_KC_KEY_SIZE) + return; + + if (salt_size != PFK_KC_SALT_SIZE) + return; + + kc_spin_lock(); + + entry = kc_find_key(key, key_size, salt, salt_size); + if (!entry) { + kc_spin_unlock(); + pr_err("internal error, there should an entry to unlock\n"); + + return; + } + ref_cnt = --entry->loaded_ref_cnt; + + if (ref_cnt < 0) + pr_err("internal error, ref count should never be negative\n"); + + if (!ref_cnt) { + entry->state = INACTIVE; + /* + * wake-up invalidation if it's waiting + * for the entry to be released + */ + if (entry->thread_pending) { + tmp_pending = entry->thread_pending; + entry->thread_pending = NULL; + + kc_spin_unlock(); + wake_up_process(tmp_pending); + 
return; + } + } + + kc_spin_unlock(); +} + +/** + * pfk_kc_remove_key() - remove the key from cache and from ICE engine + * @key: pointer to the key + * @key_size: the size of the key + * @salt: pointer to the key + * @salt_size: the size of the key + * + * Return 0 in case of success, error otherwise (also in case of non + * (existing key) + */ +int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size, + const unsigned char *salt, size_t salt_size) +{ + struct kc_entry *entry = NULL; + int res = 0; + + if (!kc_is_ready()) + return -ENODEV; + + if (!key) + return -EINVAL; + + if (!salt) + return -EINVAL; + + if (key_size != PFK_KC_KEY_SIZE) + return -EINVAL; + + if (salt_size != PFK_KC_SALT_SIZE) + return -EINVAL; + + kc_spin_lock(); + + entry = kc_find_key(key, key_size, salt, salt_size); + if (!entry) { + pr_debug("%s: key does not exist\n", __func__); + kc_spin_unlock(); + return -EINVAL; + } + + res = kc_entry_start_invalidating(entry); + if (res != 0) { + kc_spin_unlock(); + return res; + } + kc_clear_entry(entry); + + kc_spin_unlock(); + + qti_pfk_ice_invalidate_key(entry->key_index, s_type); + + kc_spin_lock(); + kc_entry_finish_invalidating(entry); + kc_spin_unlock(); + + return 0; +} + +/** + * pfk_kc_remove_key() - remove the key from cache and from ICE engine + * when no salt is available. 
Will only search key part, if there are several, + * all will be removed + * + * @key: pointer to the key + * @key_size: the size of the key + * + * Return 0 in case of success, error otherwise (also for non-existing key) + */ +int pfk_kc_remove_key(const unsigned char *key, size_t key_size) +{ + struct kc_entry *entry = NULL; + int index = 0; + int temp_indexes[PFK_KC_MAX_TABLE_SIZE] = {0}; + int temp_indexes_size = 0; + int i = 0; + int res = 0; + + if (!kc_is_ready()) + return -ENODEV; + + if (!key) + return -EINVAL; + + if (key_size != PFK_KC_KEY_SIZE) + return -EINVAL; + + memset(temp_indexes, -1, sizeof(temp_indexes)); + + kc_spin_lock(); + + entry = kc_find_key_at_index(key, key_size, NULL, 0, &index); + if (!entry) { + pr_err("%s: key does not exist\n", __func__); + kc_spin_unlock(); + return -EINVAL; + } + + res = kc_entry_start_invalidating(entry); + if (res != 0) { + kc_spin_unlock(); + return res; + } + + temp_indexes[temp_indexes_size++] = index; + kc_clear_entry(entry); + + /* let's clean additional entries with the same key if there are any */ + do { + index++; + entry = kc_find_key_at_index(key, key_size, NULL, 0, &index); + if (!entry) + break; + + res = kc_entry_start_invalidating(entry); + if (res != 0) { + kc_spin_unlock(); + goto out; + } + + temp_indexes[temp_indexes_size++] = index; + + kc_clear_entry(entry); + + + } while (true); + + kc_spin_unlock(); + + temp_indexes_size--; + for (i = temp_indexes_size; i >= 0 ; i--) + qti_pfk_ice_invalidate_key( + kc_entry_at_index(temp_indexes[i])->key_index, + s_type); + + /* fall through */ + res = 0; + +out: + kc_spin_lock(); + for (i = temp_indexes_size; i >= 0 ; i--) + kc_entry_finish_invalidating( + kc_entry_at_index(temp_indexes[i])); + kc_spin_unlock(); + + return res; +} + +/** + * pfk_kc_clear() - clear the table and remove all keys from ICE + * + * Return 0 on success, error otherwise + * + */ +int pfk_kc_clear(void) +{ + struct kc_entry *entry = NULL; + int i = 0; + int res = 0; + + if 
(!kc_is_ready()) + return -ENODEV; + + kc_spin_lock(); + for (i = 0; i < kc_table_size; i++) { + entry = kc_entry_at_index(i); + res = kc_entry_start_invalidating(entry); + if (res != 0) { + kc_spin_unlock(); + goto out; + } + kc_clear_entry(entry); + } + kc_spin_unlock(); + + for (i = 0; i < kc_table_size; i++) + qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index, + s_type); + + /* fall through */ + res = 0; +out: + kc_spin_lock(); + for (i = 0; i < kc_table_size; i++) + kc_entry_finish_invalidating(kc_entry_at_index(i)); + kc_spin_unlock(); + + return res; +} + +/** + * pfk_kc_clear_on_reset() - clear the table and remove all keys from ICE + * The assumption is that at this point we don't have any pending transactions + * Also, there is no need to clear keys from ICE + * + * Return 0 on success, error otherwise + * + */ +void pfk_kc_clear_on_reset(void) +{ + struct kc_entry *entry = NULL; + int i = 0; + + if (!kc_is_ready()) + return; + + kc_spin_lock(); + for (i = 0; i < kc_table_size; i++) { + entry = kc_entry_at_index(i); + kc_clear_entry(entry); + } + kc_spin_unlock(); +} + +static int pfk_kc_find_storage_type(char **device) +{ + +#ifdef CONFIG_PFK_VIRTUALIZED + *device = PFK_UFS; + return 0; +#else + char boot[20] = {'\0'}; + char *match = (char *)strnstr(saved_command_line, + "androidboot.bootdevice=", + strlen(saved_command_line)); + if (match) { + memcpy(boot, (match + strlen("androidboot.bootdevice=")), + sizeof(boot) - 1); + if (strnstr(boot, PFK_UFS, strlen(boot))) + *device = PFK_UFS; + + return 0; + } + return -EINVAL; +#endif +} + +static int __init pfk_kc_pre_init(void) +{ + return pfk_kc_find_storage_type(&s_type); +} + +static void __exit pfk_kc_exit(void) +{ + s_type = NULL; +} + +module_init(pfk_kc_pre_init); +module_exit(pfk_kc_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Per-File-Key-KC driver"); diff --git a/security/pfe/pfk_kc.h b/security/pfe/pfk_kc.h new file mode 100644 index 000000000000..dc00d286377a --- /dev/null 
+++ b/security/pfe/pfk_kc.h @@ -0,0 +1,34 @@ +/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef PFK_KC_H_ +#define PFK_KC_H_ + +#include + +int pfk_kc_init(bool async); +int pfk_kc_deinit(void); +int pfk_kc_load_key_start(const unsigned char *key, size_t key_size, + const unsigned char *salt, size_t salt_size, u32 *key_index, + bool async, unsigned int data_unit); +void pfk_kc_load_key_end(const unsigned char *key, size_t key_size, + const unsigned char *salt, size_t salt_size); +int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size, + const unsigned char *salt, size_t salt_size); +int pfk_kc_remove_key(const unsigned char *key, size_t key_size); +int pfk_kc_clear(void); +void pfk_kc_clear_on_reset(void); +const char *pfk_kc_get_storage_type(void); +extern char *saved_command_line; + + +#endif /* PFK_KC_H_ */ diff --git a/security/security.c b/security/security.c index 5afd1dc81511..2655987c9638 100644 --- a/security/security.c +++ b/security/security.c @@ -614,6 +614,14 @@ int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode } EXPORT_SYMBOL_GPL(security_inode_create); +int security_inode_post_create(struct inode *dir, struct dentry *dentry, + umode_t mode) +{ + if (unlikely(IS_PRIVATE(dir))) + return 0; + return call_int_hook(inode_post_create, 0, dir, dentry, mode); +} + int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) { diff --git a/security/selinux/include/objsec.h 
b/security/selinux/include/objsec.h index 512908b55ca3..2ac6edc1d131 100644 --- a/security/selinux/include/objsec.h +++ b/security/selinux/include/objsec.h @@ -26,8 +26,9 @@ #include #include #include -#include "flask.h" -#include "avc.h" +//#include "flask.h" +//#include "avc.h" +#include "security.h" struct task_security_struct { u32 osid; /* SID prior to last execve */ @@ -64,6 +65,8 @@ struct inode_security_struct { u32 sid; /* SID of this object */ u16 sclass; /* security class of this object */ unsigned char initialized; /* initialization flag */ + u32 tag; /* Per-File-Encryption tag */ + void *pfk_data; /* Per-File-Key data from ecryptfs */ spinlock_t lock; }; From b26d2a1d20a8f012957b28475128421ccf940faf Mon Sep 17 00:00:00 2001 From: Shumin Qiu Date: Sat, 29 Aug 2020 10:41:47 +0530 Subject: [PATCH 135/141] Dm: init: Enable rootfs mount as dm-verity during boot without ramdisk If the rootfs image has HASH tree appended, we need mount the rootfs as dm-verity in boot phase. This commit make rootfs mount as dm-verity without ramdisk. Change-Id: I10329ba09d57b832482cd9b5f668acc88bddf548 Signed-off-by: Shumin Qiu Signed-off-by: UtsavBalar1231 --- drivers/md/dm-ioctl.c | 35 +++++ drivers/md/dm-ioctrl.h | 20 +++ init/Makefile | 1 + init/do_mounts.c | 1 + init/do_mounts.h | 14 ++ init/do_mounts_verity.c | 285 ++++++++++++++++++++++++++++++++++++++++ 6 files changed, 356 insertions(+) create mode 100644 drivers/md/dm-ioctrl.h create mode 100644 init/do_mounts_verity.c diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 787afba77b2e..33d6011ac461 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -6,6 +6,7 @@ */ #include "dm-core.h" +#include "dm-ioctrl.h" #include #include @@ -2056,3 +2057,37 @@ out: return r; } + +int __init dm_ioctrl(uint cmd, struct dm_ioctl *param) +{ + int r = 0; + int ioctl_flags; + ioctl_fn fn = NULL; + size_t input_param_size; + + /* + * Nothing more to do for the version command. 
+ */ + if (cmd == DM_VERSION_CMD) + return 0; + + DMDEBUG("dm_ctl_ioctl: command 0x%x", cmd); + + fn = lookup_ioctl(cmd, &ioctl_flags); + if (!fn) { + DMWARN("dm_ctl_ioctl: unknown command 0x%x", cmd); + return -ENOTTY; + } + + input_param_size = param->data_size; + param->data_size = sizeof(*param); + + r = fn(NULL, param, input_param_size); + + if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) && + unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS)) + DMERR("ioctl %d but has IOCTL_FLAGS_NO_PARAMS set", cmd); + + return r; +} +EXPORT_SYMBOL(dm_ioctrl); diff --git a/drivers/md/dm-ioctrl.h b/drivers/md/dm-ioctrl.h new file mode 100644 index 000000000000..d331fcd83df4 --- /dev/null +++ b/drivers/md/dm-ioctrl.h @@ -0,0 +1,20 @@ +/* Copyright (c) 2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef DM_IOCTRL_INTERNAL_H +#define DM_IOCTRL_INTERNAL_H + +#include + +int dm_ioctrl(uint cmd, struct dm_ioctl *param); + +#endif diff --git a/init/Makefile b/init/Makefile index 58bc56e3e4e7..230853115245 100644 --- a/init/Makefile +++ b/init/Makefile @@ -20,6 +20,7 @@ mounts-$(CONFIG_BLK_DEV_RAM) += do_mounts_rd.o mounts-$(CONFIG_BLK_DEV_INITRD) += do_mounts_initrd.o mounts-$(CONFIG_BLK_DEV_MD) += do_mounts_md.o mounts-$(CONFIG_BLK_DEV_DM) += do_mounts_dm.o +mounts-$(CONFIG_BLK_DEV_DM) += do_mounts_verity.o # dependencies on generated files need to be listed explicitly $(obj)/version.o: include/generated/compile.h diff --git a/init/do_mounts.c b/init/do_mounts.c index 2006003e448d..80716bd5eb05 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -687,6 +687,7 @@ void __init prepare_namespace(void) if ((!is_early_userspace) || (is_early_userspace && first_time)) { md_run_setup(); dm_run_setup(); + dm_verity_setup(); if (saved_root_name[0]) { root_device_name = saved_root_name; diff --git a/init/do_mounts.h b/init/do_mounts.h index cd201124714b..9dfd4138aca8 100644 --- a/init/do_mounts.h +++ b/init/do_mounts.h @@ -8,6 +8,8 @@ #include #include #include +#include "uapi/linux/dm-ioctl.h" +#include void change_floppy(char *fmt, ...); void mount_block_root(char *name, int flags); @@ -71,3 +73,15 @@ void dm_run_setup(void); static inline void dm_run_setup(void) {} #endif + +#ifdef CONFIG_BLK_DEV_DM + +void dm_verity_setup(void); +extern int dm_ioctrl(uint cmd, struct dm_ioctl *param); +extern void dm_table_destroy(struct dm_table *t); + +#else + +static inline void dm_verity_setup(void) {} + +#endif diff --git a/init/do_mounts_verity.c b/init/do_mounts_verity.c new file mode 100644 index 000000000000..5d7c8a2efec9 --- /dev/null +++ b/init/do_mounts_verity.c @@ -0,0 +1,285 @@ +/* Copyright (c) 2019, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include "uapi/linux/dm-ioctl.h" +#include +#include +#include "do_mounts.h" + +#define DM_BUF_SIZE 4096 + +#define DM_MSG_PREFIX "verity" + +#define VERITY_COMMANDLINE_PARAM_LENGTH 32 +#define VERITY_ROOT_HASH_PARAM_LENGTH 65 +#define VERITY_SALT_PARAM_LENGTH 65 + +static char dm_name[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char dm_version[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char dm_data_device[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char dm_hash_device[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char dm_data_block_size[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char dm_hash_block_size[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char dm_number_of_data_blocks[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char dm_hash_start_block[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char dm_algorithm[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char dm_digest[VERITY_ROOT_HASH_PARAM_LENGTH]; +static char dm_salt[VERITY_SALT_PARAM_LENGTH]; +static char dm_opt[VERITY_COMMANDLINE_PARAM_LENGTH]; + +static void __init init_param(struct dm_ioctl *param, const char *name) +{ + memset(param, 0, DM_BUF_SIZE); + param->data_size = DM_BUF_SIZE; + param->data_start = sizeof(struct dm_ioctl); + param->version[0] = 4; + param->version[1] = 0; + param->version[2] = 0; + param->flags = DM_READONLY_FLAG; + strlcpy(param->name, name, sizeof(param->name)); +} + +static int __init dm_name_param(char *line) +{ + strlcpy(dm_name, line, sizeof(dm_name)); + return 1; +} +__setup("dmname=", dm_name_param); + 
+static int __init dm_version_param(char *line) +{ + strlcpy(dm_version, line, sizeof(dm_version)); + return 1; +} +__setup("version=", dm_version_param); + +static int __init dm_data_device_param(char *line) +{ + strlcpy(dm_data_device, line, sizeof(dm_data_device)); + return 1; +} +__setup("data_device=", dm_data_device_param); + +static int __init dm_hash_device_param(char *line) +{ + strlcpy(dm_hash_device, line, sizeof(dm_hash_device)); + return 1; +} +__setup("hash_device=", dm_hash_device_param); + +static int __init dm_data_block_size_param(char *line) +{ + strlcpy(dm_data_block_size, line, sizeof(dm_data_block_size)); + return 1; +} +__setup("data_block_size=", dm_data_block_size_param); + +static int __init dm_hash_block_size_param(char *line) +{ + strlcpy(dm_hash_block_size, line, sizeof(dm_hash_block_size)); + return 1; +} +__setup("hash_block_size=", dm_hash_block_size_param); + +static int __init dm_number_of_data_blocks_param(char *line) +{ + strlcpy(dm_number_of_data_blocks, line, sizeof(dm_number_of_data_blocks)); + return 1; +} +__setup("number_of_data_blocks=", dm_number_of_data_blocks_param); + +static int __init dm_hash_start_block_param(char *line) +{ + strlcpy(dm_hash_start_block, line, sizeof(dm_hash_start_block)); + return 1; +} +__setup("hash_start_block=", dm_hash_start_block_param); + +static int __init dm_algorithm_param(char *line) +{ + strlcpy(dm_algorithm, line, sizeof(dm_algorithm)); + return 1; +} +__setup("algorithm=", dm_algorithm_param); + +static int __init dm_digest_param(char *line) +{ + strlcpy(dm_digest, line, sizeof(dm_digest)); + return 1; +} +__setup("digest=", dm_digest_param); + +static int __init dm_salt_param(char *line) +{ + strlcpy(dm_salt, line, sizeof(dm_salt)); + return 1; +} +__setup("salt=", dm_salt_param); + +static int __init dm_opt_param(char *line) +{ + strlcpy(dm_opt, line, sizeof(dm_opt)); + return 1; +} +__setup("opt=", dm_opt_param); + +static void __init dm_setup_drive(void) +{ + const char *name; + 
const char *version; + const char *data_device; + const char *hash_device; + const char *data_block_size; + const char *hash_block_size; + const char *number_of_data_blocks; + const char *hash_start_block; + const char *algorithm; + const char *digest; + const char *salt; + const char *opt; + unsigned long long data_blocks; + char dummy; + char *verity_params; + size_t bufsize; + char *buffer = kzalloc(DM_BUF_SIZE, GFP_KERNEL); + struct dm_ioctl *param = (struct dm_ioctl *) buffer; + size_t dm_sz = sizeof(struct dm_ioctl); + struct dm_target_spec *tgt = (struct dm_target_spec *) &buffer[dm_sz]; + + if (!buffer) + goto fail; + name = dm_name; + if (name == NULL) + goto fail; + DMDEBUG("(I) name=%s", name); + + if (strcmp(name, "disabled") == 0) { + pr_info("dm: dm-verity is disabled."); + kfree(buffer); + return; + } + + version = dm_version; + if (version == NULL) + goto fail; + DMDEBUG("(I) version=%s", version); + + data_device = dm_data_device; + if (data_device == NULL) + goto fail; + DMDEBUG("(I) data_device=%s", data_device); + + hash_device = dm_hash_device; + if (hash_device == NULL) + goto fail; + DMDEBUG("(I) hash_device=%s", hash_device); + + data_block_size = dm_data_block_size; + if (data_block_size == NULL) + goto fail; + DMDEBUG("(I) data_block_size=%s", data_block_size); + + hash_block_size = dm_hash_block_size; + if (hash_block_size == NULL) + goto fail; + DMDEBUG("(I) hash_block_size=%s", hash_block_size); + + number_of_data_blocks = dm_number_of_data_blocks; + if (number_of_data_blocks == NULL) + goto fail; + DMDEBUG("(I) number_of_data_blocks=%s", number_of_data_blocks); + + hash_start_block = dm_hash_start_block; + if (hash_start_block == NULL) + goto fail; + DMDEBUG("(I) hash_start_block=%s", hash_start_block); + + algorithm = dm_algorithm; + if (algorithm == NULL) + goto fail; + DMDEBUG("(I) algorithm=%s", algorithm); + + digest = dm_digest; + if (digest == NULL) + goto fail; + DMDEBUG("(I) digest=%s", digest); + + salt = dm_salt; + if (salt 
== NULL) + goto fail; + DMDEBUG("(I) salt=%s", salt); + + opt = dm_opt; + if (opt == NULL) + goto fail; + DMDEBUG("(I) opt=%s", opt); + + init_param(param, name); + if (dm_ioctrl(DM_DEV_CREATE_CMD, param)) { + DMERR("(E) failed to create the device"); + goto fail; + } + + init_param(param, name); + param->target_count = 1; + /* set tgt arguments */ + tgt->status = 0; + tgt->sector_start = 0; + if (sscanf(number_of_data_blocks, "%llu%c", &data_blocks, &dummy) != 1) { + DMERR("(E) invalid number of data blocks"); + goto fail; + } + + tgt->length = data_blocks*4096/512; /* size in sector(512b) of data dev */ + strlcpy(tgt->target_type, "verity", sizeof(tgt->target_type)); + /* build the verity params here */ + verity_params = buffer + sizeof(struct dm_ioctl) + sizeof(struct dm_target_spec); + bufsize = DM_BUF_SIZE - (verity_params - buffer); + + verity_params += snprintf(verity_params, bufsize, "%s %s %s %s %s %s %s %s %s %s 1 %s", + version, + data_device, hash_device, + data_block_size, hash_block_size, + number_of_data_blocks, hash_start_block, + algorithm, digest, salt, opt); + + tgt->next = verity_params - buffer; + if (dm_ioctrl(DM_TABLE_LOAD_CMD, param)) { + DMERR("(E) failed to load the device"); + goto fail; + } + + init_param(param, name); + if (dm_ioctrl(DM_DEV_SUSPEND_CMD, param)) { + DMERR("(E) failed to suspend the device"); + goto fail; + } + + pr_info("dm: dm-0 (%s) is ready", data_device); + kfree(buffer); + return; + +fail: + pr_info("dm: starting dm-0 failed"); + kfree(buffer); + return; + +} + +void __init dm_verity_setup(void) +{ + pr_info("dm: attempting early device configuration."); + dm_setup_drive(); +} From 9cbaa4636b3b9562421b0de01d6fd9ade21ded4a Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Thu, 30 Jul 2020 11:27:59 +0530 Subject: [PATCH 136/141] init: completely remove Early init services support - we don't have a car with this kernel Revert "init: use unbound instead of highpriority wq in early init" Revert "Early Services: init: 
fs: synchronize Early Services with init" Revert "init: support early init on SA8195" Revert "init: support early userspace" Revert "init: move more subsystems into early init framework" Revert "init: define early init functions for rootfs mounting" Revert "init: add new early param and define early initcall macros" Revert "init: define early init functions needed by all subsystems" Revert "init: define early init functions for display subsystem" Revert "init: define early init functions for display subsystem" Revert "init: define early init functions for camera subsystem" Revert "msm: ais: define early init for camera" Signed-off-by: UtsavBalar1231 --- .../admin-guide/kernel-parameters.txt | 7 +- .../bindings/soc/qcom/qcom,early-devices.txt | 35 ---- arch/arm/kernel/vmlinux.lds.S | 1 - .../boot/dts/qcom/sa8195p-regulator.dtsi | 16 +- arch/arm64/boot/dts/qcom/sdmshrike.dtsi | 6 - arch/arm64/crypto/sha2-ce-glue.c | 3 +- arch/arm64/kernel/vmlinux.lds.S | 1 - arch/um/include/asm/common.lds.S | 4 - block/bio.c | 2 +- block/blk-ioc.c | 2 +- block/blk-settings.c | 3 +- block/blk-softirq.c | 3 +- block/bsg.c | 2 +- block/cfq-iosched.c | 2 +- block/genhd.c | 3 +- crypto/crc32c_generic.c | 2 +- drivers/base/dd.c | 7 +- drivers/base/firmware_class.c | 3 +- drivers/char/mem.c | 2 +- drivers/clk/clk.c | 46 +--- drivers/clk/qcom/camcc-sdmshrike.c | 3 +- drivers/clk/qcom/camcc-sm8150.c | 3 +- drivers/clk/qcom/clk-aop-qmp.c | 5 +- drivers/clk/qcom/clk-cpu-osm.c | 3 +- drivers/clk/qcom/clk-rpmh.c | 2 +- drivers/clk/qcom/dispcc-sm8150.c | 3 +- drivers/clk/qcom/gcc-sdmshrike.c | 3 +- drivers/clk/qcom/gcc-sm8150.c | 3 +- drivers/clk/qcom/gdsc-regulator.c | 2 +- drivers/clk/qcom/gpucc-sm8150.c | 3 +- drivers/clk/qcom/mdss/mdss-pll.c | 2 +- drivers/clk/qcom/npucc-sm8150.c | 3 +- drivers/clk/qcom/scc-sm8150.c | 3 +- drivers/clk/qcom/videocc-sm8150.c | 3 +- drivers/crypto/msm/ice.c | 3 +- drivers/devfreq/devfreq.c | 2 +- drivers/devfreq/devfreq_devbw.c | 3 +- 
drivers/devfreq/governor_bw_vbif.c | 5 +- drivers/devfreq/governor_gpubw_mon.c | 3 +- drivers/devfreq/governor_msm_adreno_tz.c | 3 +- drivers/devfreq/governor_performance.c | 3 +- drivers/devfreq/governor_powersave.c | 3 +- drivers/devfreq/governor_simpleondemand.c | 3 +- drivers/gpu/drm/bridge/analogix-anx7625.c | 3 +- drivers/gpu/drm/drm_drv.c | 2 +- drivers/gpu/msm/adreno.c | 2 +- drivers/gpu/msm/kgsl.c | 2 +- drivers/i2c/busses/i2c-qcom-geni.c | 6 +- drivers/i2c/muxes/i2c-mux-pca954x.c | 3 +- .../iio/imu/st_asm330lhh/st_asm330lhh_i2c.c | 2 +- drivers/iommu/arm-smmu.c | 2 +- drivers/md/dm-bufio.c | 2 +- drivers/md/dm-verity-target.c | 2 +- drivers/md/dm.c | 2 +- drivers/media/media-devnode.c | 2 +- .../platform/msm/ais/ais_isp/ais_ife_dev.c | 2 +- .../msm/ais/ais_isp/csid_hw/ais_ife_csid17x.c | 3 +- .../ais_isp/csid_hw/ais_ife_csid_lite17x.c | 3 +- .../ais/ais_isp/vfe_hw/vfe17x/ais_vfe17x.c | 2 +- .../msm/ais/cam_cdm/cam_cdm_hw_core.c | 4 +- .../platform/msm/ais/cam_cdm/cam_cdm_intf.c | 4 +- .../platform/msm/ais/cam_cpas/cam_cpas_intf.c | 4 +- .../platform/msm/ais/cam_fd/cam_fd_dev.c | 4 +- .../cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c | 4 +- .../msm/ais/cam_hyp_intf/cam_hyp_intf.c | 4 +- .../platform/msm/ais/cam_icp/cam_icp_subdev.c | 4 +- .../msm/ais/cam_icp/icp_hw/a5_hw/a5_dev.c | 4 +- .../msm/ais/cam_icp/icp_hw/bps_hw/bps_dev.c | 4 +- .../msm/ais/cam_icp/icp_hw/ipe_hw/ipe_dev.c | 4 +- .../platform/msm/ais/cam_isp/cam_isp_dev.c | 4 +- .../isp_hw/ife_csid_hw/cam_csid_ppi170.c | 5 +- .../isp_hw/ife_csid_hw/cam_ife_csid17x.c | 5 +- .../isp_hw/ife_csid_hw/cam_ife_csid_lite17x.c | 5 +- .../isp_hw/vfe_hw/vfe17x/cam_vfe17x.c | 4 +- .../platform/msm/ais/cam_jpeg/cam_jpeg_dev.c | 4 +- .../jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c | 4 +- .../jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c | 4 +- .../platform/msm/ais/cam_lrme/cam_lrme_dev.c | 4 +- .../lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c | 4 +- .../msm/ais/cam_req_mgr/cam_req_mgr_dev.c | 4 +- .../cam_actuator/cam_actuator_dev.c | 4 +- 
.../cam_sensor_module/cam_cci/cam_cci_dev.c | 6 +- .../cam_csiphy/cam_csiphy_dev.c | 4 +- .../cam_eeprom/cam_eeprom_dev.c | 4 +- .../cam_flash/cam_flash_dev.c | 4 +- .../cam_sensor_module/cam_ois/cam_ois_dev.c | 4 +- .../cam_res_mgr/cam_res_mgr.c | 4 +- .../cam_sensor/cam_sensor_dev.c | 4 +- .../platform/msm/ais/cam_smmu/cam_smmu_api.c | 4 +- .../platform/msm/ais/cam_sync/cam_sync.c | 2 +- drivers/media/v4l2-core/v4l2-dev.c | 2 +- drivers/net/phy/phy_device.c | 2 +- drivers/net/wireless/cnss2/main.c | 9 +- drivers/of/platform.c | 7 - drivers/pci/host/pci-msm.c | 18 +- drivers/phy/phy-core.c | 2 +- drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.c | 8 +- drivers/pinctrl/pinctrl-sx150x.c | 2 +- drivers/platform/msm/qcom-geni-se.c | 3 +- drivers/scsi/scsi.c | 2 +- drivers/scsi/sd.c | 8 +- drivers/scsi/ufs/ufs-qcom.c | 10 +- drivers/scsi/ufs/ufshcd.c | 6 +- drivers/soc/qcom/boot_marker.c | 2 +- drivers/soc/qcom/fsa4480-i2c.c | 2 +- .../soc/qcom/msm_bus/msm_bus_fabric_rpmh.c | 3 +- .../soc/qcom/msm_bus/msm_bus_proxy_client.c | 5 +- drivers/soc/qcom/socinfo.c | 4 +- drivers/soc/qcom/subsys-pil-tz.c | 2 +- drivers/thermal/thermal_core.c | 2 +- drivers/usb/pd/policy_engine.c | 2 +- fs/anon_inodes.c | 3 +- fs/eventpoll.c | 2 +- fs/ext4/super.c | 2 +- fs/filesystems.c | 19 -- fs/jbd2/journal.c | 2 +- fs/pipe.c | 2 +- include/asm-generic/vmlinux.lds.h | 29 --- include/linux/cpufeature.h | 13 -- include/linux/device.h | 25 --- include/linux/early_async.h | 63 ------ include/linux/fs.h | 1 - include/linux/i2c.h | 4 - include/linux/init.h | 73 ------- include/linux/module.h | 16 -- include/linux/of_platform.h | 2 - include/linux/platform_device.h | 8 - init/Makefile | 1 - init/do_mounts.c | 145 ++++--------- init/early_userspace.c | 196 ------------------ init/initramfs.c | 2 +- init/main.c | 26 +-- lib/sg_pool.c | 2 +- net/core/dev.c | 2 +- net/unix/af_unix.c | 2 +- security/pfe/pfk.c | 2 +- 136 files changed, 209 insertions(+), 920 deletions(-) delete mode 100644 
Documentation/devicetree/bindings/soc/qcom/qcom,early-devices.txt delete mode 100644 include/linux/early_async.h delete mode 100644 init/early_userspace.c diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 08d453d43456..f095885e5305 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -5067,9 +5067,4 @@ eipv6= [KNL] Sets ipv6 address at boot up for early ethernet. - ermac= [KNL] Sets mac address at boot up for early ethernet. - - early_userspace [KNL] - Enable early userspace feature where we adjust the - normal booting sequence and get rootfs mounted and - other subsystems ready much earlier. + ermac= [KNL] Sets mac address at boot up for early ethernet. \ No newline at end of file diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,early-devices.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,early-devices.txt deleted file mode 100644 index 50e1a8dca74b..000000000000 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,early-devices.txt +++ /dev/null @@ -1,35 +0,0 @@ -* early-devices - -Some initcall stage takes long time to finish and it's better we launch some -of our userspace applications earlier before these initcalls. This way, the -userspace applications we care after system booting can start to work earlier -and provide better user experience. -To achieve this, we need to populate and probe some platform devices earlier -(thus its name early-devices) since these devices are commonly needed by all -other subsystems. Usually this list of devices includes clock, regulator, -bus components, iommu, crypto device, etc. and it's soc related(depends on -how these devices are connected in hardware in specific soc). 
- -Required properties: -- compatible: - Usage: required for early-devices - Value type: - Definition: must be "qcom,early-devices" -- devices: - Usage: required - Value type: - Definition: nodes handle for early devices. - -Example: - early-devices { - compatible = "qcom,early-devices"; - devices = <&ufs_phy_gdsc &clock_rpmh &clock_gcc - &ad_hoc_bus &ufs_ice &ufsphy_mem - &ufshc_mem &apps_rsc &cmd_db - &ldoa10 &ldoc5 &ldoc8 - &smpa4 &tlmm &ldoa5 - &cxlvl &mxlvl &bps_gdsc - &ipe_0_gdsc &ipe_1_gdsc &ife_0_gdsc - &ife_1_gdsc &titan_top_gdsc &mvsc_gdsc - &mvs0_gdsc &mvs1_gdsc>; - }; diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index f0e62bd2dce9..1845a5affb44 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -219,7 +219,6 @@ SECTIONS INIT_DATA INIT_SETUP(16) INIT_CALLS - EARLY_INIT_CALLS CON_INITCALL SECURITY_INITCALL INIT_RAM_FS diff --git a/arch/arm64/boot/dts/qcom/sa8195p-regulator.dtsi b/arch/arm64/boot/dts/qcom/sa8195p-regulator.dtsi index ecd6a1968095..9d3ff0740904 100644 --- a/arch/arm64/boot/dts/qcom/sa8195p-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8195p-regulator.dtsi @@ -44,7 +44,7 @@ }; /* PM8195_1 S2 = VDDCX_MM supply */ - mmcxlvl: rpmh-regulator-mmcxlvl { + rpmh-regulator-mmcxlvl { compatible = "qcom,rpmh-arc-regulator"; mboxes = <&apps_rsc 0>; qcom,resource-name = "mmcx.lvl"; @@ -145,7 +145,7 @@ }; /* PM8195_1 S10 = VDD_MX supply */ - mxlvl: rpmh-regulator-mxlvl { + rpmh-regulator-mxlvl { compatible = "qcom,rpmh-arc-regulator"; mboxes = <&apps_rsc 0>; qcom,resource-name = "mx.lvl"; @@ -213,7 +213,7 @@ }; }; - ldoa5: rpmh-regulator-ldoa5 { + rpmh-regulator-ldoa5 { compatible = "qcom,rpmh-vrm-regulator"; mboxes = <&apps_rsc 0>; qcom,resource-name = "ldoa5"; @@ -252,7 +252,7 @@ }; }; - ldoa10: rpmh-regulator-ldoa10 { + rpmh-regulator-ldoa10 { compatible = "qcom,rpmh-vrm-regulator"; mboxes = <&apps_rsc 0>; qcom,resource-name = "ldoa10"; @@ -459,7 +459,7 @@ }; /* PM8195_2 S10 + S9 + S8 + S7 
+ S6 = VDD_GFX supply */ - gfxlvl: rpmh-regulator-gfxlvl { + rpmh-regulator-gfxlvl { compatible = "qcom,rpmh-arc-regulator"; mboxes = <&apps_rsc 0>; qcom,resource-name = "gfx.lvl"; @@ -495,7 +495,7 @@ }; }; - ldoc5: rpmh-regulator-ldoc5 { + rpmh-regulator-ldoc5 { compatible = "qcom,rpmh-vrm-regulator"; mboxes = <&apps_rsc 0>; qcom,resource-name = "ldoc5"; @@ -592,7 +592,7 @@ /* PM8195_3 S3 + S2 + S1 = VDD_CX supply */ - cxlvl: rpmh-regulator-cxlvl { + rpmh-regulator-cxlvl { compatible = "qcom,rpmh-arc-regulator"; mboxes = <&apps_rsc 0>; qcom,resource-name = "cx.lvl"; @@ -708,7 +708,7 @@ }; /* pm8195_3 L8 - LPI_CX supply */ - lcxlvl: rpmh-regulator-lcxlvl { + rpmh-regulator-lcxlvl { compatible = "qcom,rpmh-arc-regulator"; mboxes = <&apps_rsc 0>; qcom,resource-name = "lcx.lvl"; diff --git a/arch/arm64/boot/dts/qcom/sdmshrike.dtsi b/arch/arm64/boot/dts/qcom/sdmshrike.dtsi index aa4f216a392f..2efff7a41249 100644 --- a/arch/arm64/boot/dts/qcom/sdmshrike.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmshrike.dtsi @@ -736,12 +736,6 @@ }; -&soc { - early_devices: early-devices { - compatible = "qcom,early-devices"; - }; -}; - #include "sdmshrike-gdsc.dtsi" #include "sdmshrike-sde-pll.dtsi" #include "sdmshrike-sde.dtsi" diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c index da4844640a07..9141184d89e4 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ b/arch/arm64/crypto/sha2-ce-glue.c @@ -153,6 +153,5 @@ static void __exit sha2_ce_mod_fini(void) crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); } -early_module_cpu_feature_match(SHA2, sha2_ce_mod_init, EARLY_SUBSYS_1, -EARLY_INIT_LEVEL4); +module_cpu_feature_match(SHA2, sha2_ce_mod_init); module_exit(sha2_ce_mod_fini); diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index ca26bf7158e9..0b6d21167a02 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -172,7 +172,6 @@ SECTIONS INIT_DATA INIT_SETUP(16) INIT_CALLS - EARLY_INIT_CALLS 
CON_INITCALL SECURITY_INITCALL INIT_RAM_FS diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S index aa244ad2a6a7..b30d73ca29d0 100644 --- a/arch/um/include/asm/common.lds.S +++ b/arch/um/include/asm/common.lds.S @@ -49,10 +49,6 @@ INIT_CALLS } - .early_initcall.init : { - EARLY_INIT_CALLS - } - .con_initcall.init : { CON_INITCALL } diff --git a/block/bio.c b/block/bio.c index 3ccd0e8d1918..ce70677b9b5e 100644 --- a/block/bio.c +++ b/block/bio.c @@ -2223,4 +2223,4 @@ static int __init init_bio(void) return 0; } -early_subsys_initcall(init_bio, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL8); +subsys_initcall(init_bio); diff --git a/block/blk-ioc.c b/block/blk-ioc.c index 20d67f42a6d8..e56a480b6f92 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -445,4 +445,4 @@ static int __init blk_ioc_init(void) sizeof(struct io_context), 0, SLAB_PANIC, NULL); return 0; } -early_subsys_initcall(blk_ioc_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL8); +subsys_initcall(blk_ioc_init); diff --git a/block/blk-settings.c b/block/blk-settings.c index 554a69c10390..e0a744921ed3 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -915,5 +915,4 @@ static int __init blk_settings_init(void) blk_max_pfn = max_pfn - 1; return 0; } -early_subsys_initcall(blk_settings_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL8); +subsys_initcall(blk_settings_init); diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 13a9866434f4..01e2b353a2b9 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -178,5 +178,4 @@ static __init int blk_softirq_init(void) blk_softirq_cpu_dead); return 0; } -early_subsys_initcall(blk_softirq_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL8); +subsys_initcall(blk_softirq_init); diff --git a/block/bsg.c b/block/bsg.c index 715752a91979..ee1335c68de7 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -1089,4 +1089,4 @@ MODULE_AUTHOR("Jens Axboe"); MODULE_DESCRIPTION(BSG_DESCRIPTION); MODULE_LICENSE("GPL"); 
-early_device_initcall(bsg_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL8); +device_initcall(bsg_init); diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 009ef1d2d9c9..8df0fecac3a8 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -4970,7 +4970,7 @@ static void __exit cfq_exit(void) kmem_cache_destroy(cfq_pool); } -early_module_init(cfq_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL8); +module_init(cfq_init); module_exit(cfq_exit); MODULE_AUTHOR("Jens Axboe"); diff --git a/block/genhd.c b/block/genhd.c index de13dcf22636..7568c16b83d4 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1016,8 +1016,7 @@ static int __init genhd_device_init(void) return 0; } -early_subsys_initcall(genhd_device_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL8); +subsys_initcall(genhd_device_init); static ssize_t disk_range_show(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c index 744f496bf97c..372320399622 100644 --- a/crypto/crc32c_generic.c +++ b/crypto/crc32c_generic.c @@ -165,7 +165,7 @@ static void __exit crc32c_mod_fini(void) crypto_unregister_shash(&alg); } -early_module_init(crc32c_mod_init, EARLY_SUBSYS_1, EARLY_INIT_LEVEL4); +module_init(crc32c_mod_init); module_exit(crc32c_mod_fini); MODULE_AUTHOR("Clay Haapala "); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 8bb1571f97b2..e80fd3da4e87 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -261,7 +261,6 @@ arch_initcall_sync(deferred_probe_initcall); subsys_initcall_sync(deferred_probe_initcall); fs_initcall_sync(deferred_probe_initcall); device_initcall_sync(deferred_probe_initcall); -early_init(deferred_probe_initcall, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL6); static int deferred_probe_enable_fn(void) { @@ -840,15 +839,13 @@ static int __driver_attach(struct device *dev, void *data) return ret; } /* ret > 0 means positive match */ - if (!(is_early_userspace && (dev->bus == &platform_bus_type)) - && 
lock_parent(dev)) /* Needed for USB */ + if (lock_parent(dev)) /* Needed for USB */ device_lock(dev->parent); device_lock(dev); if (!dev->driver) driver_probe_device(drv, dev); device_unlock(dev); - if (!(is_early_userspace && (dev->bus == &platform_bus_type)) - && lock_parent(dev)) + if (lock_parent(dev)) device_unlock(dev->parent); return 0; diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index cc7106631298..364181591f77 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -1852,6 +1852,5 @@ static void __exit firmware_class_exit(void) #endif } -early_fs_initcall(firmware_class_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL8); +fs_initcall(firmware_class_init); module_exit(firmware_class_exit); diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 8f5fd8f455d0..125404773646 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -959,4 +959,4 @@ static int __init chr_dev_init(void) return tty_init(); } -early_fs_initcall(chr_dev_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL8); +fs_initcall(chr_dev_init); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 397584ce1508..7ee62043cab9 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1,7 +1,7 @@ /* * Copyright (C) 2010-2011 Canonical Ltd * Copyright (C) 2011-2012 Linaro Ltd - * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -27,7 +27,6 @@ #include #include #include -#include #include "clk.h" @@ -3460,22 +3459,6 @@ void clock_debug_print_enabled(bool print_parent) } EXPORT_SYMBOL_GPL(clock_debug_print_enabled); -static DECLARE_COMPLETION(clk_debug_init_start); - -static int __init clk_debug_init_sync_start(void) -{ - complete(&clk_debug_init_start); - return 0; -} -late_initcall(clk_debug_init_sync_start); - -static int __init clk_debug_init_wait_start(void) -{ - wait_for_completion(&clk_debug_init_start); - return 0; -} -early_init(clk_debug_init_wait_start, EARLY_SUBSYS_6, EARLY_INIT_LEVEL3); - /** * clk_debug_init - lazily populate the debugfs clk directory * @@ -3539,32 +3522,7 @@ static int __init clk_debug_init(void) return 0; } -early_late_initcall(clk_debug_init, EARLY_SUBSYS_6, EARLY_INIT_LEVEL3); - -static DECLARE_COMPLETION(clk_debug_init_end); -static bool is_clk_debug_sync; - -static int __init clk_debug_init_sync_end(void) -{ - complete(&clk_debug_init_end); - return 0; -} -early_init(clk_debug_init_sync_end, EARLY_SUBSYS_6, EARLY_INIT_LEVEL3); - -static int __init clk_debug_sync(char *p) -{ - is_clk_debug_sync = true; - return 0; -} -early_param("clk_debug_sync", clk_debug_sync); - -static int __init clk_debug_init_wait_end(void) -{ - if (is_early_userspace && is_clk_debug_sync) - wait_for_completion(&clk_debug_init_end); - return 0; -} -late_initcall(clk_debug_init_wait_end); +late_initcall(clk_debug_init); #else static inline int clk_debug_register(struct clk_core *core) { return 0; } static inline void clk_debug_reparent(struct clk_core *core, diff --git a/drivers/clk/qcom/camcc-sdmshrike.c b/drivers/clk/qcom/camcc-sdmshrike.c index 3a2248a36a10..55c9c1b9576c 100644 --- a/drivers/clk/qcom/camcc-sdmshrike.c +++ b/drivers/clk/qcom/camcc-sdmshrike.c @@ -3252,8 +3252,7 @@ static int __init cam_cc_sdmshrike_init(void) { return 
platform_driver_register(&cam_cc_sdmshrike_driver); } -early_subsys_initcall(cam_cc_sdmshrike_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL5); +subsys_initcall(cam_cc_sdmshrike_init); static void __exit cam_cc_sdmshrike_exit(void) { diff --git a/drivers/clk/qcom/camcc-sm8150.c b/drivers/clk/qcom/camcc-sm8150.c index 861e6821f097..b44c1fe938a2 100644 --- a/drivers/clk/qcom/camcc-sm8150.c +++ b/drivers/clk/qcom/camcc-sm8150.c @@ -2578,8 +2578,7 @@ static int __init cam_cc_sm8150_init(void) { return platform_driver_register(&cam_cc_sm8150_driver); } -early_subsys_initcall(cam_cc_sm8150_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL5); +subsys_initcall(cam_cc_sm8150_init); static void __exit cam_cc_sm8150_exit(void) { diff --git a/drivers/clk/qcom/clk-aop-qmp.c b/drivers/clk/qcom/clk-aop-qmp.c index 74d72eb34393..d8b54b841d38 100644 --- a/drivers/clk/qcom/clk-aop-qmp.c +++ b/drivers/clk/qcom/clk-aop-qmp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -351,5 +351,4 @@ static int __init aop_qmp_clk_init(void) { return platform_driver_register(&aop_qmp_clk_driver); } -early_subsys_initcall(aop_qmp_clk_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL5); +subsys_initcall(aop_qmp_clk_init); diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c index 4d87ff7d5d3a..a397013479f1 100644 --- a/drivers/clk/qcom/clk-cpu-osm.c +++ b/drivers/clk/qcom/clk-cpu-osm.c @@ -1289,8 +1289,7 @@ static int __init clk_cpu_osm_init(void) { return platform_driver_register(&clk_cpu_osm_driver); } -early_subsys_initcall(clk_cpu_osm_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL5); +subsys_initcall(clk_cpu_osm_init); static void __exit clk_cpu_osm_exit(void) { diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index e786ea6c78a3..b7e69b449d80 100644 --- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c @@ -673,7 +673,7 @@ static int __init clk_rpmh_init(void) { return platform_driver_register(&clk_rpmh_driver); } -early_subsys_initcall(clk_rpmh_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL0); +subsys_initcall(clk_rpmh_init); static void __exit clk_rpmh_exit(void) { diff --git a/drivers/clk/qcom/dispcc-sm8150.c b/drivers/clk/qcom/dispcc-sm8150.c index 6faada4df9e8..5f8d9fca53b9 100644 --- a/drivers/clk/qcom/dispcc-sm8150.c +++ b/drivers/clk/qcom/dispcc-sm8150.c @@ -1793,8 +1793,7 @@ static int __init disp_cc_sm8150_init(void) { return platform_driver_register(&disp_cc_sm8150_driver); } -early_subsys_initcall(disp_cc_sm8150_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL5); +subsys_initcall(disp_cc_sm8150_init); static void __exit disp_cc_sm8150_exit(void) { diff --git a/drivers/clk/qcom/gcc-sdmshrike.c b/drivers/clk/qcom/gcc-sdmshrike.c index e01add9e9142..ce6f1575a30b 100644 --- a/drivers/clk/qcom/gcc-sdmshrike.c +++ 
b/drivers/clk/qcom/gcc-sdmshrike.c @@ -5275,8 +5275,7 @@ static int __init gcc_sdmshrike_init(void) { return platform_driver_register(&gcc_sdmshrike_driver); } -early_subsys_initcall(gcc_sdmshrike_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL0); +subsys_initcall(gcc_sdmshrike_init); static void __exit gcc_sdmshrike_exit(void) { diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c index b2a6ed2491ff..eb19336e9007 100644 --- a/drivers/clk/qcom/gcc-sm8150.c +++ b/drivers/clk/qcom/gcc-sm8150.c @@ -4354,8 +4354,7 @@ static int __init gcc_sm8150_init(void) { return platform_driver_register(&gcc_sm8150_driver); } -early_subsys_initcall(gcc_sm8150_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL0); +subsys_initcall(gcc_sm8150_init); static void __exit gcc_sm8150_exit(void) { diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c index 1ff0b7a2c292..f689661cfc3d 100644 --- a/drivers/clk/qcom/gdsc-regulator.c +++ b/drivers/clk/qcom/gdsc-regulator.c @@ -1072,7 +1072,7 @@ static int __init gdsc_init(void) { return platform_driver_register(&gdsc_driver); } -early_subsys_initcall(gdsc_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL0); +subsys_initcall(gdsc_init); static void __exit gdsc_exit(void) { diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c index d108d640a2d6..7116505d6e20 100644 --- a/drivers/clk/qcom/gpucc-sm8150.c +++ b/drivers/clk/qcom/gpucc-sm8150.c @@ -540,8 +540,7 @@ static int __init gpu_cc_sm8150_init(void) { return platform_driver_register(&gpu_cc_sm8150_driver); } -early_subsys_initcall(gpu_cc_sm8150_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL5); +subsys_initcall(gpu_cc_sm8150_init); static void __exit gpu_cc_sm8150_exit(void) { diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c index 05a289c070b1..0708d1bb0898 100644 --- a/drivers/clk/qcom/mdss/mdss-pll.c +++ b/drivers/clk/qcom/mdss/mdss-pll.c @@ -465,7 +465,7 @@ static int __init 
mdss_pll_driver_init(void) return rc; } -early_fs_initcall(mdss_pll_driver_init, EARLY_SUBSYS_2, EARLY_INIT_LEVEL2); +fs_initcall(mdss_pll_driver_init); static void __exit mdss_pll_driver_deinit(void) { diff --git a/drivers/clk/qcom/npucc-sm8150.c b/drivers/clk/qcom/npucc-sm8150.c index 6e4189c58ff6..6c947f04c669 100644 --- a/drivers/clk/qcom/npucc-sm8150.c +++ b/drivers/clk/qcom/npucc-sm8150.c @@ -822,8 +822,7 @@ static int __init npu_cc_sm8150_init(void) { return platform_driver_register(&npu_cc_sm8150_driver); } -early_subsys_initcall(npu_cc_sm8150_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL5); +subsys_initcall(npu_cc_sm8150_init); static void __exit npu_cc_sm8150_exit(void) { diff --git a/drivers/clk/qcom/scc-sm8150.c b/drivers/clk/qcom/scc-sm8150.c index 09336d64ef62..bc7ea09c07db 100644 --- a/drivers/clk/qcom/scc-sm8150.c +++ b/drivers/clk/qcom/scc-sm8150.c @@ -732,8 +732,7 @@ static int __init scc_sm8150_init(void) { return platform_driver_register(&scc_sm8150_driver); } -early_subsys_initcall(scc_sm8150_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL5); +subsys_initcall(scc_sm8150_init); static void __exit scc_sm8150_exit(void) { diff --git a/drivers/clk/qcom/videocc-sm8150.c b/drivers/clk/qcom/videocc-sm8150.c index a3f09b302381..19564ba8cbcc 100644 --- a/drivers/clk/qcom/videocc-sm8150.c +++ b/drivers/clk/qcom/videocc-sm8150.c @@ -442,8 +442,7 @@ static int __init video_cc_sm8150_init(void) { return platform_driver_register(&video_cc_sm8150_driver); } -early_subsys_initcall(video_cc_sm8150_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL5); +subsys_initcall(video_cc_sm8150_init); static void __exit video_cc_sm8150_exit(void) { diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c index 39249ff72407..fd34c0bc39f5 100644 --- a/drivers/crypto/msm/ice.c +++ b/drivers/crypto/msm/ice.c @@ -1834,8 +1834,7 @@ static struct platform_driver qcom_ice_driver = { .of_match_table = qcom_ice_match, }, }; -early_module_platform_driver(qcom_ice_driver, 
EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL8); +module_platform_driver(qcom_ice_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("QTI Inline Crypto Engine driver"); diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 56d1d9de22ee..e66ea8953792 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -1312,7 +1312,7 @@ static int __init devfreq_init(void) return 0; } -early_subsys_initcall(devfreq_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL7); +subsys_initcall(devfreq_init); /* * The following are helper functions for devfreq user device drivers with diff --git a/drivers/devfreq/devfreq_devbw.c b/drivers/devfreq/devfreq_devbw.c index d0dfd4758fc7..a6b8b9fe1cc7 100644 --- a/drivers/devfreq/devfreq_devbw.c +++ b/drivers/devfreq/devfreq_devbw.c @@ -288,7 +288,6 @@ static struct platform_driver devbw_driver = { }, }; -early_module_platform_driver(devbw_driver, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL7); +module_platform_driver(devbw_driver); MODULE_DESCRIPTION("Device DDR bandwidth voting driver MSM SoCs"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/devfreq/governor_bw_vbif.c b/drivers/devfreq/governor_bw_vbif.c index f360a530d389..a19503e7662d 100644 --- a/drivers/devfreq/governor_bw_vbif.c +++ b/drivers/devfreq/governor_bw_vbif.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2017, 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -122,8 +122,7 @@ static int __init devfreq_vbif_init(void) { return devfreq_add_governor(&devfreq_vbif); } -early_subsys_initcall(devfreq_vbif_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL7); +subsys_initcall(devfreq_vbif_init); static void __exit devfreq_vbif_exit(void) { diff --git a/drivers/devfreq/governor_gpubw_mon.c b/drivers/devfreq/governor_gpubw_mon.c index 5b80dc17cf08..e17cf0de19ef 100644 --- a/drivers/devfreq/governor_gpubw_mon.c +++ b/drivers/devfreq/governor_gpubw_mon.c @@ -257,8 +257,7 @@ static int __init devfreq_gpubw_init(void) { return devfreq_add_governor(&devfreq_gpubw); } -early_subsys_initcall(devfreq_gpubw_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL7); +subsys_initcall(devfreq_gpubw_init); static void __exit devfreq_gpubw_exit(void) { diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c index 1a87a3e68429..551823bf5d12 100644 --- a/drivers/devfreq/governor_msm_adreno_tz.c +++ b/drivers/devfreq/governor_msm_adreno_tz.c @@ -565,8 +565,7 @@ static int __init msm_adreno_tz_init(void) { return devfreq_add_governor(&msm_adreno_tz); } -early_subsys_initcall(msm_adreno_tz_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL7); +subsys_initcall(msm_adreno_tz_init); static void __exit msm_adreno_tz_exit(void) { diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c index 5ec3f5271ef6..c72f942f30a8 100644 --- a/drivers/devfreq/governor_performance.c +++ b/drivers/devfreq/governor_performance.c @@ -51,8 +51,7 @@ static int __init devfreq_performance_init(void) { return devfreq_add_governor(&devfreq_performance); } -early_subsys_initcall(devfreq_performance_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL7); +subsys_initcall(devfreq_performance_init); static void __exit devfreq_performance_exit(void) { diff --git 
a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c index aef154ebc856..0c6bed567e6d 100644 --- a/drivers/devfreq/governor_powersave.c +++ b/drivers/devfreq/governor_powersave.c @@ -48,8 +48,7 @@ static int __init devfreq_powersave_init(void) { return devfreq_add_governor(&devfreq_powersave); } -early_subsys_initcall(devfreq_powersave_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL7); +subsys_initcall(devfreq_powersave_init); static void __exit devfreq_powersave_exit(void) { diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c index 79a9fa4ae535..2cbd87b7fbdb 100644 --- a/drivers/devfreq/governor_simpleondemand.c +++ b/drivers/devfreq/governor_simpleondemand.c @@ -148,8 +148,7 @@ static int __init devfreq_simple_ondemand_init(void) { return devfreq_add_governor(&devfreq_simple_ondemand); } -early_subsys_initcall(devfreq_simple_ondemand_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL7); +subsys_initcall(devfreq_simple_ondemand_init); static void __exit devfreq_simple_ondemand_exit(void) { diff --git a/drivers/gpu/drm/bridge/analogix-anx7625.c b/drivers/gpu/drm/bridge/analogix-anx7625.c index 49da83fd3efe..8a45ad58d3f9 100644 --- a/drivers/gpu/drm/bridge/analogix-anx7625.c +++ b/drivers/gpu/drm/bridge/analogix-anx7625.c @@ -1595,7 +1595,6 @@ static struct i2c_driver anx7625_driver = { .id_table = anx7625_id, }; -early_module_i2c_driver(anx7625_driver, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL3); +module_i2c_driver(anx7625_driver); MODULE_DESCRIPTION("anx7625 driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 33fbe536a398..11901e686d18 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -1004,5 +1004,5 @@ error: return ret; } -early_module_init(drm_core_init, EARLY_SUBSYS_2, EARLY_INIT_LEVEL4); +module_init(drm_core_init); module_exit(drm_core_exit); diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 
d5ee10482a5f..54e8fb6200e7 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -4303,7 +4303,7 @@ static void __exit kgsl_3d_exit(void) platform_driver_unregister(&kgsl_bus_platform_driver); } -early_module_init(kgsl_3d_init, EARLY_SUBSYS_2, EARLY_INIT_LEVEL4); +module_init(kgsl_3d_init); module_exit(kgsl_3d_exit); MODULE_DESCRIPTION("3D Graphics driver"); diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index c3d1cedea14f..a4dca69e8083 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -5219,7 +5219,7 @@ err: return result; } -early_module_init(kgsl_core_init, EARLY_SUBSYS_2, EARLY_INIT_LEVEL4); +module_init(kgsl_core_init); module_exit(kgsl_core_exit); MODULE_DESCRIPTION("MSM GPU driver"); diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 738e560f09a4..1984b22c1dfa 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include @@ -1297,10 +1296,7 @@ static struct platform_driver geni_i2c_driver = { }, }; -early_module_platform_driver_async(geni_i2c_driver, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL4); - -early_init(_geni_i2c_driver_init_sync, EARLY_SUBSYS_2, EARLY_INIT_LEVEL0); +module_platform_driver(geni_i2c_driver); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:i2c_geni"); diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index 60dc37642801..7b992db38021 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c @@ -472,8 +472,7 @@ static struct i2c_driver pca954x_driver = { .id_table = pca954x_id, }; -early_module_i2c_driver(pca954x_driver, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL3); +module_i2c_driver(pca954x_driver); MODULE_AUTHOR("Rodolfo Giometti "); MODULE_DESCRIPTION("PCA954x I2C mux/switch driver"); diff --git a/drivers/iio/imu/st_asm330lhh/st_asm330lhh_i2c.c 
b/drivers/iio/imu/st_asm330lhh/st_asm330lhh_i2c.c index a950147633b3..eb2bcc6ce739 100644 --- a/drivers/iio/imu/st_asm330lhh/st_asm330lhh_i2c.c +++ b/drivers/iio/imu/st_asm330lhh/st_asm330lhh_i2c.c @@ -106,7 +106,7 @@ static struct i2c_driver st_asm330lhh_driver = { .probe = st_asm330lhh_i2c_probe, .id_table = st_asm330lhh_i2c_id_table, }; -early_module_i2c_driver(st_asm330lhh_driver, EARLY_SUBSYS_6, EARLY_INIT_LEVEL2); +module_i2c_driver(st_asm330lhh_driver); MODULE_AUTHOR("Lorenzo Bianconi "); MODULE_DESCRIPTION("STMicroelectronics st_asm330lhh i2c driver"); diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index d8de4b409a88..eae4c38d6533 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -5414,7 +5414,7 @@ static void __exit arm_smmu_exit(void) return platform_driver_unregister(&arm_smmu_driver); } -early_subsys_initcall(arm_smmu_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL2); +subsys_initcall(arm_smmu_init); module_exit(arm_smmu_exit); static int __init arm_smmu_of_init(struct device_node *np) diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index f245fc6759a0..b2088e1f3e45 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -2033,7 +2033,7 @@ static void __exit dm_bufio_exit(void) BUG_ON(bug); } -early_module_init(dm_bufio_init, EARLY_SUBSYS_1, EARLY_INIT_LEVEL4); +module_init(dm_bufio_init) module_exit(dm_bufio_exit) module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR); diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index aa91219324cb..23399c7131ee 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -1262,7 +1262,7 @@ static void __exit dm_verity_exit(void) dm_unregister_target(&verity_target); } -early_module_init(dm_verity_init, EARLY_SUBSYS_1, EARLY_INIT_LEVEL4); +module_init(dm_verity_init); module_exit(dm_verity_exit); MODULE_AUTHOR("Mikulas Patocka "); diff --git a/drivers/md/dm.c b/drivers/md/dm.c 
index ca416e7f74e8..6e741f19a732 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -3013,7 +3013,7 @@ static const struct dax_operations dm_dax_ops = { /* * module hooks */ -early_module_init(dm_init, EARLY_SUBSYS_1, EARLY_INIT_LEVEL4); +module_init(dm_init); module_exit(dm_exit); module_param(major, uint, 0); diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c index cf0464fd9f19..423248f577b6 100644 --- a/drivers/media/media-devnode.c +++ b/drivers/media/media-devnode.c @@ -326,7 +326,7 @@ static void __exit media_devnode_exit(void) unregister_chrdev_region(media_dev_t, MEDIA_NUM_DEVICES); } -early_subsys_initcall(media_devnode_init, EARLY_SUBSYS_3, EARLY_INIT_LEVEL1); +subsys_initcall(media_devnode_init); module_exit(media_devnode_exit) MODULE_AUTHOR("Laurent Pinchart "); diff --git a/drivers/media/platform/msm/ais/ais_isp/ais_ife_dev.c b/drivers/media/platform/msm/ais/ais_isp/ais_ife_dev.c index 7fa6050b0328..89e378afcebe 100644 --- a/drivers/media/platform/msm/ais/ais_isp/ais_ife_dev.c +++ b/drivers/media/platform/msm/ais/ais_isp/ais_ife_dev.c @@ -705,7 +705,7 @@ static void __exit ais_ife_dev_exit_module(void) platform_driver_unregister(&ife_driver); } -early_module_init(ais_ife_dev_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(ais_ife_dev_init_module); module_exit(ais_ife_dev_exit_module); MODULE_DESCRIPTION("AIS IFE driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/ais_isp/csid_hw/ais_ife_csid17x.c b/drivers/media/platform/msm/ais/ais_isp/csid_hw/ais_ife_csid17x.c index f7debf94365d..e5005bef0b7b 100644 --- a/drivers/media/platform/msm/ais/ais_isp/csid_hw/ais_ife_csid17x.c +++ b/drivers/media/platform/msm/ais/ais_isp/csid_hw/ais_ife_csid17x.c @@ -76,8 +76,7 @@ static void __exit ais_ife_csid17x_exit_module(void) platform_driver_unregister(&ais_ife_csid17x_driver); } -early_module_init(ais_ife_csid17x_init_module, EARLY_SUBSYS_3, -EARLY_INIT_LEVEL2); 
+module_init(ais_ife_csid17x_init_module); module_exit(ais_ife_csid17x_exit_module); MODULE_DESCRIPTION("AIS IFE_CSID17X driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/ais_isp/csid_hw/ais_ife_csid_lite17x.c b/drivers/media/platform/msm/ais/ais_isp/csid_hw/ais_ife_csid_lite17x.c index 70bcac88b6af..90530829b394 100644 --- a/drivers/media/platform/msm/ais/ais_isp/csid_hw/ais_ife_csid_lite17x.c +++ b/drivers/media/platform/msm/ais/ais_isp/csid_hw/ais_ife_csid_lite17x.c @@ -55,8 +55,7 @@ static void __exit ais_ife_csid_lite_exit_module(void) platform_driver_unregister(&ais_ife_csid_lite_driver); } -early_module_init(ais_ife_csid_lite_init_module, EARLY_SUBSYS_3, -EARLY_INIT_LEVEL2); +module_init(ais_ife_csid_lite_init_module); module_exit(ais_ife_csid_lite_exit_module); MODULE_DESCRIPTION("CAM IFE_CSID_LITE driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/ais_isp/vfe_hw/vfe17x/ais_vfe17x.c b/drivers/media/platform/msm/ais/ais_isp/vfe_hw/vfe17x/ais_vfe17x.c index 12fca4c48c38..7e75089f7724 100644 --- a/drivers/media/platform/msm/ais/ais_isp/vfe_hw/vfe17x/ais_vfe17x.c +++ b/drivers/media/platform/msm/ais/ais_isp/vfe_hw/vfe17x/ais_vfe17x.c @@ -65,7 +65,7 @@ static void __exit ais_vfe_exit_module(void) platform_driver_unregister(&ais_vfe_driver); } -early_module_init(ais_vfe_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(ais_vfe_init_module); module_exit(ais_vfe_exit_module); MODULE_DESCRIPTION("AIS VFE17X driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/ais/cam_cdm/cam_cdm_hw_core.c index 0f1cba8e0cb2..5999ef829d0b 100644 --- a/drivers/media/platform/msm/ais/cam_cdm/cam_cdm_hw_core.c +++ b/drivers/media/platform/msm/ais/cam_cdm/cam_cdm_hw_core.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1182,7 +1182,7 @@ static void __exit cam_hw_cdm_exit_module(void) platform_driver_unregister(&cam_hw_cdm_driver); } -early_module_init(cam_hw_cdm_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_hw_cdm_init_module); module_exit(cam_hw_cdm_exit_module); MODULE_DESCRIPTION("MSM Camera HW CDM driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_cdm/cam_cdm_intf.c b/drivers/media/platform/msm/ais/cam_cdm/cam_cdm_intf.c index 004e1418f9c8..871276170ac4 100644 --- a/drivers/media/platform/msm/ais/cam_cdm/cam_cdm_intf.c +++ b/drivers/media/platform/msm/ais/cam_cdm/cam_cdm_intf.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -576,7 +576,7 @@ static void __exit cam_cdm_intf_exit_module(void) platform_driver_unregister(&cam_cdm_intf_driver); } -early_module_init(cam_cdm_intf_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_cdm_intf_init_module); module_exit(cam_cdm_intf_exit_module); MODULE_DESCRIPTION("MSM Camera CDM Intf driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/ais/cam_cpas/cam_cpas_intf.c index 74af0f819c53..a9f1e4f8364e 100644 --- a/drivers/media/platform/msm/ais/cam_cpas/cam_cpas_intf.c +++ b/drivers/media/platform/msm/ais/cam_cpas/cam_cpas_intf.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -656,7 +656,7 @@ static void __exit cam_cpas_dev_exit_module(void) platform_driver_unregister(&cam_cpas_driver); } -early_module_init(cam_cpas_dev_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_cpas_dev_init_module); module_exit(cam_cpas_dev_exit_module); MODULE_DESCRIPTION("MSM CPAS driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_fd/cam_fd_dev.c b/drivers/media/platform/msm/ais/cam_fd/cam_fd_dev.c index c0446884e4ad..d5068ca26971 100644 --- a/drivers/media/platform/msm/ais/cam_fd/cam_fd_dev.c +++ b/drivers/media/platform/msm/ais/cam_fd/cam_fd_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -208,7 +208,7 @@ static void __exit cam_fd_dev_exit_module(void) platform_driver_unregister(&cam_fd_driver); } -early_module_init(cam_fd_dev_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_fd_dev_init_module); module_exit(cam_fd_dev_exit_module); MODULE_DESCRIPTION("MSM FD driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c b/drivers/media/platform/msm/ais/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c index f2bd15f31d8a..083041c21dff 100644 --- a/drivers/media/platform/msm/ais/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c +++ b/drivers/media/platform/msm/ais/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -231,7 +231,7 @@ static void __exit cam_fd_hw_exit_module(void) platform_driver_unregister(&cam_fd_hw_driver); } -early_module_init(cam_fd_hw_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_fd_hw_init_module); module_exit(cam_fd_hw_exit_module); MODULE_DESCRIPTION("CAM FD HW driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_hyp_intf/cam_hyp_intf.c b/drivers/media/platform/msm/ais/cam_hyp_intf/cam_hyp_intf.c index a4f6fea07384..193a5f060234 100644 --- a/drivers/media/platform/msm/ais/cam_hyp_intf/cam_hyp_intf.c +++ b/drivers/media/platform/msm/ais/cam_hyp_intf/cam_hyp_intf.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -518,7 +518,7 @@ static void __exit cam_hyp_intf_exit_module(void) platform_driver_unregister(&cam_hyp_intf_driver); } -early_module_init(cam_hyp_intf_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_hyp_intf_init_module); module_exit(cam_hyp_intf_exit_module); MODULE_DESCRIPTION("MSM Camera Hypervisor Interface"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_icp/cam_icp_subdev.c b/drivers/media/platform/msm/ais/cam_icp/cam_icp_subdev.c index 6e8be4d8e031..699ad5f2a0b4 100644 --- a/drivers/media/platform/msm/ais/cam_icp/cam_icp_subdev.c +++ b/drivers/media/platform/msm/ais/cam_icp/cam_icp_subdev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -276,7 +276,7 @@ static void __exit cam_icp_exit_module(void) { platform_driver_unregister(&cam_icp_driver); } -early_module_init(cam_icp_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_icp_init_module); module_exit(cam_icp_exit_module); MODULE_DESCRIPTION("MSM ICP driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_icp/icp_hw/a5_hw/a5_dev.c b/drivers/media/platform/msm/ais/cam_icp/icp_hw/a5_hw/a5_dev.c index 7769f5806dbf..3b652e72466e 100644 --- a/drivers/media/platform/msm/ais/cam_icp/icp_hw/a5_hw/a5_dev.c +++ b/drivers/media/platform/msm/ais/cam_icp/icp_hw/a5_hw/a5_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -229,7 +229,7 @@ static void __exit cam_a5_exit_module(void) platform_driver_unregister(&cam_a5_driver); } -early_module_init(cam_a5_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_a5_init_module); module_exit(cam_a5_exit_module); MODULE_DESCRIPTION("CAM A5 driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_icp/icp_hw/bps_hw/bps_dev.c b/drivers/media/platform/msm/ais/cam_icp/icp_hw/bps_hw/bps_dev.c index 1bbac1328598..56abb4b8e6e9 100644 --- a/drivers/media/platform/msm/ais/cam_icp/icp_hw/bps_hw/bps_dev.c +++ b/drivers/media/platform/msm/ais/cam_icp/icp_hw/bps_hw/bps_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -208,7 +208,7 @@ static void __exit cam_bps_exit_module(void) platform_driver_unregister(&cam_bps_driver); } -early_module_init(cam_bps_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_bps_init_module); module_exit(cam_bps_exit_module); MODULE_DESCRIPTION("CAM BPS driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_icp/icp_hw/ipe_hw/ipe_dev.c b/drivers/media/platform/msm/ais/cam_icp/icp_hw/ipe_hw/ipe_dev.c index 1d10693e7b90..a01d114bae7b 100644 --- a/drivers/media/platform/msm/ais/cam_icp/icp_hw/ipe_hw/ipe_dev.c +++ b/drivers/media/platform/msm/ais/cam_icp/icp_hw/ipe_hw/ipe_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -200,7 +200,7 @@ static void __exit cam_ipe_exit_module(void) platform_driver_unregister(&cam_ipe_driver); } -early_module_init(cam_ipe_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_ipe_init_module); module_exit(cam_ipe_exit_module); MODULE_DESCRIPTION("CAM IPE driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_isp/cam_isp_dev.c b/drivers/media/platform/msm/ais/cam_isp/cam_isp_dev.c index bae3f80b7789..d8b7a7b5e73e 100644 --- a/drivers/media/platform/msm/ais/cam_isp/cam_isp_dev.c +++ b/drivers/media/platform/msm/ais/cam_isp/cam_isp_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -199,7 +199,7 @@ static void __exit cam_isp_dev_exit_module(void) platform_driver_unregister(&isp_driver); } -early_module_init(cam_isp_dev_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_isp_dev_init_module); module_exit(cam_isp_dev_exit_module); MODULE_DESCRIPTION("MSM ISP driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_csid_ppi170.c b/drivers/media/platform/msm/ais/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_csid_ppi170.c index 02ef549ebe75..2051292be5be 100644 --- a/drivers/media/platform/msm/ais/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_csid_ppi170.c +++ b/drivers/media/platform/msm/ais/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_csid_ppi170.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -53,7 +53,6 @@ static void __exit cam_csid_ppi170_exit_module(void) platform_driver_unregister(&cam_csid_ppi170_driver); } -early_module_init(cam_csid_ppi170_init_module, EARLY_SUBSYS_3, -EARLY_INIT_LEVEL2); +module_init(cam_csid_ppi170_init_module); MODULE_DESCRIPTION("CAM CSID_PPI170 driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid17x.c b/drivers/media/platform/msm/ais/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid17x.c index 70a6563475d1..80701bf6f8d0 100644 --- a/drivers/media/platform/msm/ais/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid17x.c +++ b/drivers/media/platform/msm/ais/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid17x.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. 
All rights reserved. +/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -76,8 +76,7 @@ static void __exit cam_ife_csid17x_exit_module(void) platform_driver_unregister(&cam_ife_csid17x_driver); } -early_module_init(cam_ife_csid17x_init_module, EARLY_SUBSYS_3, -EARLY_INIT_LEVEL2); +module_init(cam_ife_csid17x_init_module); module_exit(cam_ife_csid17x_exit_module); MODULE_DESCRIPTION("CAM IFE_CSID17X driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.c b/drivers/media/platform/msm/ais/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.c index 2b542e17ae7b..5fdb52a2d9f0 100644 --- a/drivers/media/platform/msm/ais/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.c +++ b/drivers/media/platform/msm/ais/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -55,8 +55,7 @@ static void __exit cam_ife_csid_lite_exit_module(void) platform_driver_unregister(&cam_ife_csid_lite_driver); } -early_module_init(cam_ife_csid_lite_init_module, EARLY_SUBSYS_3, -EARLY_INIT_LEVEL2); +module_init(cam_ife_csid_lite_init_module); module_exit(cam_ife_csid_lite_exit_module); MODULE_DESCRIPTION("CAM IFE_CSID_LITE driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe17x.c b/drivers/media/platform/msm/ais/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe17x.c index 77dedf2db613..2e11dc40d5a3 100644 --- a/drivers/media/platform/msm/ais/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe17x.c +++ b/drivers/media/platform/msm/ais/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe17x.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -65,7 +65,7 @@ static void __exit cam_vfe_exit_module(void) platform_driver_unregister(&cam_vfe_driver); } -early_module_init(cam_vfe_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_vfe_init_module); module_exit(cam_vfe_exit_module); MODULE_DESCRIPTION("CAM VFE17X driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_jpeg/cam_jpeg_dev.c b/drivers/media/platform/msm/ais/cam_jpeg/cam_jpeg_dev.c index 0536d9dbc4ed..14892224e412 100644 --- a/drivers/media/platform/msm/ais/cam_jpeg/cam_jpeg_dev.c +++ b/drivers/media/platform/msm/ais/cam_jpeg/cam_jpeg_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -203,7 +203,7 @@ static void __exit cam_jpeg_dev_exit_module(void) platform_driver_unregister(&jpeg_driver); } -early_module_init(cam_jpeg_dev_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_jpeg_dev_init_module); module_exit(cam_jpeg_dev_exit_module); MODULE_DESCRIPTION("MSM JPEG driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c b/drivers/media/platform/msm/ais/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c index b66603ac100c..fd4fdab19fa7 100644 --- a/drivers/media/platform/msm/ais/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c +++ b/drivers/media/platform/msm/ais/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -234,7 +234,7 @@ static void __exit cam_jpeg_dma_exit_module(void) platform_driver_unregister(&cam_jpeg_dma_driver); } -early_module_init(cam_jpeg_dma_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_jpeg_dma_init_module); module_exit(cam_jpeg_dma_exit_module); MODULE_DESCRIPTION("CAM JPEG_DMA driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c b/drivers/media/platform/msm/ais/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c index fdb5c05fce00..d4daa6dde308 100644 --- a/drivers/media/platform/msm/ais/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c +++ b/drivers/media/platform/msm/ais/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -232,7 +232,7 @@ static void __exit cam_jpeg_enc_exit_module(void) platform_driver_unregister(&cam_jpeg_enc_driver); } -early_module_init(cam_jpeg_enc_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_jpeg_enc_init_module); module_exit(cam_jpeg_enc_exit_module); MODULE_DESCRIPTION("CAM JPEG_ENC driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_lrme/cam_lrme_dev.c b/drivers/media/platform/msm/ais/cam_lrme/cam_lrme_dev.c index 593bc9d84565..6b1250aea714 100644 --- a/drivers/media/platform/msm/ais/cam_lrme/cam_lrme_dev.c +++ b/drivers/media/platform/msm/ais/cam_lrme/cam_lrme_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -236,7 +236,7 @@ static void __exit cam_lrme_dev_exit_module(void) platform_driver_unregister(&cam_lrme_driver); } -early_module_init(cam_lrme_dev_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_lrme_dev_init_module); module_exit(cam_lrme_dev_exit_module); MODULE_DESCRIPTION("MSM LRME driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c b/drivers/media/platform/msm/ais/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c index 51b868cb9e40..ec4297822fb7 100644 --- a/drivers/media/platform/msm/ais/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c +++ b/drivers/media/platform/msm/ais/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -314,7 +314,7 @@ static void __exit cam_lrme_hw_exit_module(void) platform_driver_unregister(&cam_lrme_hw_driver); } -early_module_init(cam_lrme_hw_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_lrme_hw_init_module); module_exit(cam_lrme_hw_exit_module); MODULE_DESCRIPTION("CAM LRME HW driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/ais/cam_req_mgr/cam_req_mgr_dev.c index 21a71957ae9f..89a48d58073d 100644 --- a/drivers/media/platform/msm/ais/cam_req_mgr/cam_req_mgr_dev.c +++ b/drivers/media/platform/msm/ais/cam_req_mgr/cam_req_mgr_dev.c @@ -869,8 +869,8 @@ static void __exit cam_req_mgr_exit(void) platform_driver_unregister(&cam_req_mgr_driver); } -early_module_init(cam_req_mgr_init, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); -early_late_initcall(cam_req_mgr_late_init, EARLY_SUBSYS_3, EARLY_INIT_LEVEL4); +module_init(cam_req_mgr_init); +late_initcall(cam_req_mgr_late_init); module_exit(cam_req_mgr_exit); MODULE_DESCRIPTION("Camera Request Manager"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/ais/cam_sensor_module/cam_actuator/cam_actuator_dev.c index 215378b5c560..228ccb8a39b3 100644 --- a/drivers/media/platform/msm/ais/cam_sensor_module/cam_actuator/cam_actuator_dev.c +++ b/drivers/media/platform/msm/ais/cam_sensor_module/cam_actuator/cam_actuator_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -445,7 +445,7 @@ static void __exit cam_actuator_driver_exit(void) i2c_del_driver(&cam_actuator_driver_i2c); } -early_module_init(cam_actuator_driver_init, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_actuator_driver_init); module_exit(cam_actuator_driver_exit); MODULE_DESCRIPTION("cam_actuator_driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/ais/cam_sensor_module/cam_cci/cam_cci_dev.c index da7b6c54efcd..db1eb99661d2 100644 --- a/drivers/media/platform/msm/ais/cam_sensor_module/cam_cci/cam_cci_dev.c +++ b/drivers/media/platform/msm/ais/cam_sensor_module/cam_cci/cam_cci_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -524,8 +524,8 @@ static void __exit cam_cci_exit_module(void) platform_driver_unregister(&cci_driver); } -early_module_init(cam_cci_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); -early_late_initcall(cam_cci_late_init, EARLY_SUBSYS_3, EARLY_INIT_LEVEL4); +module_init(cam_cci_init_module); +late_initcall(cam_cci_late_init); module_exit(cam_cci_exit_module); MODULE_DESCRIPTION("MSM CCI driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c b/drivers/media/platform/msm/ais/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c index 7c93ba15c2f7..972b0a549f30 100644 --- a/drivers/media/platform/msm/ais/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c +++ b/drivers/media/platform/msm/ais/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux 
Foundation. All rights reserved. +/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -254,7 +254,7 @@ static void __exit cam_csiphy_exit_module(void) platform_driver_unregister(&csiphy_driver); } -early_module_init(cam_csiphy_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_csiphy_init_module); module_exit(cam_csiphy_exit_module); MODULE_DESCRIPTION("CAM CSIPHY driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/drivers/media/platform/msm/ais/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c index 5370517841d1..cf6854c7a527 100644 --- a/drivers/media/platform/msm/ais/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c +++ b/drivers/media/platform/msm/ais/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -590,7 +590,7 @@ static void __exit cam_eeprom_driver_exit(void) i2c_del_driver(&cam_eeprom_i2c_driver); } -early_module_init(cam_eeprom_driver_init, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_eeprom_driver_init); module_exit(cam_eeprom_driver_exit); MODULE_DESCRIPTION("CAM EEPROM driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/ais/cam_sensor_module/cam_flash/cam_flash_dev.c index e3b6fffbd939..f4c9d254df7c 100644 --- a/drivers/media/platform/msm/ais/cam_sensor_module/cam_flash/cam_flash_dev.c +++ b/drivers/media/platform/msm/ais/cam_sensor_module/cam_flash/cam_flash_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -647,7 +647,7 @@ static void __exit cam_flash_exit_module(void) i2c_del_driver(&cam_flash_i2c_driver); } -early_module_init(cam_flash_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_flash_init_module); module_exit(cam_flash_exit_module); MODULE_DESCRIPTION("CAM FLASH"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_sensor_module/cam_ois/cam_ois_dev.c b/drivers/media/platform/msm/ais/cam_sensor_module/cam_ois/cam_ois_dev.c index 026980e767e6..db583340dad4 100644 --- a/drivers/media/platform/msm/ais/cam_sensor_module/cam_ois/cam_ois_dev.c +++ b/drivers/media/platform/msm/ais/cam_sensor_module/cam_ois/cam_ois_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -423,7 +423,7 @@ static void __exit cam_ois_driver_exit(void) i2c_del_driver(&cam_ois_i2c_driver); } -early_module_init(cam_ois_driver_init, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_ois_driver_init); module_exit(cam_ois_driver_exit); MODULE_DESCRIPTION("CAM OIS driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_sensor_module/cam_res_mgr/cam_res_mgr.c b/drivers/media/platform/msm/ais/cam_sensor_module/cam_res_mgr/cam_res_mgr.c index aed6417020a7..91b19c8f7ff0 100644 --- a/drivers/media/platform/msm/ais/cam_sensor_module/cam_res_mgr/cam_res_mgr.c +++ b/drivers/media/platform/msm/ais/cam_sensor_module/cam_res_mgr/cam_res_mgr.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -740,7 +740,7 @@ static void __exit cam_res_mgr_exit(void) platform_driver_unregister(&cam_res_mgr_driver); } -early_module_init(cam_res_mgr_init, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_res_mgr_init); module_exit(cam_res_mgr_exit); MODULE_DESCRIPTION("Camera resource manager driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_sensor_module/cam_sensor/cam_sensor_dev.c b/drivers/media/platform/msm/ais/cam_sensor_module/cam_sensor/cam_sensor_dev.c index 56596f2ad4a6..d29a58f65a47 100644 --- a/drivers/media/platform/msm/ais/cam_sensor_module/cam_sensor/cam_sensor_dev.c +++ b/drivers/media/platform/msm/ais/cam_sensor_module/cam_sensor/cam_sensor_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -414,7 +414,7 @@ static void __exit cam_sensor_driver_exit(void) i2c_del_driver(&cam_sensor_driver_i2c); } -early_module_init(cam_sensor_driver_init, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_sensor_driver_init); module_exit(cam_sensor_driver_exit); MODULE_DESCRIPTION("cam_sensor_driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/ais/cam_smmu/cam_smmu_api.c index 343399bacf8d..caecc98669bb 100644 --- a/drivers/media/platform/msm/ais/cam_smmu/cam_smmu_api.c +++ b/drivers/media/platform/msm/ais/cam_smmu/cam_smmu_api.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -3636,7 +3636,7 @@ static void __exit cam_smmu_exit_module(void) platform_driver_unregister(&cam_smmu_driver); } -early_module_init(cam_smmu_init_module, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_smmu_init_module); module_exit(cam_smmu_exit_module); MODULE_DESCRIPTION("MSM Camera SMMU driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/cam_sync/cam_sync.c b/drivers/media/platform/msm/ais/cam_sync/cam_sync.c index 305c2e50066f..b76e7c9e2f5a 100644 --- a/drivers/media/platform/msm/ais/cam_sync/cam_sync.c +++ b/drivers/media/platform/msm/ais/cam_sync/cam_sync.c @@ -1265,7 +1265,7 @@ static void __exit cam_sync_exit(void) kfree(sync_dev); } -early_module_init(cam_sync_init, EARLY_SUBSYS_3, EARLY_INIT_LEVEL2); +module_init(cam_sync_init); module_exit(cam_sync_exit); MODULE_DESCRIPTION("Camera sync driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/v4l2-core/v4l2-dev.c 
b/drivers/media/v4l2-core/v4l2-dev.c index a86e5a1ea9b0..c647ba648805 100644 --- a/drivers/media/v4l2-core/v4l2-dev.c +++ b/drivers/media/v4l2-core/v4l2-dev.c @@ -1063,7 +1063,7 @@ static void __exit videodev_exit(void) unregister_chrdev_region(dev, VIDEO_NUM_DEVICES); } -early_subsys_initcall(videodev_init, EARLY_SUBSYS_3, EARLY_INIT_LEVEL1); +subsys_initcall(videodev_init); module_exit(videodev_exit) MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab "); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 2dc9a066ab85..295721efefbe 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1982,5 +1982,5 @@ static void __exit phy_exit(void) mdio_bus_exit(); } -early_subsys_initcall(phy_init, EARLY_SUBSYS_4, EARLY_INIT_LEVEL1); +subsys_initcall(phy_init); module_exit(phy_exit); diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c index 6579f57ab3a9..c17089d79ec3 100644 --- a/drivers/net/wireless/cnss2/main.c +++ b/drivers/net/wireless/cnss2/main.c @@ -2194,14 +2194,7 @@ static void __exit cnss_exit(void) cnss_debug_deinit(); } -static int __init cnss_set_sync(void) -{ - cnss_platform_driver.driver.probe_type = 0; - return 0; -} -early_init(cnss_set_sync, EARLY_SUBSYS_5, EARLY_INIT_LEVEL4); - -early_module_init(cnss_initialize, EARLY_SUBSYS_5, EARLY_INIT_LEVEL5); +module_init(cnss_initialize); module_exit(cnss_exit); MODULE_LICENSE("GPL v2"); diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 578e51a80670..5a4963060b85 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -441,13 +441,6 @@ int of_platform_bus_probe(struct device_node *root, } EXPORT_SYMBOL(of_platform_bus_probe); -int of_platform_node_probe(struct device_node *np, - struct device *parent) -{ - return of_platform_bus_create(np, of_default_bus_match_table, NULL, - parent, true); -} - /** * of_platform_populate() - Populate platform_devices from device tree data * @root: parent of the first level to 
probe or NULL for the root of the tree diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c index 727bd0e3368e..ad55f37d0002 100644 --- a/drivers/pci/host/pci-msm.c +++ b/drivers/pci/host/pci-msm.c @@ -6823,23 +6823,7 @@ static void __exit pcie_exit(void) msm_pcie_sysfs_exit(&msm_pcie_dev[i]); } -static DECLARE_COMPLETION(pcie_init_start); - -static int __init pcie_init_sync(void) -{ - complete(&pcie_init_start); - return 0; -} -subsys_initcall_sync(pcie_init_sync); - -static int __init pcie_init_wait(void) -{ - wait_for_completion(&pcie_init_start); - return 0; -} -early_init(pcie_init_wait, EARLY_SUBSYS_5, EARLY_INIT_LEVEL3); - -early_subsys_initcall_sync(pcie_init, EARLY_SUBSYS_5, EARLY_INIT_LEVEL4); +subsys_initcall_sync(pcie_init); module_exit(pcie_exit); diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index ea250e3c0ebf..48a365e303e5 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -1013,7 +1013,7 @@ static int __init phy_core_init(void) return 0; } -early_module_init(phy_core_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL8); +module_init(phy_core_init); static void __exit phy_core_exit(void) { diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.c index 4c7e54f7f0d7..2e538ce52d2c 100644 --- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.c +++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -11,9 +11,6 @@ * */ -#include -#include - #include "phy-qcom-ufs-qmp-v4.h" #define UFS_PHY_NAME "ufs_phy_qmp_v4" @@ -291,8 +288,7 @@ static struct platform_driver ufs_qcom_phy_qmp_v4_driver = { }, }; -early_module_platform_driver(ufs_qcom_phy_qmp_v4_driver, EARLY_SUBSYS_1, -EARLY_INIT_LEVEL2); +module_platform_driver(ufs_qcom_phy_qmp_v4_driver); MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP v4"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c index 43a99bca6ff8..8d28839de8a0 100644 --- a/drivers/pinctrl/pinctrl-sx150x.c +++ b/drivers/pinctrl/pinctrl-sx150x.c @@ -1328,4 +1328,4 @@ static int __init sx150x_init(void) panic_block); return i2c_add_driver(&sx150x_driver); } -early_subsys_initcall(sx150x_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL3); +subsys_initcall(sx150x_init); diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c index 4de5eb801c27..6c1d04cfe079 100644 --- a/drivers/platform/msm/qcom-geni-se.c +++ b/drivers/platform/msm/qcom-geni-se.c @@ -2095,8 +2095,7 @@ static int __init geni_se_driver_init(void) { return platform_driver_register(&geni_se_driver); } -early_subsys_initcall(geni_se_driver_init, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL3); +subsys_initcall(geni_se_driver_init); static void __exit geni_se_driver_exit(void) { diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 72c299042a4f..a7e4fba724b7 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -842,5 +842,5 @@ static void __exit exit_scsi(void) async_unregister_domain(&scsi_sd_probe_domain); } -early_subsys_initcall(init_scsi, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL8); +subsys_initcall(init_scsi); module_exit(exit_scsi); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 9471e4ea41a7..489737392c37 100644 --- 
a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3699,7 +3699,7 @@ static void __exit exit_sd(void) } } -early_module_init(init_sd, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL8); +module_init(init_sd); module_exit(exit_sd); static void sd_print_sense_hdr(struct scsi_disk *sdkp, @@ -3726,9 +3726,3 @@ static void sd_print_result(const struct scsi_disk *sdkp, const char *msg, msg, host_byte(result), driver_byte(result)); } -static int __init early_rootdev_wait(void) -{ - async_synchronize_full_domain(&scsi_sd_probe_domain); - return 0; -} -early_init(early_rootdev_wait, EARLY_SUBSYS_1, EARLY_INIT_LEVEL5); diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index e3985df6b1e9..6006876f40e4 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -2944,14 +2944,6 @@ static struct platform_driver ufs_qcom_pltform = { .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, }; -early_module_platform_driver(ufs_qcom_pltform, EARLY_SUBSYS_1, -EARLY_INIT_LEVEL3); - -static int __init ufs_qti_pltform_sync(void) -{ - ufs_qcom_pltform.driver.probe_type = PROBE_DEFAULT_STRATEGY; - return 0; -} -early_init(ufs_qti_pltform_sync, EARLY_SUBSYS_1, EARLY_INIT_LEVEL2); +module_platform_driver(ufs_qcom_pltform); MODULE_LICENSE("GPL v2"); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index c342f6ddad54..e562e9c011b5 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -37,7 +37,6 @@ * license terms, and distributes only under these terms. 
*/ -#include #include #include #include @@ -11307,10 +11306,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ufshcd_cmd_log_init(hba); - if (is_early_userspace) - ufshcd_async_scan(hba, (async_cookie_t)0); - else - async_schedule(ufshcd_async_scan, hba); + async_schedule(ufshcd_async_scan, hba); ufsdbg_add_debugfs(hba); diff --git a/drivers/soc/qcom/boot_marker.c b/drivers/soc/qcom/boot_marker.c index 45408ecda5d8..a4c7d85ff028 100644 --- a/drivers/soc/qcom/boot_marker.c +++ b/drivers/soc/qcom/boot_marker.c @@ -302,7 +302,7 @@ static int __init init_bootkpi(void) set_bootloader_stats(false); return 0; } -early_subsys_initcall(init_bootkpi, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL0); +subsys_initcall(init_bootkpi); static void __exit exit_bootkpi(void) { diff --git a/drivers/soc/qcom/fsa4480-i2c.c b/drivers/soc/qcom/fsa4480-i2c.c index 912d61ba8770..cddb520ac4cf 100644 --- a/drivers/soc/qcom/fsa4480-i2c.c +++ b/drivers/soc/qcom/fsa4480-i2c.c @@ -470,7 +470,7 @@ static int __init fsa4480_init(void) return rc; } -early_module_init(fsa4480_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL3); +module_init(fsa4480_init); static void __exit fsa4480_exit(void) { diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c index 2184328cdcca..708713f92762 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c @@ -2005,6 +2005,5 @@ int __init msm_bus_device_late_init(void) init_time = false; return commit_late_init_data(false); } -early_subsys_initcall(msm_bus_device_init_driver, EARLY_SUBSYS_PLATFORM, -EARLY_INIT_LEVEL1); +subsys_initcall(msm_bus_device_init_driver); late_initcall_sync(msm_bus_device_late_init); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_proxy_client.c b/drivers/soc/qcom/msm_bus/msm_bus_proxy_client.c index 399f93897990..cdf61f6c8644 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_proxy_client.c +++ 
b/drivers/soc/qcom/msm_bus/msm_bus_proxy_client.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -89,6 +89,5 @@ static int __init msm_bus_proxy_client_unvote(void) return 0; } -early_subsys_initcall_sync(msm_bus_proxy_client_init_driver, -EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL2); +subsys_initcall_sync(msm_bus_proxy_client_init_driver); late_initcall_sync(msm_bus_proxy_client_unvote); diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index a9d326a89841..dc2f25b572e7 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -1617,7 +1617,7 @@ static int __init socinfo_init_sysfs(void) return 0; } -early_late_initcall(socinfo_init_sysfs, EARLY_SUBSYS_1, EARLY_INIT_LEVEL4); +late_initcall(socinfo_init_sysfs); static void socinfo_print(void) { @@ -1859,4 +1859,4 @@ int __init socinfo_init(void) return 0; } -early_subsys_initcall(socinfo_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL0); +subsys_initcall(socinfo_init); diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c index c87ecf9ba163..e267ceadf41a 100644 --- a/drivers/soc/qcom/subsys-pil-tz.c +++ b/drivers/soc/qcom/subsys-pil-tz.c @@ -1329,7 +1329,7 @@ static int __init pil_tz_init(void) { return platform_driver_register(&pil_tz_driver); } -early_module_init(pil_tz_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL8); +module_init(pil_tz_init); static void __exit pil_tz_exit(void) { diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 16f218a7e4a0..62ab59f7831d 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -1696,6 +1696,6 @@ exit_netlink: return ret; } -early_subsys_initcall(thermal_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL7); 
+subsys_initcall(thermal_init); fs_initcall(thermal_netlink_init); module_exit(thermal_exit); diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c index 8a06434527cd..b7827f001b38 100644 --- a/drivers/usb/pd/policy_engine.c +++ b/drivers/usb/pd/policy_engine.c @@ -4737,7 +4737,7 @@ static int __init usbpd_init(void) usbpd_ipc_log = ipc_log_context_create(NUM_LOG_PAGES, "usb_pd", 0); return class_register(&usbpd_class); } -early_module_init(usbpd_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL8); +module_init(usbpd_init); static void __exit usbpd_exit(void) { diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 2c46b92f244f..3168ee4e77f4 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -175,4 +175,5 @@ static int __init anon_inode_init(void) return 0; } -early_fs_initcall(anon_inode_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL8); +fs_initcall(anon_inode_init); + diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 1260d64a45ad..8582a930e3e2 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -2342,4 +2342,4 @@ static int __init eventpoll_init(void) return 0; } -early_fs_initcall(eventpoll_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL8); +fs_initcall(eventpoll_init); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 003729c86d78..dbf4b9c38a2e 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -6154,5 +6154,5 @@ MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodo MODULE_DESCRIPTION("Fourth Extended Filesystem"); MODULE_LICENSE("GPL"); MODULE_SOFTDEP("pre: crc32c"); -early_module_init(ext4_init_fs, EARLY_SUBSYS_1, EARLY_INIT_LEVEL4); +module_init(ext4_init_fs) module_exit(ext4_exit_fs) diff --git a/fs/filesystems.c b/fs/filesystems.c index 9829317d39c8..8fb7cda40997 100644 --- a/fs/filesystems.c +++ b/fs/filesystems.c @@ -221,25 +221,6 @@ int __init get_filesystem_list(char *buf) return len; } -#ifdef CONFIG_EARLY_SERVICES -int get_filesystem_list_runtime(char *buf) -{ - int len = 0; - struct 
file_system_type *tmp; - - read_lock(&file_systems_lock); - tmp = file_systems; - while (tmp && len < PAGE_SIZE - 80) { - len += scnprintf(buf+len, PAGE_SIZE, "%s\t%s\n", - (tmp->fs_flags & FS_REQUIRES_DEV) ? "" : "nodev", - tmp->name); - tmp = tmp->next; - } - read_unlock(&file_systems_lock); - return len; -} -#endif - #ifdef CONFIG_PROC_FS static int filesystems_proc_show(struct seq_file *m, void *v) { diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 9d9e110a02a3..93a466cf58ba 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -2721,6 +2721,6 @@ static void __exit journal_exit(void) } MODULE_LICENSE("GPL"); -early_module_init(journal_init, EARLY_SUBSYS_1, EARLY_INIT_LEVEL4); +module_init(journal_init); module_exit(journal_exit); diff --git a/fs/pipe.c b/fs/pipe.c index edb86b4d03b9..fa3c2c25cec5 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -1240,4 +1240,4 @@ static int __init init_pipe_fs(void) return err; } -early_fs_initcall(init_pipe_fs, EARLY_SUBSYS_1, EARLY_INIT_LEVEL4); +fs_initcall(init_pipe_fs); diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 259ea55176e2..e3c3900c74d5 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -764,35 +764,6 @@ INIT_CALLS_LEVEL(7) \ VMLINUX_SYMBOL(__initcall_end) = .; -#define EARLY_INIT_CALLS_LEVEL(subsys, level) \ - VMLINUX_SYMBOL(__early##subsys##_initcall##level##_start) = .; \ - KEEP(*(.early##subsys##.initcall##level##.init)) \ - -#define EARLY_INIT_CALLS_SUBSYS(subsys) \ - VMLINUX_SYMBOL(__early##subsys##_initcall_start) = .; \ - EARLY_INIT_CALLS_LEVEL(subsys, 0) \ - EARLY_INIT_CALLS_LEVEL(subsys, 1) \ - EARLY_INIT_CALLS_LEVEL(subsys, 2) \ - EARLY_INIT_CALLS_LEVEL(subsys, 3) \ - EARLY_INIT_CALLS_LEVEL(subsys, 4) \ - EARLY_INIT_CALLS_LEVEL(subsys, 5) \ - EARLY_INIT_CALLS_LEVEL(subsys, 6) \ - EARLY_INIT_CALLS_LEVEL(subsys, 7) \ - EARLY_INIT_CALLS_LEVEL(subsys, 8) \ - VMLINUX_SYMBOL(__early##subsys##_initcall_end) = .; \ - -#define 
EARLY_INIT_CALLS \ - VMLINUX_SYMBOL(__early_initcall_start) = .; \ - EARLY_INIT_CALLS_SUBSYS(0) \ - EARLY_INIT_CALLS_SUBSYS(1) \ - EARLY_INIT_CALLS_SUBSYS(2) \ - EARLY_INIT_CALLS_SUBSYS(3) \ - EARLY_INIT_CALLS_SUBSYS(4) \ - EARLY_INIT_CALLS_SUBSYS(5) \ - EARLY_INIT_CALLS_SUBSYS(6) \ - EARLY_INIT_CALLS_SUBSYS(7) \ - VMLINUX_SYMBOL(__early_initcall_end) = .; - #define CON_INITCALL \ VMLINUX_SYMBOL(__con_initcall_start) = .; \ KEEP(*(.con_initcall.init)) \ diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h index 295864168063..84d3c81b5978 100644 --- a/include/linux/cpufeature.h +++ b/include/linux/cpufeature.h @@ -57,18 +57,5 @@ static int __init cpu_feature_match_ ## x ## _init(void) \ } \ module_init(cpu_feature_match_ ## x ## _init) -#define early_module_cpu_feature_match(x, __initfunc, subsys, level) \ -static struct cpu_feature const __maybe_unused cpu_feature_match_ ## x[] = \ - { { .feature = cpu_feature(x) }, { } }; \ -MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \ - \ -static int __init cpu_feature_match_ ## x ## _init(void) \ -{ \ - if (!cpu_have_feature(cpu_feature(x))) \ - return -ENODEV; \ - return __initfunc(); \ -} \ -early_module_init(cpu_feature_match_ ## x ## _init, subsys, level) - #endif #endif diff --git a/include/linux/device.h b/include/linux/device.h index 300d0e10a749..b52773b0598d 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -1523,31 +1523,6 @@ static void __exit __driver##_exit(void) \ } \ module_exit(__driver##_exit); -#define early_module_driver(__driver, subsys, level, __register, __unregister) \ -static int __init __driver##_init(void) \ -{ \ - return __register(&(__driver)); \ -} \ -early_module_init(__driver##_init, subsys, level); \ -static void __exit __driver##_exit(void) \ -{ \ - __unregister(&(__driver)); \ -} \ -module_exit(__driver##_exit) - -#define early_module_driver_async(__driver, subsys, level, __register, \ - __unregister) \ -static int __init __driver##_init(void) \ -{ \ - 
return __register(&(__driver)); \ -} \ -early_module_init_async(__driver##_init, subsys, level); \ -static void __exit __driver##_exit(void) \ -{ \ - __unregister(&(__driver)); \ -} \ -module_exit(__driver##_exit) - /** * builtin_driver() - Helper macro for drivers that don't do anything * special in init and have no exit. This eliminates some boilerplate. diff --git a/include/linux/early_async.h b/include/linux/early_async.h deleted file mode 100644 index 1187bc9d7d74..000000000000 --- a/include/linux/early_async.h +++ /dev/null @@ -1,63 +0,0 @@ -/* Copyright (c) 2019, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef _LINUX_INIT_ASYNC_H -#define _LINUX_INIT_ASYNC_H - -#include -#include -#include - - -#define _early_initcall_async(fn, subsys, level) \ - static DECLARE_COMPLETION(_##fn##done); \ - static void __init _##fn##_work(struct work_struct *w) \ - { \ - fn(); \ - complete(&_##fn##done); \ - kfree(w); \ - } \ - static int __init _##fn##_sync(void) \ - { \ - wait_for_completion(&_##fn##done); \ - return 0; \ - } \ - static int __init _##fn##_async(void) \ - { \ - struct work_struct *work; \ - work = kzalloc(sizeof(struct work_struct), GFP_KERNEL); \ - if (work) { \ - INIT_WORK(work, _##fn##_work); \ - queue_work_on(WORK_CPU_UNBOUND, system_highpri_wq, \ - work); \ - } \ - return 0; \ - } \ - __define_early_initcall(_##fn##_async, subsys, level) - -#define early_initcall_type_async(type, fn, subsys, level) \ - static int __init _##fn(void) \ - { \ - if (is_early_userspace) \ - return 0; \ - return fn(); \ - } \ - type(_##fn); \ - _early_initcall_async(fn, subsys, level) - -#define early_device_initcall_async(fn, subsys, level) \ - early_initcall_type_async(device_initcall, fn, subsys, level) - -#define __early_initcall_async(fn, subsys, level) \ - early_device_initcall_async(fn, subsys, level) - -#endif /* _LINUX_INIT_ASYNC_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 9e279ffc895f..7da051e2dfcb 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3349,7 +3349,6 @@ int proc_nr_dentry(struct ctl_table *table, int write, int proc_nr_inodes(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); int __init get_filesystem_list(char *buf); -int get_filesystem_list_runtime(char *buf); #define __FMODE_EXEC ((__force int) FMODE_EXEC) #define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY) diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 4fb2e9fc0c17..d501d3956f13 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -780,10 +780,6 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, 
unsigned short addr); module_driver(__i2c_driver, i2c_add_driver, \ i2c_del_driver) -#define early_module_i2c_driver(__i2c_driver, subsys, level) \ - early_module_driver(__i2c_driver, subsys, level, i2c_add_driver, \ - i2c_del_driver) - /** * builtin_i2c_driver() - Helper macro for registering a builtin I2C driver * @__i2c_driver: i2c_driver struct diff --git a/include/linux/init.h b/include/linux/init.h index 8682615c5b99..66eb5b9c138e 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -158,16 +158,8 @@ extern bool initcall_debug; #define ___initcall_name2(c, fn, id) __initcall_##c##_##fn##id #define ___initcall_name1(c, fn, id) ___initcall_name2(c, fn, id) #define __initcall_name(fn, id) ___initcall_name1(__COUNTER__, fn, id) - #define ___early_initcall_name2(c, fn, subsys, level) \ - __early##subsys_initcall_##c##_##fn##level - #define ___early_initcall_name1(c, fn, subsys, level) \ - ___early_initcall_name2(c, fn, subsys, level) - #define __early_initcall_name(fn, subsys, level) \ - ___early_initcall_name1(__COUNTER__, fn, subsys, level) #else #define __initcall_name(fn, id) __initcall_##fn##id - #define __early_initcall_name(fn, subsys, level) \ - __early##subsys_initcall_##fn##level #endif /* @@ -213,11 +205,6 @@ extern bool initcall_debug; #define __define_initcall(fn, id) ___define_initcall(fn, id, .initcall##id) -#define __define_early_initcall(fn, subsys, level) \ - static initcall_t __early_initcall_name(fn, subsys, level) __used \ - __attribute__((__section__(".early" #subsys ".initcall" #level ".init"\ - ))) = fn - /* * Early initcalls run before initializing SMP. 
* @@ -250,66 +237,6 @@ extern bool initcall_debug; #define late_initcall(fn) __define_initcall(fn, 7) #define late_initcall_sync(fn) __define_initcall(fn, 7s) -extern void early_subsys_finish(void); -extern initcall_t __early0_initcall_start[]; -extern initcall_t __early1_initcall_start[]; -extern initcall_t __early2_initcall_start[]; -extern initcall_t __early3_initcall_start[]; -extern initcall_t __early4_initcall_start[]; -extern initcall_t __early5_initcall_start[]; -extern initcall_t __early6_initcall_start[]; -extern initcall_t __early7_initcall_start[]; -extern initcall_t __early_initcall_end[]; -extern bool is_early_userspace; - -#define early_initcall_type(type, fn, subsys, level) \ - static int __init _##fn(void) \ - { \ - if (is_early_userspace) \ - return 0; \ - return fn(); \ - } \ - type(_##fn); \ - __define_early_initcall(fn, subsys, level) - -#define early_subsys_initcall(fn, subsys, level) \ - early_initcall_type(subsys_initcall, fn, subsys, level) -#define early_device_initcall(fn, subsys, level) \ - early_initcall_type(device_initcall, fn, subsys, level) -#define early_rootfs_initcall(fn, subsys, level) \ - early_initcall_type(rootfs_initcall, fn, subsys, level) -#define early_fs_initcall(fn, subsys, level) \ - early_initcall_type(fs_initcall, fn, subsys, level) -#define early_late_initcall(fn, subsys, level) \ - early_initcall_type(late_initcall, fn, subsys, level) -#define early_subsys_initcall_sync(fn, subsys, level) \ - early_initcall_type(subsys_initcall_sync, fn, subsys, level) - -#define early_init(fn, subsys, level) \ - __define_early_initcall(fn, subsys, level) -#define __early_initcall(fn, subsys, level) \ - early_device_initcall(fn, subsys, level) - -#define EARLY_SUBSYS_PLATFORM 0 -#define EARLY_SUBSYS_1 1 -#define EARLY_SUBSYS_2 2 -#define EARLY_SUBSYS_3 3 -#define EARLY_SUBSYS_4 4 -#define EARLY_SUBSYS_5 5 -#define EARLY_SUBSYS_6 6 -#define EARLY_SUBSYS_7 7 -#define EARLY_SUBSYS_NUM 8 - -#define EARLY_INIT_LEVEL0 0 -#define 
EARLY_INIT_LEVEL1 1 -#define EARLY_INIT_LEVEL2 2 -#define EARLY_INIT_LEVEL3 3 -#define EARLY_INIT_LEVEL4 4 -#define EARLY_INIT_LEVEL5 5 -#define EARLY_INIT_LEVEL6 6 -#define EARLY_INIT_LEVEL7 7 -#define EARLY_INIT_LEVEL8 8 - #define __initcall(fn) device_initcall(fn) #define __exitcall(fn) \ diff --git a/include/linux/module.h b/include/linux/module.h index df3ba710f511..f3fbbdcd981f 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -85,12 +85,6 @@ extern void cleanup_module(void); */ #define module_init(x) __initcall(x); -#define early_module_init(x, subsys, level) \ - __early_initcall(x, subsys, level) - -#define early_module_init_async(x, subsys, level) \ - __early_initcall_async(x, subsys, level) - /** * module_exit() - driver exit entry point * @x: function to be run when driver is removed @@ -131,16 +125,6 @@ extern void cleanup_module(void); #define console_initcall(fn) module_init(fn) #define security_initcall(fn) module_init(fn) -#define early_subsys_initcall(fn, subsys, level) module_init(fn) -#define early_device_initcall(fn, subsys, level) module_init(fn) -#define early_rootfs_initcall(fn, subsys, level) module_init(fn) -#define early_fs_initcall(fn, subsys, level) module_init(fn) -#define early_late_initcall(fn, subsys, level) module_init(fn) -#define early_subsys_initcall_sync(fn, subsys, level) module_init(fn) -#define early_init(fn, subsys, level) -#define early_module_init(fn, subsys, level) module_init(fn) -#define early_module_init_async(fn, subsys, level) module_init(fn) - /* Each module must use one module_init(). 
*/ #define module_init(initfn) \ static inline initcall_t __maybe_unused __inittest(void) \ diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index 9998220ec456..fb908e598348 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -75,8 +75,6 @@ extern int of_platform_device_destroy(struct device *dev, void *data); extern int of_platform_bus_probe(struct device_node *root, const struct of_device_id *matches, struct device *parent); -extern int of_platform_node_probe(struct device_node *np, - struct device *parent); #ifdef CONFIG_OF_ADDRESS extern int of_platform_populate(struct device_node *root, const struct of_device_id *matches, diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index b5f6dad99cb0..49f634d96118 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -228,14 +228,6 @@ static inline void platform_set_drvdata(struct platform_device *pdev, module_driver(__platform_driver, platform_driver_register, \ platform_driver_unregister) -#define early_module_platform_driver(__platform_driver, subsys, level) \ - early_module_driver(__platform_driver, subsys, level, \ - platform_driver_register, platform_driver_unregister) - -#define early_module_platform_driver_async(__platform_driver, subsys, level) \ - early_module_driver_async(__platform_driver, subsys, level, \ - platform_driver_register, platform_driver_unregister) - /* builtin_platform_driver() - Helper macro for builtin drivers that * don't do anything special in driver init. This eliminates some * boilerplate. 
Each driver may only use this macro once, and diff --git a/init/Makefile b/init/Makefile index 230853115245..62d8be1cee07 100644 --- a/init/Makefile +++ b/init/Makefile @@ -9,7 +9,6 @@ obj-y := main.o version.o mounts.o obj-y += noinitramfs.o obj-$(CONFIG_BLK_DEV_INITRD) += initramfs.o obj-$(CONFIG_GENERIC_CALIBRATE_DELAY) += calibrate.o -obj-y += early_userspace.o ifneq ($(CONFIG_ARCH_INIT_TASK),y) obj-y += init_task.o diff --git a/init/do_mounts.c b/init/do_mounts.c index 80716bd5eb05..f8a861183f29 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -42,7 +42,6 @@ int root_mountflags = MS_RDONLY | MS_SILENT; static char * __initdata root_device_name; static char __initdata saved_root_name[64] = "PARTLABEL=system"; static int root_wait; - dev_t ROOT_DEV; static int __init load_ramdisk(char *str) @@ -446,26 +445,6 @@ static void __init get_fs_names(char *page) *s = '\0'; } -#ifdef CONFIG_EARLY_SERVICES -static void get_fs_names_runtime(char *page) -{ - char *s = page; - int len = get_filesystem_list_runtime(page); - char *p, *next; - - page[len] = '\0'; - - for (p = page-1; p; p = next) { - next = strnchr(++p, len, '\n'); - if (*p++ != '\t') - continue; - while ((*s++ = *p++) != '\n') - ; - s[-1] = '\0'; - } - *s = '\0'; -} -#endif static int __init do_mount_root(char *name, char *fs, int flags, void *data) { struct super_block *s; @@ -663,74 +642,61 @@ void __init mount_root(void) void __init prepare_namespace(void) { int is_floppy; - static int first_time = 1; - if ((!is_early_userspace) || (is_early_userspace && (!first_time))) { - if (root_delay) { - pr_info( - "Waiting %d sec before mounting root device...\n", - root_delay); - ssleep(root_delay); - } - - /* - * wait for the known devices to complete their probing - * - * Note: this is a potential source of long boot delays. - * For example, it is not atypical to wait 5 seconds here - * for the touchpad of a laptop to initialize. 
- */ - if (!is_early_userspace) - wait_for_device_probe(); + if (root_delay) { + printk(KERN_INFO "Waiting %d sec before mounting root device...\n", + root_delay); + ssleep(root_delay); } - if ((!is_early_userspace) || (is_early_userspace && first_time)) { - md_run_setup(); - dm_run_setup(); - dm_verity_setup(); + /* + * wait for the known devices to complete their probing + * + * Note: this is a potential source of long boot delays. + * For example, it is not atypical to wait 5 seconds here + * for the touchpad of a laptop to initialize. + */ + wait_for_device_probe(); - if (saved_root_name[0]) { - root_device_name = saved_root_name; - if (!memcmp(root_device_name, "mtd", 3) || - !memcmp(root_device_name, "ubi", 3)) { - mount_block_root(root_device_name, - root_mountflags); - goto out; - } - ROOT_DEV = name_to_dev_t(root_device_name); - if (memcmp(root_device_name, "/dev/", 5) == 0) - root_device_name += 5; - } + md_run_setup(); + dm_run_setup(); + dm_verity_setup(); if (initrd_load()) goto out; - /* wait for any asynchronous scanning to complete */ - if ((ROOT_DEV == 0) && root_wait) { - pr_info("Waiting for root device %s...\n", - saved_root_name); - while (driver_probe_done() != 0 || - (ROOT_DEV = name_to_dev_t(saved_root_name)) - == 0) - msleep(20); - async_synchronize_full(); + if (saved_root_name[0]) { + root_device_name = saved_root_name; + if (!strncmp(root_device_name, "mtd", 3) || + !strncmp(root_device_name, "ubi", 3)) { + mount_block_root(root_device_name, root_mountflags); + goto out; } - - is_floppy = MAJOR(ROOT_DEV) == FLOPPY_MAJOR; - - if (is_floppy && rd_doload && rd_load_disk(0)) - ROOT_DEV = Root_RAM0; - - mount_root(); + ROOT_DEV = name_to_dev_t(root_device_name); + if (strncmp(root_device_name, "/dev/", 5) == 0) + root_device_name += 5; } + + if (initrd_load()) + goto out; + + /* wait for any asynchronous scanning to complete */ + if ((ROOT_DEV == 0) && root_wait) { + printk(KERN_INFO "Waiting for root device %s...\n", + saved_root_name); + 
while (driver_probe_done() != 0 || + (ROOT_DEV = name_to_dev_t(saved_root_name)) == 0) + msleep(5); + async_synchronize_full(); + } + + is_floppy = MAJOR(ROOT_DEV) == FLOPPY_MAJOR; + + mount_root(); out: - if ((!is_early_userspace) || (is_early_userspace && first_time)) { - devtmpfs_mount("dev"); - sys_mount((char __user *)".", (char __user *)"/", - NULL, MS_MOVE, NULL); - sys_chroot((char __user *)"."); - } - first_time = 0; + devtmpfs_mount("dev"); + sys_mount(".", "/", NULL, MS_MOVE, NULL); + sys_chroot("."); } static bool is_tmpfs; @@ -775,26 +741,3 @@ int __init init_rootfs(void) return err; } - -static int __init early_prepare_namespace(void) -{ - prepare_namespace(); - return 0; -} -early_init(early_prepare_namespace, EARLY_SUBSYS_1, EARLY_INIT_LEVEL6); - -static char init_prog[128] = "/usr/sbin/early_init"; -static char *init_prog_argv[2] = { init_prog, NULL }; - -static int __init early_userspace_start(void) -{ - int rc; - - rc = call_usermodehelper(init_prog, init_prog_argv, NULL, 0); - if (!rc) - pr_info("early_init launched\n"); - else - pr_err("early_init failed\n"); - return rc; -} -early_init(early_userspace_start, EARLY_SUBSYS_1, EARLY_INIT_LEVEL8); diff --git a/init/early_userspace.c b/init/early_userspace.c deleted file mode 100644 index e47b954d3d74..000000000000 --- a/init/early_userspace.c +++ /dev/null @@ -1,196 +0,0 @@ -/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct subsystem_work { - struct work_struct work; - int id; -}; - -static DECLARE_COMPLETION(populate_done); -static DECLARE_COMPLETION(subsys_done); -static DECLARE_COMPLETION(plat_subsys_done); -static atomic_t subsys_finish = ATOMIC_INIT(EARLY_SUBSYS_NUM); -bool is_early_userspace; -EXPORT_SYMBOL(is_early_userspace); - -static int __init early_userspace(char *p) -{ - is_early_userspace = true; - return 0; -} -early_param("early_userspace", early_userspace); - -static initcall_t *early_initcall_levels[] = { - __early0_initcall_start, - __early1_initcall_start, - __early2_initcall_start, - __early3_initcall_start, - __early4_initcall_start, - __early5_initcall_start, - __early6_initcall_start, - __early7_initcall_start, - __early_initcall_end, -}; - -static void do_early_subsys_init(int id) -{ - int ret; - initcall_t *fn; - - for (fn = early_initcall_levels[id]; fn < early_initcall_levels[id+1]; - fn++) { - ret = (*fn)(); - if (ret) { - print_ip_sym(*((unsigned long *)fn)); - pr_err("fails with %d\n", ret); - } - } -} - -void __ref early_subsys_finish(void) -{ - if (atomic_dec_and_test(&subsys_finish)) { - ftrace_free_init_mem(); - free_initmem(); - } -} - -static void early_subsys_init(struct work_struct *w) -{ - struct subsystem_work *subsys_work = - container_of(w, struct subsystem_work, work); - - do_early_subsys_init(subsys_work->id); - early_subsys_finish(); - kfree(subsys_work); -} - -static void early_system_init(struct work_struct *w) -{ - struct subsystem_work *subsys_work; - int id; - - do_early_subsys_init(EARLY_SUBSYS_PLATFORM); - complete(&plat_subsys_done); - pr_info("early_common_platform initialized\n"); - wait_for_completion(&populate_done); - pr_info("early_subsystems starting\n"); - - for (id = EARLY_SUBSYS_1; id < EARLY_SUBSYS_NUM; id++) { - subsys_work = kzalloc(sizeof(struct subsystem_work), - GFP_KERNEL); - if 
(subsys_work) { - subsys_work->id = id; - INIT_WORK(&subsys_work->work, early_subsys_init); - queue_work_on(WORK_CPU_UNBOUND, system_unbound_wq, - &subsys_work->work); - } else { - pr_err("no mem to start early_subsys_init\n"); - } - } - kfree(w); -} - -static const struct of_device_id early_devices_match_table[] = { - { .compatible = "qcom,early-devices" }, - { } -}; -MODULE_DEVICE_TABLE(of, early_devices_match_table); - -static int early_devices_probe(struct platform_device *pdev) -{ - struct device_node *early_node; - int i, len = 0; - struct work_struct *work; - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; - - of_find_property(np, "devices", &len); - for (i = 0; i < len / sizeof(u32); i++) { - early_node = of_parse_phandle(np, "devices", i); - of_platform_node_probe(early_node, pdev->dev.parent); - of_node_put(early_node); - } - work = kzalloc(sizeof(struct work_struct), GFP_KERNEL); - if (work) { - INIT_WORK(work, early_system_init); - queue_work_on(WORK_CPU_UNBOUND, system_unbound_wq, work); - } else { - pr_err("no mem to start early_system_init\n"); - } - - return 0; -} - -static struct platform_driver early_devices_driver = { - .probe = early_devices_probe, - .driver = { - .name = "early-devices", - .of_match_table = early_devices_match_table, - }, -}; - -static int __init early_devices_init(void) -{ - if (is_early_userspace) - return platform_driver_register(&early_devices_driver); - else - return 0; -} -arch_initcall(early_devices_init); - -static void __exit early_devices_exit(void) -{ - if (is_early_userspace) - platform_driver_unregister(&early_devices_driver); -} -module_exit(early_devices_exit); - -static int __init early_populate_sync(void) -{ - complete(&populate_done); - pr_info("early populate_sync start\n"); - if (is_early_userspace) - wait_for_completion(&plat_subsys_done); - pr_info("early populate_sync end\n"); - return 0; -} -subsys_initcall(early_populate_sync); - -static int __init early_subsys_sync(void) -{ - 
complete(&subsys_done); - return 0; -} -subsys_initcall_sync(early_subsys_sync); - -static int __init early_subsys_wait(void) -{ - pr_info("early subsys wait start\n"); - wait_for_completion(&subsys_done); - pr_info("early subsys wait end\n"); - return 0; -} -early_init(early_subsys_wait, EARLY_SUBSYS_1, EARLY_INIT_LEVEL7); diff --git a/init/initramfs.c b/init/initramfs.c index 9e5be7c97c56..5ea7f1b5ec44 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -682,4 +682,4 @@ static int __init populate_rootfs(void) return 0; } -early_rootfs_initcall(populate_rootfs, EARLY_SUBSYS_1, EARLY_INIT_LEVEL4); +rootfs_initcall(populate_rootfs); diff --git a/init/main.c b/init/main.c index 84cae89ba580..9c9b5d206205 100644 --- a/init/main.c +++ b/init/main.c @@ -96,8 +96,6 @@ #include #include -#include "do_mounts.h" - static int kernel_init(void *); extern void init_IRQ(void); @@ -1007,8 +1005,7 @@ static void mark_readonly(void) * flushed so that we don't hit false positives looking for * insecure pages which are W+X. 
*/ - if (!is_early_userspace) - rcu_barrier_sched(); + rcu_barrier_sched(); mark_rodata_ro(); rodata_test(); } else @@ -1024,18 +1021,12 @@ static inline void mark_readonly(void) static int __ref kernel_init(void *unused) { int ret; -#ifdef CONFIG_EARLY_SERVICES - int status = 0; -#endif + kernel_init_freeable(); /* need to finish all async __init code before freeing the memory */ async_synchronize_full(); - if (!is_early_userspace) { - ftrace_free_init_mem(); - free_initmem(); - } else { - early_subsys_finish(); - } + ftrace_free_init_mem(); + free_initmem(); mark_readonly(); system_state = SYSTEM_RUNNING; numa_default_policy(); @@ -1043,15 +1034,6 @@ static int __ref kernel_init(void *unused) rcu_end_inkernel_boot(); place_marker("M - DRIVER Kernel Boot Done"); -#ifdef CONFIG_EARLY_SERVICES - status = get_early_services_status(); - if (status) { - struct kstat stat; - /* Wait for early services SE policy load completion signal */ - while (vfs_stat("/dev/sedone", &stat) != 0) - ; - } -#endif if (ramdisk_execute_command) { ret = run_init_process(ramdisk_execute_command); if (!ret) diff --git a/lib/sg_pool.c b/lib/sg_pool.c index 6c5a1389478c..6dd30615a201 100644 --- a/lib/sg_pool.c +++ b/lib/sg_pool.c @@ -168,5 +168,5 @@ static __exit void sg_pool_exit(void) } } -early_module_init(sg_pool_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL8); +module_init(sg_pool_init); module_exit(sg_pool_exit); diff --git a/net/core/dev.c b/net/core/dev.c index c5ff6bb42fca..7c5a4afd10d6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8912,4 +8912,4 @@ out: return rc; } -early_subsys_initcall(net_dev_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL8); +subsys_initcall(net_dev_init); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 83e0c5f4a8ec..091e93798eac 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2973,7 +2973,7 @@ static void __exit af_unix_exit(void) request_module() don't end up in a loop when modprobe tries to use a UNIX socket. 
But later than subsys_initcall() because we depend on stuff initialised there */ -early_fs_initcall(af_unix_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL8); +fs_initcall(af_unix_init); module_exit(af_unix_exit); MODULE_LICENSE("GPL"); diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c index 2419404d5c13..e7f5b5a578f1 100644 --- a/security/pfe/pfk.c +++ b/security/pfe/pfk.c @@ -564,7 +564,7 @@ void pfk_clear_on_reset(void) pfk_kc_clear_on_reset(); } -early_module_init(pfk_init, EARLY_SUBSYS_PLATFORM, EARLY_INIT_LEVEL8); +module_init(pfk_init); module_exit(pfk_exit); MODULE_LICENSE("GPL v2"); From 69e3d069c30a893c94d9b00301e944801514b002 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Sat, 29 Aug 2020 10:47:58 +0530 Subject: [PATCH 137/141] Revert "Workaround: These are work around which need to de addressed" This reverts commit f31f78f873cb8e608fc8e8ba8e0f0638f73f130d. Signed-off-by: UtsavBalar1231 --- drivers/gpu/msm/kgsl_pwrctrl.c | 3 +-- drivers/media/platform/msm/sde/rotator/sde_rotator_base.c | 4 ++-- security/selinux/include/classmap.h | 6 +++--- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index 40ad5fe70750..7eff18dfa1f7 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -2510,8 +2510,7 @@ int kgsl_pwrctrl_init(struct kgsl_device *device) KGSL_PWR_ERR(device, "Failed to register client with CX Ipeak %d\n", result); - //goto error_cleanup_pwr_limit; - result = 0; + goto error_cleanup_pwr_limit; } } return result; diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c index 399c293f0ffc..af6338d6f9e0 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c @@ -147,7 +147,7 @@ void vbif_lock(struct platform_device *parent_pdev) if (!parent_pdev) return; -// mdp_vbif_lock(parent_pdev, true); + 
mdp_vbif_lock(parent_pdev, true); } void vbif_unlock(struct platform_device *parent_pdev) @@ -155,7 +155,7 @@ void vbif_unlock(struct platform_device *parent_pdev) if (!parent_pdev) return; -// mdp_vbif_lock(parent_pdev, false); + mdp_vbif_lock(parent_pdev, false); } void sde_mdp_halt_vbif_xin(struct sde_mdp_vbif_halt_params *params) diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index f11020511d0e..34631690b5f9 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -28,9 +28,9 @@ #define COMMON_CAP2_PERMS "mac_override", "mac_admin", "syslog", \ "wake_alarm", "block_suspend", "audit_read" -//#if CAP_LAST_CAP > CAP_AUDIT_READ -//#error New capability defined, please update COMMON_CAP2_PERMS. -//#endif +#if CAP_LAST_CAP > CAP_AUDIT_READ +#error New capability defined, please update COMMON_CAP2_PERMS. +#endif /* * Note: The name for any socket class should be suffixed by "socket", From a07c45129fdd5bfa1fb58d26116c506161ca51f2 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 23:15:51 +0530 Subject: [PATCH 138/141] ARM64: configs: raphael: Regenerate Signed-off-by: UtsavBalar1231 --- arch/arm64/configs/raphael_defconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm64/configs/raphael_defconfig b/arch/arm64/configs/raphael_defconfig index c77ca7415813..7754e947baf5 100644 --- a/arch/arm64/configs/raphael_defconfig +++ b/arch/arm64/configs/raphael_defconfig @@ -2167,6 +2167,7 @@ CONFIG_SPI_QCOM_GENI=y CONFIG_SPI_SPIDEV=y # CONFIG_SPI_TLE62X0 is not set # CONFIG_SPI_SLAVE is not set +# CONFIG_SPI_DYNAMIC is not set CONFIG_SPMI=y CONFIG_SPMI_MSM_PMIC_ARB=y # CONFIG_SPMI_MSM_PMIC_ARB_DEBUG is not set @@ -4240,6 +4241,7 @@ CONFIG_QCOM_AOP_DDRSS_COMMANDS=y # CONFIG_QCOM_ADSP_MANUAL_VOTE is not set # CONFIG_MSM_BAM_DMUX is not set # CONFIG_MSM_BGCOM is not set +# CONFIG_MSM_BGRSB is not set # CONFIG_MSM_PIL_SSR_BG is not set CONFIG_QCOM_SOC_INFO=y # 
CONFIG_RENAME_BLOCK_DEVICE is not set From ce35a381647abbbd75cad943ba5894582f9bf7d7 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Fri, 28 Aug 2020 23:17:48 +0530 Subject: [PATCH 139/141] ARM64: configs: raphael: Enable char diagnostics driver Signed-off-by: UtsavBalar1231 --- arch/arm64/configs/raphael_defconfig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arm64/configs/raphael_defconfig b/arch/arm64/configs/raphael_defconfig index 7754e947baf5..c468d7d508cd 100644 --- a/arch/arm64/configs/raphael_defconfig +++ b/arch/arm64/configs/raphael_defconfig @@ -2069,6 +2069,8 @@ CONFIG_HW_RANDOM_MSM_LEGACY=y # # Diag Support # +CONFIG_DIAG_CHAR=y +CONFIG_DIAG_OVER_USB=y CONFIG_MSM_FASTCVPD=y CONFIG_MSM_ADSPRPC=y # CONFIG_MSM_RDBG is not set @@ -3520,6 +3522,7 @@ CONFIG_USB_F_MTP=y CONFIG_USB_F_PTP=y CONFIG_USB_F_AUDIO_SRC=y CONFIG_USB_F_ACC=y +CONFIG_USB_F_DIAG=y CONFIG_USB_F_CDEV=y CONFIG_USB_F_CCID=y CONFIG_USB_F_GSI=y @@ -3551,7 +3554,7 @@ CONFIG_USB_CONFIGFS_F_MIDI=y # CONFIG_USB_CONFIGFS_F_HID is not set # CONFIG_USB_CONFIGFS_F_UVC is not set # CONFIG_USB_CONFIGFS_F_PRINTER is not set -# CONFIG_USB_CONFIGFS_F_DIAG is not set +CONFIG_USB_CONFIGFS_F_DIAG=y CONFIG_USB_CONFIGFS_F_CDEV=y CONFIG_USB_CONFIGFS_F_CCID=y CONFIG_USB_CONFIGFS_F_GSI=y From 64bd14875f6e13758b5a030afc81da1111a719b2 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Sat, 29 Aug 2020 13:12:25 +0530 Subject: [PATCH 140/141] drone: integrate drone ci pipeline Signed-off-by: UtsavBalar1231 --- .drone.yml | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 .drone.yml diff --git a/.drone.yml b/.drone.yml new file mode 100644 index 000000000000..1a294476ce61 --- /dev/null +++ b/.drone.yml @@ -0,0 +1,29 @@ +--- +kind: pipeline +type: docker +name: kernel_xiaomi_raphael + +platform: + os: linux + arch: amd64 + +clone: + depth: 1 + +steps: +- name: immensity_kernel + image: ubuntu:latest + environment: + CI_CHANNEL_ID: + from_secret: 
ci_channel_id + BOT_API_KEY: + from_secret: bot_api_key + RELEASE_VERSION: "test" + commands: + - apt-get update && apt-get install -y bison build-essential bc bison curl libssl-dev git zip python flex + - git clone --depth=1 https://github.com/UtsavBalar1231/scripts.git -b master script && cd script + - ./dronesetup.sh --proton + - ./kernel.sh --proton --beta + when: + branch: + - auto-kernel-ci From 62ff5031f791de5be20c5e1c398afdde466668c8 Mon Sep 17 00:00:00 2001 From: UtsavBalar1231 Date: Sat, 29 Aug 2020 14:21:49 +0530 Subject: [PATCH 141/141] README: update Signed-off-by: UtsavBalar1231 --- .drone.yml | 2 +- README.md | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.drone.yml b/.drone.yml index 1a294476ce61..58917f3e1780 100644 --- a/.drone.yml +++ b/.drone.yml @@ -23,7 +23,7 @@ steps: - apt-get update && apt-get install -y bison build-essential bc bison curl libssl-dev git zip python flex - git clone --depth=1 https://github.com/UtsavBalar1231/scripts.git -b master script && cd script - ./dronesetup.sh --proton - - ./kernel.sh --proton --beta + - ./kernel.sh --proton when: branch: - auto-kernel-ci diff --git a/README.md b/README.md index d8ba802aed14..416009d9e10a 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,8 @@ # IMMENSiTY KERNAL for Redmi K20pro / Mi9Tpro +[![Build Status](https://cloud.drone.io/api/badges/UtsavBalar1231/kernel_xiaomi_raphael/status.svg?ref=refs/heads/auto-kernel-ci)](https://cloud.drone.io/UtsavBalar1231/kernel_xiaomi_raphael) ![logo](https://github.com/UtsavBalar1231/xda-stuff/raw/master/immensity-new.png "logo here") -> Merged AOSP android-4.14-stable [4.14.192] -> Latest CAF tag: **LE.UM.3.2.3-45100-SA2150p** +> Merged AOSP android-4.14-stable [4.14.195] +> Latest CAF tag: **LE.UM.3.2.3-00110-SA2150p**