[ALPS05007074] crypto: Revert MediaTek HIE-related patch

Revert MediaTek HIE-related patch so that Google inline encryption patch
can be merged without conflict.

MTK-Commit-Id: 2e3047774c01226befaf18674bc4ac7e0d62e3f6

Change-Id: I16e7189cc3f3b9251d391c4531eb4e6024276599
Signed-off-by: Light Hsieh <light.hsieh@mediatek.com>
CR-Id: ALPS05007074
Feature: [Android Default] EXT4 File System
This commit is contained in:
Light Hsieh
2020-03-03 04:12:35 +08:00
parent 3ab573ba67
commit 95834338c7
39 changed files with 51 additions and 2424 deletions

View File

@@ -434,7 +434,5 @@ CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_KGDB=y
CONFIG_KGDB_KDB=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_HIE=y
CONFIG_HIE_DEBUG=y
CONFIG_CRYPTO_TWOFISH=y
# CONFIG_CRYPTO_HW is not set

View File

@@ -408,6 +408,5 @@ CONFIG_FAULT_INJECTION=y
CONFIG_ENABLE_DEFAULT_TRACERS=y
CONFIG_MTK_SCHED_TRACERS=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_HIE=y
CONFIG_CRYPTO_TWOFISH=y
# CONFIG_CRYPTO_HW is not set

View File

@@ -435,7 +435,5 @@ CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_KGDB=y
CONFIG_KGDB_KDB=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_HIE=y
CONFIG_HIE_DEBUG=y
CONFIG_CRYPTO_TWOFISH=y
# CONFIG_CRYPTO_HW is not set

View File

@@ -409,6 +409,5 @@ CONFIG_FAULT_INJECTION=y
CONFIG_ENABLE_DEFAULT_TRACERS=y
CONFIG_MTK_SCHED_TRACERS=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_HIE=y
CONFIG_CRYPTO_TWOFISH=y
# CONFIG_CRYPTO_HW is not set

View File

@@ -253,12 +253,6 @@ static void bio_free(struct bio *bio)
bio_uninit(bio);
if (bio->bi_crypt_ctx.bc_info_act) {
bio->bi_crypt_ctx.bc_info_act(
bio->bi_crypt_ctx.bc_info,
BIO_BC_INFO_PUT);
}
if (bs) {
bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
@@ -583,23 +577,14 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
}
EXPORT_SYMBOL(bio_phys_segments);
#if defined(CONFIG_MTK_HW_FDE)
static inline void bio_clone_crypt_info(struct bio *dst, const struct bio *src)
{
/* for HIE */
dst->bi_crypt_ctx = src->bi_crypt_ctx;
if (src->bi_crypt_ctx.bc_info) {
src->bi_crypt_ctx.bc_info_act(
src->bi_crypt_ctx.bc_info,
BIO_BC_INFO_GET);
}
#if defined(CONFIG_MTK_HW_FDE)
/* for FDE */
dst->bi_hw_fde = src->bi_hw_fde;
dst->bi_key_idx = src->bi_key_idx;
#endif
}
#endif
/**
* __bio_clone_fast - clone a bio that shares the original bio's biovec
@@ -630,7 +615,9 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
#if defined(CONFIG_MTK_HW_FDE)
bio_clone_crypt_info(bio, bio_src);
#endif
bio_clone_blkcg_association(bio, bio_src);
}
@@ -740,7 +727,9 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
}
}
#if defined(CONFIG_MTK_HW_FDE)
bio_clone_crypt_info(bio, bio_src);
#endif
bio_clone_blkcg_association(bio, bio_src);
@@ -1061,9 +1050,6 @@ void bio_advance(struct bio *bio, unsigned bytes)
bio_integrity_advance(bio, bytes);
bio_advance_iter(bio, &bio->bi_iter, bytes);
/* also advance bc_iv for HIE */
bio->bi_crypt_ctx.bc_iv += (bytes >> PAGE_SHIFT);
}
EXPORT_SYMBOL(bio_advance);
@@ -2199,22 +2185,6 @@ void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
#endif /* CONFIG_BLK_CGROUP */
unsigned long bio_bc_iv_get(struct bio *bio)
{
if (bio_bcf_test(bio, BC_IV_CTX))
return bio->bi_crypt_ctx.bc_iv;
if (bio_bcf_test(bio, BC_IV_PAGE_IDX)) {
struct page *p;
p = bio_page(bio);
if (p && page_mapping(p))
return page_index(p);
}
return BC_INVALID_IV;
}
EXPORT_SYMBOL_GPL(bio_bc_iv_get);
static void __init biovec_init_slabs(void)
{
int i;

View File

@@ -13,8 +13,6 @@
#include "blk.h"
#include <linux/hie.h>
static struct bio *blk_bio_discard_split(struct request_queue *q,
struct bio *bio,
struct bio_set *bs,
@@ -667,124 +665,6 @@ static void blk_account_io_merge(struct request *req)
}
}
static int crypto_try_merge_bio(struct bio *bio, struct bio *nxt, int type)
{
unsigned long iv_bio, iv_nxt;
struct bio_vec bv;
struct bvec_iter iter;
unsigned int count = 0;
iv_bio = bio_bc_iv_get(bio);
iv_nxt = bio_bc_iv_get(nxt);
if (iv_bio == BC_INVALID_IV || iv_nxt == BC_INVALID_IV)
return ELEVATOR_NO_MERGE;
bio_for_each_segment(bv, bio, iter)
count++;
if ((iv_bio + count) != iv_nxt)
return ELEVATOR_NO_MERGE;
return type;
}
static int crypto_try_merge(struct request *rq, struct bio *bio, int type)
{
/* flag mismatch => don't merge */
if (rq->bio->bi_crypt_ctx.bc_flags != bio->bi_crypt_ctx.bc_flags)
return ELEVATOR_NO_MERGE;
/*
* Check both sector and crypto iv here to make
* sure blk_try_merge() allows merging only if crypto iv
* is also allowed to fix below cases,
*
* rq and bio can do front-merge in sector view, but
* not allowed by their crypto ivs.
*/
if (type == ELEVATOR_BACK_MERGE) {
if (blk_rq_pos(rq) + blk_rq_sectors(rq) !=
bio->bi_iter.bi_sector)
return ELEVATOR_NO_MERGE;
return crypto_try_merge_bio(rq->biotail, bio, type);
} else if (type == ELEVATOR_FRONT_MERGE) {
if (bio->bi_iter.bi_sector + bio_sectors(bio) !=
blk_rq_pos(rq))
return ELEVATOR_NO_MERGE;
return crypto_try_merge_bio(bio, rq->bio, type);
}
return ELEVATOR_NO_MERGE;
}
static bool crypto_not_mergeable(struct request *req, struct bio *nxt)
{
struct bio *bio = req->bio;
/* If neither is encrypted, no veto from us. */
if (~(bio->bi_crypt_ctx.bc_flags | nxt->bi_crypt_ctx.bc_flags) &
BC_CRYPT) {
return false;
}
/* If one's encrypted and the other isn't, don't merge. */
/* If one's using page index as iv, and the other isn't don't merge */
if ((bio->bi_crypt_ctx.bc_flags ^ nxt->bi_crypt_ctx.bc_flags)
& (BC_CRYPT | BC_IV_PAGE_IDX))
return true;
/* If both using page index as iv */
if (bio->bi_crypt_ctx.bc_flags & nxt->bi_crypt_ctx.bc_flags &
BC_IV_PAGE_IDX) {
/*
* Must be the same file on the same mount.
*
* If the same, keys shall be the same as well since
* keys are bound to inodes.
*/
if ((bio_bc_inode(bio) != bio_bc_inode(nxt)) ||
(bio_bc_sb(bio) != bio_bc_sb(nxt)))
return true;
/*
* Page index must be contiguous.
*
* Check both back and front direction because
* req and nxt here are not promised any orders.
*
* For example, merge attempt from blk_attempt_plug_merge().
*/
if ((crypto_try_merge(req, nxt, ELEVATOR_BACK_MERGE) ==
ELEVATOR_NO_MERGE) &&
(crypto_try_merge(req, nxt, ELEVATOR_FRONT_MERGE) ==
ELEVATOR_NO_MERGE))
return true;
} else {
/*
* Not using page index as iv: allow merge bios belong to
* different inodes if their keys are exactly the same.
*
* Above case could happen in hw-crypto path because
* key is not derived for different inodes.
*
* Checking keys only is sufficient here since iv or
* dun shall be physical sector number which shall be taken
* care by blk_try_merge().
*/
/* Keys shall be the same if inodes are the same */
if ((bio_bc_inode(bio) == bio_bc_inode(nxt)) &&
(bio_bc_sb(bio) == bio_bc_sb(nxt)))
return false;
/* Check keys if inodes are different */
if (!hie_key_verify(bio, nxt))
return true;
}
return false;
}
/*
* For non-mq, this has to be called with the request spinlock acquired.
* For mq with scheduling, the appropriate queue wide lock should be held.
@@ -816,8 +696,6 @@ static struct request *attempt_merge(struct request_queue *q,
!blk_write_same_mergeable(req->bio, next->bio))
return NULL;
if (crypto_not_mergeable(req, next->bio))
return NULL;
/*
* Don't allow merge of different write hints, or for a hint with
* non-hint IO.
@@ -949,8 +827,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
!blk_write_same_mergeable(rq->bio, bio))
return false;
if (crypto_not_mergeable(rq, bio))
return false;
/*
* Don't allow merge of different write hints, or for a hint with
* non-hint IO.

View File

@@ -105,7 +105,6 @@ source "drivers/misc/mediatek/partition/Kconfig"
source "drivers/misc/mediatek/blocktag/Kconfig"
source "drivers/misc/mediatek/io_boost/Kconfig"
source "drivers/misc/mediatek/pidmap/Kconfig"
source "drivers/misc/mediatek/keyhint/Kconfig"
source "drivers/misc/mediatek/nand/Kconfig"
endmenu # Storage

View File

@@ -104,7 +104,6 @@ obj-$(CONFIG_MICROTRUST_TEE_SUPPORT) += teei/
obj-$(CONFIG_TRUSTKERNEL_TEE_SUPPORT) += tkcore/
obj-$(CONFIG_MTK_DEVMPU) += devmpu/
obj-y += pidmap/
obj-$(CONFIG_MTK_KEY_HINT) += keyhint/
obj-$(CONFIG_MTK_RAM_CONSOLE) += ram_console/
obj-$(CONFIG_MEDIATEK_SOLUTION) += aee/
obj-$(CONFIG_MEDIATEK_SOLUTION) += sched/

View File

@@ -1,60 +0,0 @@
/*
* Copyright (C) 2018 MediaTek Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __KEYHINT_H
#define __KEYHINT_H
#include <linux/key.h>
struct kh_dev {
unsigned long *kh;
unsigned long long *kh_last_access;
unsigned short *kh_slot_usage_cnt;
unsigned short kh_slot_total_cnt;
unsigned short kh_slot_active_cnt;
unsigned short kh_unit_per_key;
};
#ifdef CONFIG_HIE
int kh_get_hint(struct kh_dev *dev, const char *key,
int *need_update);
int kh_register(struct kh_dev *dev, unsigned int key_bits,
unsigned int key_slot);
int kh_release_hint(struct kh_dev *dev, int slot);
int kh_suspend(struct kh_dev *dev);
#else
static inline int kh_get_hint(struct kh_dev *dev, const char *key,
int *need_update)
{
return 0;
}
static inline int kh_register(struct kh_dev *dev, unsigned int key_bits,
unsigned int key_slot)
{
return 0;
}
static inline int kh_release_hint(struct kh_dev *dev, int slot)
{
return 0;
}
static inline int kh_suspend(struct kh_dev *dev)
{
return 0;
}
#endif
#endif

View File

@@ -1,11 +0,0 @@
config MTK_KEY_HINT
bool "Key management tool for HIE"
depends on HIE
default y
help
Enable Key Hint to manage keys for HIE
(Hardware Inline Encryption).
Keys in key-hint are managed by LRU and the key index
is directly mapped to crypto slots index in crypto IP
embbeded in storage host.

View File

@@ -1,14 +0,0 @@
#
# Copyright (C) 2018 MediaTek Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
obj-$(CONFIG_MTK_KEY_HINT) += keyhint.o

View File

@@ -1,297 +0,0 @@
/*
* Copyright (C) 2018 MediaTek Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/key.h>
#include <linux/module.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <mt-plat/keyhint.h>
/* #define KH_DEBUG */
#ifdef KH_DEBUG
#define kh_info(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
#else
#define kh_info(fmt, ...)
#endif
#define kh_err(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
int kh_register(struct kh_dev *dev, unsigned int key_bits,
unsigned int key_slot)
{
int size;
int ret = 0;
if (dev->kh) {
kh_info("already registered, dev 0x%p\n", dev);
return -EPERM;
}
if (key_bits % (sizeof(unsigned int) * BITS_PER_LONG)) {
kh_info("key_bits %u shall be multiple of %u\n",
key_bits, BITS_PER_LONG);
}
size = (key_bits / BITS_PER_BYTE) * key_slot;
kh_info("key_bits=%u, key_slot=%u, size=%u bytes\n",
key_bits, key_slot, size);
dev->kh = kzalloc(size, GFP_KERNEL);
if (!dev->kh)
goto nomem_kh;
size = key_slot * sizeof(unsigned long long);
dev->kh_last_access = kzalloc(size, GFP_KERNEL);
if (!dev->kh_last_access)
goto nomem_last_access;
size = key_slot * sizeof(unsigned short);
dev->kh_slot_usage_cnt = kzalloc(size, GFP_KERNEL);
if (!dev->kh_slot_usage_cnt)
goto nomem_slot_usage_cnt;
dev->kh_slot_total_cnt = key_slot;
dev->kh_unit_per_key = (key_bits / BITS_PER_BYTE) /
sizeof(unsigned long);
dev->kh_slot_active_cnt = 0;
kh_info("kh=%p, kh_last_access=%p, kh_slot_usage_cnt=%p\n",
dev->kh, dev->kh_last_access, dev->kh_slot_usage_cnt);
goto exit;
nomem_slot_usage_cnt:
kfree(dev->kh_last_access);
nomem_last_access:
kfree(dev->kh);
nomem_kh:
ret = -ENOMEM;
exit:
kh_info("register ret=%d\n", ret);
return ret;
}
static int kh_get_free_slot(struct kh_dev *dev)
{
int i, min_slot;
unsigned long long min_time = LLONG_MAX;
if (dev->kh_slot_active_cnt < dev->kh_slot_total_cnt) {
dev->kh_slot_active_cnt++;
kh_info("new, slot=%d\n", (dev->kh_slot_active_cnt - 1));
return (dev->kh_slot_active_cnt - 1);
}
min_slot = dev->kh_slot_active_cnt;
for (i = 0; i < dev->kh_slot_active_cnt; i++) {
if ((dev->kh_slot_usage_cnt[i] == 0) &&
dev->kh_last_access[i] < min_time) {
min_time = dev->kh_last_access[i];
min_slot = i;
}
}
if (min_slot == dev->kh_slot_active_cnt) {
kh_err("no available slot!\n");
return -ENOMEM;
}
kh_info("vic, slot=%d, mint=%lu\n", min_slot, min_time);
return min_slot;
}
int kh_release_hint(struct kh_dev *dev, int slot)
{
if (unlikely(!dev->kh))
return -ENODEV;
if (unlikely(!dev->kh_slot_usage_cnt[slot])) {
kh_err("unbalanced get and release! slot=%d\n", slot);
/* shall we bug on here? */
return -1;
}
dev->kh_slot_usage_cnt[slot]--;
kh_info("rel, %d, %d\n", slot,
dev->kh_slot_usage_cnt[slot]);
return 0;
}
int kh_get_hint(struct kh_dev *dev, const char *key, int *need_update)
{
int i, j, matched, matched_slot;
unsigned long *ptr_kh, *ptr_key;
if (unlikely(!dev->kh || !need_update)) {
kh_info("get, err, key=0x%lx\n", *(unsigned long *)key);
return -ENODEV;
}
/* round 1: simple match */
matched = 0;
matched_slot = 0;
ptr_kh = (unsigned long *)dev->kh;
ptr_key = (unsigned long *)key;
for (i = 0; i < dev->kh_slot_active_cnt; i++) {
if (*ptr_kh == *ptr_key) {
matched_slot = i;
matched++;
}
ptr_kh += dev->kh_unit_per_key;
}
if (matched == 1) {
/* fully match rest part to ensure 100% matched */
ptr_kh = (unsigned long *)dev->kh;
ptr_kh += (dev->kh_unit_per_key * matched_slot);
for (i = 0; i < dev->kh_unit_per_key - 1; i++) {
ptr_kh++;
ptr_key++;
if (*ptr_kh != *ptr_key) {
matched = 0;
break;
}
}
if (matched) {
*need_update = 0;
dev->kh_last_access[matched_slot] =
sched_clock();
dev->kh_slot_usage_cnt[matched_slot]++;
kh_info("get, 1, %d, key=0x%lx, %d\n",
matched_slot, *(unsigned long *)key,
dev->kh_slot_usage_cnt[matched_slot]);
return matched_slot;
}
}
/* round 2: full match if simple match finds multiple targets */
if (matched) {
matched = 0;
for (i = 0; i < dev->kh_slot_active_cnt; i++) {
ptr_kh = (unsigned long *)dev->kh;
ptr_kh += (i * dev->kh_unit_per_key);
ptr_key = (unsigned long *)key;
for (j = 0; j < dev->kh_unit_per_key; j++) {
if (*ptr_kh++ != *ptr_key++)
break;
}
if (j == dev->kh_unit_per_key) {
*need_update = 0;
dev->kh_last_access[i] =
sched_clock();
dev->kh_slot_usage_cnt[i]++;
kh_info("get, 2, %d, key=0x%lx %d\n",
i, *(unsigned long *)key,
dev->kh_slot_usage_cnt[i]);
return i;
}
}
}
/* nothing matched, add new hint */
j = kh_get_free_slot(dev);
if (j < 0)
return j;
ptr_kh = (unsigned long *)dev->kh;
ptr_kh += (j * dev->kh_unit_per_key);
ptr_key = (unsigned long *)key;
for (i = 0; i < dev->kh_unit_per_key; i++)
*ptr_kh++ = *ptr_key++;
dev->kh_last_access[j] = sched_clock();
dev->kh_slot_usage_cnt[j]++;
*need_update = 1;
kh_info("get, n, %d, key=0x%lx, %d\n", j,
*(unsigned long *)key,
dev->kh_slot_usage_cnt[j]);
return j;
}
static int kh_reset(struct kh_dev *dev)
{
if (unlikely(!dev->kh))
return -ENODEV;
dev->kh_slot_active_cnt = 0;
memset(dev->kh_slot_usage_cnt, 0,
sizeof(unsigned short) *
dev->kh_slot_total_cnt);
kh_info("rst, dev=0x%p\n", dev);
return 0;
}
int kh_suspend(struct kh_dev *dev)
{
int i;
if (unlikely(!dev->kh))
return -ENODEV;
/* shall have zero key reference before suspend */
for (i = 0; i < dev->kh_slot_active_cnt; i++)
WARN_ON(dev->kh_slot_usage_cnt[i]);
return kh_reset(dev);
}
MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Key Hint");

View File

@@ -46,12 +46,10 @@
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <trace/events/block.h>
#include <linux/hie.h>
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static int submit_bh_wbc_crypt(struct inode *inode, int op, int op_flags,
struct buffer_head *bh, enum rw_hint hint,
struct writeback_control *wbc);
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
enum rw_hint hint, struct writeback_control *wbc);
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
@@ -1830,9 +1828,8 @@ int __block_write_full_page(struct inode *inode, struct page *page,
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
submit_bh_wbc_crypt(inode, REQ_OP_WRITE,
write_flags, bh, inode->i_write_hint,
wbc);
submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
inode->i_write_hint, wbc);
nr_underway++;
}
bh = next;
@@ -1886,8 +1883,8 @@ recover:
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
clear_buffer_dirty(bh);
submit_bh_wbc_crypt(inode, REQ_OP_WRITE,
write_flags, bh, inode->i_write_hint, wbc);
submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
inode->i_write_hint, wbc);
nr_underway++;
}
bh = next;
@@ -3109,9 +3106,8 @@ void guard_bio_eod(int op, struct bio *bio)
}
}
static int submit_bh_wbc_crypt(struct inode *inode, int op, int op_flags,
struct buffer_head *bh, enum rw_hint write_hint,
struct writeback_control *wbc)
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
enum rw_hint write_hint, struct writeback_control *wbc)
{
struct bio *bio;
@@ -3147,8 +3143,6 @@ static int submit_bh_wbc_crypt(struct inode *inode, int op, int op_flags,
bio->bi_end_io = end_bio_bh_io_sync;
bio->bi_private = bh;
if (inode)
hie_set_bio_crypt_context(inode, bio);
/* Take care of bh's that straddle the end of the device */
guard_bio_eod(op, bio);
@@ -3162,23 +3156,9 @@ static int submit_bh_wbc_crypt(struct inode *inode, int op, int op_flags,
return 0;
}
int _submit_bh(int op, int op_flags, struct buffer_head *bh,
unsigned long bio_flags)
{
return submit_bh_wbc_crypt(NULL, op, op_flags, bh, bio_flags, NULL);
}
EXPORT_SYMBOL_GPL(_submit_bh);
int submit_bh_crypt(struct inode *inode, int op, int op_flags,
struct buffer_head *bh)
{
return submit_bh_wbc_crypt(inode, op, op_flags, bh, 0, NULL);
}
EXPORT_SYMBOL(submit_bh_crypt);
int submit_bh(int op, int op_flags, struct buffer_head *bh)
{
return submit_bh_wbc_crypt(NULL, op, op_flags, bh, 0, NULL);
return submit_bh_wbc(op, op_flags, bh, 0, NULL);
}
EXPORT_SYMBOL(submit_bh);
@@ -3208,8 +3188,7 @@ EXPORT_SYMBOL(submit_bh);
* All of the buffers must be for the same device, and must also be a
* multiple of the current approved size for the device.
*/
void ll_rw_block_crypt(struct inode *inode, int op, int op_flags, int nr,
struct buffer_head *bhs[])
void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[])
{
int i;
@@ -3222,26 +3201,20 @@ void ll_rw_block_crypt(struct inode *inode, int op, int op_flags, int nr,
if (test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
submit_bh_crypt(inode, op, op_flags, bh);
submit_bh(op, op_flags, bh);
continue;
}
} else {
if (!buffer_uptodate(bh)) {
bh->b_end_io = end_buffer_read_sync;
get_bh(bh);
submit_bh_crypt(inode, op, op_flags, bh);
submit_bh(op, op_flags, bh);
continue;
}
}
unlock_buffer(bh);
}
}
EXPORT_SYMBOL(ll_rw_block_crypt);
void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[])
{
ll_rw_block_crypt(NULL, op, op_flags, nr, bhs);
}
EXPORT_SYMBOL(ll_rw_block);
void write_dirty_buffer(struct buffer_head *bh, int op_flags)
@@ -3508,7 +3481,13 @@ int bh_uptodate_or_lock(struct buffer_head *bh)
}
EXPORT_SYMBOL(bh_uptodate_or_lock);
int bh_submit_read_crypt(struct inode *inode, struct buffer_head *bh)
/**
* bh_submit_read - Submit a locked buffer for reading
* @bh: struct buffer_head
*
* Returns zero on success and -EIO on error.
*/
int bh_submit_read(struct buffer_head *bh)
{
BUG_ON(!buffer_locked(bh));
@@ -3519,24 +3498,12 @@ int bh_submit_read_crypt(struct inode *inode, struct buffer_head *bh)
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
submit_bh_crypt(inode, REQ_OP_READ, 0, bh);
submit_bh(REQ_OP_READ, 0, bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return 0;
return -EIO;
}
EXPORT_SYMBOL(bh_submit_read_crypt);
/**
* bh_submit_read - Submit a locked buffer for reading
* @bh: struct buffer_head
*
* Returns zero on success and -EIO on error.
*/
int bh_submit_read(struct buffer_head *bh)
{
return bh_submit_read_crypt(NULL, bh);
}
EXPORT_SYMBOL(bh_submit_read);
/*

View File

@@ -159,7 +159,6 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
struct crypto_skcipher *tfm = ci->ci_ctfm;
int res = 0;
BUG_ON(fscrypt_is_hw_encrypt(inode));
if (WARN_ON_ONCE(len <= 0))
return -EINVAL;
if (WARN_ON_ONCE(len % FS_CRYPTO_BLOCK_SIZE != 0))

View File

@@ -17,8 +17,6 @@
#include <crypto/hash.h>
/* Encryption parameters */
#define FS_AES_256_XTS_KEY_SIZE 64
#define FS_KEY_DERIVATION_NONCE_SIZE 16
/**
@@ -43,12 +41,6 @@ struct fscrypt_context {
#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1
enum fscrypt_ci_mode {
CI_NONE_MODE = 0,
CI_DATA_MODE,
CI_FNAME_MODE,
};
/**
* For encrypted symlinks, the ciphertext length is stored at the beginning
* of the string in little-endian format.
@@ -58,8 +50,6 @@ struct fscrypt_symlink_data {
char encrypted_path[1];
} __packed;
#define CI_FREEING (1 << 0)
/*
* fscrypt_info - the "encryption key" for an inode
*
@@ -92,16 +82,11 @@ struct fscrypt_info {
struct fscrypt_master_key *ci_master_key;
/* fields from the fscrypt_context */
u8 ci_format;
u8 ci_data_mode;
u8 ci_filename_mode;
u8 ci_flags;
u8 ci_status;
atomic_t ci_count;
spinlock_t ci_lock;
u8 ci_master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
u8 ci_nonce[FS_KEY_DERIVATION_NONCE_SIZE];
u8 ci_raw_key[FS_MAX_KEY_SIZE];
};
typedef enum {
@@ -112,12 +97,6 @@ typedef enum {
#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
#define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002
static inline bool fscrypt_is_private_mode(struct fscrypt_info *ci)
{
return ci->ci_format == CI_DATA_MODE &&
ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE;
}
static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
u32 filenames_mode)
{
@@ -130,9 +109,6 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
if (contents_mode == FS_ENCRYPTION_MODE_ADIANTUM &&
filenames_mode == FS_ENCRYPTION_MODE_ADIANTUM)
return true;
if (contents_mode == FS_ENCRYPTION_MODE_PRIVATE &&
filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
return true;
return false;
}
@@ -193,7 +169,4 @@ struct fscrypt_mode {
extern void __exit fscrypt_essiv_cleanup(void);
/* policy.c */
extern u8 fscrypt_data_crypt_mode(const struct inode *inode, u8 mode);
#endif /* _FSCRYPT_PRIVATE_H */

View File

@@ -17,7 +17,6 @@
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <linux/hie.h>
#include "fscrypt_private.h"
static struct crypto_shash *essiv_hash_tfm;
@@ -105,14 +104,6 @@ find_and_lock_process_key(const char *prefix,
goto invalid;
payload = (const struct fscrypt_key *)ukp->data;
#ifdef CONFIG_HIE_DEBUG
if (hie_debug(HIE_DBG_FS))
pr_info("HIE: %s: prefix:%s ci:%p, payload:%p, size:%d, mode:%d, min_keysize:%d\n",
__func__, prefix, payload,
payload->size, payload->mode, min_keysize);
#endif
if (ukp->datalen != sizeof(struct fscrypt_key) ||
payload->size < 1 || payload->size > FS_MAX_KEY_SIZE) {
fscrypt_warn(NULL,
@@ -172,7 +163,7 @@ static struct fscrypt_mode available_modes[] = {
};
static struct fscrypt_mode *
select_encryption_mode(struct fscrypt_info *ci, const struct inode *inode)
select_encryption_mode(const struct fscrypt_info *ci, const struct inode *inode)
{
if (!fscrypt_valid_enc_modes(ci->ci_data_mode, ci->ci_filename_mode)) {
fscrypt_warn(inode->i_sb,
@@ -182,18 +173,11 @@ select_encryption_mode(struct fscrypt_info *ci, const struct inode *inode)
return ERR_PTR(-EINVAL);
}
if (S_ISREG(inode->i_mode)) {
ci->ci_format = CI_DATA_MODE;
/* HIE: default use aes-256-xts */
if (ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE)
return &available_modes[FS_ENCRYPTION_MODE_AES_256_XTS];
if (S_ISREG(inode->i_mode))
return &available_modes[ci->ci_data_mode];
}
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
ci->ci_format = CI_FNAME_MODE;
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
return &available_modes[ci->ci_filename_mode];
}
WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n",
inode->i_ino, (inode->i_mode & S_IFMT));
@@ -201,8 +185,7 @@ select_encryption_mode(struct fscrypt_info *ci, const struct inode *inode)
}
/* Find the master key, then derive the inode's actual encryption key */
static int find_and_derive_key(struct fscrypt_info *crypt_info,
const struct inode *inode,
static int find_and_derive_key(const struct inode *inode,
const struct fscrypt_context *ctx,
u8 *derived_key, const struct fscrypt_mode *mode)
{
@@ -221,9 +204,6 @@ static int find_and_derive_key(struct fscrypt_info *crypt_info,
if (IS_ERR(key))
return PTR_ERR(key);
memcpy(crypt_info->ci_raw_key,
payload->raw, sizeof(crypt_info->ci_raw_key));
if (ctx->flags & FS_POLICY_FLAG_DIRECT_KEY) {
if (mode->ivsize < offsetofend(union fscrypt_iv, nonce)) {
fscrypt_warn(inode->i_sb,
@@ -240,11 +220,8 @@ static int find_and_derive_key(struct fscrypt_info *crypt_info,
err = 0;
}
} else {
if (!fscrypt_is_private_mode(crypt_info)) {
err = derive_key_aes(payload->raw, ctx, derived_key,
mode->keysize);
} else
err = 0;
err = derive_key_aes(payload->raw, ctx, derived_key,
mode->keysize);
}
up_read(&key->sem);
key_put(key);
@@ -465,14 +442,6 @@ void __exit fscrypt_essiv_cleanup(void)
crypto_free_shash(essiv_hash_tfm);
}
u8 fscrypt_data_crypt_mode(const struct inode *inode, u8 mode)
{
if (mode == FS_ENCRYPTION_MODE_INVALID)
return FS_ENCRYPTION_MODE_INVALID;
return hie_is_capable(inode->i_sb) ?
FS_ENCRYPTION_MODE_PRIVATE : mode;
}
/*
* Given the encryption mode and key (normally the derived key, but for
* FS_POLICY_FLAG_DIRECT_KEY mode it's the master key), set up the inode's
@@ -530,58 +499,6 @@ static void put_crypt_info(struct fscrypt_info *ci)
kmem_cache_free(fscrypt_info_cachep, ci);
}
static void fscrypt_put_crypt_info(struct fscrypt_info *ci)
{
unsigned long flags;
if (!ci)
return;
/* only ci_count == 1, add lock protection */
if (atomic_dec_and_lock_irqsafe(&ci->ci_count, &ci->ci_lock, &flags)) {
ci->ci_status |= CI_FREEING;
spin_unlock_irqrestore(&ci->ci_lock, flags);
put_crypt_info(ci);
}
}
static struct fscrypt_info *fscrypt_get_crypt_info(struct fscrypt_info *ci,
bool init)
{
unsigned long flags;
if (init) {
spin_lock_init(&ci->ci_lock);
atomic_set(&ci->ci_count, 0);
ci->ci_status = 0;
}
spin_lock_irqsave(&ci->ci_lock, flags);
if (!(ci->ci_status & CI_FREEING)) {
atomic_inc(&ci->ci_count);
spin_unlock_irqrestore(&ci->ci_lock, flags);
} else {
spin_unlock_irqrestore(&ci->ci_lock, flags);
ci = NULL;
}
return ci;
}
void *fscrypt_crypt_info_act(void *ci, int act)
{
struct fscrypt_info *fi;
fi = (struct fscrypt_info *)ci;
if (act & BIO_BC_INFO_GET)
return fscrypt_get_crypt_info(ci, false);
else if (act & BIO_BC_INFO_PUT)
fscrypt_put_crypt_info(ci);
return NULL;
}
int fscrypt_get_encryption_info(struct inode *inode)
{
struct fscrypt_info *crypt_info;
@@ -622,21 +539,12 @@ int fscrypt_get_encryption_info(struct inode *inode)
if (!crypt_info)
return -ENOMEM;
fscrypt_get_crypt_info(crypt_info, true);
crypt_info->ci_flags = ctx.flags;
crypt_info->ci_data_mode =
fscrypt_data_crypt_mode(inode, ctx.contents_encryption_mode);
crypt_info->ci_data_mode = ctx.contents_encryption_mode;
crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
memcpy(crypt_info->ci_master_key_descriptor, ctx.master_key_descriptor,
FS_KEY_DESCRIPTOR_SIZE);
memcpy(crypt_info->ci_nonce, ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
#ifdef CONFIG_HIE_DEBUG
if (hie_debug(HIE_DBG_FS))
pr_info("HIE: %s: inode: %p, %ld, res: %d, dmode: %d, fmode: %d\n",
__func__, inode, inode->i_ino,
res, crypt_info->ci_data_mode,
crypt_info->ci_filename_mode);
#endif
mode = select_encryption_mode(crypt_info, inode);
if (IS_ERR(mode)) {
res = PTR_ERR(mode);
@@ -645,12 +553,6 @@ int fscrypt_get_encryption_info(struct inode *inode)
WARN_ON(mode->ivsize > FSCRYPT_MAX_IV_SIZE);
crypt_info->ci_mode = mode;
#ifdef CONFIG_HIE_DEBUG
if (hie_debug(HIE_DBG_FS))
pr_info("HIE: %s: fscrypt_mode<%s> key_size<%d>\n",
__func__, mode->friendly_name, mode->keysize);
#endif
/*
* This cannot be a stack buffer because it may be passed to the
* scatterlist crypto API as part of key derivation.
@@ -660,18 +562,14 @@ int fscrypt_get_encryption_info(struct inode *inode)
if (!raw_key)
goto out;
res = find_and_derive_key(crypt_info, inode, &ctx, raw_key, mode);
res = find_and_derive_key(inode, &ctx, raw_key, mode);
if (res)
goto out;
if (fscrypt_is_private_mode(crypt_info))
goto hw_encrypt_out;
res = setup_crypto_transform(crypt_info, mode, raw_key, inode);
if (res)
goto out;
hw_encrypt_out:
if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
crypt_info = NULL;
out:
@@ -685,7 +583,7 @@ EXPORT_SYMBOL(fscrypt_get_encryption_info);
void fscrypt_put_encryption_info(struct inode *inode)
{
fscrypt_put_crypt_info(inode->i_crypt_info);
put_crypt_info(inode->i_crypt_info);
inode->i_crypt_info = NULL;
}
EXPORT_SYMBOL(fscrypt_put_encryption_info);

View File

@@ -12,7 +12,6 @@
#include <linux/random.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/hie.h>
#include "fscrypt_private.h"
/*
@@ -20,20 +19,13 @@
*/
static bool is_encryption_context_consistent_with_policy(
const struct fscrypt_context *ctx,
const struct fscrypt_policy *policy,
const struct inode *inode)
const struct fscrypt_policy *policy)
{
if ((ctx->contents_encryption_mode !=
policy->contents_encryption_mode) &&
!(hie_is_capable(inode->i_sb) &&
(ctx->contents_encryption_mode ==
FS_ENCRYPTION_MODE_PRIVATE)))
return 0;
return memcmp(ctx->master_key_descriptor, policy->master_key_descriptor,
FS_KEY_DESCRIPTOR_SIZE) == 0 &&
(ctx->flags == policy->flags) &&
(ctx->contents_encryption_mode ==
policy->contents_encryption_mode) &&
(ctx->filenames_encryption_mode ==
policy->filenames_encryption_mode);
}
@@ -54,9 +46,7 @@ static int create_encryption_context_from_policy(struct inode *inode,
if (policy->flags & ~FS_POLICY_FLAGS_VALID)
return -EINVAL;
ctx.contents_encryption_mode =
fscrypt_data_crypt_mode(inode,
policy->contents_encryption_mode);
ctx.contents_encryption_mode = policy->contents_encryption_mode;
ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
ctx.flags = policy->flags;
BUILD_BUG_ON(sizeof(ctx.nonce) != FS_KEY_DERIVATION_NONCE_SIZE);
@@ -100,8 +90,7 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
&policy);
} else if (ret == sizeof(ctx) &&
is_encryption_context_consistent_with_policy(&ctx,
&policy,
inode)) {
&policy)) {
/* The file already uses the same encryption policy. */
ret = 0;
} else if (ret >= 0 || ret == -ERANGE) {
@@ -139,13 +128,6 @@ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
policy.filenames_encryption_mode = ctx.filenames_encryption_mode;
policy.flags = ctx.flags;
/* in compliance with android */
if (S_ISDIR(inode->i_mode) &&
policy.contents_encryption_mode !=
FS_ENCRYPTION_MODE_INVALID)
policy.contents_encryption_mode =
FS_ENCRYPTION_MODE_AES_256_XTS;
memcpy(policy.master_key_descriptor, ctx.master_key_descriptor,
FS_KEY_DESCRIPTOR_SIZE);
@@ -237,13 +219,6 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
if (res != sizeof(child_ctx))
return 0;
parent_ctx.contents_encryption_mode =
fscrypt_data_crypt_mode(parent,
parent_ctx.contents_encryption_mode);
child_ctx.contents_encryption_mode =
fscrypt_data_crypt_mode(child,
child_ctx.contents_encryption_mode);
return memcmp(parent_ctx.master_key_descriptor,
child_ctx.master_key_descriptor,
FS_KEY_DESCRIPTOR_SIZE) == 0 &&
@@ -294,75 +269,3 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child,
return preload ? fscrypt_get_encryption_info(child): 0;
}
EXPORT_SYMBOL(fscrypt_inherit_context);
int fscrypt_set_bio_ctx(struct inode *inode, struct bio *bio)
{
struct fscrypt_info *ci;
int ret = -ENOENT;
if (!inode || !bio)
return ret;
ci = inode->i_crypt_info;
if (S_ISREG(inode->i_mode) && ci &&
(ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE)) {
WARN_ON(!hie_is_capable(inode->i_sb));
/* HIE: default use aes-256-xts */
bio_bcf_set(bio, BC_CRYPT | BC_AES_256_XTS);
bio->bi_crypt_ctx.bc_key_size = FS_AES_256_XTS_KEY_SIZE;
bio->bi_crypt_ctx.bc_ino = inode->i_ino;
bio->bi_crypt_ctx.bc_sb = inode->i_sb;
bio->bi_crypt_ctx.bc_info_act = &fscrypt_crypt_info_act;
bio->bi_crypt_ctx.bc_info =
fscrypt_crypt_info_act(
ci, BIO_BC_INFO_GET);
WARN_ON(!bio->bi_crypt_ctx.bc_info);
#ifdef CONFIG_HIE_DEBUG
if (hie_debug(HIE_DBG_FS))
pr_info("HIE: %s: ino: %ld, bio: %p\n",
__func__, inode->i_ino, bio);
#endif
ret = 0;
} else
bio_bcf_clear(bio, BC_CRYPT);
return ret;
}
int fscrypt_key_payload(struct bio_crypt_ctx *ctx,
const unsigned char **key)
{
struct fscrypt_info *fi;
fi = (struct fscrypt_info *)ctx->bc_info;
if (!fi) {
pr_info("HIE: %s: missing crypto info\n", __func__);
return -ENOKEY;
}
if (key)
*key = &(fi->ci_raw_key[0]);
return ctx->bc_key_size;
}
int fscrypt_is_hw_encrypt(const struct inode *inode)
{
struct fscrypt_info *ci = inode->i_crypt_info;
return S_ISREG(inode->i_mode) && ci &&
ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE;
}
int fscrypt_is_sw_encrypt(const struct inode *inode)
{
struct fscrypt_info *ci = inode->i_crypt_info;
return S_ISREG(inode->i_mode) && ci &&
ci->ci_data_mode != FS_ENCRYPTION_MODE_INVALID &&
ci->ci_data_mode != FS_ENCRYPTION_MODE_PRIVATE;
}

View File

@@ -37,7 +37,6 @@
#include <linux/uio.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <linux/hie.h>
/*
* How many user pages to map in one call to get_user_pages(). This determines
@@ -451,7 +450,6 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
sdio->bio = bio;
sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
hie_set_dio_crypt_context(dio->inode, bio, sdio->cur_page_fs_offset);
}
/*

View File

@@ -210,7 +210,6 @@ struct ext4_io_submit {
struct bio *io_bio;
ext4_io_end_t *io_end;
sector_t io_next_block;
struct inode *inode;
};
/*
@@ -2581,7 +2580,6 @@ extern int ext4_alloc_flex_bg_array(struct super_block *sb,
ext4_group_t ngroup);
extern const char *ext4_decode_error(struct super_block *sb, int errno,
char nbuf[16]);
extern int ext4_set_bio_ctx(struct inode *inode, struct bio *bio);
extern __printf(4, 5)
void __ext4_error(struct super_block *, const char *, unsigned int,

View File

@@ -1172,7 +1172,6 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
unsigned bbits;
struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
bool decrypt = false;
bool hwcrypt = fscrypt_is_hw_encrypt(inode);
BUG_ON(!PageLocked(page));
BUG_ON(from > PAGE_SIZE);
@@ -1224,13 +1223,6 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
if (hwcrypt) {
ll_rw_block_crypt(inode, REQ_OP_READ,
0, 1, &bh);
*wait_bh++ = bh;
decrypt = false;
continue;
}
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
*wait_bh++ = bh;
decrypt = ext4_encrypted_inode(inode) &&
@@ -2194,7 +2186,6 @@ static int ext4_writepage(struct page *page,
return __ext4_journalled_writepage(page, len);
ext4_io_submit_init(&io_submit, wbc);
io_submit.inode = inode;
io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
if (!io_submit.io_end) {
redirty_page_for_writepage(wbc, page);
@@ -2836,7 +2827,6 @@ static int ext4_writepages(struct address_space *mapping,
mpd.inode = inode;
mpd.wbc = wbc;
ext4_io_submit_init(&mpd.io_submit, wbc);
mpd.io_submit.inode = inode;
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
@@ -3727,13 +3717,6 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
get_block_func = ext4_dio_get_block_unwritten_async;
dio_flags = DIO_LOCKING;
}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#if 0
BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
#else
WARN_ON(fscrypt_is_sw_encrypt(inode));
#endif
#endif
ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
get_block_func, ext4_end_io_dio, NULL,
dio_flags);
@@ -3843,11 +3826,8 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
int rw = iov_iter_rw(iter);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
if (fscrypt_is_hw_encrypt(inode))
goto skip_check;
if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
return 0;
skip_check:
#endif
/*
@@ -4049,16 +4029,12 @@ static int __ext4_block_zero_page_range(handle_t *handle,
if (!buffer_uptodate(bh)) {
err = -EIO;
if (fscrypt_is_hw_encrypt(inode))
ll_rw_block_crypt(inode, REQ_OP_READ, 0, 1, &bh);
else
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
wait_on_buffer(bh);
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
goto unlock;
if (S_ISREG(inode->i_mode) &&
fscrypt_is_sw_encrypt(inode) &&
ext4_encrypted_inode(inode)) {
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));

View File

@@ -225,11 +225,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
for (i = 0; i < nr; i++) {
bh = arr[i];
if (!bh_uptodate_or_lock(bh)) {
/*
* Inline encryption shall be engaged for
* moved data blocks
*/
err = bh_submit_read_crypt(inode, bh);
err = bh_submit_read(bh);
if (err)
return err;
}
@@ -607,18 +603,11 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
return -EOPNOTSUPP;
}
/*
* Limitaion is only applicable for SW encryption but not for
* inline encryption
*/
if (!fscrypt_is_hw_encrypt(orig_inode) ||
!fscrypt_is_hw_encrypt(donor_inode)) {
if (ext4_encrypted_inode(orig_inode) ||
ext4_encrypted_inode(donor_inode)) {
ext4_msg(orig_inode->i_sb, KERN_ERR,
"Online defrag not supported for encrypted files");
return -EOPNOTSUPP;
}
if (ext4_encrypted_inode(orig_inode) ||
ext4_encrypted_inode(donor_inode)) {
ext4_msg(orig_inode->i_sb, KERN_ERR,
"Online defrag not supported for encrypted files");
return -EOPNOTSUPP;
}
/* Protect orig and donor inodes against a truncate */

View File

@@ -353,7 +353,6 @@ void ext4_io_submit(struct ext4_io_submit *io)
REQ_SYNC : 0;
io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint;
bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
ext4_set_bio_ctx(io->inode, bio);
submit_bio(io->io_bio);
}
io->io_bio = NULL;
@@ -365,7 +364,6 @@ void ext4_io_submit_init(struct ext4_io_submit *io,
io->io_wbc = wbc;
io->io_bio = NULL;
io->io_end = NULL;
io->inode = NULL;
}
static int io_submit_init_bio(struct ext4_io_submit *io,
@@ -402,7 +400,6 @@ submit_and_retry:
if (ret)
return ret;
io->io_bio->bi_write_hint = inode->i_write_hint;
io->inode = inode;
}
ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
if (ret != bh->b_size)
@@ -480,9 +477,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
bh = head = page_buffers(page);
if (fscrypt_is_hw_encrypt(inode))
goto submit_buf;
if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
nr_to_submit) {
gfp_t gfp_flags = GFP_NOFS;
@@ -505,7 +499,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
}
}
submit_buf:
/* Now submit buffers to write */
do {
if (!buffer_async_write(bh))

View File

@@ -88,11 +88,6 @@ static void mpage_end_io(struct bio *bio)
if (trace_android_fs_dataread_start_enabled())
ext4_trace_read_completion(bio);
if (bio_encrypted(bio)) {
WARN_ON(bio->bi_private);
goto uptodate;
}
if (ext4_bio_encrypted(bio)) {
if (bio->bi_status) {
fscrypt_release_ctx(bio->bi_private);
@@ -101,7 +96,6 @@ static void mpage_end_io(struct bio *bio)
return;
}
}
uptodate:
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
@@ -281,7 +275,6 @@ int ext4_mpage_readpages(struct address_space *mapping,
*/
if (bio && (last_block_in_bio != blocks[0] - 1)) {
submit_and_realloc:
ext4_set_bio_ctx(inode, bio);
ext4_submit_bio_read(bio);
bio = NULL;
}
@@ -289,8 +282,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
struct fscrypt_ctx *ctx = NULL;
if (ext4_encrypted_inode(inode) &&
S_ISREG(inode->i_mode) &&
!fscrypt_is_hw_encrypt(inode)) {
S_ISREG(inode->i_mode)) {
ctx = fscrypt_get_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
goto set_error_page;
@@ -316,7 +308,6 @@ int ext4_mpage_readpages(struct address_space *mapping,
if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
(relative_block == map.m_len)) ||
(first_hole != blocks_per_page)) {
ext4_set_bio_ctx(inode, bio);
ext4_submit_bio_read(bio);
bio = NULL;
} else
@@ -324,7 +315,6 @@ int ext4_mpage_readpages(struct address_space *mapping,
goto next_page;
confused:
if (bio) {
ext4_set_bio_ctx(inode, bio);
ext4_submit_bio_read(bio);
bio = NULL;
}
@@ -337,9 +327,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
put_page(page);
}
BUG_ON(pages && !list_empty(pages));
if (bio) {
ext4_set_bio_ctx(inode, bio);
if (bio)
ext4_submit_bio_read(bio);
}
return 0;
}

View File

@@ -39,7 +39,6 @@
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/cleancache.h>
#include <linux/hie.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
@@ -5919,44 +5918,6 @@ static struct file_system_type ext4_fs_type = {
};
MODULE_ALIAS_FS("ext4");
#ifdef CONFIG_EXT4_ENCRYPTION
int ext4_set_bio_ctx(struct inode *inode,
struct bio *bio)
{
return fscrypt_set_bio_ctx(inode, bio);
}
static int __ext4_set_bio_ctx(struct inode *inode,
struct bio *bio)
{
if (inode->i_sb->s_magic != EXT4_SUPER_MAGIC)
return -EINVAL;
return fscrypt_set_bio_ctx(inode, bio);
}
static int __ext4_key_payload(struct bio_crypt_ctx *ctx,
const unsigned char **key)
{
if (ctx->bc_sb->s_magic != EXT4_SUPER_MAGIC)
return -EINVAL;
return fscrypt_key_payload(ctx, key);
}
struct hie_fs ext4_hie = {
.name = "ext4",
.key_payload = __ext4_key_payload,
.set_bio_context = __ext4_set_bio_ctx,
.priv = NULL,
};
#else
int ext4_set_bio_ctx(struct inode *inode, struct bio *bio)
{
return 0;
}
#endif
/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
@@ -6002,10 +5963,6 @@ static int __init ext4_init_fs(void)
if (err)
goto out;
#ifdef CONFIG_EXT4_ENCRYPTION
hie_register_fs(&ext4_hie);
#endif
return 0;
out:
unregister_as_ext2();

View File

@@ -17,7 +17,6 @@
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/hie.h>
#include <linux/sched/signal.h>
#include "f2fs.h"
@@ -139,9 +138,6 @@ static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
static bool f2fs_bio_post_read_required(struct bio *bio)
{
if (bio_encrypted(bio))
return false;
return bio->bi_private && !bio->bi_status;
}
@@ -383,7 +379,6 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
else
trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
f2fs_set_bio_ctx_fio(fio, io->bio);
__submit_bio(io->sbi, io->bio, fio->type);
io->bio = NULL;
}
@@ -517,56 +512,10 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
inc_page_count(fio->sbi, is_read_io(fio->op) ?
__read_io_type(page): WB_DATA_TYPE(fio->page));
f2fs_set_bio_ctx_fio(fio, bio);
__f2fs_submit_read_bio(fio->sbi, bio, fio->type);
return 0;
}
#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_crypt_bio_not_mergeable(struct bio *bio, struct page *nxt)
{
struct address_space *bio_mapping;
struct address_space *nxt_mapping;
struct page *p;
if (!bio || !nxt)
return 0;
p = bio_page(bio);
if (!p)
return 0;
bio_mapping = page_mapping(p);
nxt_mapping = page_mapping(nxt);
if (bio_mapping && nxt_mapping) {
if (!bio_mapping->host || !nxt_mapping->host)
return 0;
/* both not hw encrypted => don't care */
if (!fscrypt_is_hw_encrypt(bio_mapping->host) &&
!fscrypt_is_hw_encrypt(nxt_mapping->host))
return 0;
/* different file => don't merge */
if (bio_mapping->host->i_ino != nxt_mapping->host->i_ino)
return 1;
/* discontiguous page index => don't merge */
if ((p->index + bio_segments(bio)) != (nxt->index))
return 1;
}
return 0;
}
#else
static int f2fs_crypt_bio_not_mergeable(struct bio *bio struct page *nxt)
{
return 0;
}
#endif
void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = fio->sbi;
@@ -606,9 +555,6 @@ next:
!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
__submit_merged_bio(io);
if (f2fs_crypt_bio_not_mergeable(io->bio, bio_page))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
if ((fio->type == DATA || fio->type == NODE) &&
@@ -663,11 +609,6 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
bio->bi_end_io = f2fs_read_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
if (fscrypt_is_hw_encrypt(inode)) {
f2fs_wait_on_block_writeback(inode, blkaddr);
return bio;
}
if (f2fs_encrypted_file(inode))
post_read_steps |= 1 << STEP_DECRYPT;
if (post_read_steps) {
@@ -704,7 +645,6 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
ClearPageError(page);
inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
f2fs_set_bio_ctx(inode, bio);
__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
return 0;
}
@@ -1704,14 +1644,10 @@ zero_out:
if (bio && (last_block_in_bio != block_nr - 1 ||
!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
f2fs_set_bio_ctx(inode, bio);
__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
if (f2fs_crypt_bio_not_mergeable(bio, page))
goto submit_and_realloc;
if (bio == NULL) {
bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
is_readahead ? REQ_RAHEAD : 0);
@@ -1741,7 +1677,6 @@ set_error_page:
goto next_page;
confused:
if (bio) {
f2fs_set_bio_ctx(inode, bio);
__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
@@ -1751,10 +1686,8 @@ next_page:
put_page(page);
}
BUG_ON(pages && !list_empty(pages));
if (bio) {
f2fs_set_bio_ctx(inode, bio);
if (bio)
__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
}
return 0;
}
@@ -1801,9 +1734,6 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
if (fscrypt_is_hw_encrypt(inode))
return 0;
retry_encrypt:
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
PAGE_SIZE, 0, fio->page->index, gfp_flags);

View File

@@ -2977,9 +2977,6 @@ int f2fs_sync_fs(struct super_block *sb, int sync);
extern __printf(3, 4)
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
int sanity_check_ckpt(struct f2fs_sb_info *sbi);
int f2fs_set_bio_ctx(struct inode *inode, struct bio *bio);
int f2fs_set_bio_ctx_fio(struct f2fs_io_info *fio, struct bio *bio);
/*
* hash.c
@@ -3642,7 +3639,7 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int rw = iov_iter_rw(iter);
if (f2fs_post_read_required(inode) && fscrypt_is_sw_encrypt(inode))
if (f2fs_post_read_required(inode))
return true;
if (sbi->s_ndevs)
return true;

View File

@@ -23,7 +23,6 @@
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
#include <linux/hie.h>
#include "f2fs.h"
#include "node.h"
@@ -2227,77 +2226,6 @@ static const struct fscrypt_operations f2fs_cryptops = {
.empty_dir = f2fs_empty_dir,
.max_namelen = F2FS_NAME_LEN,
};
int f2fs_set_bio_ctx(struct inode *inode, struct bio *bio)
{
int ret;
ret = fscrypt_set_bio_ctx(inode, bio);
if (!ret && bio_encrypted(bio))
bio_bcf_set(bio, BC_IV_PAGE_IDX);
return ret;
}
int f2fs_set_bio_ctx_fio(struct f2fs_io_info *fio, struct bio *bio)
{
int ret = 0;
struct address_space *mapping;
/* Don't attach bio ctx for sw encrypted pages,
* including moving raw blocks in GC.
*/
if (fio->encrypted_page)
return 0;
mapping = page_mapping(fio->page);
if (mapping)
ret = f2fs_set_bio_ctx(mapping->host, bio);
return ret;
}
static int __f2fs_set_bio_ctx(struct inode *inode,
struct bio *bio)
{
if (inode->i_sb->s_magic != F2FS_SUPER_MAGIC)
return -EINVAL;
return f2fs_set_bio_ctx(inode, bio);
}
static int __f2fs_key_payload(struct bio_crypt_ctx *ctx,
const unsigned char **key)
{
if (ctx->bc_sb->s_magic != F2FS_SUPER_MAGIC)
return -EINVAL;
return fscrypt_key_payload(ctx, key);
}
struct hie_fs f2fs_hie = {
.name = "f2fs",
.key_payload = __f2fs_key_payload,
.set_bio_context = __f2fs_set_bio_ctx,
.priv = NULL,
};
#else
static const struct fscrypt_operations f2fs_cryptops = {
.is_encrypted = f2fs_encrypted_inode,
};
int f2fs_set_bio_ctx(struct inode *inode, struct bio *bio)
{
return 0;
}
int f2fs_set_bio_ctx_fio(struct f2fs_io_info *fio, struct bio *bio)
{
return 0;
}
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
@@ -3670,10 +3598,6 @@ static int __init init_f2fs_fs(void)
if (err)
goto free_root_stats;
#ifdef CONFIG_F2FS_FS_ENCRYPTION
hie_register_fs(&f2fs_hie);
#endif
return 0;
free_root_stats:

View File

@@ -18,21 +18,6 @@ struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
#define BIO_BC_INFO_GET (1 << 0)
#define BIO_BC_INFO_PUT (1 << 1)
struct bio_crypt_ctx {
unsigned int bc_flags;
unsigned int bc_key_size;
struct super_block *bc_sb;
unsigned long bc_ino;
unsigned long bc_iv; /* for BC_IV_CTX only */
void *bc_info;
void *(*bc_info_act)(void *ci, int act);
#ifdef CONFIG_HIE_DUMMY_CRYPT
u32 dummy_crypt_key;
#endif
};
/*
* Block error status values. See block/blk-core:blk_errors for the details.
* Alpha cannot write a byte atomically, so we need to use 32-bit value.
@@ -144,8 +129,6 @@ struct bio {
struct bio_set *bi_pool;
/* Encryption context. May contain secret key material. */
struct bio_crypt_ctx bi_crypt_ctx;
/*
* We can inline a number of vecs at the end of the bio, to avoid
* double allocations for a small number of bio_vecs. This member
@@ -381,79 +364,4 @@ struct blk_rq_stat {
u64 batch;
};
/*
* block crypt flags
*/
enum bc_flags_bits {
__BC_CRYPT, /* marks the request needs crypt */
__BC_IV_PAGE_IDX, /* use page index as iv. */
__BC_IV_CTX, /* use the iv saved in crypt context */
__BC_AES_128_XTS, /* crypt algorithms */
__BC_AES_192_XTS,
__BC_AES_256_XTS,
__BC_AES_128_CBC,
__BC_AES_256_CBC,
__BC_AES_128_ECB,
__BC_AES_256_ECB,
};
#define BC_CRYPT (1UL << __BC_CRYPT)
#define BC_IV_PAGE_IDX (1UL << __BC_IV_PAGE_IDX)
#define BC_IV_CTX (1UL << __BC_IV_CTX)
#define BC_AES_128_XTS (1UL << __BC_AES_128_XTS)
#define BC_AES_192_XTS (1UL << __BC_AES_192_XTS)
#define BC_AES_256_XTS (1UL << __BC_AES_256_XTS)
#define BC_AES_128_CBC (1UL << __BC_AES_128_CBC)
#define BC_AES_256_CBC (1UL << __BC_AES_256_CBC)
#define BC_AES_128_ECB (1UL << __BC_AES_128_ECB)
#define BC_AES_256_ECB (1UL << __BC_AES_256_ECB)
#define BC_INVALID_IV (~0UL)
static inline void bio_bcf_set(struct bio *bio, unsigned int flag)
{
if (bio)
bio->bi_crypt_ctx.bc_flags |= flag;
}
static inline void bio_bcf_clear(struct bio *bio, unsigned int flag)
{
if (bio)
bio->bi_crypt_ctx.bc_flags &= (~flag);
}
static inline bool bio_bcf_test(struct bio *bio, unsigned int flag)
{
return bio ? (bio->bi_crypt_ctx.bc_flags & flag) : 0;
}
static inline bool bio_encrypted(struct bio *bio)
{
return bio_bcf_test(bio, BC_CRYPT);
}
static inline unsigned long bio_bc_inode(const struct bio *bio)
{
return bio->bi_crypt_ctx.bc_ino;
}
static inline void *bio_bc_sb(const struct bio *bio)
{
return (void *)bio->bi_crypt_ctx.bc_sb;
}
static inline unsigned int bio_bc_key_size(const struct bio *bio)
{
return bio->bi_crypt_ctx.bc_key_size;
}
static inline
void bio_bc_iv_set(struct bio *bio, unsigned long iv)
{
bio->bi_crypt_ctx.bc_iv = iv;
bio_bcf_set(bio, BC_IV_CTX);
}
unsigned long bio_bc_iv_get(struct bio *bio);
#endif /* __LINUX_BLK_TYPES_H */

View File

@@ -195,8 +195,6 @@ void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, int, struct buffer_head * bh[]);
void ll_rw_block_crypt(struct inode *inode, int op, int op_flags, int nr,
struct buffer_head *bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
void write_dirty_buffer(struct buffer_head *bh, int op_flags);
@@ -205,7 +203,6 @@ void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);
int bh_submit_read_crypt(struct inode *inode, struct buffer_head *bh);
loff_t page_cache_seek_hole_data(struct inode *inode, loff_t offset,
loff_t length, int whence);

View File

@@ -31,7 +31,6 @@
#include <linux/uidgid.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/blk_types.h>
#include <linux/workqueue.h>
#include <linux/delayed_call.h>
#include <linux/uuid.h>

View File

@@ -93,28 +93,6 @@ static inline int fscrypt_inherit_context(struct inode *parent,
return -EOPNOTSUPP;
}
static inline int fscrypt_set_bio_ctx(struct inode *inode,
struct bio *bio)
{
return -EOPNOTSUPP;
}
static inline int fscrypt_key_payload(struct bio_crypt_ctx *ctx,
const unsigned char **key)
{
return -EOPNOTSUPP;
}
static inline int fscrypt_is_hw_encrypt(const struct inode *inode)
{
return 0;
}
static inline int fscrypt_is_sw_encrypt(const struct inode *inode)
{
return 0;
}
/* keyinfo.c */
static inline int fscrypt_get_encryption_info(struct inode *inode)
{
@@ -126,11 +104,6 @@ static inline void fscrypt_put_encryption_info(struct inode *inode)
return;
}
static inline void *fscrypt_crypt_info_act(void *ci, int act)
{
return NULL;
}
/* fname.c */
static inline int fscrypt_setup_filename(struct inode *dir,
const struct qstr *iname,

View File

@@ -81,16 +81,10 @@ extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
extern int fscrypt_inherit_context(struct inode *, struct inode *,
void *, bool);
extern int fscrypt_set_bio_ctx(struct inode *inode, struct bio *bio);
extern int fscrypt_key_payload(struct bio_crypt_ctx *ctx,
const unsigned char **key);
extern int fscrypt_is_hw_encrypt(const struct inode *inode);
extern int fscrypt_is_sw_encrypt(const struct inode *inode);
/* keyinfo.c */
extern int fscrypt_get_encryption_info(struct inode *);
extern void fscrypt_put_encryption_info(struct inode *);
extern void *fscrypt_crypt_info_act(void *ci, int act);
/* fname.c */
extern int fscrypt_setup_filename(struct inode *, const struct qstr *,

View File

@@ -1,188 +0,0 @@
/*
* Copyright (C) 2017 MediaTek Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __HIE_H_
#define __HIE_H_
#include <linux/fs.h>
#include <linux/blk_types.h>
#include <linux/blkdev.h>
#include <keys/user-type.h>
#define HIE_MAX_KEY_SIZE 64
#define HIE_DBG_FS 0x02
#define HIE_DBG_BIO 0x04
#define HIE_DBG_KEY 0x08
#define HIE_DBG_HIE 0x10
#define HIE_DBG_DRV 0x20
#define HIE_DBG_CRY 0x40
struct hie_fs {
const char *name;
int (*key_payload)(struct bio_crypt_ctx *ctx,
const unsigned char **key);
int (*set_bio_context)(struct inode *inode,
struct bio *bio);
void *priv; /* fs specific data */
struct list_head list;
};
struct hie_dev {
const char *name;
unsigned int mode; /* encryption modes supported by the device */
int (*encrypt)(unsigned int mode, const char *key, int len,
struct request *req, void *priv);
int (*decrypt)(unsigned int mode, const char *key, int len,
struct request *req, void *priv);
void *priv; /* device specific data */
struct list_head list;
};
typedef int (*hie_act)(unsigned int, const char *, int,
struct request *, void *);
static inline bool hie_request_crypted(struct request *req)
{
return (req && req->bio) ?
(req->bio->bi_crypt_ctx.bc_flags & BC_CRYPT) : 0;
}
#ifdef CONFIG_HIE
bool hie_is_capable(const struct super_block *sb);
int hie_is_dummy(void);
int hie_is_nocrypt(void);
int hie_register_fs(struct hie_fs *fs);
int hie_register_device(struct hie_dev *dev);
int hie_decrypt(struct hie_dev *dev, struct request *req, void *priv);
int hie_encrypt(struct hie_dev *dev, struct request *req, void *priv);
bool hie_key_verify(struct bio *bio1, struct bio *bio2);
int hie_set_bio_crypt_context(struct inode *inode, struct bio *bio);
int hie_set_dio_crypt_context(struct inode *inode, struct bio *bio,
loff_t fs_offset);
u64 hie_get_iv(struct request *req);
int hie_debug(unsigned int mask);
int hie_debug_ino(unsigned long ino);
int hie_req_end_size(struct request *req, unsigned long bytes);
int hie_dump_req(struct request *req, const char *prefix);
#else
static inline
bool hie_is_capable(const struct super_block *sb)
{
return false;
}
static inline
int hie_is_dummy(void)
{
return 0;
}
static inline
int hie_is_nocrypt(void)
{
return 0;
}
static inline
bool hie_key_verify(struct bio *bio1, struct bio *bio2)
{
return true;
}
static inline
int hie_register_fs(struct hie_fs *fs)
{
return 0;
}
static inline
int hie_register_device(struct hie_dev *dev)
{
return 0;
}
static inline
int hie_decrypt(struct hie_dev *dev, struct request *req, void *priv)
{
return 0;
}
static inline
int hie_encrypt(struct hie_dev *dev, struct request *req, void *priv)
{
return 0;
}
static inline
int hie_set_bio_crypt_context(struct inode *inode, struct bio *bio)
{
return 0;
}
static inline
int hie_set_dio_crypt_context(struct inode *inode, struct bio *bio,
loff_t fs_offset)
{
return 0;
}
static inline
u64 hie_get_iv(struct request *req)
{
return 0;
}
static inline
int hie_debug(unsigned int mask)
{
return 0;
}
static inline
int hie_debug_ino(unsigned long ino)
{
return 0;
}
static inline
int hie_req_end_size(struct request *req, unsigned long bytes)
{
return 0;
}
static inline
void hie_dump_bio_file(struct bio *bio, const char *prefix,
const char *filename)
{
}
static inline
int hie_dump_req(struct request *req, const char *prefix)
{
return 0;
}
#endif
static inline
int hie_req_end(struct request *req)
{
return hie_req_end_size(req, 0);
}
#endif

View File

@@ -279,8 +279,6 @@ struct fsxattr {
#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */
#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */
#define FS_ENCRYPTION_MODE_ADIANTUM 9
#define FS_ENCRYPTION_MODE_PRIVATE 127
struct fscrypt_policy {
__u8 version;

View File

@@ -234,7 +234,6 @@ source security/loadpin/Kconfig
source security/yama/Kconfig
source security/integrity/Kconfig
source security/hie/Kconfig
choice
prompt "Default security module"

View File

@@ -30,4 +30,3 @@ obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
# Object integrity file lists
subdir-$(CONFIG_INTEGRITY) += integrity
obj-$(CONFIG_INTEGRITY) += integrity/
obj-$(CONFIG_HIE) += hie/

View File

@@ -1,73 +0,0 @@
menu "Storage Hardware Inline Encryption"
config HIE
bool "enable Hardware-Inline-Encryption"
default n
select KEYS
help
This driver provides an interface for storage driver to
retrieve file-oriented encrypt keys from file system.
The storage driver must register hardware encryption
capability to HIE(Hardware-Inline-Encryption).
It is must to Hardware file-oriented encrypt.
config HIE_DEBUG
bool "HIE debug"
default n
depends on DEBUG_FS
depends on HIE
help
Enable debug facility. Print logs when encryption,
decryption, attach/retrieve key from bio.
/d/hie/debug: debug log control
Mask Debug Messages
0x02 print logs when accessing certain inode.
0x04 dump bio basic info when decrypt/encrypt.
0x08 print keys when decrypt/encrypt.
0x10 print pointer to the bio when decrypt/encrypt.
Example: "echo 16 > /d/hie/debug"
/d/hie/ino: target inode number, when log mask = 0x02.
config HIE_NO_CRYPT
bool "HIE no crypt"
default n
depends on HIE
help
Skip calling driver registered encryption/decryption
function. Used to simulate theoretical speed limit of
hardware encryption, that is, encryption overhead time
equals zero.
config HIE_DUMMY_CRYPT
bool "HIE dummy crypt"
default n
depends on HIE
help
XOR the buffer with 0xFFFF, instead of calling driver
registered encryption/decryption function. Used to
verify the correctness of hie_encrypt(), hie_decrypt()
insertion points in the driver.
config HIE_DUMMY_CRYPT_KEY_SWITCH
bool "HIE dummy crypt with key switch"
default n
depends on HIE_DUMMY_CRYPT
help
XOR the buffer with real keys. Used to verify the
correctness of key passing from the file system to
the block layer, and finally to the HIE driver.
It is must to dummy test
config HIE_DUMMY_CRYPT_IV
bool "HIE dummy crypt with iv"
default n
depends on HIE_DUMMY_CRYPT
help
XOR the buffer with the initialzation vector(iv.),
if BC_IV_PAGE_IDX flag is set in the bio crypt
context. Used to verify the correctness of iv.
calculation and block merge.
endmenu

View File

@@ -1,7 +0,0 @@
#
# Makefile for Hardware Inline Encryption
#
ccflags-y += -Isecurity/selinux -Isecurity/selinux/include
obj-$(CONFIG_HIE) += hie.o

View File

@@ -1,918 +0,0 @@
/*
* Copyright (C) 2017 MediaTek Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define DEBUG 1
#include <linux/module.h>
#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/printk.h>
#include <linux/key.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <linux/debugfs.h>
#include <linux/hie.h>
#include <linux/preempt.h>
#ifdef CONFIG_MTK_PLATFORM
//#include <mt-plat/aee.h>
/*temp for build :aee_kernel_warning*/
#define aee_kernel_warning(...)
#else
#define aee_kernel_warning(...)
#endif
static DEFINE_SPINLOCK(hie_dev_list_lock);
static LIST_HEAD(hie_dev_list);
static DEFINE_SPINLOCK(hie_fs_list_lock);
static LIST_HEAD(hie_fs_list);
static int hie_key_payload(struct bio_crypt_ctx *ctx,
const unsigned char **key);
static struct hie_dev *hie_default_dev;
static struct hie_fs *hie_default_fs;
#ifdef CONFIG_HIE_DEBUG
struct dentry *hie_droot;
struct dentry *hie_ddebug;
u32 hie_dbg;
u64 hie_dbg_ino;
u64 hie_dbg_sector;
#endif
int hie_debug(unsigned int mask)
{
#ifdef CONFIG_HIE_DEBUG
return (hie_dbg & mask);
#else
return 0;
#endif
}
int hie_debug_ino(unsigned long ino)
{
#ifdef CONFIG_HIE_DEBUG
return ((hie_dbg & HIE_DBG_FS) && (hie_dbg_ino == ino));
#else
return 0;
#endif
}
bool hie_is_capable(const struct super_block *sb)
{
return blk_queue_inline_crypt(bdev_get_queue(sb->s_bdev));
}
EXPORT_SYMBOL_GPL(hie_is_capable);
int hie_is_dummy(void)
{
#ifdef CONFIG_HIE_DUMMY_CRYPT
return 1;
#else
return 0;
#endif
}
EXPORT_SYMBOL_GPL(hie_is_dummy);
int hie_is_nocrypt(void)
{
#ifdef CONFIG_HIE_NO_CRYPT
return 1;
#else
return 0;
#endif
}
EXPORT_SYMBOL_GPL(hie_is_nocrypt);
int hie_register_device(struct hie_dev *dev)
{
unsigned long flags;
spin_lock_irqsave(&hie_dev_list_lock, flags);
list_add(&dev->list, &hie_dev_list);
spin_unlock_irqrestore(&hie_dev_list_lock, flags);
if (IS_ERR_OR_NULL(hie_default_dev))
hie_default_dev = dev;
return 0;
}
EXPORT_SYMBOL_GPL(hie_register_device);
int hie_register_fs(struct hie_fs *fs)
{
unsigned long flags;
if (IS_ERR_OR_NULL(fs))
return -EINVAL;
spin_lock_irqsave(&hie_fs_list_lock, flags);
list_add(&fs->list, &hie_fs_list);
spin_unlock_irqrestore(&hie_fs_list_lock, flags);
if (IS_ERR_OR_NULL(hie_default_fs))
hie_default_fs = fs;
return 0;
}
EXPORT_SYMBOL_GPL(hie_register_fs);
#ifdef CONFIG_HIE_DEBUG
#define __rw_str(bio) ((bio_data_dir(bio) == READ) ? "R" : "W")
/*
 * get_page_name_nolock - best-effort file name for @p without taking locks.
 * Used from atomic context, so it peeks at the first dentry alias directly
 * instead of calling d_find_alias().
 *
 * Returns @buf (filled with the dentry name), "?" when the page has no
 * mapping, or "#" when no named alias exists.  When @ino is non-NULL it
 * receives the inode number on success.
 *
 * FIX: strncpy() does not NUL-terminate when the source fills the buffer;
 * terminate explicitly so callers can safely print the result with %s.
 */
static const char *get_page_name_nolock(struct page *p, char *buf, int len,
	unsigned long *ino)
{
	struct inode *inode;
	struct address_space *mapping = page_mapping(p);
	struct dentry *dentry = NULL;
	struct dentry *alias;
	char *ptr = buf;

	if (!mapping)
		return "?";

	inode = mapping->host;
	/* take the first alias on the inode's dentry list */
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		dentry = alias;
		if (dentry)
			break;
	}

	if (dentry && dentry->d_name.name) {
		strncpy(buf, dentry->d_name.name, len);
		buf[len - 1] = '\0';	/* guarantee termination */
	} else {
		return "#";
	}

	if (ino)
		*ino = inode->i_ino;

	return ptr;
}
/*
 * get_page_name - resolve the path of the file backing page @p.
 * Falls back to the lock-free variant in atomic context.
 *
 * Returns the path within @buf, "?" when unmapped or anonymous, "#" on
 * bad arguments.  When @ino is non-NULL it receives the inode number.
 *
 * FIX: use the already-validated page_mapping() result instead of
 * re-reading p->mapping, which may differ (e.g. swap-cache pages) and was
 * not the pointer we NULL-checked above.
 */
static const char *get_page_name(struct page *p, char *buf, int len,
	unsigned long *ino)
{
	struct inode *inode;
	struct dentry *dentry;
	struct address_space *mapping;
	char *ptr = buf;

	if (!p || !buf || len <= 0)
		return "#";

	mapping = page_mapping(p);
	if (!mapping || !mapping->host)
		return "?";

	/* d_find_alias() may sleep; use the lock-free path in atomic ctx */
	if (in_interrupt() || irqs_disabled() || preempt_count())
		return get_page_name_nolock(p, buf, len, ino);

	inode = mapping->host;
	dentry = d_find_alias(inode);
	if (dentry) {
		ptr = dentry_path_raw(dentry, buf, len);
		dput(dentry);
	} else {
		return "?";
	}

	if (ino)
		*ino = inode->i_ino;

	return ptr;
}
/*
 * hie_dump_bio - log one bio's crypto flags, total payload size, the
 * backing file name/inode (resolved from its first mapped page) and IV.
 * Debug aid; output goes to the kernel log with the given @prefix.
 *
 * FIX: ino is unsigned long — print it with %lu, not %ld.
 */
static void hie_dump_bio(struct bio *bio, const char *prefix)
{
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct page *last_page = NULL;
	unsigned int size = 0;
	const char *ptr = NULL;
	unsigned long ino = 0;
	unsigned long iv;
	char path[256];

	bio_for_each_segment(bvec, bio, iter) {
		size += bvec.bv_len;
		/* resolve the file name once, from the first mapped page */
		if (bvec.bv_page)
			if (last_page != bvec.bv_page && !ptr) {
				last_page = bvec.bv_page;
				ptr = get_page_name(bvec.bv_page, path, 255,
						    &ino);
			}
	}
	iv = bio_bc_iv_get(bio);
	pr_info("HIE: %s: bio: %p %s, flag: %x, size: %d, file: %s, ino: %lu, iv: %lu\n",
		prefix, bio, __rw_str(bio),
		bio->bi_crypt_ctx.bc_flags,
		size, ptr ? ptr : "", ino, iv);
}
/* Dump every bio of @req with hie_dump_bio(); always returns 0. */
int hie_dump_req(struct request *req, const char *prefix)
{
	struct bio *b;

	__rq_for_each_bio(b, req)
		hie_dump_bio(b, prefix);

	return 0;
}
#else
/* Non-debug build: request dumping is compiled out; always succeeds. */
int hie_dump_req(struct request *req, const char *prefix)
{
	return 0;
}
#endif
#if defined(CONFIG_HIE_DUMMY_CRYPT) && !defined(CONFIG_HIE_NO_CRYPT)
/*
 * hie_xor - XOR @length bytes at @buf with the 32-bit @key.
 *
 * FIX: the old loop rounded @length up to a multiple of 4 and could
 * write up to 3 bytes past the end of the buffer.  Process whole words
 * first, then XOR any trailing bytes individually (using the key's
 * in-memory byte order, matching what the word pass would have used).
 */
static void hie_xor(void *buf, unsigned int length, u32 key)
{
	unsigned int i;
	u32 *p = (u32 *)buf;
	unsigned int words = length / 4;
	unsigned int tail_len = length % 4;
	unsigned char *tail;
	unsigned char kb[4];

	for (i = 0; i < words; i++, p++)
		*p = *p ^ key;

	if (tail_len) {
		memcpy(kb, &key, sizeof(kb));
		tail = (unsigned char *)p;
		for (i = 0; i < tail_len; i++)
			tail[i] ^= kb[i];
	}
}
/* Stamp the dummy-crypt @key into the crypt context of every bio in @req. */
static void hie_dummy_crypt_set_key(struct request *req, u32 key)
{
	struct bio *b;

	if (!req->bio)
		return;

	__rq_for_each_bio(b, req)
		b->bi_crypt_ctx.dummy_crypt_key = key;
}
/*
 * hie_dummy_crypt_bio - XOR-"crypt" every segment of @bio in place.
 * @prefix:  tag used in debug log lines
 * @bio:     bio whose data pages are transformed
 * @max:     when non-zero, process at most this many bytes in total
 * @blksize: crypto block granularity (queue physical block size)
 * @iv:      in/out IV counter; advanced by one per crypto block when set
 *
 * Software stand-in for the inline-crypto hardware (debug builds only).
 * Returns the number of bytes processed.
 */
static unsigned long hie_dummy_crypt_bio(const char *prefix, struct bio *bio,
	unsigned long max, unsigned int blksize, u64 *iv)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long ret = 0;
	unsigned int len;
#ifdef CONFIG_HIE_DEBUG
	const char *ptr = NULL;
	unsigned long ino = 0;
	char path[256];
#endif
	if (!bio)
		return 0;
	bio_for_each_segment(bv, bio, iter) {
		u32 key;
		char *data = bvec_kmap_irq(&bv, &flags);
		unsigned int i;
		unsigned int remain;

		/* clamp the final segment so we never exceed @max bytes */
		if (max && (ret + bv.bv_len > max))
			len = max - ret;
		else
			len = bv.bv_len;
#ifdef CONFIG_HIE_DEBUG
		/* resolve the backing file once, from the first page */
		if (!ptr)
			ptr = get_page_name(bv.bv_page, path, 255, &ino);
		if (hie_debug(HIE_DBG_CRY)) {
			pr_info("HIE: %s: %s bio: %p, base: %p %s len: %d, file: %s, ino: %ld, sec: %lu, iv: %llx, pgidx: %u\n",
				__func__, prefix, bio, data,
				__rw_str(bio), bv.bv_len,
				ptr, ino, (unsigned long)iter.bi_sector, *iv,
				(unsigned int)bv.bv_page->index);
			print_hex_dump(KERN_DEBUG, "before crypt: ",
				DUMP_PREFIX_OFFSET, 32, 1, data, 32, 0);
		}
#endif
		remain = len;
		/* one XOR pass per crypto block, optionally mixing in the IV */
		for (i = 0; i < len; i += blksize) {
			key = bio->bi_crypt_ctx.dummy_crypt_key;
			if (iv && *iv) {
#ifdef CONFIG_HIE_DUMMY_CRYPT_IV
				/* low 16 bits of the key carry the IV */
				key = (key & 0xFFFF0000) |
					(((u32)*iv) & 0xFFFF);
#endif
				(*iv)++;
			}
			hie_xor(data+i,
				(remain > blksize) ? blksize : remain, key);
			remain -= blksize;
		}
		ret += len;
#ifdef CONFIG_HIE_DEBUG
		if (hie_debug(HIE_DBG_CRY))
			print_hex_dump(KERN_DEBUG, "after crypt: ",
				DUMP_PREFIX_OFFSET, 32, 1, data, 32, 0);
#endif
		/* data was modified through a kmap; keep caches coherent */
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
	return ret;
}
/*
 * hie_dummy_crypt_req - run the dummy cipher over every bio of @req.
 * @prefix: tag used in debug log lines
 * @req:    request to transform
 * @bytes:  when non-zero, total byte budget across the request
 *
 * The IV is derived once from the request and threaded through the
 * per-bio calls so consecutive blocks keep a monotonic counter.
 * Always returns 0.
 */
static int hie_dummy_crypt_req(const char *prefix, struct request *req,
	unsigned long bytes)
{
	u64 iv;
	struct bio *bio;
	unsigned int blksize;

	if (!req->bio)
		return 0;
	iv = hie_get_iv(req);
	blksize = queue_physical_block_size(req->q);
	if (hie_debug(HIE_DBG_CRY)) {
		pr_info("HIE: %s: %s req: %p, req_iv: %llx\n",
			__func__, prefix, req, iv);
	}
	__rq_for_each_bio(bio, req) {
		unsigned long cnt;
#ifdef CONFIG_HIE_DEBUG
		if (hie_debug(HIE_DBG_CRY)) {
			u64 bio_iv;

			bio_iv = bio_bc_iv_get(bio);
			pr_info("HIE: %s: %s req: %p, req_iv: %llx, bio: %p, %s, bio_iv: %llu\n",
				__func__, prefix, req, iv, bio,
				__rw_str(bio),
				bio_iv);
		}
#endif
		cnt = hie_dummy_crypt_bio(prefix, bio, bytes, blksize, &iv);
		/* consume the byte budget bio by bio; stop when exhausted */
		if (bytes) {
			if (bytes > cnt)
				bytes -= cnt;
			else
				break;
		}
	}
	return 0;
}
#endif
/*
 * hie_req_end_size - completion hook for the dummy-crypt path.
 * Decrypts up to @bytes of a finished read request in place; writes were
 * already transformed at submit time, so they pass an unlimited budget.
 * Compiles to a constant 0 outside the dummy-crypt configuration.
 */
int hie_req_end_size(struct request *req, unsigned long bytes)
{
#if defined(CONFIG_HIE_DUMMY_CRYPT) && !defined(CONFIG_HIE_NO_CRYPT)
	struct bio *first = req->bio;
	unsigned long budget;

	if (!hie_request_crypted(req))
		return 0;

	budget = (bio_data_dir(first) == WRITE) ? 0 : bytes;
	return hie_dummy_crypt_req("<end>", req, budget);
#else
	return 0;
#endif
}
/**
 * Verify the correctness of crypto_not_mergeable() @ block/blk-merge.c:
 * bios with different keys must not be merged into the same request.
 * Checks, against the head bio, that every bio in @req is encrypted with
 * the same key, the same device-supported crypt mode, identical flags,
 * and (for page-index IVs) strictly consecutive IVs.
 * On success writes the common mode to @crypt_mode and returns 0;
 * returns -ENOENT for an empty request or -EINVAL on any inconsistency.
 */
static int hie_req_verify(struct request *req, struct hie_dev *dev,
	unsigned int *crypt_mode)
{
	struct bio *bio, *bio_head;
	unsigned int key_size;
	unsigned int mode;
	unsigned int last_mode;
	unsigned int flag;
	unsigned long iv = BC_INVALID_IV;
	unsigned long count = 0;

	if (!req->bio)
		return -ENOENT;
	/* the head bio is the reference every other bio must match */
	bio = bio_head = req->bio;
	key_size = bio_bc_key_size(bio);
	mode = last_mode = bio->bi_crypt_ctx.bc_flags & dev->mode;
	flag = bio->bi_crypt_ctx.bc_flags;
	if (bio_bcf_test(bio, BC_IV_PAGE_IDX))
		iv = bio_bc_iv_get(bio);
	__rq_for_each_bio(bio, req) {
		/* all bios must be encrypted with the head bio's key */
		if ((!bio_encrypted(bio)) ||
			!hie_key_verify(bio_head, bio)) {
			pr_info("%s: inconsistent keys. bio: %p, key_size: %d, req: %p.\n",
				__func__, bio, key_size, req);
			return -EINVAL;
		}
		mode = bio->bi_crypt_ctx.bc_flags & dev->mode;
		if (!mode) {
			pr_info("%s: %s: unsupported crypt mode %x\n",
				__func__, dev->name,
				bio->bi_crypt_ctx.bc_flags);
			return -EINVAL;
		}
		if (mode != last_mode) {
			pr_info("%s: %s: inconsistent crypt mode %x, expected: %x, bio: %p, req: %p\n",
				__func__, dev->name,
				mode, last_mode, bio, req);
			return -EINVAL;
		}
		if (bio->bi_crypt_ctx.bc_flags != flag) {
			pr_info("%s: %s: inconsistent flag %x, expected: %x, bio: %p, req: %p\n",
				__func__, dev->name,
				bio->bi_crypt_ctx.bc_flags, flag, bio, req);
			hie_dump_req(req, __func__);
			aee_kernel_warning("HIE", "inconsistent flags");
			return -EINVAL;
		}
		/* page-index IV mode: IVs must be consecutive across bios */
		if (iv != BC_INVALID_IV) {
			struct bio_vec bv;
			struct bvec_iter iter;
			unsigned long bio_iv;

			bio_iv = bio_bc_iv_get(bio);
			if ((iv + count) != bio_iv) {
				pr_info("%s: %s: inconsis. iv %lu, expected: %lu, bio: %p, req: %p\n",
					__func__, dev->name, bio_iv,
					(iv + count), bio, req);
				hie_dump_req(req, __func__);
				aee_kernel_warning("HIE", "inconsistent iv.");
				return -EINVAL;
			}
			/* each segment advances the expected IV by one */
			bio_for_each_segment(bv, bio, iter)
				count++;
		}
	}
	if (crypt_mode)
		*crypt_mode = mode;
	return 0;
}
/*
 * hie_req_key_act - validate @req, fetch its master key and run @act.
 * @dev:  target hie device
 * @req:  request whose bios carry the crypt context
 * @act:  device callback (encrypt or decrypt); may be NULL
 * @priv: opaque data forwarded to @act
 *
 * RETURNS:
 * 0 for non-crypted requests or on success, -ENODEV when the backing
 * device lacks inline crypto, -EINVAL when the request fails
 * hie_req_verify(), -ENOKEY when no fs recognizes the key payload or the
 * payload size mismatches, otherwise the callback's return value.
 */
static int hie_req_key_act(struct hie_dev *dev, struct request *req,
	hie_act act, void *priv)
{
	const unsigned char *key = NULL;
	struct bio *bio = req->bio;
	unsigned int mode = 0;
	int key_size = 0;
	int ret;

	if (!hie_is_capable(bio_bc_sb(bio)))
		return -ENODEV;
	if (!hie_request_crypted(req))
		return 0;
	if (hie_debug(HIE_DBG_BIO))
		hie_dump_req(req, __func__);
	if (hie_req_verify(req, dev, &mode))
		return -EINVAL;
	key_size = bio_bc_key_size(bio);
	/* positive ret is the payload size; compare against the ctx size */
	ret = hie_key_payload(&bio->bi_crypt_ctx, &key);
	if (ret == -EINVAL) {
		pr_info("HIE: %s: key payload was not recognized\n",
			__func__);
		ret = -ENOKEY;
	} else if (ret >= 0 && ret != key_size) {
		pr_info("HIE: %s: key size mismatch, ctx: %d, payload: %d\n",
			__func__, key_size, ret);
		ret = -ENOKEY;
	}
	if (ret < 0)
		goto out;
	ret = 0;
#ifndef CONFIG_HIE_NO_CRYPT
#ifdef CONFIG_HIE_DUMMY_CRYPT
	/* dummy path: XOR in software instead of calling the device */
#ifdef CONFIG_HIE_DUMMY_CRYPT_KEY_SWITCH
	hie_dummy_crypt_set_key(req, readl(key));
#else
	hie_dummy_crypt_set_key(req, 0xFFFFFFFF);
#endif
	if (bio_data_dir(bio) == WRITE)
		ret = hie_dummy_crypt_req("<req>", req, 0);
#else
	if (act)
		ret = act(mode, key, key_size, req, priv);
#endif
#endif
#ifdef CONFIG_HIE_DEBUG
	if (key && hie_debug(HIE_DBG_KEY)) {
		pr_info("HIE: %s: master key\n", __func__);
		print_hex_dump(KERN_DEBUG, "fs-key: ", DUMP_PREFIX_ADDRESS,
			16, 1, key, key_size, 0);
	}
#endif
out:
	return ret;
}
/*
 * hie_key_payload - ask each registered fs to resolve the key for @ctx.
 * Returns the key size (>= 0) with *key set on success; -EINVAL when no
 * file system recognizes the context; any other negative error stops the
 * probe and is returned as-is.
 */
static int hie_key_payload(struct bio_crypt_ctx *ctx,
	const unsigned char **key)
{
	struct hie_fs *fs, *n;
	unsigned long irq_flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&hie_fs_list_lock, irq_flags);
	list_for_each_entry_safe(fs, n, &hie_fs_list, list) {
		if (!fs->key_payload)
			continue;
		ret = fs->key_payload(ctx, key);
		/* -EINVAL means "not mine": keep probing the next fs */
		if (ret != -EINVAL)
			break;
	}
	spin_unlock_irqrestore(&hie_fs_list_lock, irq_flags);

	return ret;
}
/*
 * hie_key_verify - true when @bio1 and @bio2 carry identical master keys.
 * Compares key sizes first, then resolves and byte-compares the payloads.
 */
bool hie_key_verify(struct bio *bio1, struct bio *bio2)
{
	const unsigned char *key1 = NULL;
	const unsigned char *key2 = NULL;
	unsigned int size = bio_bc_key_size(bio1);

	if (size != bio_bc_key_size(bio2))
		return false;

	if (hie_key_payload(&bio1->bi_crypt_ctx, &key1) < 0)
		return false;
	if (hie_key_payload(&bio2->bi_crypt_ctx, &key2) < 0)
		return false;

	return memcmp(key1, key2, size) == 0;
}
/* Key pointer paired with its size.
 * NOTE(review): not referenced anywhere in this part of the file —
 * confirm whether it is still needed. */
struct hie_key_info {
	char *key;
	int size;
};
/**
 * hie_decrypt - resolve the request's key and invoke the decrypt callback.
 * @dev: hie device
 * @req: block request to decrypt
 * @priv: private data passed through to the decryption callback
 *
 * RETURNS:
 * The return value of the cryption callback.
 * -ENODEV, if the hie device is not registered.
 * -EINVAL, if the crypt algorithm is not supported by the device.
 * -ENOKEY, if the master key is absent.
 */
int hie_decrypt(struct hie_dev *dev, struct request *req, void *priv)
{
	int ret = hie_req_key_act(dev, req, dev->decrypt, priv);

#ifdef CONFIG_HIE_DEBUG
	if (hie_debug(HIE_DBG_HIE))
		pr_info("HIE: %s: req: %p, ret=%d\n", __func__, req, ret);
#endif
	return ret;
}
EXPORT_SYMBOL(hie_decrypt);
/*
 * hie_encrypt - resolve the request's key and invoke the encrypt callback.
 * Mirrors hie_decrypt(); see that function for the return-value contract.
 */
int hie_encrypt(struct hie_dev *dev, struct request *req, void *priv)
{
	int ret = hie_req_key_act(dev, req, dev->encrypt, priv);

#ifdef CONFIG_HIE_DEBUG
	if (hie_debug(HIE_DBG_HIE))
		pr_info("HIE: %s: req: %p, ret=%d\n", __func__, req, ret);
#endif
	return ret;
}
EXPORT_SYMBOL(hie_encrypt);
/**
 * hie_set_bio_crypt_context - attach encryption info to the bio
 * @inode: reference inode
 * @bio: target bio
 *
 * Walks the registered file systems until one recognizes the inode.
 *
 * RETURNS:
 * 0, the inode has enabled encryption, and its encryption info is
 * successfully attached to the bio.
 * -EINVAL, the inode has not enabled encryption, or there's no matching
 * file system.
 */
int hie_set_bio_crypt_context(struct inode *inode, struct bio *bio)
{
	struct hie_fs *fs, *n;
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&hie_fs_list_lock, irq_flags);
	list_for_each_entry_safe(fs, n, &hie_fs_list, list) {
		if (!fs->set_bio_context)
			continue;
		ret = fs->set_bio_context(inode, bio);
		/* -EINVAL means "not mine": try the next file system */
		if (ret != -EINVAL)
			break;
	}
	spin_unlock_irqrestore(&hie_fs_list_lock, irq_flags);

	return ret;
}
EXPORT_SYMBOL(hie_set_bio_crypt_context);
/**
 * hie_set_dio_crypt_context - attach encryption info to a direct-IO bio
 * @inode: reference inode
 * @bio: target sdio->bio
 * @fs_offset: file offset of the bio's data, used to seed the IV
 *
 * RETURNS:
 * 0, the inode has enabled encryption, and its encryption info is
 * successfully attached to the bio.
 * -EINVAL, the inode has not enabled encryption, or there's no matching
 * file system.
 */
int hie_set_dio_crypt_context(struct inode *inode, struct bio *bio,
	loff_t fs_offset)
{
	int ret = hie_set_bio_crypt_context(inode, bio);

	/* page-index IV scheme: derive the IV from the file offset */
	if (bio_encrypted(bio) && bio_bcf_test(bio, BC_IV_PAGE_IDX))
		bio_bc_iv_set(bio, fs_offset >> PAGE_SHIFT);

	return ret;
}
EXPORT_SYMBOL(hie_set_dio_crypt_context);
/**
 * hie_get_iv - get initialization vector (iv) from the request.
 * The iv is the file logical block number translated from
 * (page index * page size + page offset) / physical block size,
 * combined with the inode number in the upper 32 bits.
 * @req: request
 *
 * RETURNS:
 * Zero, if the iv was not assigned in the request,
 * or the request was not crypted.
 * Non-zero, the iv of the starting bio.
 */
u64 hie_get_iv(struct request *req)
{
	u64 ino;
	u64 iv;
	unsigned int bz_bits;
	struct bio *bio = req->bio;

	if (!req->q)
		return 0;
	if (!hie_request_crypted(req))
		return 0;
	/* only the page-index IV scheme carries an IV in the bio */
	if (!bio_bcf_test(bio, BC_IV_PAGE_IDX))
		return 0;
	ino = bio_bc_inode(bio);
	iv = bio_bc_iv_get(bio);
	WARN_ON(iv == BC_INVALID_IV);
	/* rescale the page-granular IV to physical-block granularity */
	bz_bits = blksize_bits(queue_physical_block_size(req->q));
	if (bz_bits < PAGE_SHIFT) {
		struct bio_vec iter; /* holds the first bvec, despite the name */

		bio_get_first_bvec(bio, &iter);
		iv = (iv << (PAGE_SHIFT - bz_bits)) +
			(iter.bv_offset >> bz_bits);
	} else
		iv = iv >> (bz_bits - PAGE_SHIFT);
	/* pack: inode number in the high 32 bits, block IV in the low 32 */
	iv = (ino << 32 | (iv & 0xFFFFFFFF));
	/* zero means "no IV" to callers; remap an all-zero result */
	if (!iv)
		iv = ~iv;
	return iv;
}
EXPORT_SYMBOL(hie_get_iv);
#ifdef CONFIG_HIE_DEBUG
/* seq_file start: exactly one record, so only *pos == 0 is valid.
 * Returns an opaque non-NULL token (index + 1) or NULL to stop. */
static void *hie_seq_start(struct seq_file *seq, loff_t *pos)
{
	unsigned long idx;

	if (*pos < 0 || *pos >= 1)
		return NULL;

	idx = (unsigned long)*pos + 1;
	return (void *)idx;
}
/* seq_file next: advance *pos; with a single record this always ends
 * the iteration by returning NULL. */
static void *hie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	unsigned long idx;

	++*pos;
	if (*pos < 0 || *pos >= 1)
		return NULL;

	idx = (unsigned long)*pos + 1;
	return (void *)idx;
}
/* seq_file stop: no per-iteration state to release. */
static void hie_seq_stop(struct seq_file *seq, void *v)
{
}
/* Human-readable names for the BC_* crypt-mode flags; the zero-flag
 * entry terminates the table for the printing loop below. */
static struct {
	const char *name;
	unsigned int flag;
} crypt_type[] = {
	{"AES_128_XTS", BC_AES_128_XTS},
	{"AES_192_XTS", BC_AES_192_XTS},
	{"AES_256_XTS", BC_AES_256_XTS},
	{"AES_128_CBC", BC_AES_128_CBC},
	{"AES_256_CBC", BC_AES_256_CBC},
	{"AES_128_ECB", BC_AES_128_ECB},
	{"AES_256_ECB", BC_AES_256_ECB},
	{"", 0}
};
/* Print one device's name and every crypt mode it advertises. */
static int hie_seq_status_dev(struct seq_file *seq, struct hie_dev *dev)
{
	int i;

	seq_printf(seq, "<%s>\n", dev->name);
	seq_puts(seq, "supported modes:");
	for (i = 0; crypt_type[i].flag; i++) {
		if (dev->mode & crypt_type[i].flag)
			seq_printf(seq, " %s", crypt_type[i].name);
	}
	seq_puts(seq, "\n");

	return 0;
}
/*
 * hie_seq_status_show - render the debugfs "status" node: the compiled
 * crypt configuration, the registered file systems and devices.
 *
 * FIX: correct the user-visible typos "dummy-crpyt" and
 * "hardware-inline-crpyt" -> "...crypt".
 */
static int hie_seq_status_show(struct seq_file *seq, void *v)
{
	struct hie_dev *dev, *dn;
	struct hie_fs *fs, *fn;
	unsigned long flags;

	seq_puts(seq, "[Config]\n");
	if (hie_is_nocrypt())
		seq_puts(seq, "no-crypt\n");
	else if (hie_is_dummy()) {
		seq_puts(seq, "dummy-crypt");
#ifdef CONFIG_HIE_DUMMY_CRYPT_KEY_SWITCH
		seq_puts(seq, " (key switch)");
#endif
#ifdef CONFIG_HIE_DUMMY_CRYPT_IV
		seq_puts(seq, " (iv.)");
#endif
		seq_puts(seq, "\n");
	} else
		seq_puts(seq, "hardware-inline-crypt\n");

	seq_puts(seq, "\n[Registered file systems]\n");
	spin_lock_irqsave(&hie_fs_list_lock, flags);
	list_for_each_entry_safe(fs, fn, &hie_fs_list, list) {
		seq_printf(seq, "%s\n", fs->name);
	}
	spin_unlock_irqrestore(&hie_fs_list_lock, flags);

	seq_puts(seq, "\n[Registered devices]\n");
	spin_lock_irqsave(&hie_dev_list_lock, flags);
	list_for_each_entry_safe(dev, dn, &hie_dev_list, list) {
		hie_seq_status_dev(seq, dev);
	}
	spin_unlock_irqrestore(&hie_dev_list_lock, flags);

	return 0;
}
/* Single-record sequence: start/next bound *pos to [0, 1). */
static const struct seq_operations hie_seq_ops = {
	.start = hie_seq_start,
	.next = hie_seq_next,
	.stop = hie_seq_stop,
	.show = hie_seq_status_show,
};
/* Open the status node as a standard seq_file iteration. */
static int hie_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &hie_seq_ops);
}
/* Writes to the status node are accepted and silently discarded. */
static ssize_t hie_seq_write(struct file *file, const char __user *ubuf,
	size_t count, loff_t *ppos)
{
	return count;
}
/* File operations for the debugfs "status" node (read-mostly). */
static const struct file_operations hie_status_fops = {
	.owner = THIS_MODULE,
	.open = hie_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
	.write = hie_seq_write,
};
#endif
/*
 * Create the "hie" debugfs directory and its knobs: the debug mask,
 * inode/sector filters and the status view.  No-op unless
 * CONFIG_HIE_DEBUG is set; safe to call more than once.
 */
static void hie_init_debugfs(void)
{
#ifdef CONFIG_HIE_DEBUG
	/* already initialized */
	if (hie_droot)
		return;
	hie_droot = debugfs_create_dir("hie", NULL);
	/* NOTE(review): only IS_ERR is checked here; a NULL return (the
	 * failure mode on some kernel versions) would slip through and the
	 * files below would land in the debugfs root — confirm intended. */
	if (IS_ERR(hie_droot)) {
		pr_info("[HIE] fail to create debugfs root\n");
		hie_droot = NULL;
		return;
	}
	hie_dbg = 0;
	hie_dbg_ino = 0;
	hie_dbg_sector = 0;
	hie_ddebug = debugfs_create_u32("debug", 0660, hie_droot, &hie_dbg);
	debugfs_create_u64("ino", 0660, hie_droot, &hie_dbg_ino);
	debugfs_create_u64("sector", 0660, hie_droot, &hie_dbg_sector);
	debugfs_create_file("status", 0444, hie_droot,
		(void *)0, &hie_status_fops);
#endif
}
/* Module init: create the debugfs entries (no-op without CONFIG_HIE_DEBUG). */
static int __init hie_init(void)
{
	hie_init_debugfs();
	return 0;
}
/* Nothing to tear down; debugfs entries persist for the kernel lifetime. */
static void __exit hie_exit(void)
{
}
module_init(hie_init);
module_exit(hie_exit);
MODULE_AUTHOR("Perry Hsu <perry.hsu@mediatek.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hardware Inline Encryption");