Merge android-4.9 into android-msm-bluecross-4.9-lts

Merge android-4.9 common kernel into B1/C1 kernel LTS staging branch.

Since android-msm-bluecross-4.9-lts is currently merged up to LTS 4.9.150,
I deliberately chose to merge only up to:

commit ca975794ea ("ANDROID: cuttlefish: enable CONFIG_NET_CLS_BPF=y")

which is the last commit on android-4.9 before LTS 4.9.151 was merged in.

drivers/android/binder.c
reviewed by tkjos@

drivers/block/zram/
resolved and reviewed by minchan@

drivers/cpufreq/cpufreq_times.c
reviewed by cfries@

drivers/staging/android/ion/
reviewed by pgynther@

fs/crypto/
resolved and reviewed by ebiggers@ and jaegeuk@

fs/f2fs/
resolved and reviewed by jaegeuk@

fs/squashfs/
resolved with 'git checkout --theirs' (take android-4.9 as-is)

kernel/sched/
resolved and reviewed by tkjos@

Bug: 115649324
Test: Manual testing
Change-Id: Ib374f720a7ab4cf4146177584e486124eff75de3
Signed-off-by: Petri Gynther <pgynther@google.com>
Petri Gynther committed 2019-01-24 16:29:12 -08:00
195 changed files with 24876 additions and 5454 deletions


@@ -0,0 +1,16 @@
What: /proc/uid_concurrent_active_time
Date: December 2018
Contact: Connor O'Brien <connoro@google.com>
Description:
The /proc/uid_concurrent_active_time file displays aggregated cputime
numbers for each uid, broken down by the total number of cores that were
active while the uid's task was running.
What: /proc/uid_concurrent_policy_time
Date: December 2018
Contact: Connor O'Brien <connoro@google.com>
Description:
The /proc/uid_concurrent_policy_time file displays aggregated cputime
numbers for each uid, broken down based on the cpufreq policy
of the core used by the uid's task and the number of cores associated
with that policy that were active while the uid's task was running.
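
As an illustration of how these interfaces are consumed, here is a minimal
user-space sketch (an addition for this write-up, not part of the change; it
assumes only that the files are line-oriented text, as described above):

#include <stdio.h>

/* Minimal sketch: dump one of the new proc interfaces. Assumes only
 * line-oriented text output; the exact per-line layout is not
 * specified here. */
int main(void)
{
	FILE *f = fopen("/proc/uid_concurrent_active_time", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one aggregated record per line */
	fclose(f);
	return 0;
}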


@@ -76,6 +76,20 @@ submit_from_crypt_cpus
thread because it benefits CFQ to have writes submitted using the
same context.
sector_size:<bytes>
Use <bytes> as the encryption unit instead of 512-byte sectors.
This option can be in the range 512 - 4096 bytes and must be a power of two.
The virtual device will announce this size as its minimum I/O size and
logical sector size.
iv_large_sectors
IV generators will use the sector number counted in <sector_size> units
instead of the default 512-byte sectors.
For example, if <sector_size> is 4096 bytes, the plain64 IV for the second
sector will be 8 (without the flag) and 1 if iv_large_sectors is present.
The <iv_offset> must be a multiple of <sector_size> (in 512-byte units)
if this flag is specified.
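
To make the IV arithmetic concrete, here is a small stand-alone sketch
(an illustrative helper, not dm-crypt code) that reproduces the worked
example above:

#include <stdint.h>
#include <stdio.h>

/* plain64-style IV numbering as described above: sectors of 512 bytes
 * by default, or of <sector_size> bytes when iv_large_sectors is set.
 * 'offset' is the byte offset within the device. Illustrative only. */
static uint64_t plain64_iv(uint64_t offset, uint32_t sector_size,
			   int iv_large_sectors)
{
	return offset / (iv_large_sectors ? sector_size : 512);
}

int main(void)
{
	/* The second 4096-byte sector starts at byte offset 4096. */
	printf("%llu\n", (unsigned long long)plain64_iv(4096, 4096, 0)); /* 8 */
	printf("%llu\n", (unsigned long long)plain64_iv(4096, 4096, 1)); /* 1 */
	return 0;
}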
Example scripts
===============
LUKS (Linux Unified Key Setup) is now the preferred way to set up disk


@@ -3988,6 +3988,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
last alloc / free. For more information see
Documentation/vm/slub.txt.
slub_memcg_sysfs= [MM, SLUB]
Determines whether to enable sysfs directories for
memory cgroup sub-caches. 1 to enable, 0 to disable.
The default is determined by CONFIG_SLUB_MEMCG_SYSFS_ON.
Enabling this can lead to a very high number of debug
directories and files being created under
/sys/kernel/slub.
slub_max_order= [MM, SLUB]
Determines the maximum allowed order for slabs.
A high setting may cause OOMs due to memory


@@ -520,6 +520,9 @@ ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),)
CLANG_TRIPLE ?= $(CROSS_COMPILE)
CLANG_TARGET := --target=$(notdir $(CLANG_TRIPLE:%-=%))
ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_TARGET)), y)
$(error "Clang with Android --target detected. Did you specify CLANG_TRIPLE?")
endif
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
CLANG_PREFIX := --prefix=$(GCC_TOOLCHAIN_DIR)
GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
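
As a usage sketch (the toolchain triples below are examples, not mandated by
this change): when CROSS_COMPILE points at an Android triple, CLANG_TRIPLE
must name a non-Android triple, or the new check aborts the build:

# Illustrative invocation only; adjust ARCH and triples for your setup.
make ARCH=arm64 CC=clang \
     CROSS_COMPILE=aarch64-linux-android- \
     CLANG_TRIPLE=aarch64-linux-gnu- \
     defconfig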


@@ -530,6 +530,7 @@ config LTO_CLANG
bool "Use clang Link Time Optimization (LTO) (EXPERIMENTAL)"
depends on ARCH_SUPPORTS_LTO_CLANG
depends on !FTRACE_MCOUNT_RECORD || HAVE_C_RECORDMCOUNT
depends on !KASAN
select LTO
select THIN_ARCHIVES
select LD_DEAD_CODE_DATA_ELIMINATION


@@ -62,34 +62,25 @@ config CRYPTO_SHA512_ARM
using optimized ARM assembler and NEON, when available.
config CRYPTO_AES_ARM
tristate "AES cipher algorithms (ARM-asm)"
depends on ARM
tristate "Scalar AES cipher for ARM"
select CRYPTO_ALGAPI
select CRYPTO_AES
help
Use optimized AES assembler routines for ARM platforms.
AES cipher algorithms (FIPS-197). AES uses the Rijndael
algorithm.
On ARM processors without the Crypto Extensions, this is the
fastest AES implementation for single blocks. For multiple
blocks, the NEON bit-sliced implementation is usually faster.
Rijndael appears to be consistently a very good performer in
both hardware and software across a wide range of computing
environments regardless of its use in feedback or non-feedback
modes. Its key setup time is excellent, and its key agility is
good. Rijndael's very low memory requirements make it very well
suited for restricted-space environments, in which it also
demonstrates excellent performance. Rijndael's operations are
among the easiest to defend against power and timing attacks.
The AES specifies three key sizes: 128, 192 and 256 bits
See <http://csrc.nist.gov/encryption/aes/> for more information.
This implementation may be vulnerable to cache timing attacks,
since it uses lookup tables. However, as countermeasures it
disables IRQs and preloads the tables; it is hoped this makes
such attacks very difficult.
config CRYPTO_AES_ARM_BS
tristate "Bit sliced AES using NEON instructions"
depends on KERNEL_MODE_NEON
select CRYPTO_ALGAPI
select CRYPTO_AES_ARM
select CRYPTO_ABLK_HELPER
help
Use a faster and more secure NEON based implementation of AES in CBC,
@@ -120,11 +111,15 @@ config CRYPTO_GHASH_ARM_CE
that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64)
that is part of the ARMv8 Crypto Extensions
config CRYPTO_SPECK_NEON
tristate "NEON accelerated Speck cipher algorithms"
config CRYPTO_CHACHA20_NEON
tristate "NEON accelerated ChaCha stream cipher algorithms"
depends on KERNEL_MODE_NEON
select CRYPTO_BLKCIPHER
select CRYPTO_GF128MUL
select CRYPTO_SPECK
select CRYPTO_CHACHA20
config CRYPTO_NHPOLY1305_NEON
tristate "NEON accelerated NHPoly1305 hash function (for Adiantum)"
depends on KERNEL_MODE_NEON
select CRYPTO_NHPOLY1305
endif


@@ -8,7 +8,8 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o
ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
@@ -25,8 +26,8 @@ $(warning $(ce-obj-y) $(ce-obj-m))
endif
endif
aes-arm-y := aes-armv4.o aes_glue.o
aes-arm-bs-y := aesbs-core.o aesbs-glue.o
aes-arm-y := aes-cipher-core.o aes-cipher-glue.o
aes-arm-bs-y := aes-armv4.o aesbs-core.o aesbs-glue.o
sha1-arm-y := sha1-armv4-large.o sha1_glue.o
sha1-arm-neon-y := sha1-armv7-neon.o sha1_neon_glue.o
sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o
@@ -37,7 +38,8 @@ sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o
aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
speck-neon-y := speck-neon-core.o speck-neon-glue.o
chacha-neon-y := chacha-neon-core.o chacha-neon-glue.o
nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) > $(@)


@@ -0,0 +1,264 @@
/*
* Scalar AES core transform
*
* Copyright (C) 2017 Linaro Ltd.
* Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>
.text
.align 5
rk .req r0
rounds .req r1
in .req r2
out .req r3
ttab .req ip
t0 .req lr
t1 .req r2
t2 .req r3
.macro __select, out, in, idx
.if __LINUX_ARM_ARCH__ < 7
and \out, \in, #0xff << (8 * \idx)
.else
ubfx \out, \in, #(8 * \idx), #8
.endif
.endm
.macro __load, out, in, idx, sz, op
.if __LINUX_ARM_ARCH__ < 7 && \idx > 0
ldr\op \out, [ttab, \in, lsr #(8 * \idx) - \sz]
.else
ldr\op \out, [ttab, \in, lsl #\sz]
.endif
.endm
.macro __hround, out0, out1, in0, in1, in2, in3, t3, t4, enc, sz, op, oldcpsr
__select \out0, \in0, 0
__select t0, \in1, 1
__load \out0, \out0, 0, \sz, \op
__load t0, t0, 1, \sz, \op
.if \enc
__select \out1, \in1, 0
__select t1, \in2, 1
.else
__select \out1, \in3, 0
__select t1, \in0, 1
.endif
__load \out1, \out1, 0, \sz, \op
__select t2, \in2, 2
__load t1, t1, 1, \sz, \op
__load t2, t2, 2, \sz, \op
eor \out0, \out0, t0, ror #24
__select t0, \in3, 3
.if \enc
__select \t3, \in3, 2
__select \t4, \in0, 3
.else
__select \t3, \in1, 2
__select \t4, \in2, 3
.endif
__load \t3, \t3, 2, \sz, \op
__load t0, t0, 3, \sz, \op
__load \t4, \t4, 3, \sz, \op
.ifnb \oldcpsr
/*
* This is the final round and we're done with all data-dependent table
* lookups, so we can safely re-enable interrupts.
*/
restore_irqs \oldcpsr
.endif
eor \out1, \out1, t1, ror #24
eor \out0, \out0, t2, ror #16
ldm rk!, {t1, t2}
eor \out1, \out1, \t3, ror #16
eor \out0, \out0, t0, ror #8
eor \out1, \out1, \t4, ror #8
eor \out0, \out0, t1
eor \out1, \out1, t2
.endm
.macro fround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op, oldcpsr
__hround \out0, \out1, \in0, \in1, \in2, \in3, \out2, \out3, 1, \sz, \op
__hround \out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1, \sz, \op, \oldcpsr
.endm
.macro iround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op, oldcpsr
__hround \out0, \out1, \in0, \in3, \in2, \in1, \out2, \out3, 0, \sz, \op
__hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op, \oldcpsr
.endm
.macro __rev, out, in
.if __LINUX_ARM_ARCH__ < 6
lsl t0, \in, #24
and t1, \in, #0xff00
and t2, \in, #0xff0000
orr \out, t0, \in, lsr #24
orr \out, \out, t1, lsl #8
orr \out, \out, t2, lsr #8
.else
rev \out, \in
.endif
.endm
.macro __adrl, out, sym, c
.if __LINUX_ARM_ARCH__ < 7
ldr\c \out, =\sym
.else
movw\c \out, #:lower16:\sym
movt\c \out, #:upper16:\sym
.endif
.endm
.macro do_crypt, round, ttab, ltab, bsz
push {r3-r11, lr}
// Load keys first, to reduce latency in case they're not cached yet.
ldm rk!, {r8-r11}
ldr r4, [in]
ldr r5, [in, #4]
ldr r6, [in, #8]
ldr r7, [in, #12]
#ifdef CONFIG_CPU_BIG_ENDIAN
__rev r4, r4
__rev r5, r5
__rev r6, r6
__rev r7, r7
#endif
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
eor r7, r7, r11
__adrl ttab, \ttab
/*
* Disable interrupts and prefetch the 1024-byte 'ft' or 'it' table into
* L1 cache, assuming cacheline size >= 32. This is a hardening measure
* intended to make cache-timing attacks more difficult. They may not
* be fully prevented, however; see the paper
* https://cr.yp.to/antiforgery/cachetiming-20050414.pdf
* ("Cache-timing attacks on AES") for a discussion of the many
* difficulties involved in writing truly constant-time AES software.
*/
save_and_disable_irqs t0
.set i, 0
.rept 1024 / 128
ldr r8, [ttab, #i + 0]
ldr r9, [ttab, #i + 32]
ldr r10, [ttab, #i + 64]
ldr r11, [ttab, #i + 96]
.set i, i + 128
.endr
push {t0} // oldcpsr
tst rounds, #2
bne 1f
0: \round r8, r9, r10, r11, r4, r5, r6, r7
\round r4, r5, r6, r7, r8, r9, r10, r11
1: subs rounds, rounds, #4
\round r8, r9, r10, r11, r4, r5, r6, r7
bls 2f
\round r4, r5, r6, r7, r8, r9, r10, r11
b 0b
2: .ifb \ltab
add ttab, ttab, #1
.else
__adrl ttab, \ltab
// Prefetch inverse S-box for final round; see explanation above
.set i, 0
.rept 256 / 64
ldr t0, [ttab, #i + 0]
ldr t1, [ttab, #i + 32]
.set i, i + 64
.endr
.endif
pop {rounds} // oldcpsr
\round r4, r5, r6, r7, r8, r9, r10, r11, \bsz, b, rounds
#ifdef CONFIG_CPU_BIG_ENDIAN
__rev r4, r4
__rev r5, r5
__rev r6, r6
__rev r7, r7
#endif
ldr out, [sp]
str r4, [out]
str r5, [out, #4]
str r6, [out, #8]
str r7, [out, #12]
pop {r3-r11, pc}
.align 3
.ltorg
.endm
ENTRY(__aes_arm_encrypt)
do_crypt fround, crypto_ft_tab,, 2
ENDPROC(__aes_arm_encrypt)
.align 5
ENTRY(__aes_arm_decrypt)
do_crypt iround, crypto_it_tab, __aes_arm_inverse_sbox, 0
ENDPROC(__aes_arm_decrypt)
.section ".rodata", "a"
.align L1_CACHE_SHIFT
.type __aes_arm_inverse_sbox, %object
__aes_arm_inverse_sbox:
.byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
.byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
.byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
.byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
.byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
.byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
.byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
.byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
.byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
.byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
.byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
.byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
.byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
.byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
.byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
.byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
.byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
.byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
.byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
.byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
.byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
.byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
.byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
.byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
.byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
.byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
.byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
.byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
.byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
.byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
.size __aes_arm_inverse_sbox, . - __aes_arm_inverse_sbox


@@ -0,0 +1,74 @@
/*
* Scalar AES core transform
*
* Copyright (C) 2017 Linaro Ltd.
* Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <crypto/aes.h>
#include <linux/crypto.h>
#include <linux/module.h>
asmlinkage void __aes_arm_encrypt(u32 *rk, int rounds, const u8 *in, u8 *out);
EXPORT_SYMBOL(__aes_arm_encrypt);
asmlinkage void __aes_arm_decrypt(u32 *rk, int rounds, const u8 *in, u8 *out);
EXPORT_SYMBOL(__aes_arm_decrypt);
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
int rounds = 6 + ctx->key_length / 4;
__aes_arm_encrypt(ctx->key_enc, rounds, in, out);
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
int rounds = 6 + ctx->key_length / 4;
__aes_arm_decrypt(ctx->key_dec, rounds, in, out);
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-arm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_module = THIS_MODULE,
.cra_cipher.cia_min_keysize = AES_MIN_KEY_SIZE,
.cra_cipher.cia_max_keysize = AES_MAX_KEY_SIZE,
.cra_cipher.cia_setkey = crypto_aes_set_key,
.cra_cipher.cia_encrypt = aes_encrypt,
.cra_cipher.cia_decrypt = aes_decrypt,
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
.cra_alignmask = 3,
#endif
};
static int __init aes_init(void)
{
return crypto_register_alg(&aes_alg);
}
static void __exit aes_fini(void)
{
crypto_unregister_alg(&aes_alg);
}
module_init(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Scalar AES cipher for ARM");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("aes");


@@ -1,98 +0,0 @@
/*
* Glue Code for the asm optimized version of the AES Cipher Algorithm
*/
#include <linux/module.h>
#include <linux/crypto.h>
#include <crypto/aes.h>
#include "aes_glue.h"
EXPORT_SYMBOL(AES_encrypt);
EXPORT_SYMBOL(AES_decrypt);
EXPORT_SYMBOL(private_AES_set_encrypt_key);
EXPORT_SYMBOL(private_AES_set_decrypt_key);
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct AES_CTX *ctx = crypto_tfm_ctx(tfm);
AES_encrypt(src, dst, &ctx->enc_key);
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct AES_CTX *ctx = crypto_tfm_ctx(tfm);
AES_decrypt(src, dst, &ctx->dec_key);
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct AES_CTX *ctx = crypto_tfm_ctx(tfm);
switch (key_len) {
case AES_KEYSIZE_128:
key_len = 128;
break;
case AES_KEYSIZE_192:
key_len = 192;
break;
case AES_KEYSIZE_256:
key_len = 256;
break;
default:
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
if (private_AES_set_encrypt_key(in_key, key_len, &ctx->enc_key) == -1) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
/* private_AES_set_decrypt_key expects an encryption key as input */
ctx->dec_key = ctx->enc_key;
if (private_AES_set_decrypt_key(in_key, key_len, &ctx->dec_key) == -1) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
return 0;
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-asm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct AES_CTX),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = aes_set_key,
.cia_encrypt = aes_encrypt,
.cia_decrypt = aes_decrypt
}
}
};
static int __init aes_init(void)
{
return crypto_register_alg(&aes_alg);
}
static void __exit aes_fini(void)
{
crypto_unregister_alg(&aes_alg);
}
module_init(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");
MODULE_ALIAS_CRYPTO("aes-asm");
MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");


@@ -0,0 +1,560 @@
/*
* ChaCha/XChaCha NEON helper functions
*
* Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Based on:
* ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSE3 functions
*
* Copyright (C) 2015 Martin Willi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
/*
* NEON doesn't have a rotate instruction. The alternatives are, more or less:
*
* (a) vshl.u32 + vsri.u32 (needs temporary register)
* (b) vshl.u32 + vshr.u32 + vorr (needs temporary register)
* (c) vrev32.16 (16-bit rotations only)
* (d) vtbl.8 + vtbl.8 (multiple of 8 bits rotations only,
* needs index vector)
*
* ChaCha has 16, 12, 8, and 7-bit rotations. For the 12 and 7-bit rotations,
* the only choices are (a) and (b). We use (a) since it takes two-thirds the
* cycles of (b) on both Cortex-A7 and Cortex-A53.
*
* For the 16-bit rotation, we use vrev32.16 since it's consistently fastest
* and doesn't need a temporary register.
*
* For the 8-bit rotation, we use vtbl.8 + vtbl.8. On Cortex-A7, this sequence
* is twice as fast as (a), even when doing (a) on multiple registers
* simultaneously to eliminate the stall between vshl and vsri. Also, it
* parallelizes better when temporary registers are scarce.
*
* A disadvantage is that on Cortex-A53, the vtbl sequence is the same speed as
* (a), so the need to load the rotation table actually makes the vtbl method
* slightly slower overall on that CPU (~1.3% slower ChaCha20). Still, it
* seems to be a good compromise to get a more significant speed boost on some
* CPUs, e.g. ~4.8% faster ChaCha20 on Cortex-A7.
*/
#include <linux/linkage.h>
.text
.fpu neon
.align 5
/*
* chacha_permute - permute one block
*
* Permute one 64-byte block where the state matrix is stored in the four NEON
* registers q0-q3. It performs matrix operations on four words in parallel,
* but requires shuffling to rearrange the words after each round.
*
* The round count is given in r3.
*
* Clobbers: r3, ip, q4-q5
*/
chacha_permute:
adr ip, .Lrol8_table
vld1.8 {d10}, [ip, :64]
.Ldoubleround:
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
vadd.i32 q0, q0, q1
veor q3, q3, q0
vrev32.16 q3, q3
// x2 += x3, x1 = rotl32(x1 ^ x2, 12)
vadd.i32 q2, q2, q3
veor q4, q1, q2
vshl.u32 q1, q4, #12
vsri.u32 q1, q4, #20
// x0 += x1, x3 = rotl32(x3 ^ x0, 8)
vadd.i32 q0, q0, q1
veor q3, q3, q0
vtbl.8 d6, {d6}, d10
vtbl.8 d7, {d7}, d10
// x2 += x3, x1 = rotl32(x1 ^ x2, 7)
vadd.i32 q2, q2, q3
veor q4, q1, q2
vshl.u32 q1, q4, #7
vsri.u32 q1, q4, #25
// x1 = shuffle32(x1, MASK(0, 3, 2, 1))
vext.8 q1, q1, q1, #4
// x2 = shuffle32(x2, MASK(1, 0, 3, 2))
vext.8 q2, q2, q2, #8
// x3 = shuffle32(x3, MASK(2, 1, 0, 3))
vext.8 q3, q3, q3, #12
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
vadd.i32 q0, q0, q1
veor q3, q3, q0
vrev32.16 q3, q3
// x2 += x3, x1 = rotl32(x1 ^ x2, 12)
vadd.i32 q2, q2, q3
veor q4, q1, q2
vshl.u32 q1, q4, #12
vsri.u32 q1, q4, #20
// x0 += x1, x3 = rotl32(x3 ^ x0, 8)
vadd.i32 q0, q0, q1
veor q3, q3, q0
vtbl.8 d6, {d6}, d10
vtbl.8 d7, {d7}, d10
// x2 += x3, x1 = rotl32(x1 ^ x2, 7)
vadd.i32 q2, q2, q3
veor q4, q1, q2
vshl.u32 q1, q4, #7
vsri.u32 q1, q4, #25
// x1 = shuffle32(x1, MASK(2, 1, 0, 3))
vext.8 q1, q1, q1, #12
// x2 = shuffle32(x2, MASK(1, 0, 3, 2))
vext.8 q2, q2, q2, #8
// x3 = shuffle32(x3, MASK(0, 3, 2, 1))
vext.8 q3, q3, q3, #4
subs r3, r3, #2
bne .Ldoubleround
bx lr
ENDPROC(chacha_permute)
ENTRY(chacha_block_xor_neon)
// r0: Input state matrix, s
// r1: 1 data block output, o
// r2: 1 data block input, i
// r3: nrounds
push {lr}
// x0..3 = s0..3
add ip, r0, #0x20
vld1.32 {q0-q1}, [r0]
vld1.32 {q2-q3}, [ip]
vmov q8, q0
vmov q9, q1
vmov q10, q2
vmov q11, q3
bl chacha_permute
add ip, r2, #0x20
vld1.8 {q4-q5}, [r2]
vld1.8 {q6-q7}, [ip]
// o0 = i0 ^ (x0 + s0)
vadd.i32 q0, q0, q8
veor q0, q0, q4
// o1 = i1 ^ (x1 + s1)
vadd.i32 q1, q1, q9
veor q1, q1, q5
// o2 = i2 ^ (x2 + s2)
vadd.i32 q2, q2, q10
veor q2, q2, q6
// o3 = i3 ^ (x3 + s3)
vadd.i32 q3, q3, q11
veor q3, q3, q7
add ip, r1, #0x20
vst1.8 {q0-q1}, [r1]
vst1.8 {q2-q3}, [ip]
pop {pc}
ENDPROC(chacha_block_xor_neon)
ENTRY(hchacha_block_neon)
// r0: Input state matrix, s
// r1: output (8 32-bit words)
// r2: nrounds
push {lr}
vld1.32 {q0-q1}, [r0]!
vld1.32 {q2-q3}, [r0]
mov r3, r2
bl chacha_permute
vst1.32 {q0}, [r1]!
vst1.32 {q3}, [r1]
pop {pc}
ENDPROC(hchacha_block_neon)
.align 4
.Lctrinc: .word 0, 1, 2, 3
.Lrol8_table: .byte 3, 0, 1, 2, 7, 4, 5, 6
.align 5
ENTRY(chacha_4block_xor_neon)
push {r4-r5}
mov r4, sp // preserve the stack pointer
sub ip, sp, #0x20 // allocate a 32 byte buffer
bic ip, ip, #0x1f // aligned to 32 bytes
mov sp, ip
// r0: Input state matrix, s
// r1: 4 data blocks output, o
// r2: 4 data blocks input, i
// r3: nrounds
//
// This function encrypts four consecutive ChaCha blocks by loading
// the state matrix in NEON registers four times. The algorithm performs
// each operation on the corresponding word of each state matrix, hence
// requires no word shuffling. The words are re-interleaved before the
// final addition of the original state and the XORing step.
//
// x0..15[0-3] = s0..15[0-3]
add ip, r0, #0x20
vld1.32 {q0-q1}, [r0]
vld1.32 {q2-q3}, [ip]
adr r5, .Lctrinc
vdup.32 q15, d7[1]
vdup.32 q14, d7[0]
vld1.32 {q4}, [r5, :128]
vdup.32 q13, d6[1]
vdup.32 q12, d6[0]
vdup.32 q11, d5[1]
vdup.32 q10, d5[0]
vadd.u32 q12, q12, q4 // x12 += counter values 0-3
vdup.32 q9, d4[1]
vdup.32 q8, d4[0]
vdup.32 q7, d3[1]
vdup.32 q6, d3[0]
vdup.32 q5, d2[1]
vdup.32 q4, d2[0]
vdup.32 q3, d1[1]
vdup.32 q2, d1[0]
vdup.32 q1, d0[1]
vdup.32 q0, d0[0]
adr ip, .Lrol8_table
b 1f
.Ldoubleround4:
vld1.32 {q8-q9}, [sp, :256]
1:
// x0 += x4, x12 = rotl32(x12 ^ x0, 16)
// x1 += x5, x13 = rotl32(x13 ^ x1, 16)
// x2 += x6, x14 = rotl32(x14 ^ x2, 16)
// x3 += x7, x15 = rotl32(x15 ^ x3, 16)
vadd.i32 q0, q0, q4
vadd.i32 q1, q1, q5
vadd.i32 q2, q2, q6
vadd.i32 q3, q3, q7
veor q12, q12, q0
veor q13, q13, q1
veor q14, q14, q2
veor q15, q15, q3
vrev32.16 q12, q12
vrev32.16 q13, q13
vrev32.16 q14, q14
vrev32.16 q15, q15
// x8 += x12, x4 = rotl32(x4 ^ x8, 12)
// x9 += x13, x5 = rotl32(x5 ^ x9, 12)
// x10 += x14, x6 = rotl32(x6 ^ x10, 12)
// x11 += x15, x7 = rotl32(x7 ^ x11, 12)
vadd.i32 q8, q8, q12
vadd.i32 q9, q9, q13
vadd.i32 q10, q10, q14
vadd.i32 q11, q11, q15
vst1.32 {q8-q9}, [sp, :256]
veor q8, q4, q8
veor q9, q5, q9
vshl.u32 q4, q8, #12
vshl.u32 q5, q9, #12
vsri.u32 q4, q8, #20
vsri.u32 q5, q9, #20
veor q8, q6, q10
veor q9, q7, q11
vshl.u32 q6, q8, #12
vshl.u32 q7, q9, #12
vsri.u32 q6, q8, #20
vsri.u32 q7, q9, #20
// x0 += x4, x12 = rotl32(x12 ^ x0, 8)
// x1 += x5, x13 = rotl32(x13 ^ x1, 8)
// x2 += x6, x14 = rotl32(x14 ^ x2, 8)
// x3 += x7, x15 = rotl32(x15 ^ x3, 8)
vld1.8 {d16}, [ip, :64]
vadd.i32 q0, q0, q4
vadd.i32 q1, q1, q5
vadd.i32 q2, q2, q6
vadd.i32 q3, q3, q7
veor q12, q12, q0
veor q13, q13, q1
veor q14, q14, q2
veor q15, q15, q3
vtbl.8 d24, {d24}, d16
vtbl.8 d25, {d25}, d16
vtbl.8 d26, {d26}, d16
vtbl.8 d27, {d27}, d16
vtbl.8 d28, {d28}, d16
vtbl.8 d29, {d29}, d16
vtbl.8 d30, {d30}, d16
vtbl.8 d31, {d31}, d16
vld1.32 {q8-q9}, [sp, :256]
// x8 += x12, x4 = rotl32(x4 ^ x8, 7)
// x9 += x13, x5 = rotl32(x5 ^ x9, 7)
// x10 += x14, x6 = rotl32(x6 ^ x10, 7)
// x11 += x15, x7 = rotl32(x7 ^ x11, 7)
vadd.i32 q8, q8, q12
vadd.i32 q9, q9, q13
vadd.i32 q10, q10, q14
vadd.i32 q11, q11, q15
vst1.32 {q8-q9}, [sp, :256]
veor q8, q4, q8
veor q9, q5, q9
vshl.u32 q4, q8, #7
vshl.u32 q5, q9, #7
vsri.u32 q4, q8, #25
vsri.u32 q5, q9, #25
veor q8, q6, q10
veor q9, q7, q11
vshl.u32 q6, q8, #7
vshl.u32 q7, q9, #7
vsri.u32 q6, q8, #25
vsri.u32 q7, q9, #25
vld1.32 {q8-q9}, [sp, :256]
// x0 += x5, x15 = rotl32(x15 ^ x0, 16)
// x1 += x6, x12 = rotl32(x12 ^ x1, 16)
// x2 += x7, x13 = rotl32(x13 ^ x2, 16)
// x3 += x4, x14 = rotl32(x14 ^ x3, 16)
vadd.i32 q0, q0, q5
vadd.i32 q1, q1, q6
vadd.i32 q2, q2, q7
vadd.i32 q3, q3, q4
veor q15, q15, q0
veor q12, q12, q1
veor q13, q13, q2
veor q14, q14, q3
vrev32.16 q15, q15
vrev32.16 q12, q12
vrev32.16 q13, q13
vrev32.16 q14, q14
// x10 += x15, x5 = rotl32(x5 ^ x10, 12)
// x11 += x12, x6 = rotl32(x6 ^ x11, 12)
// x8 += x13, x7 = rotl32(x7 ^ x8, 12)
// x9 += x14, x4 = rotl32(x4 ^ x9, 12)
vadd.i32 q10, q10, q15
vadd.i32 q11, q11, q12
vadd.i32 q8, q8, q13
vadd.i32 q9, q9, q14
vst1.32 {q8-q9}, [sp, :256]
veor q8, q7, q8
veor q9, q4, q9
vshl.u32 q7, q8, #12
vshl.u32 q4, q9, #12
vsri.u32 q7, q8, #20
vsri.u32 q4, q9, #20
veor q8, q5, q10
veor q9, q6, q11
vshl.u32 q5, q8, #12
vshl.u32 q6, q9, #12
vsri.u32 q5, q8, #20
vsri.u32 q6, q9, #20
// x0 += x5, x15 = rotl32(x15 ^ x0, 8)
// x1 += x6, x12 = rotl32(x12 ^ x1, 8)
// x2 += x7, x13 = rotl32(x13 ^ x2, 8)
// x3 += x4, x14 = rotl32(x14 ^ x3, 8)
vld1.8 {d16}, [ip, :64]
vadd.i32 q0, q0, q5
vadd.i32 q1, q1, q6
vadd.i32 q2, q2, q7
vadd.i32 q3, q3, q4
veor q15, q15, q0
veor q12, q12, q1
veor q13, q13, q2
veor q14, q14, q3
vtbl.8 d30, {d30}, d16
vtbl.8 d31, {d31}, d16
vtbl.8 d24, {d24}, d16
vtbl.8 d25, {d25}, d16
vtbl.8 d26, {d26}, d16
vtbl.8 d27, {d27}, d16
vtbl.8 d28, {d28}, d16
vtbl.8 d29, {d29}, d16
vld1.32 {q8-q9}, [sp, :256]
// x10 += x15, x5 = rotl32(x5 ^ x10, 7)
// x11 += x12, x6 = rotl32(x6 ^ x11, 7)
// x8 += x13, x7 = rotl32(x7 ^ x8, 7)
// x9 += x14, x4 = rotl32(x4 ^ x9, 7)
vadd.i32 q10, q10, q15
vadd.i32 q11, q11, q12
vadd.i32 q8, q8, q13
vadd.i32 q9, q9, q14
vst1.32 {q8-q9}, [sp, :256]
veor q8, q7, q8
veor q9, q4, q9
vshl.u32 q7, q8, #7
vshl.u32 q4, q9, #7
vsri.u32 q7, q8, #25
vsri.u32 q4, q9, #25
veor q8, q5, q10
veor q9, q6, q11
vshl.u32 q5, q8, #7
vshl.u32 q6, q9, #7
vsri.u32 q5, q8, #25
vsri.u32 q6, q9, #25
subs r3, r3, #2
bne .Ldoubleround4
// x0..7[0-3] are in q0-q7, x10..15[0-3] are in q10-q15.
// x8..9[0-3] are on the stack.
// Re-interleave the words in the first two rows of each block (x0..7).
// Also add the counter values 0-3 to x12[0-3].
vld1.32 {q8}, [r5, :128] // load counter values 0-3
vzip.32 q0, q1 // => (0 1 0 1) (0 1 0 1)
vzip.32 q2, q3 // => (2 3 2 3) (2 3 2 3)
vzip.32 q4, q5 // => (4 5 4 5) (4 5 4 5)
vzip.32 q6, q7 // => (6 7 6 7) (6 7 6 7)
vadd.u32 q12, q8 // x12 += counter values 0-3
vswp d1, d4
vswp d3, d6
vld1.32 {q8-q9}, [r0]! // load s0..7
vswp d9, d12
vswp d11, d14
// Swap q1 and q4 so that we'll free up consecutive registers (q0-q1)
// after XORing the first 32 bytes.
vswp q1, q4
// First two rows of each block are (q0 q1) (q2 q6) (q4 q5) (q3 q7)
// x0..3[0-3] += s0..3[0-3] (add orig state to 1st row of each block)
vadd.u32 q0, q0, q8
vadd.u32 q2, q2, q8
vadd.u32 q4, q4, q8
vadd.u32 q3, q3, q8
// x4..7[0-3] += s4..7[0-3] (add orig state to 2nd row of each block)
vadd.u32 q1, q1, q9
vadd.u32 q6, q6, q9
vadd.u32 q5, q5, q9
vadd.u32 q7, q7, q9
// XOR first 32 bytes using keystream from first two rows of first block
vld1.8 {q8-q9}, [r2]!
veor q8, q8, q0
veor q9, q9, q1
vst1.8 {q8-q9}, [r1]!
// Re-interleave the words in the last two rows of each block (x8..15).
vld1.32 {q8-q9}, [sp, :256]
vzip.32 q12, q13 // => (12 13 12 13) (12 13 12 13)
vzip.32 q14, q15 // => (14 15 14 15) (14 15 14 15)
vzip.32 q8, q9 // => (8 9 8 9) (8 9 8 9)
vzip.32 q10, q11 // => (10 11 10 11) (10 11 10 11)
vld1.32 {q0-q1}, [r0] // load s8..15
vswp d25, d28
vswp d27, d30
vswp d17, d20
vswp d19, d22
// Last two rows of each block are (q8 q12) (q10 q14) (q9 q13) (q11 q15)
// x8..11[0-3] += s8..11[0-3] (add orig state to 3rd row of each block)
vadd.u32 q8, q8, q0
vadd.u32 q10, q10, q0
vadd.u32 q9, q9, q0
vadd.u32 q11, q11, q0
// x12..15[0-3] += s12..15[0-3] (add orig state to 4th row of each block)
vadd.u32 q12, q12, q1
vadd.u32 q14, q14, q1
vadd.u32 q13, q13, q1
vadd.u32 q15, q15, q1
// XOR the rest of the data with the keystream
vld1.8 {q0-q1}, [r2]!
veor q0, q0, q8
veor q1, q1, q12
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
veor q0, q0, q2
veor q1, q1, q6
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
veor q0, q0, q10
veor q1, q1, q14
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
veor q0, q0, q4
veor q1, q1, q5
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
veor q0, q0, q9
veor q1, q1, q13
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
veor q0, q0, q3
veor q1, q1, q7
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]
mov sp, r4 // restore original stack pointer
veor q0, q0, q11
veor q1, q1, q15
vst1.8 {q0-q1}, [r1]
pop {r4-r5}
bx lr
ENDPROC(chacha_4block_xor_neon)


@@ -0,0 +1,226 @@
/*
* ARM NEON accelerated ChaCha and XChaCha stream ciphers,
* including ChaCha20 (RFC7539)
*
* Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Based on:
* ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
*
* Copyright (C) 2015 Martin Willi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <crypto/algapi.h>
#include <crypto/chacha.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
int nrounds);
asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
int nrounds);
asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);
static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
u8 buf[CHACHA_BLOCK_SIZE];
while (bytes >= CHACHA_BLOCK_SIZE * 4) {
chacha_4block_xor_neon(state, dst, src, nrounds);
bytes -= CHACHA_BLOCK_SIZE * 4;
src += CHACHA_BLOCK_SIZE * 4;
dst += CHACHA_BLOCK_SIZE * 4;
state[12] += 4;
}
while (bytes >= CHACHA_BLOCK_SIZE) {
chacha_block_xor_neon(state, dst, src, nrounds);
bytes -= CHACHA_BLOCK_SIZE;
src += CHACHA_BLOCK_SIZE;
dst += CHACHA_BLOCK_SIZE;
state[12]++;
}
if (bytes) {
memcpy(buf, src, bytes);
chacha_block_xor_neon(state, buf, buf, nrounds);
memcpy(dst, buf, bytes);
}
}
static int chacha_neon_stream_xor(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes,
struct chacha_ctx *ctx, u8 *iv)
{
struct blkcipher_walk walk;
u32 state[16];
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, CHACHA_BLOCK_SIZE);
crypto_chacha_init(state, ctx, iv);
while (walk.nbytes >= CHACHA_BLOCK_SIZE) {
kernel_neon_begin();
chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr,
rounddown(walk.nbytes, CHACHA_BLOCK_SIZE),
ctx->nrounds);
kernel_neon_end();
err = blkcipher_walk_done(desc, &walk,
walk.nbytes % CHACHA_BLOCK_SIZE);
}
if (walk.nbytes) {
kernel_neon_begin();
chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes, ctx->nrounds);
kernel_neon_end();
err = blkcipher_walk_done(desc, &walk, 0);
}
return err;
}
static int chacha_neon(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct chacha_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
u8 *iv = desc->info;
if (nbytes <= CHACHA_BLOCK_SIZE || !may_use_simd())
return crypto_chacha_crypt(desc, dst, src, nbytes);
return chacha_neon_stream_xor(desc, dst, src, nbytes, ctx, iv);
}
static int xchacha_neon(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct chacha_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
u8 *iv = desc->info;
struct chacha_ctx subctx;
u32 state[16];
u8 real_iv[16];
if (nbytes <= CHACHA_BLOCK_SIZE || !may_use_simd())
return crypto_xchacha_crypt(desc, dst, src, nbytes);
crypto_chacha_init(state, ctx, iv);
kernel_neon_begin();
hchacha_block_neon(state, subctx.key, ctx->nrounds);
kernel_neon_end();
subctx.nrounds = ctx->nrounds;
memcpy(&real_iv[0], iv + 24, 8);
memcpy(&real_iv[8], iv + 16, 8);
return chacha_neon_stream_xor(desc, dst, src, nbytes, &subctx, real_iv);
}
static struct crypto_alg algs[] = {
{
.cra_name = "chacha20",
.cra_driver_name = "chacha20-neon",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_type = &crypto_blkcipher_type,
.cra_ctxsize = sizeof(struct chacha_ctx),
.cra_alignmask = sizeof(u32) - 1,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = CHACHA_IV_SIZE,
.geniv = "seqiv",
.setkey = crypto_chacha20_setkey,
.encrypt = chacha_neon,
.decrypt = chacha_neon,
},
},
}, {
.cra_name = "xchacha20",
.cra_driver_name = "xchacha20-neon",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_type = &crypto_blkcipher_type,
.cra_ctxsize = sizeof(struct chacha_ctx),
.cra_alignmask = sizeof(u32) - 1,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = XCHACHA_IV_SIZE,
.geniv = "seqiv",
.setkey = crypto_chacha20_setkey,
.encrypt = xchacha_neon,
.decrypt = xchacha_neon,
},
},
}, {
.cra_name = "xchacha12",
.cra_driver_name = "xchacha12-neon",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_type = &crypto_blkcipher_type,
.cra_ctxsize = sizeof(struct chacha_ctx),
.cra_alignmask = sizeof(u32) - 1,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = XCHACHA_IV_SIZE,
.geniv = "seqiv",
.setkey = crypto_chacha12_setkey,
.encrypt = xchacha_neon,
.decrypt = xchacha_neon,
},
},
},
};
static int __init chacha_simd_mod_init(void)
{
if (!(elf_hwcap & HWCAP_NEON))
return -ENODEV;
return crypto_register_algs(algs, ARRAY_SIZE(algs));
}
static void __exit chacha_simd_mod_fini(void)
{
crypto_unregister_algs(algs, ARRAY_SIZE(algs));
}
module_init(chacha_simd_mod_init);
module_exit(chacha_simd_mod_fini);
MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (NEON accelerated)");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("chacha20");
MODULE_ALIAS_CRYPTO("chacha20-neon");
MODULE_ALIAS_CRYPTO("xchacha20");
MODULE_ALIAS_CRYPTO("xchacha20-neon");
MODULE_ALIAS_CRYPTO("xchacha12");
MODULE_ALIAS_CRYPTO("xchacha12-neon");


@@ -0,0 +1,116 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* NH - ε-almost-universal hash function, NEON accelerated version
*
* Copyright 2018 Google LLC
*
* Author: Eric Biggers <ebiggers@google.com>
*/
#include <linux/linkage.h>
.text
.fpu neon
KEY .req r0
MESSAGE .req r1
MESSAGE_LEN .req r2
HASH .req r3
PASS0_SUMS .req q0
PASS0_SUM_A .req d0
PASS0_SUM_B .req d1
PASS1_SUMS .req q1
PASS1_SUM_A .req d2
PASS1_SUM_B .req d3
PASS2_SUMS .req q2
PASS2_SUM_A .req d4
PASS2_SUM_B .req d5
PASS3_SUMS .req q3
PASS3_SUM_A .req d6
PASS3_SUM_B .req d7
K0 .req q4
K1 .req q5
K2 .req q6
K3 .req q7
T0 .req q8
T0_L .req d16
T0_H .req d17
T1 .req q9
T1_L .req d18
T1_H .req d19
T2 .req q10
T2_L .req d20
T2_H .req d21
T3 .req q11
T3_L .req d22
T3_H .req d23
.macro _nh_stride k0, k1, k2, k3
// Load next message stride
vld1.8 {T3}, [MESSAGE]!
// Load next key stride
vld1.32 {\k3}, [KEY]!
// Add message words to key words
vadd.u32 T0, T3, \k0
vadd.u32 T1, T3, \k1
vadd.u32 T2, T3, \k2
vadd.u32 T3, T3, \k3
// Multiply 32x32 => 64 and accumulate
vmlal.u32 PASS0_SUMS, T0_L, T0_H
vmlal.u32 PASS1_SUMS, T1_L, T1_H
vmlal.u32 PASS2_SUMS, T2_L, T2_H
vmlal.u32 PASS3_SUMS, T3_L, T3_H
.endm
/*
* void nh_neon(const u32 *key, const u8 *message, size_t message_len,
* u8 hash[NH_HASH_BYTES])
*
* It's guaranteed that message_len % 16 == 0.
*/
ENTRY(nh_neon)
vld1.32 {K0,K1}, [KEY]!
vmov.u64 PASS0_SUMS, #0
vmov.u64 PASS1_SUMS, #0
vld1.32 {K2}, [KEY]!
vmov.u64 PASS2_SUMS, #0
vmov.u64 PASS3_SUMS, #0
subs MESSAGE_LEN, MESSAGE_LEN, #64
blt .Lloop4_done
.Lloop4:
_nh_stride K0, K1, K2, K3
_nh_stride K1, K2, K3, K0
_nh_stride K2, K3, K0, K1
_nh_stride K3, K0, K1, K2
subs MESSAGE_LEN, MESSAGE_LEN, #64
bge .Lloop4
.Lloop4_done:
ands MESSAGE_LEN, MESSAGE_LEN, #63
beq .Ldone
_nh_stride K0, K1, K2, K3
subs MESSAGE_LEN, MESSAGE_LEN, #16
beq .Ldone
_nh_stride K1, K2, K3, K0
subs MESSAGE_LEN, MESSAGE_LEN, #16
beq .Ldone
_nh_stride K2, K3, K0, K1
.Ldone:
// Sum the accumulators for each pass, then store the sums to 'hash'
vadd.u64 T0_L, PASS0_SUM_A, PASS0_SUM_B
vadd.u64 T0_H, PASS1_SUM_A, PASS1_SUM_B
vadd.u64 T1_L, PASS2_SUM_A, PASS2_SUM_B
vadd.u64 T1_H, PASS3_SUM_A, PASS3_SUM_B
vst1.8 {T0-T1}, [HASH]
bx lr
ENDPROC(nh_neon)


@@ -0,0 +1,77 @@
// SPDX-License-Identifier: GPL-2.0
/*
* NHPoly1305 - ε-almost-∆-universal hash function for Adiantum
* (NEON accelerated version)
*
* Copyright 2018 Google LLC
*/
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
asmlinkage void nh_neon(const u32 *key, const u8 *message, size_t message_len,
u8 hash[NH_HASH_BYTES]);
/* wrapper to avoid indirect call to assembly, which doesn't work with CFI */
static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
__le64 hash[NH_NUM_PASSES])
{
nh_neon(key, message, message_len, (u8 *)hash);
}
static int nhpoly1305_neon_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
if (srclen < 64 || !may_use_simd())
return crypto_nhpoly1305_update(desc, src, srclen);
do {
unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
kernel_neon_begin();
crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
kernel_neon_end();
src += n;
srclen -= n;
} while (srclen);
return 0;
}
static struct shash_alg nhpoly1305_alg = {
.base.cra_name = "nhpoly1305",
.base.cra_driver_name = "nhpoly1305-neon",
.base.cra_priority = 200,
.base.cra_ctxsize = sizeof(struct nhpoly1305_key),
.base.cra_module = THIS_MODULE,
.digestsize = POLY1305_DIGEST_SIZE,
.init = crypto_nhpoly1305_init,
.update = nhpoly1305_neon_update,
.final = crypto_nhpoly1305_final,
.setkey = crypto_nhpoly1305_setkey,
.descsize = sizeof(struct nhpoly1305_state),
};
static int __init nhpoly1305_mod_init(void)
{
if (!(elf_hwcap & HWCAP_NEON))
return -ENODEV;
return crypto_register_shash(&nhpoly1305_alg);
}
static void __exit nhpoly1305_mod_exit(void)
{
crypto_unregister_shash(&nhpoly1305_alg);
}
module_init(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (NEON-accelerated)");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("nhpoly1305");
MODULE_ALIAS_CRYPTO("nhpoly1305-neon");


@@ -1,432 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
*
* Copyright (c) 2018 Google, Inc
*
* Author: Eric Biggers <ebiggers@google.com>
*/
#include <linux/linkage.h>
.text
.fpu neon
// arguments
ROUND_KEYS .req r0 // const {u64,u32} *round_keys
NROUNDS .req r1 // int nrounds
DST .req r2 // void *dst
SRC .req r3 // const void *src
NBYTES .req r4 // unsigned int nbytes
TWEAK .req r5 // void *tweak
// registers which hold the data being encrypted/decrypted
X0 .req q0
X0_L .req d0
X0_H .req d1
Y0 .req q1
Y0_H .req d3
X1 .req q2
X1_L .req d4
X1_H .req d5
Y1 .req q3
Y1_H .req d7
X2 .req q4
X2_L .req d8
X2_H .req d9
Y2 .req q5
Y2_H .req d11
X3 .req q6
X3_L .req d12
X3_H .req d13
Y3 .req q7
Y3_H .req d15
// the round key, duplicated in all lanes
ROUND_KEY .req q8
ROUND_KEY_L .req d16
ROUND_KEY_H .req d17
// index vector for vtbl-based 8-bit rotates
ROTATE_TABLE .req d18
// multiplication table for updating XTS tweaks
GF128MUL_TABLE .req d19
GF64MUL_TABLE .req d19
// current XTS tweak value(s)
TWEAKV .req q10
TWEAKV_L .req d20
TWEAKV_H .req d21
TMP0 .req q12
TMP0_L .req d24
TMP0_H .req d25
TMP1 .req q13
TMP2 .req q14
TMP3 .req q15
.align 4
.Lror64_8_table:
.byte 1, 2, 3, 4, 5, 6, 7, 0
.Lror32_8_table:
.byte 1, 2, 3, 0, 5, 6, 7, 4
.Lrol64_8_table:
.byte 7, 0, 1, 2, 3, 4, 5, 6
.Lrol32_8_table:
.byte 3, 0, 1, 2, 7, 4, 5, 6
.Lgf128mul_table:
.byte 0, 0x87
.fill 14
.Lgf64mul_table:
.byte 0, 0x1b, (0x1b << 1), (0x1b << 1) ^ 0x1b
.fill 12
/*
* _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
*
* Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
* Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
* of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
*
* The 8-bit rotates are implemented using vtbl instead of vshr + vsli because
* the vtbl approach is faster on some processors and the same speed on others.
*/
.macro _speck_round_128bytes n
// x = ror(x, 8)
vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
// x += y
vadd.u\n X0, Y0
vadd.u\n X1, Y1
vadd.u\n X2, Y2
vadd.u\n X3, Y3
// x ^= k
veor X0, ROUND_KEY
veor X1, ROUND_KEY
veor X2, ROUND_KEY
veor X3, ROUND_KEY
// y = rol(y, 3)
vshl.u\n TMP0, Y0, #3
vshl.u\n TMP1, Y1, #3
vshl.u\n TMP2, Y2, #3
vshl.u\n TMP3, Y3, #3
vsri.u\n TMP0, Y0, #(\n - 3)
vsri.u\n TMP1, Y1, #(\n - 3)
vsri.u\n TMP2, Y2, #(\n - 3)
vsri.u\n TMP3, Y3, #(\n - 3)
// y ^= x
veor Y0, TMP0, X0
veor Y1, TMP1, X1
veor Y2, TMP2, X2
veor Y3, TMP3, X3
.endm
/*
* _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
*
* This is the inverse of _speck_round_128bytes().
*/
.macro _speck_unround_128bytes n
// y ^= x
veor TMP0, Y0, X0
veor TMP1, Y1, X1
veor TMP2, Y2, X2
veor TMP3, Y3, X3
// y = ror(y, 3)
vshr.u\n Y0, TMP0, #3
vshr.u\n Y1, TMP1, #3
vshr.u\n Y2, TMP2, #3
vshr.u\n Y3, TMP3, #3
vsli.u\n Y0, TMP0, #(\n - 3)
vsli.u\n Y1, TMP1, #(\n - 3)
vsli.u\n Y2, TMP2, #(\n - 3)
vsli.u\n Y3, TMP3, #(\n - 3)
// x ^= k
veor X0, ROUND_KEY
veor X1, ROUND_KEY
veor X2, ROUND_KEY
veor X3, ROUND_KEY
// x -= y
vsub.u\n X0, Y0
vsub.u\n X1, Y1
vsub.u\n X2, Y2
vsub.u\n X3, Y3
// x = rol(x, 8);
vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
.endm
.macro _xts128_precrypt_one dst_reg, tweak_buf, tmp
// Load the next source block
vld1.8 {\dst_reg}, [SRC]!
// Save the current tweak in the tweak buffer
vst1.8 {TWEAKV}, [\tweak_buf:128]!
// XOR the next source block with the current tweak
veor \dst_reg, TWEAKV
/*
* Calculate the next tweak by multiplying the current one by x,
* modulo p(x) = x^128 + x^7 + x^2 + x + 1.
*/
vshr.u64 \tmp, TWEAKV, #63
vshl.u64 TWEAKV, #1
veor TWEAKV_H, \tmp\()_L
vtbl.8 \tmp\()_H, {GF128MUL_TABLE}, \tmp\()_H
veor TWEAKV_L, \tmp\()_H
.endm
.macro _xts64_precrypt_two dst_reg, tweak_buf, tmp
// Load the next two source blocks
vld1.8 {\dst_reg}, [SRC]!
// Save the current two tweaks in the tweak buffer
vst1.8 {TWEAKV}, [\tweak_buf:128]!
// XOR the next two source blocks with the current two tweaks
veor \dst_reg, TWEAKV
/*
* Calculate the next two tweaks by multiplying the current ones by x^2,
* modulo p(x) = x^64 + x^4 + x^3 + x + 1.
*/
vshr.u64 \tmp, TWEAKV, #62
vshl.u64 TWEAKV, #2
vtbl.8 \tmp\()_L, {GF64MUL_TABLE}, \tmp\()_L
vtbl.8 \tmp\()_H, {GF64MUL_TABLE}, \tmp\()_H
veor TWEAKV, \tmp
.endm
/*
* _speck_xts_crypt() - Speck-XTS encryption/decryption
*
* Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
* using Speck-XTS, specifically the variant with a block size of '2n' and round
* count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
* the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
* nonzero multiple of 128.
*/
.macro _speck_xts_crypt n, decrypting
push {r4-r7}
mov r7, sp
/*
* The first four parameters were passed in registers r0-r3. Load the
* additional parameters, which were passed on the stack.
*/
ldr NBYTES, [sp, #16]
ldr TWEAK, [sp, #20]
/*
* If decrypting, modify the ROUND_KEYS parameter to point to the last
* round key rather than the first, since for decryption the round keys
* are used in reverse order.
*/
.if \decrypting
.if \n == 64
add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #3
sub ROUND_KEYS, #8
.else
add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #2
sub ROUND_KEYS, #4
.endif
.endif
// Load the index vector for vtbl-based 8-bit rotates
.if \decrypting
ldr r12, =.Lrol\n\()_8_table
.else
ldr r12, =.Lror\n\()_8_table
.endif
vld1.8 {ROTATE_TABLE}, [r12:64]
// One-time XTS preparation
/*
* Allocate stack space to store 128 bytes worth of tweaks. For
* performance, this space is aligned to a 16-byte boundary so that we
* can use the load/store instructions that declare 16-byte alignment.
*/
sub sp, #128
bic sp, #0xf
.if \n == 64
// Load first tweak
vld1.8 {TWEAKV}, [TWEAK]
// Load GF(2^128) multiplication table
ldr r12, =.Lgf128mul_table
vld1.8 {GF128MUL_TABLE}, [r12:64]
.else
// Load first tweak
vld1.8 {TWEAKV_L}, [TWEAK]
// Load GF(2^64) multiplication table
ldr r12, =.Lgf64mul_table
vld1.8 {GF64MUL_TABLE}, [r12:64]
// Calculate second tweak, packing it together with the first
vshr.u64 TMP0_L, TWEAKV_L, #63
vtbl.u8 TMP0_L, {GF64MUL_TABLE}, TMP0_L
vshl.u64 TWEAKV_H, TWEAKV_L, #1
veor TWEAKV_H, TMP0_L
.endif
.Lnext_128bytes_\@:
/*
* Load the source blocks into {X,Y}[0-3], XOR them with their XTS tweak
* values, and save the tweaks on the stack for later. Then
* de-interleave the 'x' and 'y' elements of each block, i.e. make it so
* that the X[0-3] registers contain only the second halves of blocks,
* and the Y[0-3] registers contain only the first halves of blocks.
* (Speck uses the order (y, x) rather than the more intuitive (x, y).)
*/
mov r12, sp
.if \n == 64
_xts128_precrypt_one X0, r12, TMP0
_xts128_precrypt_one Y0, r12, TMP0
_xts128_precrypt_one X1, r12, TMP0
_xts128_precrypt_one Y1, r12, TMP0
_xts128_precrypt_one X2, r12, TMP0
_xts128_precrypt_one Y2, r12, TMP0
_xts128_precrypt_one X3, r12, TMP0
_xts128_precrypt_one Y3, r12, TMP0
vswp X0_L, Y0_H
vswp X1_L, Y1_H
vswp X2_L, Y2_H
vswp X3_L, Y3_H
.else
_xts64_precrypt_two X0, r12, TMP0
_xts64_precrypt_two Y0, r12, TMP0
_xts64_precrypt_two X1, r12, TMP0
_xts64_precrypt_two Y1, r12, TMP0
_xts64_precrypt_two X2, r12, TMP0
_xts64_precrypt_two Y2, r12, TMP0
_xts64_precrypt_two X3, r12, TMP0
_xts64_precrypt_two Y3, r12, TMP0
vuzp.32 Y0, X0
vuzp.32 Y1, X1
vuzp.32 Y2, X2
vuzp.32 Y3, X3
.endif
// Do the cipher rounds
mov r12, ROUND_KEYS
mov r6, NROUNDS
.Lnext_round_\@:
.if \decrypting
.if \n == 64
vld1.64 ROUND_KEY_L, [r12]
sub r12, #8
vmov ROUND_KEY_H, ROUND_KEY_L
.else
vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]
sub r12, #4
.endif
_speck_unround_128bytes \n
.else
.if \n == 64
vld1.64 ROUND_KEY_L, [r12]!
vmov ROUND_KEY_H, ROUND_KEY_L
.else
vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]!
.endif
_speck_round_128bytes \n
.endif
subs r6, r6, #1
bne .Lnext_round_\@
// Re-interleave the 'x' and 'y' elements of each block
.if \n == 64
vswp X0_L, Y0_H
vswp X1_L, Y1_H
vswp X2_L, Y2_H
vswp X3_L, Y3_H
.else
vzip.32 Y0, X0
vzip.32 Y1, X1
vzip.32 Y2, X2
vzip.32 Y3, X3
.endif
// XOR the encrypted/decrypted blocks with the tweaks we saved earlier
mov r12, sp
vld1.8 {TMP0, TMP1}, [r12:128]!
vld1.8 {TMP2, TMP3}, [r12:128]!
veor X0, TMP0
veor Y0, TMP1
veor X1, TMP2
veor Y1, TMP3
vld1.8 {TMP0, TMP1}, [r12:128]!
vld1.8 {TMP2, TMP3}, [r12:128]!
veor X2, TMP0
veor Y2, TMP1
veor X3, TMP2
veor Y3, TMP3
// Store the ciphertext in the destination buffer
vst1.8 {X0, Y0}, [DST]!
vst1.8 {X1, Y1}, [DST]!
vst1.8 {X2, Y2}, [DST]!
vst1.8 {X3, Y3}, [DST]!
// Continue if there are more 128-byte chunks remaining, else return
subs NBYTES, #128
bne .Lnext_128bytes_\@
// Store the next tweak
.if \n == 64
vst1.8 {TWEAKV}, [TWEAK]
.else
vst1.8 {TWEAKV_L}, [TWEAK]
.endif
mov sp, r7
pop {r4-r7}
bx lr
.endm
ENTRY(speck128_xts_encrypt_neon)
_speck_xts_crypt n=64, decrypting=0
ENDPROC(speck128_xts_encrypt_neon)
ENTRY(speck128_xts_decrypt_neon)
_speck_xts_crypt n=64, decrypting=1
ENDPROC(speck128_xts_decrypt_neon)
ENTRY(speck64_xts_encrypt_neon)
_speck_xts_crypt n=32, decrypting=0
ENDPROC(speck64_xts_encrypt_neon)
ENTRY(speck64_xts_decrypt_neon)
_speck_xts_crypt n=32, decrypting=1
ENDPROC(speck64_xts_decrypt_neon)


@@ -1,314 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
*
* Copyright (c) 2018 Google, Inc
*
* Note: the NIST recommendation for XTS only specifies a 128-bit block size,
* but a 64-bit version (needed for Speck64) is fairly straightforward; the math
* is just done in GF(2^64) instead of GF(2^128), with the reducing polynomial
* x^64 + x^4 + x^3 + x + 1 from the original XEX paper (Rogaway, 2004:
* "Efficient Instantiations of Tweakable Blockciphers and Refinements to Modes
* OCB and PMAC"), represented as 0x1B.
*/
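
The tweak update described above, as a stand-alone sketch (an illustrative
helper, not taken from this driver):

#include <stdint.h>

/* Multiply a GF(2^64) element by x, reducing modulo
 * x^64 + x^4 + x^3 + x + 1 (0x1B), as described above. */
static inline uint64_t gf64_mul_x(uint64_t t)
{
	return (t << 1) ^ ((t >> 63) ? 0x1B : 0);
}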
#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
#include <crypto/speck.h>
#include <crypto/xts.h>
#include <linux/kernel.h>
#include <linux/module.h>
/* The assembly functions only handle multiples of 128 bytes */
#define SPECK_NEON_CHUNK_SIZE 128
/* Speck128 */
struct speck128_xts_tfm_ctx {
struct speck128_tfm_ctx main_key;
struct speck128_tfm_ctx tweak_key;
};
asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
void *dst, const void *src,
unsigned int nbytes, void *tweak);
asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
void *dst, const void *src,
unsigned int nbytes, void *tweak);
typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
u8 *, const u8 *);
typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
const void *, unsigned int, void *);
static __always_inline int
__speck128_xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes,
speck128_crypt_one_t crypt_one,
speck128_xts_crypt_many_t crypt_many)
{
struct crypto_blkcipher *tfm = desc->tfm;
const struct speck128_xts_tfm_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct blkcipher_walk walk;
le128 tweak;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, SPECK_NEON_CHUNK_SIZE);
crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
u8 *dst = walk.dst.virt.addr;
const u8 *src = walk.src.virt.addr;
if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
unsigned int count;
count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
kernel_neon_begin();
(*crypt_many)(ctx->main_key.round_keys,
ctx->main_key.nrounds,
dst, src, count, &tweak);
kernel_neon_end();
dst += count;
src += count;
nbytes -= count;
}
/* Handle any remainder with generic code */
while (nbytes >= sizeof(tweak)) {
le128_xor((le128 *)dst, (const le128 *)src, &tweak);
(*crypt_one)(&ctx->main_key, dst, dst);
le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
gf128mul_x_ble((be128 *)&tweak, (const be128 *)&tweak);
dst += sizeof(tweak);
src += sizeof(tweak);
nbytes -= sizeof(tweak);
}
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static int speck128_xts_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return __speck128_xts_crypt(desc, dst, src, nbytes,
crypto_speck128_encrypt,
speck128_xts_encrypt_neon);
}
static int speck128_xts_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return __speck128_xts_crypt(desc, dst, src, nbytes,
crypto_speck128_decrypt,
speck128_xts_decrypt_neon);
}
static int speck128_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct speck128_xts_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
int err;
if (keylen % 2)
return -EINVAL;
keylen /= 2;
err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
if (err)
return err;
return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
}
/* Speck64 */
struct speck64_xts_tfm_ctx {
struct speck64_tfm_ctx main_key;
struct speck64_tfm_ctx tweak_key;
};
asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
void *dst, const void *src,
unsigned int nbytes, void *tweak);
asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
void *dst, const void *src,
unsigned int nbytes, void *tweak);
typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
u8 *, const u8 *);
typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
const void *, unsigned int, void *);
static __always_inline int
__speck64_xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes,
speck64_crypt_one_t crypt_one,
speck64_xts_crypt_many_t crypt_many)
{
struct crypto_blkcipher *tfm = desc->tfm;
const struct speck64_xts_tfm_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct blkcipher_walk walk;
__le64 tweak;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, SPECK_NEON_CHUNK_SIZE);
crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
u8 *dst = walk.dst.virt.addr;
const u8 *src = walk.src.virt.addr;
if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
unsigned int count;
count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
kernel_neon_begin();
(*crypt_many)(ctx->main_key.round_keys,
ctx->main_key.nrounds,
dst, src, count, &tweak);
kernel_neon_end();
dst += count;
src += count;
nbytes -= count;
}
/* Handle any remainder with generic code */
while (nbytes >= sizeof(tweak)) {
*(__le64 *)dst = *(__le64 *)src ^ tweak;
(*crypt_one)(&ctx->main_key, dst, dst);
*(__le64 *)dst ^= tweak;
tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
((tweak & cpu_to_le64(1ULL << 63)) ?
0x1B : 0));
dst += sizeof(tweak);
src += sizeof(tweak);
nbytes -= sizeof(tweak);
}
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static int speck64_xts_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
return __speck64_xts_crypt(desc, dst, src, nbytes,
crypto_speck64_encrypt,
speck64_xts_encrypt_neon);
}
static int speck64_xts_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
return __speck64_xts_crypt(desc, dst, src, nbytes,
crypto_speck64_decrypt,
speck64_xts_decrypt_neon);
}
static int speck64_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct speck64_xts_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
int err;
if (keylen % 2)
return -EINVAL;
keylen /= 2;
err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
if (err)
return err;
return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
}
static struct crypto_alg speck_algs[] = {
{
.cra_name = "xts(speck128)",
.cra_driver_name = "xts-speck128-neon",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = SPECK128_BLOCK_SIZE,
.cra_type = &crypto_blkcipher_type,
.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
.cra_alignmask = 7,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = 2 * SPECK128_128_KEY_SIZE,
.max_keysize = 2 * SPECK128_256_KEY_SIZE,
.ivsize = SPECK128_BLOCK_SIZE,
.setkey = speck128_xts_setkey,
.encrypt = speck128_xts_encrypt,
.decrypt = speck128_xts_decrypt,
}
}
}, {
.cra_name = "xts(speck64)",
.cra_driver_name = "xts-speck64-neon",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = SPECK64_BLOCK_SIZE,
.cra_type = &crypto_blkcipher_type,
.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
.cra_alignmask = 7,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = 2 * SPECK64_96_KEY_SIZE,
.max_keysize = 2 * SPECK64_128_KEY_SIZE,
.ivsize = SPECK64_BLOCK_SIZE,
.setkey = speck64_xts_setkey,
.encrypt = speck64_xts_encrypt,
.decrypt = speck64_xts_decrypt,
}
}
}
};
static int __init speck_neon_module_init(void)
{
if (!(elf_hwcap & HWCAP_NEON))
return -ENODEV;
return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
}
static void __exit speck_neon_module_exit(void)
{
crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
}
module_init(speck_neon_module_init);
module_exit(speck_neon_module_exit);
MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("xts(speck128)");
MODULE_ALIAS_CRYPTO("xts-speck128-neon");
MODULE_ALIAS_CRYPTO("xts(speck64)");
MODULE_ALIAS_CRYPTO("xts-speck64-neon");


@@ -12,6 +12,7 @@
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include <asm/memory.h>
#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
@@ -35,6 +36,9 @@ ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
disable_irq_notrace @ disable interrupts
ldr r2, [tsk, #TI_ADDR_LIMIT]
cmp r2, #TASK_SIZE
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
bne fast_work_pending
@@ -61,6 +65,9 @@ ret_fast_syscall:
UNWIND(.cantunwind )
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
disable_irq_notrace @ disable interrupts
ldr r2, [tsk, #TI_ADDR_LIMIT]
cmp r2, #TASK_SIZE
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
beq no_work_pending
@@ -93,6 +100,9 @@ ENTRY(ret_to_user)
ret_slow_syscall:
disable_irq_notrace @ disable interrupts
ENTRY(ret_to_user_from_irq)
ldr r2, [tsk, #TI_ADDR_LIMIT]
cmp r2, #TASK_SIZE
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
bne slow_work_pending


@@ -14,6 +14,7 @@
#include <linux/uaccess.h>
#include <linux/tracehook.h>
#include <linux/uprobes.h>
#include <linux/syscalls.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
@@ -634,3 +635,9 @@ struct page *get_signal_page(void)
return page;
}
/* Defer to generic check */
asmlinkage void addr_limit_check_failed(void)
{
addr_limit_user_check();
}
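For context, the generic helper these arch hooks defer to looks roughly like the sketch below (based on the upstream address-limit hardening series; names such as CHECK_DATA_CORRUPTION are from upstream and are assumptions about this backport, not part of this hunk):

static inline void addr_limit_user_check(void)
{
#ifdef TIF_FSCHECK
	if (!test_thread_flag(TIF_FSCHECK))
		return;
#endif
	/* Returning to user space with KERNEL_DS is a fatal bug. */
	if (CHECK_DATA_CORRUPTION(!segment_eq(get_fs(), USER_DS),
				  "Invalid address limit on user-mode return"))
		force_sig(SIGKILL, current);
#ifdef TIF_FSCHECK
	clear_thread_flag(TIF_FSCHECK);
#endif
}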


@@ -0,0 +1,435 @@
# CONFIG_FHANDLE is not set
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
# CONFIG_PROC_PID_CPUSET is not set
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_BPF=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SCHED_TUNE=y
CONFIG_DEFAULT_USE_ENERGY_AWARE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
CONFIG_SGETMASK_SYSCALL=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_CC_STACKPROTECTOR_STRONG=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_PCI=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PREEMPT=y
CONFIG_HZ_100=y
# CONFIG_SPARSEMEM_VMEMMAP is not set
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_ZSMALLOC=y
CONFIG_SECCOMP=y
CONFIG_PARAVIRT=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_SETEND_EMULATION=y
CONFIG_ARM64_SW_TTBR0_PAN=y
CONFIG_ARM64_LSE_ATOMICS=y
CONFIG_RANDOMIZE_BASE=y
CONFIG_CMDLINE="console=ttyAMA0"
CONFIG_CMDLINE_EXTEND=y
# CONFIG_EFI is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
CONFIG_PM_DEBUG=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPUFREQ_DT=y
CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
CONFIG_ARM_DT_BL_CPUFREQ=y
CONFIG_ARM_SCPI_CPUFREQ=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_NET_IPGRE_DEMUX=y
CONFIG_NET_IPVTI=y
CONFIG_INET_ESP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_INET_UDP_DIAG=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
# CONFIG_TCP_CONG_WESTWOOD is not set
# CONFIG_TCP_CONG_HTCP is not set
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=y
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CT_PROTO_DCCP=y
CONFIG_NF_CT_PROTO_SCTP=y
CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=y
CONFIG_NF_CONNTRACK_FTP=y
CONFIG_NF_CONNTRACK_H323=y
CONFIG_NF_CONNTRACK_IRC=y
CONFIG_NF_CONNTRACK_NETBIOS_NS=y
CONFIG_NF_CONNTRACK_PPTP=y
CONFIG_NF_CONNTRACK_SANE=y
CONFIG_NF_CONNTRACK_TFTP=y
CONFIG_NF_CT_NETLINK=y
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
CONFIG_NETFILTER_XT_MATCH_HELPER=y
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
CONFIG_NETFILTER_XT_MATCH_MAC=y
CONFIG_NETFILTER_XT_MATCH_MARK=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
CONFIG_NETFILTER_XT_MATCH_STRING=y
CONFIG_NETFILTER_XT_MATCH_TIME=y
CONFIG_NETFILTER_XT_MATCH_U32=y
CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_MATCH_ECN=y
CONFIG_IP_NF_MATCH_TTL=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_NAT=y
CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_IP_NF_TARGET_NETMAP=y
CONFIG_IP_NF_TARGET_REDIRECT=y
CONFIG_IP_NF_MANGLE=y
CONFIG_IP_NF_RAW=y
CONFIG_IP_NF_SECURITY=y
CONFIG_IP_NF_ARPTABLES=y
CONFIG_IP_NF_ARPFILTER=y
CONFIG_IP_NF_ARP_MANGLE=y
CONFIG_NF_CONNTRACK_IPV6=y
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MATCH_RPFILTER=y
CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IP6_NF_MANGLE=y
CONFIG_IP6_NF_RAW=y
CONFIG_L2TP=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
CONFIG_NET_CLS_U32=y
CONFIG_NET_CLS_BPF=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_U32=y
CONFIG_NET_CLS_ACT=y
CONFIG_CFG80211=y
# CONFIG_CFG80211_DEFAULT_PS is not set
CONFIG_MAC80211=y
# CONFIG_MAC80211_RC_MINSTREL is not set
CONFIG_RFKILL=y
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
# CONFIG_ALLOW_DEV_COREDUMP is not set
CONFIG_DEBUG_DEVRES=y
CONFIG_OF_UNITTEST=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_VIRTIO_BLK=y
CONFIG_UID_SYS_STATS=y
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_VIRTIO=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
CONFIG_DM_VERITY_AVB=y
CONFIG_NETDEVICES=y
CONFIG_NETCONSOLE=y
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TUN=y
CONFIG_VIRTIO_NET=y
# CONFIG_ETHERNET is not set
CONFIG_PHYLIB=y
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
CONFIG_PPP_MPPE=y
CONFIG_PPTP=y
CONFIG_PPPOL2TP=y
CONFIG_USB_USBNET=y
# CONFIG_USB_NET_AX8817X is not set
# CONFIG_USB_NET_AX88179_178A is not set
# CONFIG_USB_NET_CDCETHER is not set
# CONFIG_USB_NET_CDC_NCM is not set
# CONFIG_USB_NET_NET1080 is not set
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
# CONFIG_WLAN_VENDOR_ADMTEK is not set
# CONFIG_WLAN_VENDOR_ATH is not set
# CONFIG_WLAN_VENDOR_ATMEL is not set
# CONFIG_WLAN_VENDOR_BROADCOM is not set
# CONFIG_WLAN_VENDOR_CISCO is not set
# CONFIG_WLAN_VENDOR_INTEL is not set
# CONFIG_WLAN_VENDOR_INTERSIL is not set
# CONFIG_WLAN_VENDOR_MARVELL is not set
# CONFIG_WLAN_VENDOR_MEDIATEK is not set
# CONFIG_WLAN_VENDOR_RALINK is not set
# CONFIG_WLAN_VENDOR_REALTEK is not set
# CONFIG_WLAN_VENDOR_RSI is not set
# CONFIG_WLAN_VENDOR_ST is not set
# CONFIG_WLAN_VENDOR_TI is not set
# CONFIG_WLAN_VENDOR_ZYDAS is not set
CONFIG_VIRT_WIFI=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_JOYSTICK_XPAD=y
CONFIG_JOYSTICK_XPAD_FF=y
CONFIG_JOYSTICK_XPAD_LEDS=y
CONFIG_INPUT_TABLET=y
CONFIG_TABLET_USB_ACECAD=y
CONFIG_TABLET_USB_AIPTEK=y
CONFIG_TABLET_USB_GTCO=y
CONFIG_TABLET_USB_HANWANG=y
CONFIG_TABLET_USB_KBTAB=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_UINPUT=y
CONFIG_INPUT_GPIO=y
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVMEM is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=48
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
# CONFIG_HW_RANDOM_CAVIUM is not set
# CONFIG_DEVPORT is not set
# CONFIG_I2C_COMPAT is not set
# CONFIG_I2C_HELPER_AUTO is not set
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
CONFIG_CPU_THERMAL=y
CONFIG_MEDIA_SUPPORT=y
# CONFIG_DVB_TUNER_DIB0070 is not set
# CONFIG_DVB_TUNER_DIB0090 is not set
# CONFIG_VGA_ARB is not set
CONFIG_DRM=y
# CONFIG_DRM_FBDEV_EMULATION is not set
CONFIG_DRM_VIRTIO_GPU=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_HIDRAW=y
CONFIG_UHID=y
CONFIG_HID_A4TECH=y
CONFIG_HID_ACRUX=y
CONFIG_HID_ACRUX_FF=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
CONFIG_HID_CHERRY=y
CONFIG_HID_CHICONY=y
CONFIG_HID_PRODIKEYS=y
CONFIG_HID_CYPRESS=y
CONFIG_HID_DRAGONRISE=y
CONFIG_DRAGONRISE_FF=y
CONFIG_HID_EMS_FF=y
CONFIG_HID_ELECOM=y
CONFIG_HID_EZKEY=y
CONFIG_HID_HOLTEK=y
CONFIG_HID_KEYTOUCH=y
CONFIG_HID_KYE=y
CONFIG_HID_UCLOGIC=y
CONFIG_HID_WALTOP=y
CONFIG_HID_GYRATION=y
CONFIG_HID_TWINHAN=y
CONFIG_HID_KENSINGTON=y
CONFIG_HID_LCPOWER=y
CONFIG_HID_LOGITECH=y
CONFIG_HID_LOGITECH_DJ=y
CONFIG_LOGITECH_FF=y
CONFIG_LOGIRUMBLEPAD2_FF=y
CONFIG_LOGIG940_FF=y
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MONTEREY=y
CONFIG_HID_MULTITOUCH=y
CONFIG_HID_NTRIG=y
CONFIG_HID_ORTEK=y
CONFIG_HID_PANTHERLORD=y
CONFIG_PANTHERLORD_FF=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_PICOLCD=y
CONFIG_HID_PRIMAX=y
CONFIG_HID_ROCCAT=y
CONFIG_HID_SAITEK=y
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SPEEDLINK=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_GREENASIA=y
CONFIG_GREENASIA_FF=y
CONFIG_HID_SMARTJOYPLUS=y
CONFIG_SMARTJOYPLUS_FF=y
CONFIG_HID_TIVO=y
CONFIG_HID_TOPSEED=y
CONFIG_HID_THRUSTMASTER=y
CONFIG_HID_WACOM=y
CONFIG_HID_WIIMOTE=y
CONFIG_HID_ZEROPLUS=y
CONFIG_HID_ZYDACRON=y
CONFIG_USB_HIDDEV=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_GADGET=y
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_UEVENT=y
CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_MMC=y
# CONFIG_PWRSEQ_EMMC is not set
# CONFIG_PWRSEQ_SIMPLE is not set
# CONFIG_MMC_BLOCK is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
# CONFIG_RTC_SYSTOHC is not set
CONFIG_RTC_DRV_PL031=y
CONFIG_VIRTIO_PCI=y
# CONFIG_VIRTIO_PCI_LEGACY is not set
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_VSOC=y
CONFIG_ION=y
CONFIG_COMMON_CLK_SCPI=y
# CONFIG_COMMON_CLK_XGENE is not set
CONFIG_MAILBOX=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ARM_SCPI_PROTOCOL=y
# CONFIG_ARM_SCPI_POWER_DOMAIN is not set
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
# CONFIG_DNOTIFY is not set
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_SDCARD_FS=y
CONFIG_PSTORE=y
CONFIG_PSTORE_CONSOLE=y
CONFIG_PSTORE_RAM=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_ENABLE_DEFAULT_TRACERS=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_LSM_MMAP_MIN_ADDR=65536
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_CRYPTO_ADIANTUM=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_ZSTD=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_XZ_DEC=y


@@ -53,11 +53,4 @@ config CRYPTO_CRC32_ARM64
tristate "CRC32 and CRC32C using optional ARMv8 instructions"
depends on ARM64
select CRYPTO_HASH
config CRYPTO_SPECK_NEON
tristate "NEON accelerated Speck cipher algorithms"
depends on KERNEL_MODE_NEON
select CRYPTO_BLKCIPHER
select CRYPTO_GF128MUL
select CRYPTO_SPECK
endif


@@ -30,9 +30,6 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o
obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
aes-neon-blk-y := aes-glue-neon.o aes-neon.o
obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
speck-neon-y := speck-neon-core.o speck-neon-glue.o
AFLAGS_aes-ce.o := -DINTERLEAVE=4
AFLAGS_aes-neon.o := -DINTERLEAVE=4


@@ -1,352 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
*
* Copyright (c) 2018 Google, Inc
*
* Author: Eric Biggers <ebiggers@google.com>
*/
#include <linux/linkage.h>
.text
// arguments
ROUND_KEYS .req x0 // const {u64,u32} *round_keys
NROUNDS .req w1 // int nrounds
NROUNDS_X .req x1
DST .req x2 // void *dst
SRC .req x3 // const void *src
NBYTES .req w4 // unsigned int nbytes
TWEAK .req x5 // void *tweak
// registers which hold the data being encrypted/decrypted
// (underscores avoid a naming collision with ARM64 registers x0-x3)
X_0 .req v0
Y_0 .req v1
X_1 .req v2
Y_1 .req v3
X_2 .req v4
Y_2 .req v5
X_3 .req v6
Y_3 .req v7
// the round key, duplicated in all lanes
ROUND_KEY .req v8
// index vector for tbl-based 8-bit rotates
ROTATE_TABLE .req v9
ROTATE_TABLE_Q .req q9
// temporary registers
TMP0 .req v10
TMP1 .req v11
TMP2 .req v12
TMP3 .req v13
// multiplication table for updating XTS tweaks
GFMUL_TABLE .req v14
GFMUL_TABLE_Q .req q14
// next XTS tweak value(s)
TWEAKV_NEXT .req v15
// XTS tweaks for the blocks currently being encrypted/decrypted
TWEAKV0 .req v16
TWEAKV1 .req v17
TWEAKV2 .req v18
TWEAKV3 .req v19
TWEAKV4 .req v20
TWEAKV5 .req v21
TWEAKV6 .req v22
TWEAKV7 .req v23
.align 4
.Lror64_8_table:
.octa 0x080f0e0d0c0b0a090007060504030201
.Lror32_8_table:
.octa 0x0c0f0e0d080b0a090407060500030201
.Lrol64_8_table:
.octa 0x0e0d0c0b0a09080f0605040302010007
.Lrol32_8_table:
.octa 0x0e0d0c0f0a09080b0605040702010003
.Lgf128mul_table:
.octa 0x00000000000000870000000000000001
.Lgf64mul_table:
.octa 0x0000000000000000000000002d361b00
/*
* _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
*
* Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
* Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
* of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
* 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64.
*/
.macro _speck_round_128bytes n, lanes
// x = ror(x, 8)
tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
// x += y
add X_0.\lanes, X_0.\lanes, Y_0.\lanes
add X_1.\lanes, X_1.\lanes, Y_1.\lanes
add X_2.\lanes, X_2.\lanes, Y_2.\lanes
add X_3.\lanes, X_3.\lanes, Y_3.\lanes
// x ^= k
eor X_0.16b, X_0.16b, ROUND_KEY.16b
eor X_1.16b, X_1.16b, ROUND_KEY.16b
eor X_2.16b, X_2.16b, ROUND_KEY.16b
eor X_3.16b, X_3.16b, ROUND_KEY.16b
// y = rol(y, 3)
shl TMP0.\lanes, Y_0.\lanes, #3
shl TMP1.\lanes, Y_1.\lanes, #3
shl TMP2.\lanes, Y_2.\lanes, #3
shl TMP3.\lanes, Y_3.\lanes, #3
sri TMP0.\lanes, Y_0.\lanes, #(\n - 3)
sri TMP1.\lanes, Y_1.\lanes, #(\n - 3)
sri TMP2.\lanes, Y_2.\lanes, #(\n - 3)
sri TMP3.\lanes, Y_3.\lanes, #(\n - 3)
// y ^= x
eor Y_0.16b, TMP0.16b, X_0.16b
eor Y_1.16b, TMP1.16b, X_1.16b
eor Y_2.16b, TMP2.16b, X_2.16b
eor Y_3.16b, TMP3.16b, X_3.16b
.endm
/*
* _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
*
* This is the inverse of _speck_round_128bytes().
*/
.macro _speck_unround_128bytes n, lanes
// y ^= x
eor TMP0.16b, Y_0.16b, X_0.16b
eor TMP1.16b, Y_1.16b, X_1.16b
eor TMP2.16b, Y_2.16b, X_2.16b
eor TMP3.16b, Y_3.16b, X_3.16b
// y = ror(y, 3)
ushr Y_0.\lanes, TMP0.\lanes, #3
ushr Y_1.\lanes, TMP1.\lanes, #3
ushr Y_2.\lanes, TMP2.\lanes, #3
ushr Y_3.\lanes, TMP3.\lanes, #3
sli Y_0.\lanes, TMP0.\lanes, #(\n - 3)
sli Y_1.\lanes, TMP1.\lanes, #(\n - 3)
sli Y_2.\lanes, TMP2.\lanes, #(\n - 3)
sli Y_3.\lanes, TMP3.\lanes, #(\n - 3)
// x ^= k
eor X_0.16b, X_0.16b, ROUND_KEY.16b
eor X_1.16b, X_1.16b, ROUND_KEY.16b
eor X_2.16b, X_2.16b, ROUND_KEY.16b
eor X_3.16b, X_3.16b, ROUND_KEY.16b
// x -= y
sub X_0.\lanes, X_0.\lanes, Y_0.\lanes
sub X_1.\lanes, X_1.\lanes, Y_1.\lanes
sub X_2.\lanes, X_2.\lanes, Y_2.\lanes
sub X_3.\lanes, X_3.\lanes, Y_3.\lanes
// x = rol(x, 8)
tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
.endm
.macro _next_xts_tweak next, cur, tmp, n
.if \n == 64
/*
* Calculate the next tweak by multiplying the current one by x,
* modulo p(x) = x^128 + x^7 + x^2 + x + 1.
*/
sshr \tmp\().2d, \cur\().2d, #63
and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b
shl \next\().2d, \cur\().2d, #1
ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
eor \next\().16b, \next\().16b, \tmp\().16b
.else
/*
* Calculate the next two tweaks by multiplying the current ones by x^2,
* modulo p(x) = x^64 + x^4 + x^3 + x + 1.
*/
ushr \tmp\().2d, \cur\().2d, #62
shl \next\().2d, \cur\().2d, #2
tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b
eor \next\().16b, \next\().16b, \tmp\().16b
.endif
.endm
/*
* _speck_xts_crypt() - Speck-XTS encryption/decryption
*
* Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
* using Speck-XTS, specifically the variant with a block size of '2n' and round
* count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
* the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
* nonzero multiple of 128.
*/
.macro _speck_xts_crypt n, lanes, decrypting
/*
* If decrypting, modify the ROUND_KEYS parameter to point to the last
* round key rather than the first, since for decryption the round keys
* are used in reverse order.
*/
.if \decrypting
mov NROUNDS, NROUNDS /* zero the high 32 bits */
.if \n == 64
add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3
sub ROUND_KEYS, ROUND_KEYS, #8
.else
add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2
sub ROUND_KEYS, ROUND_KEYS, #4
.endif
.endif
// Load the index vector for tbl-based 8-bit rotates
.if \decrypting
ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table
.else
ldr ROTATE_TABLE_Q, .Lror\n\()_8_table
.endif
// One-time XTS preparation
.if \n == 64
// Load first tweak
ld1 {TWEAKV0.16b}, [TWEAK]
// Load GF(2^128) multiplication table
ldr GFMUL_TABLE_Q, .Lgf128mul_table
.else
// Load first tweak
ld1 {TWEAKV0.8b}, [TWEAK]
// Load GF(2^64) multiplication table
ldr GFMUL_TABLE_Q, .Lgf64mul_table
// Calculate second tweak, packing it together with the first
ushr TMP0.2d, TWEAKV0.2d, #63
shl TMP1.2d, TWEAKV0.2d, #1
tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b
eor TMP0.8b, TMP0.8b, TMP1.8b
mov TWEAKV0.d[1], TMP0.d[0]
.endif
.Lnext_128bytes_\@:
// Calculate XTS tweaks for next 128 bytes
_next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n
_next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n
_next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n
_next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n
_next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n
_next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n
_next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n
_next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n
// Load the next source blocks into {X,Y}[0-3]
ld1 {X_0.16b-Y_1.16b}, [SRC], #64
ld1 {X_2.16b-Y_3.16b}, [SRC], #64
// XOR the source blocks with their XTS tweaks
eor TMP0.16b, X_0.16b, TWEAKV0.16b
eor Y_0.16b, Y_0.16b, TWEAKV1.16b
eor TMP1.16b, X_1.16b, TWEAKV2.16b
eor Y_1.16b, Y_1.16b, TWEAKV3.16b
eor TMP2.16b, X_2.16b, TWEAKV4.16b
eor Y_2.16b, Y_2.16b, TWEAKV5.16b
eor TMP3.16b, X_3.16b, TWEAKV6.16b
eor Y_3.16b, Y_3.16b, TWEAKV7.16b
/*
* De-interleave the 'x' and 'y' elements of each block, i.e. make it so
* that the X[0-3] registers contain only the second halves of blocks,
* and the Y[0-3] registers contain only the first halves of blocks.
* (Speck uses the order (y, x) rather than the more intuitive (x, y).)
*/
uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes
uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes
uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes
uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes
uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes
uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes
uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes
uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes
// Do the cipher rounds
mov x6, ROUND_KEYS
mov w7, NROUNDS
.Lnext_round_\@:
.if \decrypting
ld1r {ROUND_KEY.\lanes}, [x6]
sub x6, x6, #( \n / 8 )
_speck_unround_128bytes \n, \lanes
.else
ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 )
_speck_round_128bytes \n, \lanes
.endif
subs w7, w7, #1
bne .Lnext_round_\@
// Re-interleave the 'x' and 'y' elements of each block
zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes
zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes
zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes
zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes
zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes
zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes
zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes
zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes
// XOR the encrypted/decrypted blocks with the tweaks calculated earlier
eor X_0.16b, TMP0.16b, TWEAKV0.16b
eor Y_0.16b, Y_0.16b, TWEAKV1.16b
eor X_1.16b, TMP1.16b, TWEAKV2.16b
eor Y_1.16b, Y_1.16b, TWEAKV3.16b
eor X_2.16b, TMP2.16b, TWEAKV4.16b
eor Y_2.16b, Y_2.16b, TWEAKV5.16b
eor X_3.16b, TMP3.16b, TWEAKV6.16b
eor Y_3.16b, Y_3.16b, TWEAKV7.16b
mov TWEAKV0.16b, TWEAKV_NEXT.16b
// Store the ciphertext in the destination buffer
st1 {X_0.16b-Y_1.16b}, [DST], #64
st1 {X_2.16b-Y_3.16b}, [DST], #64
// Continue if there are more 128-byte chunks remaining
subs NBYTES, NBYTES, #128
bne .Lnext_128bytes_\@
// Store the next tweak and return
.if \n == 64
st1 {TWEAKV_NEXT.16b}, [TWEAK]
.else
st1 {TWEAKV_NEXT.8b}, [TWEAK]
.endif
ret
.endm
ENTRY(speck128_xts_encrypt_neon)
_speck_xts_crypt n=64, lanes=2d, decrypting=0
ENDPROC(speck128_xts_encrypt_neon)
ENTRY(speck128_xts_decrypt_neon)
_speck_xts_crypt n=64, lanes=2d, decrypting=1
ENDPROC(speck128_xts_decrypt_neon)
ENTRY(speck64_xts_encrypt_neon)
_speck_xts_crypt n=32, lanes=4s, decrypting=0
ENDPROC(speck64_xts_encrypt_neon)
ENTRY(speck64_xts_decrypt_neon)
_speck_xts_crypt n=32, lanes=4s, decrypting=1
ENDPROC(speck64_xts_decrypt_neon)


@@ -1,308 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
* (64-bit version; based on the 32-bit version)
*
* Copyright (c) 2018 Google, Inc
*/
#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
#include <crypto/speck.h>
#include <crypto/xts.h>
#include <linux/kernel.h>
#include <linux/module.h>
/* The assembly functions only handle multiples of 128 bytes */
#define SPECK_NEON_CHUNK_SIZE 128
/* Speck128 */
struct speck128_xts_tfm_ctx {
struct speck128_tfm_ctx main_key;
struct speck128_tfm_ctx tweak_key;
};
asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
void *dst, const void *src,
unsigned int nbytes, void *tweak);
asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
void *dst, const void *src,
unsigned int nbytes, void *tweak);
typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
u8 *, const u8 *);
typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
const void *, unsigned int, void *);
static __always_inline int
__speck128_xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes,
speck128_crypt_one_t crypt_one,
speck128_xts_crypt_many_t crypt_many)
{
struct crypto_blkcipher *tfm = desc->tfm;
const struct speck128_xts_tfm_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct blkcipher_walk walk;
le128 tweak;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, SPECK_NEON_CHUNK_SIZE);
crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
u8 *dst = walk.dst.virt.addr;
const u8 *src = walk.src.virt.addr;
if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
unsigned int count;
count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
kernel_neon_begin();
(*crypt_many)(ctx->main_key.round_keys,
ctx->main_key.nrounds,
dst, src, count, &tweak);
kernel_neon_end();
dst += count;
src += count;
nbytes -= count;
}
/* Handle any remainder with generic code */
while (nbytes >= sizeof(tweak)) {
le128_xor((le128 *)dst, (const le128 *)src, &tweak);
(*crypt_one)(&ctx->main_key, dst, dst);
le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
gf128mul_x_ble((be128 *)&tweak, (const be128 *)&tweak);
dst += sizeof(tweak);
src += sizeof(tweak);
nbytes -= sizeof(tweak);
}
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static int speck128_xts_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return __speck128_xts_crypt(desc, dst, src, nbytes,
crypto_speck128_encrypt,
speck128_xts_encrypt_neon);
}
static int speck128_xts_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return __speck128_xts_crypt(desc, dst, src, nbytes,
crypto_speck128_decrypt,
speck128_xts_decrypt_neon);
}
static int speck128_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct speck128_xts_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
int err;
if (keylen % 2)
return -EINVAL;
keylen /= 2;
err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
if (err)
return err;
return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
}
/* Speck64 */
struct speck64_xts_tfm_ctx {
struct speck64_tfm_ctx main_key;
struct speck64_tfm_ctx tweak_key;
};
asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
void *dst, const void *src,
unsigned int nbytes, void *tweak);
asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
void *dst, const void *src,
unsigned int nbytes, void *tweak);
typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
u8 *, const u8 *);
typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
const void *, unsigned int, void *);
static __always_inline int
__speck64_xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes,
speck64_crypt_one_t crypt_one,
speck64_xts_crypt_many_t crypt_many)
{
struct crypto_blkcipher *tfm = desc->tfm;
const struct speck64_xts_tfm_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct blkcipher_walk walk;
__le64 tweak;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, SPECK_NEON_CHUNK_SIZE);
crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
u8 *dst = walk.dst.virt.addr;
const u8 *src = walk.src.virt.addr;
if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
unsigned int count;
count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
kernel_neon_begin();
(*crypt_many)(ctx->main_key.round_keys,
ctx->main_key.nrounds,
dst, src, count, &tweak);
kernel_neon_end();
dst += count;
src += count;
nbytes -= count;
}
/* Handle any remainder with generic code */
while (nbytes >= sizeof(tweak)) {
*(__le64 *)dst = *(__le64 *)src ^ tweak;
(*crypt_one)(&ctx->main_key, dst, dst);
*(__le64 *)dst ^= tweak;
tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
((tweak & cpu_to_le64(1ULL << 63)) ?
0x1B : 0));
dst += sizeof(tweak);
src += sizeof(tweak);
nbytes -= sizeof(tweak);
}
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static int speck64_xts_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
return __speck64_xts_crypt(desc, dst, src, nbytes,
crypto_speck64_encrypt,
speck64_xts_encrypt_neon);
}
static int speck64_xts_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
return __speck64_xts_crypt(desc, dst, src, nbytes,
crypto_speck64_decrypt,
speck64_xts_decrypt_neon);
}
static int speck64_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct speck64_xts_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
int err;
if (keylen % 2)
return -EINVAL;
keylen /= 2;
err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
if (err)
return err;
return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
}
static struct crypto_alg speck_algs[] = {
{
.cra_name = "xts(speck128)",
.cra_driver_name = "xts-speck128-neon",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = SPECK128_BLOCK_SIZE,
.cra_type = &crypto_blkcipher_type,
.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
.cra_alignmask = 7,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = 2 * SPECK128_128_KEY_SIZE,
.max_keysize = 2 * SPECK128_256_KEY_SIZE,
.ivsize = SPECK128_BLOCK_SIZE,
.setkey = speck128_xts_setkey,
.encrypt = speck128_xts_encrypt,
.decrypt = speck128_xts_decrypt,
}
}
}, {
.cra_name = "xts(speck64)",
.cra_driver_name = "xts-speck64-neon",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = SPECK64_BLOCK_SIZE,
.cra_type = &crypto_blkcipher_type,
.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
.cra_alignmask = 7,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = 2 * SPECK64_96_KEY_SIZE,
.max_keysize = 2 * SPECK64_128_KEY_SIZE,
.ivsize = SPECK64_BLOCK_SIZE,
.setkey = speck64_xts_setkey,
.encrypt = speck64_xts_encrypt,
.decrypt = speck64_xts_decrypt,
}
}
}
};
static int __init speck_neon_module_init(void)
{
if (!(elf_hwcap & HWCAP_ASIMD))
return -ENODEV;
return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
}
static void __exit speck_neon_module_exit(void)
{
crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
}
module_init(speck_neon_module_init);
module_exit(speck_neon_module_exit);
MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("xts(speck128)");
MODULE_ALIAS_CRYPTO("xts-speck128-neon");
MODULE_ALIAS_CRYPTO("xts(speck64)");
MODULE_ALIAS_CRYPTO("xts-speck64-neon");


@@ -34,9 +34,7 @@
#define ARM64_HAS_32BIT_EL0 13
#define ARM64_HYP_OFFSET_LOW 14
#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
#define ARM64_UNMAP_KERNEL_AT_EL0 16
#define ARM64_HARDEN_BRANCH_PREDICTOR 17
#define ARM64_SSBD 18
#define ARM64_MISMATCHED_CACHE_TYPE 19


@@ -16,8 +16,8 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
#include <asm/alternative.h>
#include <asm/stack_pointer.h>
#include <asm/alternative.h>
static inline void set_my_cpu_offset(unsigned long off)
{


@@ -109,6 +109,10 @@
#define SCTLR_EL1_CP15BEN (1 << 5)
/* id_aa64isar0 */
#define ID_AA64ISAR0_DP_SHIFT 44
#define ID_AA64ISAR0_SM4_SHIFT 40
#define ID_AA64ISAR0_SM3_SHIFT 36
#define ID_AA64ISAR0_SHA3_SHIFT 32
#define ID_AA64ISAR0_RDM_SHIFT 28
#define ID_AA64ISAR0_ATOMICS_SHIFT 20
#define ID_AA64ISAR0_CRC32_SHIFT 16


@@ -82,6 +82,7 @@ struct thread_info {
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
#define TIF_FSCHECK 4 /* Check FS is USER_DS on return */
#define TIF_NOHZ 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
@@ -104,10 +105,12 @@ struct thread_info {
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_FSCHECK (1 << TIF_FSCHECK)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
_TIF_FSCHECK)
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \


@@ -73,6 +73,9 @@ static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
/* On user-mode return, check fs is correct */
set_thread_flag(TIF_FSCHECK);
/*
* Prevent a mispredicted conditional call to set_fs from forwarding
* the wrong address limit to access_ok under speculation.


@@ -30,5 +30,10 @@
#define HWCAP_ATOMICS (1 << 8)
#define HWCAP_FPHP (1 << 9)
#define HWCAP_ASIMDHP (1 << 10)
#define HWCAP_SHA3 (1 << 17)
#define HWCAP_SM3 (1 << 18)
#define HWCAP_SM4 (1 << 19)
#define HWCAP_ASIMDDP (1 << 20)
#define HWCAP_SHA512 (1 << 21)
#endif /* _UAPI__ASM_HWCAP_H */


@@ -83,7 +83,10 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_DP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
@@ -963,8 +966,13 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),


@@ -68,6 +68,17 @@ static const char *const hwcap_str[] = {
"atomics",
"fphp",
"asimdhp",
"cpuid",
"asimdrdm",
"jscvt",
"fcma",
"lrcpc",
"dcpop",
"sha3",
"sm3",
"sm4",
"asimddp",
"sha512",
NULL
};


@@ -25,6 +25,7 @@
#include <linux/uaccess.h>
#include <linux/tracehook.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
@@ -408,7 +409,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
* Update the trace code with the current status.
*/
trace_hardirqs_off();
do {
/* Check valid user FS if needed */
addr_limit_user_check();
if (thread_flags & _TIF_NEED_RESCHED) {
schedule();
} else {


@@ -215,17 +215,16 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
put_task_stack(tsk);
}
EXPORT_SYMBOL(save_stack_trace_tsk);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
__save_stack_trace(tsk, trace, 1);
}
EXPORT_SYMBOL(save_stack_trace_tsk);
void save_stack_trace(struct stack_trace *trace)
{
__save_stack_trace(current, trace, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif


@@ -12,6 +12,7 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_MEMCG=y
@@ -51,6 +52,7 @@ CONFIG_X86_CPUID=y
CONFIG_KSM=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_ZSMALLOC=y
# CONFIG_MTRR is not set
CONFIG_HZ_100=y
CONFIG_KEXEC=y
@@ -59,7 +61,8 @@ CONFIG_PHYSICAL_START=0x200000
CONFIG_RANDOMIZE_BASE=y
CONFIG_PHYSICAL_ALIGN=0x1000000
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0 reboot=p"
CONFIG_CMDLINE="console=ttyS0 reboot=p nopti"
CONFIG_PM_AUTOSLEEP=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
@@ -92,8 +95,8 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=y
CONFIG_INET_ESP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_INET_DIAG_DESTROY=y
CONFIG_TCP_CONG_ADVANCED=y
@@ -108,6 +111,7 @@ CONFIG_INET6_AH=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_NETLABEL=y
CONFIG_NETFILTER=y
@@ -137,6 +141,7 @@ CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@@ -187,6 +192,7 @@ CONFIG_IP6_NF_RAW=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
CONFIG_NET_CLS_U32=y
CONFIG_NET_CLS_BPF=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_U32=y
CONFIG_NET_CLS_ACT=y
@@ -199,12 +205,12 @@ CONFIG_DEBUG_DEVRES=y
CONFIG_OF=y
CONFIG_OF_UNITTEST=y
# CONFIG_PNP_DEBUG_MESSAGES is not set
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_VIRTIO_BLK=y
CONFIG_UID_SYS_STATS=y
CONFIG_MEMORY_STATE_TIME=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
@@ -221,6 +227,7 @@ CONFIG_DM_ZERO=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
CONFIG_DM_ANDROID_VERITY=y
CONFIG_NETDEVICES=y
CONFIG_NETCONSOLE=y
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -257,6 +264,7 @@ CONFIG_USB_USBNET=y
# CONFIG_WLAN_VENDOR_TI is not set
# CONFIG_WLAN_VENDOR_ZYDAS is not set
CONFIG_MAC80211_HWSIM=y
CONFIG_VIRT_WIFI=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
# CONFIG_INPUT_KEYBOARD is not set
@@ -311,11 +319,11 @@ CONFIG_MEDIA_SUPPORT=y
CONFIG_DRM=y
# CONFIG_DRM_FBDEV_EMULATION is not set
CONFIG_DRM_VIRTIO_GPU=y
CONFIG_FB=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_HIDRAW=y
CONFIG_UHID=y
# CONFIG_HID_GENERIC is not set
CONFIG_HID_A4TECH=y
CONFIG_HID_ACRUX=y
CONFIG_HID_ACRUX_FF=y
@@ -379,6 +387,8 @@ CONFIG_USB_GADGET=y
CONFIG_USB_DUMMY_HCD=y
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_UEVENT=y
@@ -388,6 +398,7 @@ CONFIG_RTC_CLASS=y
CONFIG_SW_SYNC=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_STAGING=y
@@ -403,6 +414,9 @@ CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -436,11 +450,11 @@ CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_PANIC_TIMEOUT=5
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_ENABLE_DEFAULT_TRACERS=y
CONFIG_UPROBE_EVENT=y
CONFIG_IO_DELAY_NONE=y
CONFIG_DEBUG_BOOT_PARAMS=y
CONFIG_OPTIMIZE_INLINING=y
@@ -451,4 +465,14 @@ CONFIG_SECURITY_PATH=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
CONFIG_CRYPTO_RSA=y
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_ADIANTUM=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_ZSTD=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
CONFIG_X509_CERTIFICATE_PARSER=y
CONFIG_SYSTEM_TRUSTED_KEYRING=y
CONFIG_SYSTEM_TRUSTED_KEYS="verity_dev_keys.x509"

View File

@@ -10,7 +10,7 @@
*/
#include <crypto/algapi.h>
#include <crypto/chacha20.h>
#include <crypto/chacha.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -29,31 +29,31 @@ static bool chacha20_use_avx2;
static void chacha20_dosimd(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes)
{
u8 buf[CHACHA20_BLOCK_SIZE];
u8 buf[CHACHA_BLOCK_SIZE];
#ifdef CONFIG_AS_AVX2
if (chacha20_use_avx2) {
while (bytes >= CHACHA20_BLOCK_SIZE * 8) {
while (bytes >= CHACHA_BLOCK_SIZE * 8) {
chacha20_8block_xor_avx2(state, dst, src);
bytes -= CHACHA20_BLOCK_SIZE * 8;
src += CHACHA20_BLOCK_SIZE * 8;
dst += CHACHA20_BLOCK_SIZE * 8;
bytes -= CHACHA_BLOCK_SIZE * 8;
src += CHACHA_BLOCK_SIZE * 8;
dst += CHACHA_BLOCK_SIZE * 8;
state[12] += 8;
}
}
#endif
while (bytes >= CHACHA20_BLOCK_SIZE * 4) {
while (bytes >= CHACHA_BLOCK_SIZE * 4) {
chacha20_4block_xor_ssse3(state, dst, src);
bytes -= CHACHA20_BLOCK_SIZE * 4;
src += CHACHA20_BLOCK_SIZE * 4;
dst += CHACHA20_BLOCK_SIZE * 4;
bytes -= CHACHA_BLOCK_SIZE * 4;
src += CHACHA_BLOCK_SIZE * 4;
dst += CHACHA_BLOCK_SIZE * 4;
state[12] += 4;
}
while (bytes >= CHACHA20_BLOCK_SIZE) {
while (bytes >= CHACHA_BLOCK_SIZE) {
chacha20_block_xor_ssse3(state, dst, src);
bytes -= CHACHA20_BLOCK_SIZE;
src += CHACHA20_BLOCK_SIZE;
dst += CHACHA20_BLOCK_SIZE;
bytes -= CHACHA_BLOCK_SIZE;
src += CHACHA_BLOCK_SIZE;
dst += CHACHA_BLOCK_SIZE;
state[12]++;
}
if (bytes) {
@@ -70,24 +70,24 @@ static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
struct blkcipher_walk walk;
int err;
if (nbytes <= CHACHA20_BLOCK_SIZE || !may_use_simd())
return crypto_chacha20_crypt(desc, dst, src, nbytes);
if (nbytes <= CHACHA_BLOCK_SIZE || !may_use_simd())
return crypto_chacha_crypt(desc, dst, src, nbytes);
state = (u32 *)roundup((uintptr_t)state_buf, CHACHA20_STATE_ALIGN);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);
err = blkcipher_walk_virt_block(desc, &walk, CHACHA_BLOCK_SIZE);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
crypto_chacha_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
kernel_fpu_begin();
while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
while (walk.nbytes >= CHACHA_BLOCK_SIZE) {
chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
rounddown(walk.nbytes, CHACHA_BLOCK_SIZE));
err = blkcipher_walk_done(desc, &walk,
walk.nbytes % CHACHA20_BLOCK_SIZE);
walk.nbytes % CHACHA_BLOCK_SIZE);
}
if (walk.nbytes) {
@@ -108,14 +108,14 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_type = &crypto_blkcipher_type,
.cra_ctxsize = sizeof(struct chacha20_ctx),
.cra_ctxsize = sizeof(struct chacha_ctx),
.cra_alignmask = sizeof(u32) - 1,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CHACHA20_KEY_SIZE,
.max_keysize = CHACHA20_KEY_SIZE,
.ivsize = CHACHA20_IV_SIZE,
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = CHACHA_IV_SIZE,
.geniv = "seqiv",
.setkey = crypto_chacha20_setkey,
.encrypt = chacha20_simd,
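The CHACHA20_* to CHACHA_* renames in this hunk track the header move from <crypto/chacha20.h> to <crypto/chacha.h>; a sketch of the constants the new names are assumed to carry (values unchanged from the ChaCha20-specific header, with XCHACHA_IV_SIZE added for the XChaCha variants):

/* <crypto/chacha.h>, assumed excerpt */
#define CHACHA_IV_SIZE		16
#define CHACHA_KEY_SIZE		32
#define CHACHA_BLOCK_SIZE	64
#define XCHACHA_IV_SIZE		32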


@@ -83,35 +83,37 @@ static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx,
if (poly1305_use_avx2 && srclen >= POLY1305_BLOCK_SIZE * 4) {
if (unlikely(!sctx->wset)) {
if (!sctx->uset) {
memcpy(sctx->u, dctx->r, sizeof(sctx->u));
poly1305_simd_mult(sctx->u, dctx->r);
memcpy(sctx->u, dctx->r.r, sizeof(sctx->u));
poly1305_simd_mult(sctx->u, dctx->r.r);
sctx->uset = true;
}
memcpy(sctx->u + 5, sctx->u, sizeof(sctx->u));
poly1305_simd_mult(sctx->u + 5, dctx->r);
poly1305_simd_mult(sctx->u + 5, dctx->r.r);
memcpy(sctx->u + 10, sctx->u + 5, sizeof(sctx->u));
poly1305_simd_mult(sctx->u + 10, dctx->r);
poly1305_simd_mult(sctx->u + 10, dctx->r.r);
sctx->wset = true;
}
blocks = srclen / (POLY1305_BLOCK_SIZE * 4);
poly1305_4block_avx2(dctx->h, src, dctx->r, blocks, sctx->u);
poly1305_4block_avx2(dctx->h.h, src, dctx->r.r, blocks,
sctx->u);
src += POLY1305_BLOCK_SIZE * 4 * blocks;
srclen -= POLY1305_BLOCK_SIZE * 4 * blocks;
}
#endif
if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) {
if (unlikely(!sctx->uset)) {
memcpy(sctx->u, dctx->r, sizeof(sctx->u));
poly1305_simd_mult(sctx->u, dctx->r);
memcpy(sctx->u, dctx->r.r, sizeof(sctx->u));
poly1305_simd_mult(sctx->u, dctx->r.r);
sctx->uset = true;
}
blocks = srclen / (POLY1305_BLOCK_SIZE * 2);
poly1305_2block_sse2(dctx->h, src, dctx->r, blocks, sctx->u);
poly1305_2block_sse2(dctx->h.h, src, dctx->r.r, blocks,
sctx->u);
src += POLY1305_BLOCK_SIZE * 2 * blocks;
srclen -= POLY1305_BLOCK_SIZE * 2 * blocks;
}
if (srclen >= POLY1305_BLOCK_SIZE) {
poly1305_block_sse2(dctx->h, src, dctx->r, 1);
poly1305_block_sse2(dctx->h.h, src, dctx->r.r, 1);
srclen -= POLY1305_BLOCK_SIZE;
}
return srclen;
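The dctx->r to dctx->r.r and dctx->h to dctx->h.h rewrites above imply the raw limb arrays moved into small wrapper structs; a sketch of the assumed shape (inferred from this hunk, the definitions themselves are not part of it):

/* Assumed definitions behind the .r/.h member accesses above. */
struct poly1305_key {
	u32 r[5];	/* key limbs, base 2^26 */
};

struct poly1305_state {
	u32 h[5];	/* accumulator limbs, base 2^26 */
};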


@@ -22,6 +22,7 @@
#include <linux/user-return-notifier.h>
#include <linux/nospec.h>
#include <linux/uprobes.h>
#include <linux/syscalls.h>
#include <asm/desc.h>
#include <asm/traps.h>
@@ -180,6 +181,8 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
struct thread_info *ti = current_thread_info();
u32 cached_flags;
addr_limit_user_check();
if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
local_irq_disable();


@@ -171,7 +171,8 @@ quiet_cmd_vdso = VDSO $@
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
$(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
$(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS) \
$(filter --target=% --gcc-toolchain=%,$(KBUILD_CFLAGS))
GCOV_PROFILE := n
#

View File

@@ -101,6 +101,7 @@ struct thread_info {
#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
#define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
#define TIF_X32 30 /* 32-bit native x86-64 binary */
#define TIF_FSCHECK 31 /* Check FS is USER_DS on return */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -124,6 +125,7 @@ struct thread_info {
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_ADDR32 (1 << TIF_ADDR32)
#define _TIF_X32 (1 << TIF_X32)
#define _TIF_FSCHECK (1 << TIF_FSCHECK)
/*
* work to do in syscall_trace_enter(). Also includes TIF_NOHZ for
@@ -137,7 +139,7 @@ struct thread_info {
/* work to do on any return to user space */
#define _TIF_ALLWORK_MASK \
((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
_TIF_NOHZ)
_TIF_NOHZ | _TIF_FSCHECK)
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \


@@ -8,6 +8,7 @@
#include <linux/kasan-checks.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
@@ -31,7 +32,12 @@
#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.addr_limit)
#define set_fs(x) (current->thread.addr_limit = (x))
static inline void set_fs(mm_segment_t fs)
{
current->thread.addr_limit = fs;
/* On user-mode return, check fs is correct */
set_thread_flag(TIF_FSCHECK);
}
#define segment_eq(a, b) ((a).seg == (b).seg)


@@ -0,0 +1,16 @@
ARCH=arm64
BRANCH=android-4.9
CLANG_TRIPLE=aarch64-linux-gnu-
CROSS_COMPILE=aarch64-linux-androidkernel-
DEFCONFIG=cuttlefish_defconfig
EXTRA_CMDS=''
KERNEL_DIR=common
POST_DEFCONFIG_CMDS="check_defconfig"
CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r346389b/bin
LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin
FILES="
arch/arm64/boot/Image.gz
vmlinux
System.map
"
STOP_SHIP_TRACEPRINTK=1


@@ -6,10 +6,11 @@ DEFCONFIG=x86_64_cuttlefish_defconfig
EXTRA_CMDS=''
KERNEL_DIR=common
POST_DEFCONFIG_CMDS="check_defconfig"
CLANG_PREBUILT_BIN=prebuilts/clang/host/linux-x86/clang-4630689/bin
CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r346389b/bin
LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin
FILES="
arch/x86/boot/bzImage
vmlinux
System.map
"
STOP_SHIP_TRACEPRINTK=1


@@ -10,3 +10,4 @@ arch/arm/boot/zImage
vmlinux
System.map
"
STOP_SHIP_TRACEPRINTK=1


@@ -10,3 +10,4 @@ arch/arm64/boot/Image
vmlinux
System.map
"
STOP_SHIP_TRACEPRINTK=1


@@ -9,3 +9,4 @@ FILES="
vmlinux
System.map
"
STOP_SHIP_TRACEPRINTK=1


@@ -9,3 +9,4 @@ FILES="
vmlinux
System.map
"
STOP_SHIP_TRACEPRINTK=1


@@ -10,3 +10,4 @@ arch/x86/boot/bzImage
vmlinux
System.map
"
STOP_SHIP_TRACEPRINTK=1


@@ -10,3 +10,4 @@ arch/x86/boot/bzImage
vmlinux
System.map
"
STOP_SHIP_TRACEPRINTK=1


@@ -241,5 +241,46 @@ error:
return ret;
}
EXPORT_SYMBOL_GPL(verify_pkcs7_signature);
#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
/**
* verify_signature_one - Verify a signature with keys from given keyring
* @sig: The signature to be verified
* @trusted_keys: Trusted keys to use (NULL for builtin trusted keys only,
* (void *)1UL for all trusted keys).
* @keyid: key description (not partial)
*/
int verify_signature_one(const struct public_key_signature *sig,
struct key *trusted_keys, const char *keyid)
{
key_ref_t ref;
struct key *key;
int ret;
if (!sig)
return -EBADMSG;
if (!trusted_keys) {
trusted_keys = builtin_trusted_keys;
} else if (trusted_keys == (void *)1UL) {
#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
trusted_keys = secondary_trusted_keys;
#else
trusted_keys = builtin_trusted_keys;
#endif
}
ref = keyring_search(make_key_ref(trusted_keys, 1),
&key_type_asymmetric, keyid);
if (IS_ERR(ref)) {
pr_err("Asymmetric key (%s) not found in keyring(%s)\n",
keyid, trusted_keys->description);
return -ENOKEY;
}
key = key_ref_to_ptr(ref);
ret = verify_signature(key, sig);
key_put(key);
return ret;
}
EXPORT_SYMBOL_GPL(verify_signature_one);
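A hypothetical call site, to illustrate the keyring-selection convention documented above (the key description string is made up):

/* NULL: builtin keys only; (void *)1UL: builtin plus secondary keyring. */
err = verify_signature_one(sig, (void *)1UL, "Example module signing key");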


@@ -382,6 +382,34 @@ config CRYPTO_KEYWRAP
Support for key wrapping (NIST SP800-38F / RFC3394) without
padding.
config CRYPTO_NHPOLY1305
tristate
select CRYPTO_HASH
select CRYPTO_POLY1305
config CRYPTO_ADIANTUM
tristate "Adiantum support"
select CRYPTO_CHACHA20
select CRYPTO_POLY1305
select CRYPTO_NHPOLY1305
help
Adiantum is a tweakable, length-preserving encryption mode
designed for fast and secure disk encryption, especially on
CPUs without dedicated crypto instructions. It encrypts
each sector using the XChaCha12 stream cipher, two passes of
an ε-almost-∆-universal hash function, and an invocation of
the AES-256 block cipher on a single 16-byte block. On CPUs
without AES instructions, Adiantum is much faster than
AES-XTS.
Adiantum's security is provably reducible to that of its
underlying stream and block ciphers, subject to a security
bound. Unlike XTS, Adiantum is a true wide-block encryption
mode, so it actually provides an even stronger notion of
security than XTS, subject to the security bound.
If unsure, say N.
comment "Hash modes"
config CRYPTO_CMAC
@@ -1311,18 +1339,26 @@ config CRYPTO_SALSA20_X86_64
Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>
config CRYPTO_CHACHA20
tristate "ChaCha20 cipher algorithm"
tristate "ChaCha stream cipher algorithms"
select CRYPTO_BLKCIPHER
help
ChaCha20 cipher algorithm, RFC7539.
The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms.
ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J.
Bernstein and further specified in RFC7539 for use in IETF protocols.
This is the portable C implementation of ChaCha20.
See also:
This is the portable C implementation of ChaCha20. See also:
<http://cr.yp.to/chacha/chacha-20080128.pdf>
XChaCha20 is the application of the XSalsa20 construction to ChaCha20
rather than to Salsa20. XChaCha20 extends ChaCha20's nonce length
from 64 bits (or 96 bits using the RFC7539 convention) to 192 bits,
while provably retaining ChaCha20's security. See also:
<https://cr.yp.to/snuffle/xsalsa-20081128.pdf>
XChaCha12 is XChaCha20 reduced to 12 rounds, with correspondingly
reduced security margin but increased performance. It can be needed
in some performance-sensitive scenarios.
config CRYPTO_CHACHA20_X86_64
tristate "ChaCha20 cipher algorithm (x86_64/SSSE3/AVX2)"
depends on X86 && 64BIT
@@ -1454,20 +1490,6 @@ config CRYPTO_SERPENT_AVX2_X86_64
See also:
<http://www.cl.cam.ac.uk/~rja14/serpent.html>
config CRYPTO_SPECK
tristate "Speck cipher algorithm"
select CRYPTO_ALGAPI
help
Speck is a lightweight block cipher that is tuned for optimal
performance in software (rather than hardware).
Speck may not be as secure as AES, and should only be used on systems
where AES is not fast enough.
See also: <https://eprint.iacr.org/2013/404.pdf>
If unsure, say N.
config CRYPTO_TEA
tristate "TEA, XTEA and XETA cipher algorithms"
select CRYPTO_ALGAPI
@@ -1634,6 +1656,15 @@ config CRYPTO_LZ4HC
help
This is the LZ4 high compression mode algorithm.
config CRYPTO_ZSTD
tristate "Zstd compression algorithm"
select CRYPTO_ALGAPI
select CRYPTO_ACOMP2
select ZSTD_COMPRESS
select ZSTD_DECOMPRESS
help
This is the zstd algorithm.
comment "Random Number Generation"
config CRYPTO_ANSI_CPRNG
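To make the new CRYPTO_ADIANTUM entry concrete: once enabled, a kernel-side user could instantiate the mode roughly as below (a minimal sketch, assuming the "adiantum(xchacha12,aes)" template spelling registered by crypto/adiantum.c; error handling trimmed):

#include <crypto/skcipher.h>
#include <linux/err.h>

static int adiantum_smoke_test(void)
{
	static const u8 key[32];	/* Adiantum takes one 32-byte key */
	struct crypto_skcipher *tfm;
	int err;

	/* Ask the crypto API to instantiate Adiantum over XChaCha12 + AES. */
	tfm = crypto_alloc_skcipher("adiantum(xchacha12,aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Subkeys (stream, block, hash) are derived internally from this key. */
	err = crypto_skcipher_setkey(tfm, key, sizeof(key));

	crypto_free_skcipher(tfm);
	return err;
}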


@@ -85,6 +85,8 @@ obj-$(CONFIG_CRYPTO_LRW) += lrw.o
obj-$(CONFIG_CRYPTO_XTS) += xts.o
obj-$(CONFIG_CRYPTO_CTR) += ctr.o
obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o
obj-$(CONFIG_CRYPTO_ADIANTUM) += adiantum.o
obj-$(CONFIG_CRYPTO_NHPOLY1305) += nhpoly1305.o
obj-$(CONFIG_CRYPTO_GCM) += gcm.o
obj-$(CONFIG_CRYPTO_CCM) += ccm.o
obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o
@@ -109,9 +111,8 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
obj-$(CONFIG_CRYPTO_SPECK) += speck.o
obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o
obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
@@ -136,6 +137,7 @@ obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
#
# generic algorithms and the async_tx api

crypto/adiantum.c (new file, 658 lines)

@@ -0,0 +1,658 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Adiantum length-preserving encryption mode
*
* Copyright 2018 Google LLC
*/
/*
* Adiantum is a tweakable, length-preserving encryption mode designed for fast
* and secure disk encryption, especially on CPUs without dedicated crypto
* instructions. Adiantum encrypts each sector using the XChaCha12 stream
* cipher, two passes of an ε-almost-∆-universal (ε-∆U) hash function based on
* NH and Poly1305, and an invocation of the AES-256 block cipher on a single
* 16-byte block. See the paper for details:
*
* Adiantum: length-preserving encryption for entry-level processors
* (https://eprint.iacr.org/2018/720.pdf)
*
* For flexibility, this implementation also allows other ciphers:
*
* - Stream cipher: XChaCha12 or XChaCha20
* - Block cipher: any with a 128-bit block size and 256-bit key
*
* This implementation doesn't currently allow other ε-∆U hash functions, i.e.
* HPolyC is not supported. This is because Adiantum is ~20% faster than HPolyC
* but still provably as secure, and also the ε-∆U hash function of HBSH is
* formally defined to take two inputs (tweak, message) which makes it difficult
* to wrap with the crypto_shash API. Rather, some details need to be handled
* here. Nevertheless, if needed in the future, support for other ε-∆U hash
* functions could be added here.
*/
#include <crypto/b128ops.h>
#include <crypto/chacha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/nhpoly1305.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include "internal.h"
/*
* Size of the right-hand block of input data, in bytes; this is also the
* block cipher's block size and the size of the hash function's output.
*/
#define BLOCKCIPHER_BLOCK_SIZE 16
/* Size of the block cipher key (K_E) in bytes */
#define BLOCKCIPHER_KEY_SIZE 32
/* Size of the hash key (K_H) in bytes */
#define HASH_KEY_SIZE (POLY1305_BLOCK_SIZE + NHPOLY1305_KEY_SIZE)
/*
* The specification allows variable-length tweaks, but Linux's crypto API
* currently only allows algorithms to support a single length. The "natural"
* tweak length for Adiantum is 16, since that fits into one Poly1305 block for
* the best performance. But longer tweaks are useful for fscrypt, to avoid
* needing to derive per-file keys. So instead we use two blocks, or 32 bytes.
*/
#define TWEAK_SIZE 32
struct adiantum_instance_ctx {
struct crypto_skcipher_spawn streamcipher_spawn;
struct crypto_spawn blockcipher_spawn;
struct crypto_shash_spawn hash_spawn;
};
struct adiantum_tfm_ctx {
struct crypto_skcipher *streamcipher;
struct crypto_cipher *blockcipher;
struct crypto_shash *hash;
struct poly1305_key header_hash_key;
};
struct adiantum_request_ctx {
/*
* Buffer for right-hand block of data, i.e.
*
* P_L => P_M => C_M => C_R when encrypting, or
* C_R => C_M => P_M => P_L when decrypting.
*
* Also used to build the IV for the stream cipher.
*/
union {
u8 bytes[XCHACHA_IV_SIZE];
__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
le128 bignum; /* interpret as element of Z/(2^{128}Z) */
} rbuf;
bool enc; /* true if encrypting, false if decrypting */
/*
* The result of the Poly1305 ε-∆U hash function applied to
* (message length, tweak).
*/
le128 header_hash;
/* Sub-requests, must be last */
union {
struct shash_desc hash_desc;
struct skcipher_request streamcipher_req;
} u;
};
/*
* Given the XChaCha stream key K_S, derive the block cipher key K_E and the
* hash key K_H as follows:
*
* K_E || K_H || ... = XChaCha(key=K_S, nonce=1||0^191)
*
* Note that this denotes using bits from the XChaCha keystream, which here we
* get indirectly by encrypting a buffer containing all 0's.
*/
static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct {
u8 iv[XCHACHA_IV_SIZE];
u8 derived_keys[BLOCKCIPHER_KEY_SIZE + HASH_KEY_SIZE];
struct scatterlist sg;
struct crypto_wait wait;
struct skcipher_request req; /* must be last */
} *data;
u8 *keyp;
int err;
/* Set the stream cipher key (K_S) */
crypto_skcipher_clear_flags(tctx->streamcipher, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(tctx->streamcipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(tctx->streamcipher, key, keylen);
crypto_skcipher_set_flags(tfm,
crypto_skcipher_get_flags(tctx->streamcipher) &
CRYPTO_TFM_RES_MASK);
if (err)
return err;
/* Derive the subkeys */
data = kzalloc(sizeof(*data) +
crypto_skcipher_reqsize(tctx->streamcipher), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->iv[0] = 1;
sg_init_one(&data->sg, data->derived_keys, sizeof(data->derived_keys));
crypto_init_wait(&data->wait);
skcipher_request_set_tfm(&data->req, tctx->streamcipher);
skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &data->wait);
skcipher_request_set_crypt(&data->req, &data->sg, &data->sg,
sizeof(data->derived_keys), data->iv);
err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), &data->wait);
if (err)
goto out;
keyp = data->derived_keys;
/* Set the block cipher key (K_E) */
crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(tctx->blockcipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(tctx->blockcipher, keyp,
BLOCKCIPHER_KEY_SIZE);
crypto_skcipher_set_flags(tfm,
crypto_cipher_get_flags(tctx->blockcipher) &
CRYPTO_TFM_RES_MASK);
if (err)
goto out;
keyp += BLOCKCIPHER_KEY_SIZE;
/* Set the hash key (K_H) */
poly1305_core_setkey(&tctx->header_hash_key, keyp);
keyp += POLY1305_BLOCK_SIZE;
crypto_shash_clear_flags(tctx->hash, CRYPTO_TFM_REQ_MASK);
crypto_shash_set_flags(tctx->hash, crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_shash_setkey(tctx->hash, keyp, NHPOLY1305_KEY_SIZE);
crypto_skcipher_set_flags(tfm, crypto_shash_get_flags(tctx->hash) &
CRYPTO_TFM_RES_MASK);
keyp += NHPOLY1305_KEY_SIZE;
WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]);
out:
kzfree(data);
return err;
}
/* Addition in Z/(2^{128}Z) */
static inline void le128_add(le128 *r, const le128 *v1, const le128 *v2)
{
u64 x = le64_to_cpu(v1->b);
u64 y = le64_to_cpu(v2->b);
r->b = cpu_to_le64(x + y);
r->a = cpu_to_le64(le64_to_cpu(v1->a) + le64_to_cpu(v2->a) +
(x + y < x));
}
/* Subtraction in Z/(2^{128}Z) */
static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
{
u64 x = le64_to_cpu(v1->b);
u64 y = le64_to_cpu(v2->b);
r->b = cpu_to_le64(x - y);
r->a = cpu_to_le64(le64_to_cpu(v1->a) - le64_to_cpu(v2->a) -
(x - y > x));
}
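/*
 * In le128_add(), the unsigned comparison (x + y < x) is 1 exactly when the
 * low 64-bit addition wrapped around, i.e. it recovers the carry into the
 * high half; le128_sub() uses (x - y > x) the same way to recover the borrow.
 */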
/*
* Apply the Poly1305 ε-∆U hash function to (message length, tweak) and save the
* result to rctx->header_hash.
*
* This value is reused in both the first and second hash steps. Specifically,
* it's added to the result of an independently keyed ε-∆U hash function (for
* equal length inputs only) taken over the message. This gives the overall
* Adiantum hash of the (tweak, message) pair.
*/
static void adiantum_hash_header(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
struct {
__le64 message_bits;
__le64 padding;
} header = {
.message_bits = cpu_to_le64((u64)bulk_len * 8)
};
struct poly1305_state state;
poly1305_core_init(&state);
BUILD_BUG_ON(sizeof(header) % POLY1305_BLOCK_SIZE != 0);
poly1305_core_blocks(&state, &tctx->header_hash_key,
&header, sizeof(header) / POLY1305_BLOCK_SIZE);
BUILD_BUG_ON(TWEAK_SIZE % POLY1305_BLOCK_SIZE != 0);
poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
TWEAK_SIZE / POLY1305_BLOCK_SIZE);
poly1305_core_emit(&state, &rctx->header_hash);
}
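/*
 * Note that the header hashed above is exactly three Poly1305 blocks: one
 * 16-byte block holding the bulk length in bits (zero-padded), followed by
 * the two blocks of the 32-byte tweak, as the BUILD_BUG_ON()s verify.
 */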
/* Hash the left-hand block (the "bulk") of the message using NHPoly1305 */
static int adiantum_hash_message(struct skcipher_request *req,
struct scatterlist *sgl, le128 *digest)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
struct shash_desc *hash_desc = &rctx->u.hash_desc;
struct sg_mapping_iter miter;
unsigned int i, n;
int err;
hash_desc->tfm = tctx->hash;
hash_desc->flags = 0;
err = crypto_shash_init(hash_desc);
if (err)
return err;
sg_miter_start(&miter, sgl, sg_nents(sgl),
SG_MITER_FROM_SG | SG_MITER_ATOMIC);
for (i = 0; i < bulk_len; i += n) {
sg_miter_next(&miter);
n = min_t(unsigned int, miter.length, bulk_len - i);
err = crypto_shash_update(hash_desc, miter.addr, n);
if (err)
break;
}
sg_miter_stop(&miter);
if (err)
return err;
return crypto_shash_final(hash_desc, (u8 *)digest);
}
/* Continue Adiantum encryption/decryption after the stream cipher step */
static int adiantum_finish(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
le128 digest;
int err;
/* If decrypting, decrypt C_M with the block cipher to get P_M */
if (!rctx->enc)
crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
rctx->rbuf.bytes);
/*
* Second hash step
* enc: C_R = C_M - H_{K_H}(T, C_L)
* dec: P_R = P_M - H_{K_H}(T, P_L)
*/
err = adiantum_hash_message(req, req->dst, &digest);
if (err)
return err;
le128_add(&digest, &digest, &rctx->header_hash);
le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->dst,
bulk_len, BLOCKCIPHER_BLOCK_SIZE, 1);
return 0;
}
static void adiantum_streamcipher_done(struct crypto_async_request *areq,
int err)
{
struct skcipher_request *req = areq->data;
if (!err)
err = adiantum_finish(req);
skcipher_request_complete(req, err);
}
static int adiantum_crypt(struct skcipher_request *req, bool enc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
unsigned int stream_len;
le128 digest;
int err;
if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
return -EINVAL;
rctx->enc = enc;
/*
* First hash step
* enc: P_M = P_R + H_{K_H}(T, P_L)
* dec: C_M = C_R + H_{K_H}(T, C_L)
*/
adiantum_hash_header(req);
err = adiantum_hash_message(req, req->src, &digest);
if (err)
return err;
le128_add(&digest, &digest, &rctx->header_hash);
scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->src,
bulk_len, BLOCKCIPHER_BLOCK_SIZE, 0);
le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
/* If encrypting, encrypt P_M with the block cipher to get C_M */
if (enc)
crypto_cipher_encrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
rctx->rbuf.bytes);
/* Initialize the rest of the XChaCha IV (first part is C_M) */
BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
BUILD_BUG_ON(XCHACHA_IV_SIZE != 32); /* nonce || stream position */
rctx->rbuf.words[4] = cpu_to_le32(1);
rctx->rbuf.words[5] = 0;
rctx->rbuf.words[6] = 0;
rctx->rbuf.words[7] = 0;
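	/*
	 * rctx->rbuf now holds the full 32-byte XChaCha IV: bytes 0-15 are
	 * C_M, bytes 16-23 complete the 192-bit nonce with le32(1) || le32(0),
	 * and bytes 24-31 are the starting stream position (all zero).
	 */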
/*
* XChaCha needs to be done on all the data except the last 16 bytes;
* for disk encryption that usually means 4080 or 496 bytes. But ChaCha
* implementations tend to be most efficient when passed a whole number
* of 64-byte ChaCha blocks, or sometimes even a multiple of 256 bytes.
* And here it doesn't matter whether the last 16 bytes are written to,
* as the second hash step will overwrite them. Thus, round the XChaCha
* length up to the next 64-byte boundary if possible.
*/
stream_len = bulk_len;
if (round_up(stream_len, CHACHA_BLOCK_SIZE) <= req->cryptlen)
stream_len = round_up(stream_len, CHACHA_BLOCK_SIZE);
skcipher_request_set_tfm(&rctx->u.streamcipher_req, tctx->streamcipher);
skcipher_request_set_crypt(&rctx->u.streamcipher_req, req->src,
req->dst, stream_len, &rctx->rbuf);
skcipher_request_set_callback(&rctx->u.streamcipher_req,
req->base.flags,
adiantum_streamcipher_done, req);
return crypto_skcipher_encrypt(&rctx->u.streamcipher_req) ?:
adiantum_finish(req);
}
static int adiantum_encrypt(struct skcipher_request *req)
{
return adiantum_crypt(req, true);
}
static int adiantum_decrypt(struct skcipher_request *req)
{
return adiantum_crypt(req, false);
}
static int adiantum_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *streamcipher;
struct crypto_cipher *blockcipher;
struct crypto_shash *hash;
unsigned int subreq_size;
int err;
streamcipher = crypto_spawn_skcipher(&ictx->streamcipher_spawn);
if (IS_ERR(streamcipher))
return PTR_ERR(streamcipher);
blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn);
if (IS_ERR(blockcipher)) {
err = PTR_ERR(blockcipher);
goto err_free_streamcipher;
}
hash = crypto_spawn_shash(&ictx->hash_spawn);
if (IS_ERR(hash)) {
err = PTR_ERR(hash);
goto err_free_blockcipher;
}
tctx->streamcipher = streamcipher;
tctx->blockcipher = blockcipher;
tctx->hash = hash;
BUILD_BUG_ON(offsetofend(struct adiantum_request_ctx, u) !=
sizeof(struct adiantum_request_ctx));
subreq_size = max(FIELD_SIZEOF(struct adiantum_request_ctx,
u.hash_desc) +
crypto_shash_descsize(hash),
FIELD_SIZEOF(struct adiantum_request_ctx,
u.streamcipher_req) +
crypto_skcipher_reqsize(streamcipher));
crypto_skcipher_set_reqsize(tfm,
offsetof(struct adiantum_request_ctx, u) +
subreq_size);
return 0;
err_free_blockcipher:
crypto_free_cipher(blockcipher);
err_free_streamcipher:
crypto_free_skcipher(streamcipher);
return err;
}
static void adiantum_exit_tfm(struct crypto_skcipher *tfm)
{
struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
crypto_free_skcipher(tctx->streamcipher);
crypto_free_cipher(tctx->blockcipher);
crypto_free_shash(tctx->hash);
}
static void adiantum_free_instance(struct skcipher_instance *inst)
{
struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
crypto_drop_skcipher(&ictx->streamcipher_spawn);
crypto_drop_spawn(&ictx->blockcipher_spawn);
crypto_drop_shash(&ictx->hash_spawn);
kfree(inst);
}
/*
* Check for a supported set of inner algorithms.
* See the comment at the beginning of this file.
*/
static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg,
struct crypto_alg *blockcipher_alg,
struct shash_alg *hash_alg)
{
if (strcmp(streamcipher_alg->base.cra_name, "xchacha12") != 0 &&
strcmp(streamcipher_alg->base.cra_name, "xchacha20") != 0)
return false;
if (blockcipher_alg->cra_cipher.cia_min_keysize > BLOCKCIPHER_KEY_SIZE ||
blockcipher_alg->cra_cipher.cia_max_keysize < BLOCKCIPHER_KEY_SIZE)
return false;
if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
return false;
if (strcmp(hash_alg->base.cra_name, "nhpoly1305") != 0)
return false;
return true;
}
static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;
const char *streamcipher_name;
const char *blockcipher_name;
const char *nhpoly1305_name;
struct skcipher_instance *inst;
struct adiantum_instance_ctx *ictx;
struct skcipher_alg *streamcipher_alg;
struct crypto_alg *blockcipher_alg;
struct crypto_alg *_hash_alg;
struct shash_alg *hash_alg;
int err;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return PTR_ERR(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
return -EINVAL;
streamcipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(streamcipher_name))
return PTR_ERR(streamcipher_name);
blockcipher_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(blockcipher_name))
return PTR_ERR(blockcipher_name);
nhpoly1305_name = crypto_attr_alg_name(tb[3]);
if (nhpoly1305_name == ERR_PTR(-ENOENT))
nhpoly1305_name = "nhpoly1305";
if (IS_ERR(nhpoly1305_name))
return PTR_ERR(nhpoly1305_name);
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ictx = skcipher_instance_ctx(inst);
/* Stream cipher, e.g. "xchacha12" */
err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name,
0, crypto_requires_sync(algt->type,
algt->mask));
if (err)
goto out_free_inst;
streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
/* Block cipher, e.g. "aes" */
err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name,
CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK);
if (err)
goto out_drop_streamcipher;
blockcipher_alg = ictx->blockcipher_spawn.alg;
/* NHPoly1305 ε-∆U hash function */
_hash_alg = crypto_alg_mod_lookup(nhpoly1305_name,
CRYPTO_ALG_TYPE_SHASH,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(_hash_alg)) {
err = PTR_ERR(_hash_alg);
goto out_drop_blockcipher;
}
hash_alg = __crypto_shash_alg(_hash_alg);
err = crypto_init_shash_spawn(&ictx->hash_spawn, hash_alg,
skcipher_crypto_instance(inst));
if (err) {
crypto_mod_put(_hash_alg);
goto out_drop_blockcipher;
}
/* Check the set of algorithms */
if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
hash_alg)) {
pr_warn("Unsupported Adiantum instantiation: (%s,%s,%s)\n",
streamcipher_alg->base.cra_name,
blockcipher_alg->cra_name, hash_alg->base.cra_name);
err = -EINVAL;
goto out_drop_hash;
}
/* Instance fields */
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"adiantum(%s,%s)", streamcipher_alg->base.cra_name,
blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
goto out_drop_hash;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"adiantum(%s,%s,%s)",
streamcipher_alg->base.cra_driver_name,
blockcipher_alg->cra_driver_name,
hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto out_drop_hash;
inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask |
hash_alg->base.cra_alignmask;
/*
* The block cipher is only invoked once per message, so for long
* messages (e.g. sectors for disk encryption) its performance doesn't
* matter as much as that of the stream cipher and hash function. Thus,
* weigh the block cipher's ->cra_priority less.
*/
inst->alg.base.cra_priority = (4 * streamcipher_alg->base.cra_priority +
2 * hash_alg->base.cra_priority +
blockcipher_alg->cra_priority) / 7;
inst->alg.setkey = adiantum_setkey;
inst->alg.encrypt = adiantum_encrypt;
inst->alg.decrypt = adiantum_decrypt;
inst->alg.init = adiantum_init_tfm;
inst->alg.exit = adiantum_exit_tfm;
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(streamcipher_alg);
inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(streamcipher_alg);
inst->alg.ivsize = TWEAK_SIZE;
inst->free = adiantum_free_instance;
err = skcipher_register_instance(tmpl, inst);
if (err)
goto out_drop_hash;
return 0;
out_drop_hash:
crypto_drop_shash(&ictx->hash_spawn);
out_drop_blockcipher:
crypto_drop_spawn(&ictx->blockcipher_spawn);
out_drop_streamcipher:
crypto_drop_skcipher(&ictx->streamcipher_spawn);
out_free_inst:
kfree(inst);
return err;
}
/* adiantum(streamcipher_name, blockcipher_name [, nhpoly1305_name]) */
static struct crypto_template adiantum_tmpl = {
.name = "adiantum",
.create = adiantum_create,
.module = THIS_MODULE,
};
static int __init adiantum_module_init(void)
{
return crypto_register_template(&adiantum_tmpl);
}
static void __exit adiantum_module_exit(void)
{
crypto_unregister_template(&adiantum_tmpl);
}
module_init(adiantum_module_init);
module_exit(adiantum_module_exit);
MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("adiantum");

crypto/aes_generic.c

@@ -62,7 +62,8 @@ static inline u8 byte(const u32 x, const unsigned n)
static const u32 rco_tab[10] = { 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
__visible const u32 crypto_ft_tab[4][256] = {
/* cacheline-aligned to facilitate prefetching into cache */
__visible const u32 crypto_ft_tab[4][256] __cacheline_aligned = {
{
0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6,
0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591,
@@ -326,7 +327,7 @@ __visible const u32 crypto_ft_tab[4][256] = {
}
};
__visible const u32 crypto_fl_tab[4][256] = {
__visible const u32 crypto_fl_tab[4][256] __cacheline_aligned = {
{
0x00000063, 0x0000007c, 0x00000077, 0x0000007b,
0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5,
@@ -590,7 +591,7 @@ __visible const u32 crypto_fl_tab[4][256] = {
}
};
__visible const u32 crypto_it_tab[4][256] = {
__visible const u32 crypto_it_tab[4][256] __cacheline_aligned = {
{
0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a,
0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b,
@@ -854,7 +855,7 @@ __visible const u32 crypto_it_tab[4][256] = {
}
};
__visible const u32 crypto_il_tab[4][256] = {
__visible const u32 crypto_il_tab[4][256] __cacheline_aligned = {
{
0x00000052, 0x00000009, 0x0000006a, 0x000000d5,
0x00000030, 0x00000036, 0x000000a5, 0x00000038,

crypto/chacha20_generic.c (deleted)

@@ -1,151 +0,0 @@
/*
* ChaCha20 256-bit cipher algorithm, RFC7539
*
* Copyright (C) 2015 Martin Willi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/chacha20.h>
static inline u32 le32_to_cpuvp(const void *p)
{
return le32_to_cpup(p);
}
static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes)
{
u8 stream[CHACHA20_BLOCK_SIZE];
if (dst != src)
memcpy(dst, src, bytes);
while (bytes >= CHACHA20_BLOCK_SIZE) {
chacha20_block(state, stream);
crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE);
bytes -= CHACHA20_BLOCK_SIZE;
dst += CHACHA20_BLOCK_SIZE;
}
if (bytes) {
chacha20_block(state, stream);
crypto_xor(dst, stream, bytes);
}
}
void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
{
static const char constant[16] = "expand 32-byte k";
state[0] = le32_to_cpuvp(constant + 0);
state[1] = le32_to_cpuvp(constant + 4);
state[2] = le32_to_cpuvp(constant + 8);
state[3] = le32_to_cpuvp(constant + 12);
state[4] = ctx->key[0];
state[5] = ctx->key[1];
state[6] = ctx->key[2];
state[7] = ctx->key[3];
state[8] = ctx->key[4];
state[9] = ctx->key[5];
state[10] = ctx->key[6];
state[11] = ctx->key[7];
state[12] = le32_to_cpuvp(iv + 0);
state[13] = le32_to_cpuvp(iv + 4);
state[14] = le32_to_cpuvp(iv + 8);
state[15] = le32_to_cpuvp(iv + 12);
}
EXPORT_SYMBOL_GPL(crypto_chacha20_init);
int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keysize)
{
struct chacha20_ctx *ctx = crypto_tfm_ctx(tfm);
int i;
if (keysize != CHACHA20_KEY_SIZE)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
ctx->key[i] = le32_to_cpuvp(key + i * sizeof(u32));
return 0;
}
EXPORT_SYMBOL_GPL(crypto_chacha20_setkey);
int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
u32 state[16];
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);
crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
err = blkcipher_walk_done(desc, &walk,
walk.nbytes % CHACHA20_BLOCK_SIZE);
}
if (walk.nbytes) {
chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes);
err = blkcipher_walk_done(desc, &walk, 0);
}
return err;
}
EXPORT_SYMBOL_GPL(crypto_chacha20_crypt);
static struct crypto_alg alg = {
.cra_name = "chacha20",
.cra_driver_name = "chacha20-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_type = &crypto_blkcipher_type,
.cra_ctxsize = sizeof(struct chacha20_ctx),
.cra_alignmask = sizeof(u32) - 1,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CHACHA20_KEY_SIZE,
.max_keysize = CHACHA20_KEY_SIZE,
.ivsize = CHACHA20_IV_SIZE,
.geniv = "seqiv",
.setkey = crypto_chacha20_setkey,
.encrypt = crypto_chacha20_crypt,
.decrypt = crypto_chacha20_crypt,
},
},
};
static int __init chacha20_generic_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit chacha20_generic_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(chacha20_generic_mod_init);
module_exit(chacha20_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
MODULE_DESCRIPTION("chacha20 cipher algorithm");
MODULE_ALIAS_CRYPTO("chacha20");
MODULE_ALIAS_CRYPTO("chacha20-generic");

crypto/chacha20poly1305.c

@@ -13,7 +13,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/chacha20.h>
#include <crypto/chacha.h>
#include <crypto/poly1305.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -51,7 +51,7 @@ struct poly_req {
};
struct chacha_req {
u8 iv[CHACHA20_IV_SIZE];
u8 iv[CHACHA_IV_SIZE];
struct scatterlist src[1];
struct skcipher_request req; /* must be last member */
};
@@ -91,7 +91,7 @@ static void chacha_iv(u8 *iv, struct aead_request *req, u32 icb)
memcpy(iv, &leicb, sizeof(leicb));
memcpy(iv + sizeof(leicb), ctx->salt, ctx->saltlen);
memcpy(iv + sizeof(leicb) + ctx->saltlen, req->iv,
CHACHA20_IV_SIZE - sizeof(leicb) - ctx->saltlen);
CHACHA_IV_SIZE - sizeof(leicb) - ctx->saltlen);
}
static int poly_verify_tag(struct aead_request *req)
@@ -494,7 +494,7 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
struct chachapoly_ctx *ctx = crypto_aead_ctx(aead);
int err;
if (keylen != ctx->saltlen + CHACHA20_KEY_SIZE)
if (keylen != ctx->saltlen + CHACHA_KEY_SIZE)
return -EINVAL;
keylen -= ctx->saltlen;
@@ -639,7 +639,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
err = -EINVAL;
/* Need 16-byte IV size, including Initial Block Counter value */
if (crypto_skcipher_alg_ivsize(chacha) != CHACHA20_IV_SIZE)
if (crypto_skcipher_alg_ivsize(chacha) != CHACHA_IV_SIZE)
goto out_drop_chacha;
/* Not a stream cipher? */
if (chacha->base.cra_blocksize != 1)

crypto/chacha_generic.c (new file, 243 lines)

@@ -0,0 +1,243 @@
/*
* ChaCha and XChaCha stream ciphers, including ChaCha20 (RFC7539)
*
* Copyright (C) 2015 Martin Willi
* Copyright (C) 2018 Google LLC
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/chacha.h>
static void chacha_docrypt(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
/* aligned to potentially speed up crypto_xor() */
u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));
if (dst != src)
memcpy(dst, src, bytes);
while (bytes >= CHACHA_BLOCK_SIZE) {
chacha_block(state, stream, nrounds);
crypto_xor(dst, stream, CHACHA_BLOCK_SIZE);
bytes -= CHACHA_BLOCK_SIZE;
dst += CHACHA_BLOCK_SIZE;
}
if (bytes) {
chacha_block(state, stream, nrounds);
crypto_xor(dst, stream, bytes);
}
}
static int chacha_stream_xor(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes,
struct chacha_ctx *ctx, u8 *iv)
{
struct blkcipher_walk walk;
u32 state[16];
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, CHACHA_BLOCK_SIZE);
crypto_chacha_init(state, ctx, iv);
while (walk.nbytes >= CHACHA_BLOCK_SIZE) {
chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
rounddown(walk.nbytes, CHACHA_BLOCK_SIZE),
ctx->nrounds);
err = blkcipher_walk_done(desc, &walk,
walk.nbytes % CHACHA_BLOCK_SIZE);
}
if (walk.nbytes) {
chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes, ctx->nrounds);
err = blkcipher_walk_done(desc, &walk, 0);
}
return err;
}
void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv)
{
state[0] = 0x61707865; /* "expa" */
state[1] = 0x3320646e; /* "nd 3" */
state[2] = 0x79622d32; /* "2-by" */
state[3] = 0x6b206574; /* "te k" */
state[4] = ctx->key[0];
state[5] = ctx->key[1];
state[6] = ctx->key[2];
state[7] = ctx->key[3];
state[8] = ctx->key[4];
state[9] = ctx->key[5];
state[10] = ctx->key[6];
state[11] = ctx->key[7];
state[12] = get_unaligned_le32(iv + 0);
state[13] = get_unaligned_le32(iv + 4);
state[14] = get_unaligned_le32(iv + 8);
state[15] = get_unaligned_le32(iv + 12);
}
EXPORT_SYMBOL_GPL(crypto_chacha_init);
static int chacha_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keysize, int nrounds)
{
struct chacha_ctx *ctx = crypto_tfm_ctx(tfm);
int i;
if (keysize != CHACHA_KEY_SIZE)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
ctx->nrounds = nrounds;
return 0;
}
int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keysize)
{
return chacha_setkey(tfm, key, keysize, 20);
}
EXPORT_SYMBOL_GPL(crypto_chacha20_setkey);
int crypto_chacha12_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keysize)
{
return chacha_setkey(tfm, key, keysize, 12);
}
EXPORT_SYMBOL_GPL(crypto_chacha12_setkey);
int crypto_chacha_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct chacha_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
u8 *iv = desc->info;
return chacha_stream_xor(desc, dst, src, nbytes, ctx, iv);
}
EXPORT_SYMBOL_GPL(crypto_chacha_crypt);
int crypto_xchacha_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct chacha_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
u8 *iv = desc->info;
struct chacha_ctx subctx;
u32 state[16];
u8 real_iv[16];
/* Compute the subkey given the original key and first 128 nonce bits */
crypto_chacha_init(state, ctx, iv);
hchacha_block(state, subctx.key, ctx->nrounds);
subctx.nrounds = ctx->nrounds;
/* Build the real IV */
memcpy(&real_iv[0], iv + 24, 8); /* stream position */
memcpy(&real_iv[8], iv + 16, 8); /* remaining 64 nonce bits */
/* Generate the stream and XOR it with the data */
return chacha_stream_xor(desc, dst, src, nbytes, &subctx, real_iv);
}
EXPORT_SYMBOL_GPL(crypto_xchacha_crypt);
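/*
 * This is the standard XChaCha construction: hchacha_block() turns the key
 * and the first 128 nonce bits into a 256-bit subkey, and the remaining 64
 * nonce bits plus the 64-bit stream position form an ordinary ChaCha IV, so
 * each call costs only one extra permutation over plain ChaCha.
 */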
static struct crypto_alg algs[] = {
{
.cra_name = "chacha20",
.cra_driver_name = "chacha20-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_type = &crypto_blkcipher_type,
.cra_ctxsize = sizeof(struct chacha_ctx),
.cra_alignmask = sizeof(u32) - 1,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = CHACHA_IV_SIZE,
.geniv = "seqiv",
.setkey = crypto_chacha20_setkey,
.encrypt = crypto_chacha_crypt,
.decrypt = crypto_chacha_crypt,
},
},
}, {
.cra_name = "xchacha20",
.cra_driver_name = "xchacha20-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_type = &crypto_blkcipher_type,
.cra_ctxsize = sizeof(struct chacha_ctx),
.cra_alignmask = sizeof(u32) - 1,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = XCHACHA_IV_SIZE,
.geniv = "seqiv",
.setkey = crypto_chacha20_setkey,
.encrypt = crypto_xchacha_crypt,
.decrypt = crypto_xchacha_crypt,
},
},
}, {
.cra_name = "xchacha12",
.cra_driver_name = "xchacha12-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_type = &crypto_blkcipher_type,
.cra_ctxsize = sizeof(struct chacha_ctx),
.cra_alignmask = sizeof(u32) - 1,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = XCHACHA_IV_SIZE,
.geniv = "seqiv",
.setkey = crypto_chacha12_setkey,
.encrypt = crypto_xchacha_crypt,
.decrypt = crypto_xchacha_crypt,
},
},
},
};
static int __init chacha_generic_mod_init(void)
{
return crypto_register_algs(algs, ARRAY_SIZE(algs));
}
static void __exit chacha_generic_mod_fini(void)
{
crypto_unregister_algs(algs, ARRAY_SIZE(algs));
}
module_init(chacha_generic_mod_init);
module_exit(chacha_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (generic)");
MODULE_ALIAS_CRYPTO("chacha20");
MODULE_ALIAS_CRYPTO("chacha20-generic");
MODULE_ALIAS_CRYPTO("xchacha20");
MODULE_ALIAS_CRYPTO("xchacha20-generic");
MODULE_ALIAS_CRYPTO("xchacha12");
MODULE_ALIAS_CRYPTO("xchacha12-generic");

crypto/nhpoly1305.c (new file, 254 lines)

@@ -0,0 +1,254 @@
// SPDX-License-Identifier: GPL-2.0
/*
* NHPoly1305 - ε-almost-∆-universal hash function for Adiantum
*
* Copyright 2018 Google LLC
*/
/*
* "NHPoly1305" is the main component of Adiantum hashing.
* Specifically, it is the calculation
*
* H_M ← Poly1305_{K_M}(NH_{K_N}(pad_{128}(M)))
*
* from the procedure in section A.5 of the Adiantum paper [1]. It is an
* ε-almost-∆-universal (ε-∆U) hash function for equal-length inputs over
* Z/(2^{128}Z), where the "∆" operation is addition. It hashes 1024-byte
* chunks of the input with the NH hash function [2], reducing the input length
* by 32x. The resulting NH digests are evaluated as a polynomial in
* GF(2^{130}-5), like in the Poly1305 MAC [3]. Note that the polynomial
* evaluation by itself would suffice to achieve the ε-∆U property; NH is used
* for performance since it's over twice as fast as Poly1305.
*
* This is *not* a cryptographic hash function; do not use it as such!
*
* [1] Adiantum: length-preserving encryption for entry-level processors
* (https://eprint.iacr.org/2018/720.pdf)
* [2] UMAC: Fast and Secure Message Authentication
* (https://fastcrypto.org/umac/umac_proc.pdf)
* [3] The Poly1305-AES message-authentication code
* (https://cr.yp.to/mac/poly1305-20050329.pdf)
*/
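/*
 * As a concrete sense of the 32x reduction: a 4096-byte input is hashed as
 * four 1024-byte NH chunks, each producing NH_NUM_PASSES (4) 64-bit sums,
 * so Poly1305 then runs over only 128 bytes (8 blocks) rather than the 256
 * blocks it would take over the raw input.
 */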
#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/nhpoly1305.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
static void nh_generic(const u32 *key, const u8 *message, size_t message_len,
__le64 hash[NH_NUM_PASSES])
{
u64 sums[4] = { 0, 0, 0, 0 };
BUILD_BUG_ON(NH_PAIR_STRIDE != 2);
BUILD_BUG_ON(NH_NUM_PASSES != 4);
while (message_len) {
u32 m0 = get_unaligned_le32(message + 0);
u32 m1 = get_unaligned_le32(message + 4);
u32 m2 = get_unaligned_le32(message + 8);
u32 m3 = get_unaligned_le32(message + 12);
sums[0] += (u64)(u32)(m0 + key[ 0]) * (u32)(m2 + key[ 2]);
sums[1] += (u64)(u32)(m0 + key[ 4]) * (u32)(m2 + key[ 6]);
sums[2] += (u64)(u32)(m0 + key[ 8]) * (u32)(m2 + key[10]);
sums[3] += (u64)(u32)(m0 + key[12]) * (u32)(m2 + key[14]);
sums[0] += (u64)(u32)(m1 + key[ 1]) * (u32)(m3 + key[ 3]);
sums[1] += (u64)(u32)(m1 + key[ 5]) * (u32)(m3 + key[ 7]);
sums[2] += (u64)(u32)(m1 + key[ 9]) * (u32)(m3 + key[11]);
sums[3] += (u64)(u32)(m1 + key[13]) * (u32)(m3 + key[15]);
key += NH_MESSAGE_UNIT / sizeof(key[0]);
message += NH_MESSAGE_UNIT;
message_len -= NH_MESSAGE_UNIT;
}
hash[0] = cpu_to_le64(sums[0]);
hash[1] = cpu_to_le64(sums[1]);
hash[2] = cpu_to_le64(sums[2]);
hash[3] = cpu_to_le64(sums[3]);
}
/* Pass the next NH hash value through Poly1305 */
static void process_nh_hash_value(struct nhpoly1305_state *state,
const struct nhpoly1305_key *key)
{
BUILD_BUG_ON(NH_HASH_BYTES % POLY1305_BLOCK_SIZE != 0);
poly1305_core_blocks(&state->poly_state, &key->poly_key, state->nh_hash,
NH_HASH_BYTES / POLY1305_BLOCK_SIZE);
}
/*
* Feed the next portion of the source data, as a whole number of 16-byte
* "NH message units", through NH and Poly1305. Each NH hash is taken over
* 1024 bytes, except possibly the final one which is taken over a multiple of
* 16 bytes up to 1024. Also, in the case where data is passed in misaligned
* chunks, we combine partial hashes; the end result is the same either way.
*/
static void nhpoly1305_units(struct nhpoly1305_state *state,
const struct nhpoly1305_key *key,
const u8 *src, unsigned int srclen, nh_t nh_fn)
{
do {
unsigned int bytes;
if (state->nh_remaining == 0) {
/* Starting a new NH message */
bytes = min_t(unsigned int, srclen, NH_MESSAGE_BYTES);
nh_fn(key->nh_key, src, bytes, state->nh_hash);
state->nh_remaining = NH_MESSAGE_BYTES - bytes;
} else {
/* Continuing a previous NH message */
__le64 tmp_hash[NH_NUM_PASSES];
unsigned int pos;
int i;
pos = NH_MESSAGE_BYTES - state->nh_remaining;
bytes = min(srclen, state->nh_remaining);
nh_fn(&key->nh_key[pos / 4], src, bytes, tmp_hash);
for (i = 0; i < NH_NUM_PASSES; i++)
le64_add_cpu(&state->nh_hash[i],
le64_to_cpu(tmp_hash[i]));
state->nh_remaining -= bytes;
}
if (state->nh_remaining == 0)
process_nh_hash_value(state, key);
src += bytes;
srclen -= bytes;
} while (srclen);
}
int crypto_nhpoly1305_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct nhpoly1305_key *ctx = crypto_shash_ctx(tfm);
int i;
if (keylen != NHPOLY1305_KEY_SIZE)
return -EINVAL;
poly1305_core_setkey(&ctx->poly_key, key);
key += POLY1305_BLOCK_SIZE;
for (i = 0; i < NH_KEY_WORDS; i++)
ctx->nh_key[i] = get_unaligned_le32(key + i * sizeof(u32));
return 0;
}
EXPORT_SYMBOL(crypto_nhpoly1305_setkey);
int crypto_nhpoly1305_init(struct shash_desc *desc)
{
struct nhpoly1305_state *state = shash_desc_ctx(desc);
poly1305_core_init(&state->poly_state);
state->buflen = 0;
state->nh_remaining = 0;
return 0;
}
EXPORT_SYMBOL(crypto_nhpoly1305_init);
int crypto_nhpoly1305_update_helper(struct shash_desc *desc,
const u8 *src, unsigned int srclen,
nh_t nh_fn)
{
struct nhpoly1305_state *state = shash_desc_ctx(desc);
const struct nhpoly1305_key *key = crypto_shash_ctx(desc->tfm);
unsigned int bytes;
if (state->buflen) {
bytes = min(srclen, (int)NH_MESSAGE_UNIT - state->buflen);
memcpy(&state->buffer[state->buflen], src, bytes);
state->buflen += bytes;
if (state->buflen < NH_MESSAGE_UNIT)
return 0;
nhpoly1305_units(state, key, state->buffer, NH_MESSAGE_UNIT,
nh_fn);
state->buflen = 0;
src += bytes;
srclen -= bytes;
}
if (srclen >= NH_MESSAGE_UNIT) {
bytes = round_down(srclen, NH_MESSAGE_UNIT);
nhpoly1305_units(state, key, src, bytes, nh_fn);
src += bytes;
srclen -= bytes;
}
if (srclen) {
memcpy(state->buffer, src, srclen);
state->buflen = srclen;
}
return 0;
}
EXPORT_SYMBOL(crypto_nhpoly1305_update_helper);
int crypto_nhpoly1305_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
return crypto_nhpoly1305_update_helper(desc, src, srclen, nh_generic);
}
EXPORT_SYMBOL(crypto_nhpoly1305_update);
int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst, nh_t nh_fn)
{
struct nhpoly1305_state *state = shash_desc_ctx(desc);
const struct nhpoly1305_key *key = crypto_shash_ctx(desc->tfm);
if (state->buflen) {
memset(&state->buffer[state->buflen], 0,
NH_MESSAGE_UNIT - state->buflen);
nhpoly1305_units(state, key, state->buffer, NH_MESSAGE_UNIT,
nh_fn);
}
if (state->nh_remaining)
process_nh_hash_value(state, key);
poly1305_core_emit(&state->poly_state, dst);
return 0;
}
EXPORT_SYMBOL(crypto_nhpoly1305_final_helper);
int crypto_nhpoly1305_final(struct shash_desc *desc, u8 *dst)
{
return crypto_nhpoly1305_final_helper(desc, dst, nh_generic);
}
EXPORT_SYMBOL(crypto_nhpoly1305_final);
static struct shash_alg nhpoly1305_alg = {
.base.cra_name = "nhpoly1305",
.base.cra_driver_name = "nhpoly1305-generic",
.base.cra_priority = 100,
.base.cra_ctxsize = sizeof(struct nhpoly1305_key),
.base.cra_module = THIS_MODULE,
.digestsize = POLY1305_DIGEST_SIZE,
.init = crypto_nhpoly1305_init,
.update = crypto_nhpoly1305_update,
.final = crypto_nhpoly1305_final,
.setkey = crypto_nhpoly1305_setkey,
.descsize = sizeof(struct nhpoly1305_state),
};
static int __init nhpoly1305_mod_init(void)
{
return crypto_register_shash(&nhpoly1305_alg);
}
static void __exit nhpoly1305_mod_exit(void)
{
crypto_unregister_shash(&nhpoly1305_alg);
}
module_init(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("nhpoly1305");
MODULE_ALIAS_CRYPTO("nhpoly1305-generic");

crypto/poly1305_generic.c

@@ -17,6 +17,7 @@
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/unaligned.h>
static inline u64 mlt(u64 a, u64 b)
{
@@ -33,16 +34,11 @@ static inline u32 and(u32 v, u32 mask)
return v & mask;
}
static inline u32 le32_to_cpuvp(const void *p)
{
return le32_to_cpup(p);
}
int crypto_poly1305_init(struct shash_desc *desc)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
memset(dctx->h, 0, sizeof(dctx->h));
poly1305_core_init(&dctx->h);
dctx->buflen = 0;
dctx->rset = false;
dctx->sset = false;
@@ -51,23 +47,16 @@ int crypto_poly1305_init(struct shash_desc *desc)
}
EXPORT_SYMBOL_GPL(crypto_poly1305_init);
static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key)
{
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
dctx->r[0] = (le32_to_cpuvp(key + 0) >> 0) & 0x3ffffff;
dctx->r[1] = (le32_to_cpuvp(key + 3) >> 2) & 0x3ffff03;
dctx->r[2] = (le32_to_cpuvp(key + 6) >> 4) & 0x3ffc0ff;
dctx->r[3] = (le32_to_cpuvp(key + 9) >> 6) & 0x3f03fff;
dctx->r[4] = (le32_to_cpuvp(key + 12) >> 8) & 0x00fffff;
}
static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key)
{
dctx->s[0] = le32_to_cpuvp(key + 0);
dctx->s[1] = le32_to_cpuvp(key + 4);
dctx->s[2] = le32_to_cpuvp(key + 8);
dctx->s[3] = le32_to_cpuvp(key + 12);
key->r[0] = (get_unaligned_le32(raw_key + 0) >> 0) & 0x3ffffff;
key->r[1] = (get_unaligned_le32(raw_key + 3) >> 2) & 0x3ffff03;
key->r[2] = (get_unaligned_le32(raw_key + 6) >> 4) & 0x3ffc0ff;
key->r[3] = (get_unaligned_le32(raw_key + 9) >> 6) & 0x3f03fff;
key->r[4] = (get_unaligned_le32(raw_key + 12) >> 8) & 0x00fffff;
}
EXPORT_SYMBOL_GPL(poly1305_core_setkey);
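/*
 * The shifts and masks above store the clamped 128-bit r in five 26-bit
 * limbs (radix 2^26), which is what allows poly1305_blocks_internal() below
 * to accumulate the h *= r products in plain 64-bit arithmetic without
 * overflow.
 */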
/*
* Poly1305 requires a unique key for each tag, which implies that we can't set
@@ -79,13 +68,16 @@ unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
{
if (!dctx->sset) {
if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
poly1305_setrkey(dctx, src);
poly1305_core_setkey(&dctx->r, src);
src += POLY1305_BLOCK_SIZE;
srclen -= POLY1305_BLOCK_SIZE;
dctx->rset = true;
}
if (srclen >= POLY1305_BLOCK_SIZE) {
poly1305_setskey(dctx, src);
dctx->s[0] = get_unaligned_le32(src + 0);
dctx->s[1] = get_unaligned_le32(src + 4);
dctx->s[2] = get_unaligned_le32(src + 8);
dctx->s[3] = get_unaligned_le32(src + 12);
src += POLY1305_BLOCK_SIZE;
srclen -= POLY1305_BLOCK_SIZE;
dctx->sset = true;
@@ -95,47 +87,43 @@ unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
}
EXPORT_SYMBOL_GPL(crypto_poly1305_setdesckey);
static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
const u8 *src, unsigned int srclen,
u32 hibit)
static void poly1305_blocks_internal(struct poly1305_state *state,
const struct poly1305_key *key,
const void *src, unsigned int nblocks,
u32 hibit)
{
u32 r0, r1, r2, r3, r4;
u32 s1, s2, s3, s4;
u32 h0, h1, h2, h3, h4;
u64 d0, d1, d2, d3, d4;
unsigned int datalen;
if (unlikely(!dctx->sset)) {
datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
src += srclen - datalen;
srclen = datalen;
}
if (!nblocks)
return;
r0 = dctx->r[0];
r1 = dctx->r[1];
r2 = dctx->r[2];
r3 = dctx->r[3];
r4 = dctx->r[4];
r0 = key->r[0];
r1 = key->r[1];
r2 = key->r[2];
r3 = key->r[3];
r4 = key->r[4];
s1 = r1 * 5;
s2 = r2 * 5;
s3 = r3 * 5;
s4 = r4 * 5;
h0 = dctx->h[0];
h1 = dctx->h[1];
h2 = dctx->h[2];
h3 = dctx->h[3];
h4 = dctx->h[4];
while (likely(srclen >= POLY1305_BLOCK_SIZE)) {
h0 = state->h[0];
h1 = state->h[1];
h2 = state->h[2];
h3 = state->h[3];
h4 = state->h[4];
do {
/* h += m[i] */
h0 += (le32_to_cpuvp(src + 0) >> 0) & 0x3ffffff;
h1 += (le32_to_cpuvp(src + 3) >> 2) & 0x3ffffff;
h2 += (le32_to_cpuvp(src + 6) >> 4) & 0x3ffffff;
h3 += (le32_to_cpuvp(src + 9) >> 6) & 0x3ffffff;
h4 += (le32_to_cpuvp(src + 12) >> 8) | hibit;
h0 += (get_unaligned_le32(src + 0) >> 0) & 0x3ffffff;
h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff;
h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff;
h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff;
h4 += (get_unaligned_le32(src + 12) >> 8) | hibit;
/* h *= r */
d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) +
@@ -158,16 +146,36 @@ static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
h1 += h0 >> 26; h0 = h0 & 0x3ffffff;
src += POLY1305_BLOCK_SIZE;
srclen -= POLY1305_BLOCK_SIZE;
} while (--nblocks);
state->h[0] = h0;
state->h[1] = h1;
state->h[2] = h2;
state->h[3] = h3;
state->h[4] = h4;
}
void poly1305_core_blocks(struct poly1305_state *state,
const struct poly1305_key *key,
const void *src, unsigned int nblocks)
{
poly1305_blocks_internal(state, key, src, nblocks, 1 << 24);
}
EXPORT_SYMBOL_GPL(poly1305_core_blocks);
static void poly1305_blocks(struct poly1305_desc_ctx *dctx,
const u8 *src, unsigned int srclen, u32 hibit)
{
unsigned int datalen;
if (unlikely(!dctx->sset)) {
datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
src += srclen - datalen;
srclen = datalen;
}
dctx->h[0] = h0;
dctx->h[1] = h1;
dctx->h[2] = h2;
dctx->h[3] = h3;
dctx->h[4] = h4;
return srclen;
poly1305_blocks_internal(&dctx->h, &dctx->r,
src, srclen / POLY1305_BLOCK_SIZE, hibit);
}
int crypto_poly1305_update(struct shash_desc *desc,
@@ -191,9 +199,9 @@ int crypto_poly1305_update(struct shash_desc *desc,
}
if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
bytes = poly1305_blocks(dctx, src, srclen, 1 << 24);
src += srclen - bytes;
srclen = bytes;
poly1305_blocks(dctx, src, srclen, 1 << 24);
src += srclen - (srclen % POLY1305_BLOCK_SIZE);
srclen %= POLY1305_BLOCK_SIZE;
}
if (unlikely(srclen)) {
@@ -205,31 +213,18 @@ int crypto_poly1305_update(struct shash_desc *desc,
}
EXPORT_SYMBOL_GPL(crypto_poly1305_update);
int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
void poly1305_core_emit(const struct poly1305_state *state, void *dst)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
__le32 *mac = (__le32 *)dst;
u32 h0, h1, h2, h3, h4;
u32 g0, g1, g2, g3, g4;
u32 mask;
u64 f = 0;
if (unlikely(!dctx->sset))
return -ENOKEY;
if (unlikely(dctx->buflen)) {
dctx->buf[dctx->buflen++] = 1;
memset(dctx->buf + dctx->buflen, 0,
POLY1305_BLOCK_SIZE - dctx->buflen);
poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 0);
}
/* fully carry h */
h0 = dctx->h[0];
h1 = dctx->h[1];
h2 = dctx->h[2];
h3 = dctx->h[3];
h4 = dctx->h[4];
h0 = state->h[0];
h1 = state->h[1];
h2 = state->h[2];
h3 = state->h[3];
h4 = state->h[4];
h2 += (h1 >> 26); h1 = h1 & 0x3ffffff;
h3 += (h2 >> 26); h2 = h2 & 0x3ffffff;
@@ -259,16 +254,40 @@ int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
h4 = (h4 & mask) | g4;
/* h = h % (2^128) */
h0 = (h0 >> 0) | (h1 << 26);
h1 = (h1 >> 6) | (h2 << 20);
h2 = (h2 >> 12) | (h3 << 14);
h3 = (h3 >> 18) | (h4 << 8);
put_unaligned_le32((h0 >> 0) | (h1 << 26), dst + 0);
put_unaligned_le32((h1 >> 6) | (h2 << 20), dst + 4);
put_unaligned_le32((h2 >> 12) | (h3 << 14), dst + 8);
put_unaligned_le32((h3 >> 18) | (h4 << 8), dst + 12);
}
EXPORT_SYMBOL_GPL(poly1305_core_emit);
int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
__le32 digest[4];
u64 f = 0;
if (unlikely(!dctx->sset))
return -ENOKEY;
if (unlikely(dctx->buflen)) {
dctx->buf[dctx->buflen++] = 1;
memset(dctx->buf + dctx->buflen, 0,
POLY1305_BLOCK_SIZE - dctx->buflen);
poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 0);
}
poly1305_core_emit(&dctx->h, digest);
/* mac = (h + s) % (2^128) */
f = (f >> 32) + h0 + dctx->s[0]; mac[0] = cpu_to_le32(f);
f = (f >> 32) + h1 + dctx->s[1]; mac[1] = cpu_to_le32(f);
f = (f >> 32) + h2 + dctx->s[2]; mac[2] = cpu_to_le32(f);
f = (f >> 32) + h3 + dctx->s[3]; mac[3] = cpu_to_le32(f);
f = (f >> 32) + le32_to_cpu(digest[0]) + dctx->s[0];
put_unaligned_le32(f, dst + 0);
f = (f >> 32) + le32_to_cpu(digest[1]) + dctx->s[1];
put_unaligned_le32(f, dst + 4);
f = (f >> 32) + le32_to_cpu(digest[2]) + dctx->s[2];
put_unaligned_le32(f, dst + 8);
f = (f >> 32) + le32_to_cpu(digest[3]) + dctx->s[3];
put_unaligned_le32(f, dst + 12);
return 0;
}

crypto/speck.c (deleted)

@@ -1,307 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Speck: a lightweight block cipher
*
* Copyright (c) 2018 Google, Inc
*
* Speck has 10 variants, including 5 block sizes. For now we only implement
* the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and
* Speck64/128. Speck${B}/${K} denotes the variant with a block size of B bits
* and a key size of K bits. The Speck128 variants are believed to be the most
* secure variants, and they use the same block size and key sizes as AES. The
* Speck64 variants are less secure, but on 32-bit processors are usually
* faster. The remaining variants (Speck32, Speck48, and Speck96) are even less
* secure and/or not as well suited for implementation on either 32-bit or
* 64-bit processors, so are omitted.
*
* Reference: "The Simon and Speck Families of Lightweight Block Ciphers"
* https://eprint.iacr.org/2013/404.pdf
*
* In a correspondence, the Speck designers have also clarified that the words
* should be interpreted in little-endian format, and the words should be
* ordered such that the first word of each block is 'y' rather than 'x', and
* the first key word (rather than the last) becomes the first round key.
*/
#include <asm/unaligned.h>
#include <crypto/speck.h>
#include <linux/bitops.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
/* Speck128 */
static __always_inline void speck128_round(u64 *x, u64 *y, u64 k)
{
*x = ror64(*x, 8);
*x += *y;
*x ^= k;
*y = rol64(*y, 3);
*y ^= *x;
}
static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k)
{
*y ^= *x;
*y = ror64(*y, 3);
*x ^= k;
*x -= *y;
*x = rol64(*x, 8);
}
void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
u8 *out, const u8 *in)
{
u64 y = get_unaligned_le64(in);
u64 x = get_unaligned_le64(in + 8);
int i;
for (i = 0; i < ctx->nrounds; i++)
speck128_round(&x, &y, ctx->round_keys[i]);
put_unaligned_le64(y, out);
put_unaligned_le64(x, out + 8);
}
EXPORT_SYMBOL_GPL(crypto_speck128_encrypt);
static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in);
}
void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
u8 *out, const u8 *in)
{
u64 y = get_unaligned_le64(in);
u64 x = get_unaligned_le64(in + 8);
int i;
for (i = ctx->nrounds - 1; i >= 0; i--)
speck128_unround(&x, &y, ctx->round_keys[i]);
put_unaligned_le64(y, out);
put_unaligned_le64(x, out + 8);
}
EXPORT_SYMBOL_GPL(crypto_speck128_decrypt);
static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in);
}
int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
unsigned int keylen)
{
u64 l[3];
u64 k;
int i;
switch (keylen) {
case SPECK128_128_KEY_SIZE:
k = get_unaligned_le64(key);
l[0] = get_unaligned_le64(key + 8);
ctx->nrounds = SPECK128_128_NROUNDS;
for (i = 0; i < ctx->nrounds; i++) {
ctx->round_keys[i] = k;
speck128_round(&l[0], &k, i);
}
break;
case SPECK128_192_KEY_SIZE:
k = get_unaligned_le64(key);
l[0] = get_unaligned_le64(key + 8);
l[1] = get_unaligned_le64(key + 16);
ctx->nrounds = SPECK128_192_NROUNDS;
for (i = 0; i < ctx->nrounds; i++) {
ctx->round_keys[i] = k;
speck128_round(&l[i % 2], &k, i);
}
break;
case SPECK128_256_KEY_SIZE:
k = get_unaligned_le64(key);
l[0] = get_unaligned_le64(key + 8);
l[1] = get_unaligned_le64(key + 16);
l[2] = get_unaligned_le64(key + 24);
ctx->nrounds = SPECK128_256_NROUNDS;
for (i = 0; i < ctx->nrounds; i++) {
ctx->round_keys[i] = k;
speck128_round(&l[i % 3], &k, i);
}
break;
default:
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(crypto_speck128_setkey);
static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen);
}
/* Speck64 */
static __always_inline void speck64_round(u32 *x, u32 *y, u32 k)
{
*x = ror32(*x, 8);
*x += *y;
*x ^= k;
*y = rol32(*y, 3);
*y ^= *x;
}
static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k)
{
*y ^= *x;
*y = ror32(*y, 3);
*x ^= k;
*x -= *y;
*x = rol32(*x, 8);
}
void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
u8 *out, const u8 *in)
{
u32 y = get_unaligned_le32(in);
u32 x = get_unaligned_le32(in + 4);
int i;
for (i = 0; i < ctx->nrounds; i++)
speck64_round(&x, &y, ctx->round_keys[i]);
put_unaligned_le32(y, out);
put_unaligned_le32(x, out + 4);
}
EXPORT_SYMBOL_GPL(crypto_speck64_encrypt);
static void speck64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in);
}
void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
u8 *out, const u8 *in)
{
u32 y = get_unaligned_le32(in);
u32 x = get_unaligned_le32(in + 4);
int i;
for (i = ctx->nrounds - 1; i >= 0; i--)
speck64_unround(&x, &y, ctx->round_keys[i]);
put_unaligned_le32(y, out);
put_unaligned_le32(x, out + 4);
}
EXPORT_SYMBOL_GPL(crypto_speck64_decrypt);
static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in);
}
int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
unsigned int keylen)
{
u32 l[3];
u32 k;
int i;
switch (keylen) {
case SPECK64_96_KEY_SIZE:
k = get_unaligned_le32(key);
l[0] = get_unaligned_le32(key + 4);
l[1] = get_unaligned_le32(key + 8);
ctx->nrounds = SPECK64_96_NROUNDS;
for (i = 0; i < ctx->nrounds; i++) {
ctx->round_keys[i] = k;
speck64_round(&l[i % 2], &k, i);
}
break;
case SPECK64_128_KEY_SIZE:
k = get_unaligned_le32(key);
l[0] = get_unaligned_le32(key + 4);
l[1] = get_unaligned_le32(key + 8);
l[2] = get_unaligned_le32(key + 12);
ctx->nrounds = SPECK64_128_NROUNDS;
for (i = 0; i < ctx->nrounds; i++) {
ctx->round_keys[i] = k;
speck64_round(&l[i % 3], &k, i);
}
break;
default:
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(crypto_speck64_setkey);
static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, keylen);
}
/* Algorithm definitions */
static struct crypto_alg speck_algs[] = {
{
.cra_name = "speck128",
.cra_driver_name = "speck128-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SPECK128_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct speck128_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = SPECK128_128_KEY_SIZE,
.cia_max_keysize = SPECK128_256_KEY_SIZE,
.cia_setkey = speck128_setkey,
.cia_encrypt = speck128_encrypt,
.cia_decrypt = speck128_decrypt
}
}
}, {
.cra_name = "speck64",
.cra_driver_name = "speck64-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SPECK64_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct speck64_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = SPECK64_96_KEY_SIZE,
.cia_max_keysize = SPECK64_128_KEY_SIZE,
.cia_setkey = speck64_setkey,
.cia_encrypt = speck64_encrypt,
.cia_decrypt = speck64_decrypt
}
}
}
};
static int __init speck_module_init(void)
{
return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
}
static void __exit speck_module_exit(void)
{
crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
}
module_init(speck_module_init);
module_exit(speck_module_exit);
MODULE_DESCRIPTION("Speck block cipher (generic)");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("speck128");
MODULE_ALIAS_CRYPTO("speck128-generic");
MODULE_ALIAS_CRYPTO("speck64");
MODULE_ALIAS_CRYPTO("speck64-generic");

crypto/tcrypt.c

@@ -1610,6 +1610,16 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
speed_template_32);
break;
case 219:
test_cipher_speed("adiantum(xchacha12,aes)", ENCRYPT, sec, NULL,
0, speed_template_32);
test_cipher_speed("adiantum(xchacha12,aes)", DECRYPT, sec, NULL,
0, speed_template_32);
test_cipher_speed("adiantum(xchacha20,aes)", ENCRYPT, sec, NULL,
0, speed_template_32);
test_cipher_speed("adiantum(xchacha20,aes)", DECRYPT, sec, NULL,
0, speed_template_32);
break;
case 300:
if (alg) {


@@ -62,7 +62,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
*/
#define IDX1 32
#define IDX2 32400
#define IDX3 1
#define IDX3 1511
#define IDX4 8193
#define IDX5 22222
#define IDX6 17101
@@ -2172,6 +2172,36 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "__ghash-pclmulqdqni",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "adiantum(xchacha12,aes)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = adiantum_xchacha12_aes_enc_tv_template,
.count = ARRAY_SIZE(adiantum_xchacha12_aes_enc_tv_template),
},
.dec = {
.vecs = adiantum_xchacha12_aes_dec_tv_template,
.count = ARRAY_SIZE(adiantum_xchacha12_aes_dec_tv_template),
},
}
},
}, {
.alg = "adiantum(xchacha20,aes)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = adiantum_xchacha20_aes_enc_tv_template,
.count = ARRAY_SIZE(adiantum_xchacha20_aes_enc_tv_template),
},
.dec = {
.vecs = adiantum_xchacha20_aes_dec_tv_template,
.count = ARRAY_SIZE(adiantum_xchacha20_aes_dec_tv_template),
},
}
},
}, {
.alg = "ansi_cprng",
.test = alg_test_cprng,
@@ -3237,36 +3267,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "ecb(speck128)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = speck128_enc_tv_template,
.count = ARRAY_SIZE(speck128_enc_tv_template)
},
.dec = {
.vecs = speck128_dec_tv_template,
.count = ARRAY_SIZE(speck128_dec_tv_template)
}
}
}
}, {
.alg = "ecb(speck64)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = speck64_enc_tv_template,
.count = ARRAY_SIZE(speck64_enc_tv_template)
},
.dec = {
.vecs = speck64_dec_tv_template,
.count = ARRAY_SIZE(speck64_dec_tv_template)
}
}
}
}, {
.alg = "ecb(tea)",
.test = alg_test_skcipher,
@@ -3674,6 +3674,15 @@ static const struct alg_test_desc alg_test_descs[] = {
.count = MICHAEL_MIC_TEST_VECTORS
}
}
}, {
.alg = "nhpoly1305",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = nhpoly1305_tv_template,
.count = ARRAY_SIZE(nhpoly1305_tv_template),
}
}
}, {
.alg = "ofb(aes)",
.test = alg_test_skcipher,
@@ -4026,6 +4035,36 @@ static const struct alg_test_desc alg_test_descs[] = {
.count = XCBC_AES_TEST_VECTORS
}
}
}, {
.alg = "xchacha12",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = xchacha12_tv_template,
.count = ARRAY_SIZE(xchacha12_tv_template),
},
.dec = {
.vecs = xchacha12_tv_template,
.count = ARRAY_SIZE(xchacha12_tv_template),
},
}
},
}, {
.alg = "xchacha20",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = xchacha20_tv_template,
.count = ARRAY_SIZE(xchacha20_tv_template),
},
.dec = {
.vecs = xchacha20_tv_template,
.count = ARRAY_SIZE(xchacha20_tv_template),
},
}
},
}, {
.alg = "xts(aes)",
.test = alg_test_skcipher,
@@ -4087,36 +4126,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "xts(speck128)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = speck128_xts_enc_tv_template,
.count = ARRAY_SIZE(speck128_xts_enc_tv_template)
},
.dec = {
.vecs = speck128_xts_dec_tv_template,
.count = ARRAY_SIZE(speck128_xts_dec_tv_template)
}
}
}
}, {
.alg = "xts(speck64)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = speck64_xts_enc_tv_template,
.count = ARRAY_SIZE(speck64_xts_enc_tv_template)
},
.dec = {
.vecs = speck64_xts_dec_tv_template,
.count = ARRAY_SIZE(speck64_xts_dec_tv_template)
}
}
}
}, {
.alg = "xts(twofish)",
.test = alg_test_skcipher,
@@ -4132,6 +4141,22 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "zstd",
.test = alg_test_comp,
.fips_allowed = 1,
.suite = {
.comp = {
.comp = {
.vecs = zstd_comp_tv_template,
.count = ZSTD_COMP_TEST_VECTORS
},
.decomp = {
.vecs = zstd_decomp_tv_template,
.count = ZSTD_DECOMP_TEST_VECTORS
}
}
}
}
};
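
As context for the new test entries, a sketch (not part of this commit, and assuming the Adiantum template from this merge is enabled) of how a caller reaches them: Adiantum instances are length-preserving skciphers with a 32-byte key and a 32-byte IV/tweak. Passing CRYPTO_ALG_ASYNC as the mask requests a synchronous implementation so the result is returned directly; buffer sizes and contents are arbitrary placeholders.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int adiantum_demo(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	u8 key[32] = { 0 };	/* placeholder key */
	u8 iv[32] = { 0 };	/* Adiantum tweak/IV */
	u8 *buf = NULL;
	int err;

	tfm = crypto_alloc_skcipher("adiantum(xchacha12,aes)", 0,
				    CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(4096, GFP_KERNEL);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!buf || !req) {
		err = -ENOMEM;
		goto out;
	}

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	sg_init_one(&sg, buf, 4096);
	skcipher_request_set_crypt(req, &sg, &sg, 4096, iv);
	err = crypto_skcipher_encrypt(req);	/* synchronous: 0 or -errno */
out:
	skcipher_request_free(req);
	kfree(buf);
	crypto_free_skcipher(tfm);
	return err;
}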

File diff suppressed because it is too large.

crypto/zstd.c (new file, 209 lines)

@@ -0,0 +1,209 @@
/*
* Cryptographic API.
*
* Copyright (c) 2017-present, Facebook, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>
#define ZSTD_DEF_LEVEL 3
struct zstd_ctx {
ZSTD_CCtx *cctx;
ZSTD_DCtx *dctx;
void *cwksp;
void *dwksp;
};
static ZSTD_parameters zstd_params(void)
{
return ZSTD_getParams(ZSTD_DEF_LEVEL, 0, 0);
}
static int zstd_comp_init(struct zstd_ctx *ctx)
{
int ret = 0;
const ZSTD_parameters params = zstd_params();
const size_t wksp_size = ZSTD_CCtxWorkspaceBound(params.cParams);
ctx->cwksp = vzalloc(wksp_size);
if (!ctx->cwksp) {
ret = -ENOMEM;
goto out;
}
ctx->cctx = ZSTD_initCCtx(ctx->cwksp, wksp_size);
if (!ctx->cctx) {
ret = -EINVAL;
goto out_free;
}
out:
return ret;
out_free:
vfree(ctx->cwksp);
goto out;
}
static int zstd_decomp_init(struct zstd_ctx *ctx)
{
int ret = 0;
const size_t wksp_size = ZSTD_DCtxWorkspaceBound();
ctx->dwksp = vzalloc(wksp_size);
if (!ctx->dwksp) {
ret = -ENOMEM;
goto out;
}
ctx->dctx = ZSTD_initDCtx(ctx->dwksp, wksp_size);
if (!ctx->dctx) {
ret = -EINVAL;
goto out_free;
}
out:
return ret;
out_free:
vfree(ctx->dwksp);
goto out;
}
static void zstd_comp_exit(struct zstd_ctx *ctx)
{
vfree(ctx->cwksp);
ctx->cwksp = NULL;
ctx->cctx = NULL;
}
static void zstd_decomp_exit(struct zstd_ctx *ctx)
{
vfree(ctx->dwksp);
ctx->dwksp = NULL;
ctx->dctx = NULL;
}
static int __zstd_init(void *ctx)
{
int ret;
ret = zstd_comp_init(ctx);
if (ret)
return ret;
ret = zstd_decomp_init(ctx);
if (ret)
zstd_comp_exit(ctx);
return ret;
}
static int zstd_init(struct crypto_tfm *tfm)
{
struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
return __zstd_init(ctx);
}
static void __zstd_exit(void *ctx)
{
zstd_comp_exit(ctx);
zstd_decomp_exit(ctx);
}
static void zstd_exit(struct crypto_tfm *tfm)
{
struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
__zstd_exit(ctx);
}
static int __zstd_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
size_t out_len;
struct zstd_ctx *zctx = ctx;
const ZSTD_parameters params = zstd_params();
out_len = ZSTD_compressCCtx(zctx->cctx, dst, *dlen, src, slen, params);
if (ZSTD_isError(out_len))
return -EINVAL;
*dlen = out_len;
return 0;
}
static int zstd_compress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
return __zstd_compress(src, slen, dst, dlen, ctx);
}
static int __zstd_decompress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
size_t out_len;
struct zstd_ctx *zctx = ctx;
out_len = ZSTD_decompressDCtx(zctx->dctx, dst, *dlen, src, slen);
if (ZSTD_isError(out_len))
return -EINVAL;
*dlen = out_len;
return 0;
}
static int zstd_decompress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
return __zstd_decompress(src, slen, dst, dlen, ctx);
}
static struct crypto_alg alg = {
.cra_name = "zstd",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct zstd_ctx),
.cra_module = THIS_MODULE,
.cra_init = zstd_init,
.cra_exit = zstd_exit,
.cra_u = { .compress = {
.coa_compress = zstd_compress,
.coa_decompress = zstd_decompress } }
};
static int __init zstd_mod_init(void)
{
	return crypto_register_alg(&alg);
}
static void __exit zstd_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(zstd_mod_init);
module_exit(zstd_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Zstd Compression Algorithm");
MODULE_ALIAS_CRYPTO("zstd");


@@ -9,7 +9,7 @@ if ANDROID
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
depends on MMU
depends on MMU && !M68K
default n
---help---
Binder is used in Android for both communication between processes,
@@ -31,19 +31,6 @@ config ANDROID_BINDER_DEVICES
created. Each binder device has its own context manager, and is
therefore logically separated from the other devices.
config ANDROID_BINDER_IPC_32BIT
bool
depends on !64BIT && ANDROID_BINDER_IPC
default y
---help---
The Binder API has been changed to support both 32 and 64bit
applications in a mixed environment.
Enable this to support an old 32-bit Android user-space (v4.4 and
earlier).
Note that enabling this will break newer Android user-space.
config ANDROID_BINDER_IPC_SELFTEST
bool "Android Binder IPC Driver Selftest"
depends on ANDROID_BINDER_IPC


@@ -71,10 +71,6 @@
#include <linux/security.h>
#include <linux/spinlock.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif
#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"
@@ -142,7 +138,7 @@ enum {
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);
@@ -161,7 +157,7 @@ static int binder_set_stop_on_user_error(const char *val,
return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
param_get_int, &binder_stop_on_user_error, 0644);
#define binder_debug(mask, x...) \
do { \
@@ -250,7 +246,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
unsigned int cur = atomic_inc_return(&log->cur);
if (cur >= ARRAY_SIZE(log->entry))
log->full = 1;
log->full = true;
e = &log->entry[cur % ARRAY_SIZE(log->entry)];
WRITE_ONCE(e->debug_id_done, 0);
/*
@@ -2266,8 +2262,8 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
struct binder_object_header *hdr;
size_t object_size = 0;
if (offset > buffer->data_size - sizeof(*hdr) ||
buffer->data_size < sizeof(*hdr) ||
if (buffer->data_size < sizeof(*hdr) ||
offset > buffer->data_size - sizeof(*hdr) ||
!IS_ALIGNED(offset, sizeof(u32)))
return 0;
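
The reordering above is more than style: both operands are unsigned, so with the old order the expression buffer->data_size - sizeof(*hdr) was evaluated first and wrapped to a huge value whenever data_size < sizeof(*hdr); the undersized buffer was only rejected by the clause that followed. Checking data_size first guarantees the subtraction can never wrap.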
@@ -2407,7 +2403,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
int debug_id = buffer->debug_id;
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d buffer release %d, size %zd-%zd, failed at %p\n",
"%d buffer release %d, size %zd-%zd, failed at %pK\n",
proc->pid, buffer->debug_id,
buffer->data_size, buffer->offsets_size, failed_at);
@@ -2856,7 +2852,7 @@ static bool binder_proc_transaction(struct binder_transaction *t,
if (node->has_async_transaction) {
pending_async = true;
} else {
node->has_async_transaction = 1;
node->has_async_transaction = true;
}
}
@@ -3055,6 +3051,14 @@ static void binder_transaction(struct binder_proc *proc,
else
return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock);
if (target_node && target_proc == proc) {
binder_user_error("%d:%d got transaction to context manager from process owning it\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_invalid_target_handle;
}
}
if (!target_node) {
/*
@@ -3748,7 +3752,7 @@ static int binder_thread_write(struct binder_proc *proc,
w = binder_dequeue_work_head_ilocked(
&buf_node->async_todo);
if (!w) {
buf_node->has_async_transaction = 0;
buf_node->has_async_transaction = false;
} else {
binder_enqueue_work_ilocked(
w, &proc->todo);
@@ -3970,7 +3974,7 @@ static int binder_thread_write(struct binder_proc *proc,
}
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
"%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
proc->pid, thread->pid, (u64)cookie,
death);
if (death == NULL) {
@@ -4178,6 +4182,7 @@ retry:
binder_inner_proc_unlock(proc);
if (put_user(e->cmd, (uint32_t __user *)ptr))
return -EFAULT;
cmd = e->cmd;
e->cmd = BR_OK;
ptr += sizeof(uint32_t);
@@ -5059,7 +5064,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
failure_string = "bad vm_flags";
goto err_bad_arg;
}
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
vma->vm_flags &= ~VM_MAYWRITE;
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
@@ -5072,7 +5079,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
return 0;
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
return ret;
}
@@ -5082,7 +5089,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
struct binder_proc *proc;
struct binder_device *binder_dev;
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
current->group_leader->pid, current->pid);
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
@@ -5128,7 +5135,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
* anyway print all contexts that a given PID has, so this
* is not a problem.
*/
proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
binder_debugfs_dir_entry_proc,
(void *)(unsigned long)proc->pid,
&binder_proc_fops);
@@ -5395,7 +5402,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
"%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
"%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
@@ -5420,7 +5427,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
}
if (buffer->target_node)
seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd data %p\n",
seq_printf(m, " size %zd:%zd data %pK\n",
buffer->data_size, buffer->offsets_size,
buffer->data);
}
@@ -5955,11 +5962,13 @@ static int __init init_binder_device(const char *name)
static int __init binder_init(void)
{
int ret;
char *device_name, *device_names;
char *device_name, *device_names, *device_tmp;
struct binder_device *device;
struct hlist_node *tmp;
binder_alloc_shrinker_init();
ret = binder_alloc_shrinker_init();
if (ret)
return ret;
atomic_set(&binder_transaction_log.cur, ~0U);
atomic_set(&binder_transaction_log_failed.cur, ~0U);
@@ -5971,27 +5980,27 @@ static int __init binder_init(void)
if (binder_debugfs_dir_entry_root) {
debugfs_create_file("state",
S_IRUGO,
0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_state_fops);
debugfs_create_file("stats",
S_IRUGO,
0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_stats_fops);
debugfs_create_file("transactions",
S_IRUGO,
0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_transactions_fops);
debugfs_create_file("transaction_log",
S_IRUGO,
0444,
binder_debugfs_dir_entry_root,
&binder_transaction_log,
&binder_transaction_log_fops);
debugfs_create_file("failed_transaction_log",
S_IRUGO,
0444,
binder_debugfs_dir_entry_root,
&binder_transaction_log_failed,
&binder_transaction_log_fops);
@@ -6008,7 +6017,8 @@ static int __init binder_init(void)
}
strcpy(device_names, binder_devices_param);
while ((device_name = strsep(&device_names, ","))) {
device_tmp = device_names;
while ((device_name = strsep(&device_tmp, ","))) {
ret = init_binder_device(device_name);
if (ret)
goto err_init_binder_device_failed;
@@ -6022,6 +6032,9 @@ err_init_binder_device_failed:
hlist_del(&device->hlist);
kfree(device);
}
kfree(device_names);
err_alloc_device_names_failed:
debugfs_remove_recursive(binder_debugfs_dir_entry_root);


@@ -217,7 +217,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
mm = alloc->vma_vm_mm;
if (mm) {
down_write(&mm->mmap_sem);
down_read(&mm->mmap_sem);
vma = alloc->vma;
}
@@ -286,7 +286,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
/* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
up_write(&mm->mmap_sem);
up_read(&mm->mmap_sem);
mmput(mm);
}
return 0;
@@ -319,17 +319,18 @@ err_page_ptr_cleared:
}
err_no_vma:
if (mm) {
up_write(&mm->mmap_sem);
up_read(&mm->mmap_sem);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;
}
struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async)
static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async)
{
struct rb_node *n = alloc->free_buffers.rb_node;
struct binder_buffer *buffer;
@@ -667,7 +668,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_already_mapped;
}
area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
if (area == NULL) {
ret = -ENOMEM;
failure_string = "get_vm_area";
@@ -1004,8 +1005,14 @@ void binder_alloc_init(struct binder_alloc *alloc)
INIT_LIST_HEAD(&alloc->buffers);
}
void binder_alloc_shrinker_init(void)
int binder_alloc_shrinker_init(void)
{
list_lru_init(&binder_alloc_lru);
register_shrinker(&binder_shrinker);
int ret = list_lru_init(&binder_alloc_lru);
if (ret == 0) {
ret = register_shrinker(&binder_shrinker);
if (ret)
list_lru_destroy(&binder_alloc_lru);
}
return ret;
}


@@ -129,7 +129,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t extra_buffers_size,
int is_async);
extern void binder_alloc_init(struct binder_alloc *alloc);
void binder_alloc_shrinker_init(void);
extern int binder_alloc_shrinker_init(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,


@@ -31,6 +31,9 @@ static const char * const backends[] = {
#endif
#if IS_ENABLED(CONFIG_CRYPTO_842)
"842",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
"zstd",
#endif
NULL
};


@@ -50,6 +50,11 @@ static const char *default_compressor = "lzo";
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
* Pages that compress to sizes equal to or greater than this are stored
* uncompressed in memory.
*/
static size_t huge_class_size;
static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
@@ -1188,6 +1193,8 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
return false;
}
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
return true;
}
@@ -1373,7 +1380,7 @@ compress_again:
return ret;
}
if (comp_len > max_zpage_size)
if (unlikely(comp_len >= huge_class_size))
comp_len = PAGE_SIZE;
/*
* handle allocation has 2 paths:


@@ -21,22 +21,6 @@
#include "zcomp.h"
/*-- Configurable parameters */
/*
* Pages that compress to size greater than this are stored
* uncompressed in memory.
*/
static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/*
* NOTE: max_zpage_size must be less than or equal to:
* ZS_MAX_ALLOC_SIZE. Otherwise, zs_malloc() would
* always return failure.
*/
/*-- End of configurable params */
#define SECTOR_SHIFT 9
#define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)


@@ -263,7 +263,7 @@
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha20.h>
#include <crypto/chacha.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
@@ -438,11 +438,10 @@ static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng(struct crng_state *crng,
__u8 out[CHACHA20_BLOCK_SIZE]);
#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
__u8 tmp[CHACHA20_BLOCK_SIZE], int used);
__u8 tmp[CHACHA_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static struct ratelimit_state unseeded_warning =
@@ -818,7 +817,7 @@ static int crng_fast_load(const char *cp, size_t len)
}
p = (unsigned char *) &primary_crng.state[4];
while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
cp++; crng_init_cnt++; len--;
}
if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
@@ -868,7 +867,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
unsigned long flags;
int i, num;
union {
__u8 block[CHACHA20_BLOCK_SIZE];
__u8 block[CHACHA_BLOCK_SIZE];
__u32 key[8];
} buf;
@@ -879,7 +878,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
} else {
_extract_crng(&primary_crng, buf.block);
_crng_backtrack_protect(&primary_crng, buf.block,
CHACHA20_KEY_SIZE);
CHACHA_KEY_SIZE);
}
spin_lock_irqsave(&crng->lock, flags);
for (i = 0; i < 8; i++) {
@@ -926,7 +925,7 @@ static inline void crng_wait_ready(void)
}
static void _extract_crng(struct crng_state *crng,
__u8 out[CHACHA20_BLOCK_SIZE])
__u8 out[CHACHA_BLOCK_SIZE])
{
unsigned long v, flags;
@@ -943,7 +942,7 @@ static void _extract_crng(struct crng_state *crng,
spin_unlock_irqrestore(&crng->lock, flags);
}
static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
{
struct crng_state *crng = NULL;
@@ -961,14 +960,14 @@ static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
* enough) to mutate the CRNG key to provide backtracking protection.
*/
static void _crng_backtrack_protect(struct crng_state *crng,
__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
__u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
unsigned long flags;
__u32 *s, *d;
int i;
used = round_up(used, sizeof(__u32));
if (used + CHACHA20_KEY_SIZE > CHACHA20_BLOCK_SIZE) {
if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
extract_crng(tmp);
used = 0;
}
@@ -980,7 +979,7 @@ static void _crng_backtrack_protect(struct crng_state *crng,
spin_unlock_irqrestore(&crng->lock, flags);
}
static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
struct crng_state *crng = NULL;
@@ -995,8 +994,8 @@ static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
__u8 tmp[CHACHA20_BLOCK_SIZE];
ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
int large_request = (nbytes > 256);
while (nbytes) {
@@ -1010,7 +1009,7 @@ static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
}
extract_crng(tmp);
i = min_t(int, nbytes, CHACHA20_BLOCK_SIZE);
i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
if (copy_to_user(buf, tmp, i)) {
ret = -EFAULT;
break;
@@ -1564,7 +1563,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
*/
void get_random_bytes(void *buf, int nbytes)
{
__u8 tmp[CHACHA20_BLOCK_SIZE];
__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
#if DEBUG_RANDOM_BOOT > 0
if (!crng_ready())
@@ -1573,10 +1572,10 @@ void get_random_bytes(void *buf, int nbytes)
#endif
trace_get_random_bytes(nbytes, _RET_IP_);
while (nbytes >= CHACHA20_BLOCK_SIZE) {
while (nbytes >= CHACHA_BLOCK_SIZE) {
extract_crng(buf);
buf += CHACHA20_BLOCK_SIZE;
nbytes -= CHACHA20_BLOCK_SIZE;
buf += CHACHA_BLOCK_SIZE;
nbytes -= CHACHA_BLOCK_SIZE;
}
if (nbytes > 0) {
@@ -1584,7 +1583,7 @@ void get_random_bytes(void *buf, int nbytes)
memcpy(buf, tmp, nbytes);
crng_backtrack_protect(tmp, nbytes);
} else
crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE);
crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
memzero_explicit(tmp, sizeof(tmp));
}
EXPORT_SYMBOL(get_random_bytes);
@@ -2110,8 +2109,8 @@ struct ctl_table random_table[] = {
struct batched_entropy {
union {
u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)];
u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
};
unsigned int position;
};
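
A note on the new __aligned(4) annotations on the on-stack tmp buffers: the ChaCha block function reads and writes its keystream as 32-bit words, and a plain u8 array only guarantees byte alignment, which can fault or be slow on architectures without cheap unaligned access.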


@@ -431,11 +431,13 @@ static int concurrent_time_text_seq_show(struct seq_file *m, void *v,
hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
atomic64_t *times = get_times(uid_entry->concurrent_times);
seq_put_decimal_ull(m, "", (u64)uid_entry->uid);
seq_putc(m, ':');
for (i = 0; i < num_possible_cpus; ++i) {
u64 time = cputime_to_clock_t(atomic64_read(&times[i]));
seq_put_decimal_ull(m, " ", time);
}
seq_putc(m, '\n');
@@ -535,6 +537,7 @@ static ssize_t uid_cpupower_enable_write(struct file *file,
void cpufreq_task_times_init(struct task_struct *p)
{
unsigned long flags;
spin_lock_irqsave(&task_time_in_state_lock, flags);
p->time_in_state = NULL;
spin_unlock_irqrestore(&task_time_in_state_lock, flags);
@@ -722,7 +725,7 @@ void cpufreq_acct_update_power(struct task_struct *p, cputime_t cputime)
uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
int cpu = 0;
if (!freqs || p->flags & PF_EXITING)
if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
return;
state = freqs->offset + READ_ONCE(freqs->last_index);


@@ -4,6 +4,7 @@
config ARM_CPUIDLE
bool "Generic ARM/ARM64 CPU idle Driver"
select DT_IDLE_STATES
select CPU_IDLE_MULTIPLE_DRIVERS
help
Select this to enable generic cpuidle driver for ARM.
It provides a generic idle driver whose idle states are configured


@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <asm/cpuidle.h>
@@ -44,7 +45,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, idx);
}
static struct cpuidle_driver arm_idle_driver = {
static struct cpuidle_driver arm_idle_driver __initdata = {
.name = "arm_idle",
.owner = THIS_MODULE,
/*
@@ -80,30 +81,42 @@ static const struct of_device_id arm_idle_state_match[] __initconst = {
static int __init arm_idle_init(void)
{
int cpu, ret;
struct cpuidle_driver *drv = &arm_idle_driver;
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
/*
* Initialize idle states data, starting at index 1.
* This driver is DT only, if no DT idle states are detected (ret == 0)
* let the driver initialization fail accordingly since there is no
* reason to initialize the idle driver if only wfi is supported.
*/
ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
if (ret <= 0)
return ret ? : -ENODEV;
ret = cpuidle_register_driver(drv);
if (ret) {
pr_err("Failed to register cpuidle driver\n");
return ret;
}
/*
* Call arch CPU operations in order to initialize
* idle states suspend back-end specific data
*/
for_each_possible_cpu(cpu) {
drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
if (!drv) {
ret = -ENOMEM;
goto out_fail;
}
drv->cpumask = (struct cpumask *)cpumask_of(cpu);
/*
* Initialize idle states data, starting at index 1. This
* driver is DT only, if no DT idle states are detected (ret
* == 0) let the driver initialization fail accordingly since
* there is no reason to initialize the idle driver if only
* wfi is supported.
*/
ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
if (ret <= 0) {
ret = ret ? : -ENODEV;
goto out_kfree_drv;
}
ret = cpuidle_register_driver(drv);
if (ret) {
pr_err("Failed to register cpuidle driver\n");
goto out_kfree_drv;
}
/*
* Call arch CPU operations in order to initialize
* idle states suspend back-end specific data
*/
ret = arm_cpuidle_init(cpu);
/*
@@ -115,14 +128,14 @@ static int __init arm_idle_init(void)
if (ret) {
pr_err("CPU %d failed to init idle CPU ops\n", cpu);
goto out_fail;
goto out_unregister_drv;
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
pr_err("Failed to allocate cpuidle device\n");
ret = -ENOMEM;
goto out_fail;
goto out_unregister_drv;
}
dev->cpu = cpu;
@@ -130,21 +143,28 @@ static int __init arm_idle_init(void)
if (ret) {
pr_err("Failed to register cpuidle device for CPU %d\n",
cpu);
kfree(dev);
goto out_fail;
goto out_kfree_dev;
}
}
return 0;
out_kfree_dev:
kfree(dev);
out_unregister_drv:
cpuidle_unregister_driver(drv);
out_kfree_drv:
kfree(drv);
out_fail:
while (--cpu >= 0) {
dev = per_cpu(cpuidle_devices, cpu);
drv = cpuidle_get_cpu_driver(dev);
cpuidle_unregister_device(dev);
cpuidle_unregister_driver(drv);
kfree(dev);
kfree(drv);
}
cpuidle_unregister_driver(drv);
return ret;
}
device_initcall(arm_idle_init);
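
Registering a kmemdup()'d driver per CPU, each with a cpumask covering just that CPU (and selecting CPU_IDLE_MULTIPLE_DRIVERS to allow it), lets heterogeneous systems such as big.LITTLE describe different DT idle states per core instead of forcing one state table onto every CPU; the error path unwinds registration in exact reverse order.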


@@ -305,7 +305,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
poll_wait(file, &sync_file->wq, wait);
if (!test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
if (list_empty(&sync_file->cb.node) &&
!test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
if (fence_add_callback(sync_file->fence, &sync_file->cb,
fence_check_cb_func) < 0)
wake_up_all(&sync_file->wq);


@@ -536,16 +536,27 @@ config DM_LOG_WRITES
If unsure, say N.
config DM_VERITY_AVB
tristate "Support AVB specific verity error behavior"
depends on DM_VERITY
---help---
Enables Android Verified Boot platform-specific error
behavior. In particular, it will modify the vbmeta partition
specified on the kernel command-line when a non-transient error
occurs (followed by a panic).
If unsure, say N.
config DM_ANDROID_VERITY
bool "Android verity target support"
depends on BLK_DEV_DM=y
depends on DM_VERITY=y
depends on X509_CERTIFICATE_PARSER
depends on SYSTEM_TRUSTED_KEYRING
depends on PUBLIC_KEY_ALGO_RSA
depends on CRYPTO_RSA
depends on KEYS
depends on ASYMMETRIC_KEY_TYPE
depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
depends on MD_LINEAR=y
---help---
This device-mapper target is virtually a VERITY target. This
target is setup by reading the metadata contents piggybacked
@@ -553,4 +564,24 @@ config DM_ANDROID_VERITY
of the metadata contents are verified against the key included
in the system keyring. Upon success, the underlying verity
target is setup.
config DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED
bool "Verity will validate blocks at most once"
depends on DM_VERITY
---help---
Enables the at_most_once option for dm-verity by default.
Verify data blocks only the first time they are read from the
data device, rather than every time. This reduces the overhead
of dm-verity so that it can be used on systems that are memory
and/or CPU constrained. However, it provides a reduced level
of security because only offline tampering of the data device's
content will be detected, not online tampering.
Hash blocks are still verified each time they are read from the
hash device, since verification of hash blocks is less performance
critical than data blocks, and a hash block will not be verified
any more after all the data blocks it covers have been verified anyway.
If unsure, say N.
endif # MD


@@ -70,3 +70,7 @@ endif
ifeq ($(CONFIG_DM_VERITY_FEC),y)
dm-verity-objs += dm-verity-fec.o
endif
ifeq ($(CONFIG_DM_VERITY_AVB),y)
dm-verity-objs += dm-verity-avb.o
endif


@@ -33,6 +33,7 @@
#include <asm/setup.h>
#include <crypto/hash.h>
#include <crypto/hash_info.h>
#include <crypto/public_key.h>
#include <crypto/sha.h>
#include <keys/asymmetric-type.h>
@@ -122,75 +123,6 @@ static inline bool is_unlocked(void)
return !strncmp(verifiedbootstate, unlocked, sizeof(unlocked));
}
static int table_extract_mpi_array(struct public_key_signature *pks,
const void *data, size_t len)
{
MPI mpi = mpi_read_raw_data(data, len);
if (!mpi) {
DMERR("Error while allocating mpi array");
return -ENOMEM;
}
pks->mpi[0] = mpi;
pks->nr_mpi = 1;
return 0;
}
static struct public_key_signature *table_make_digest(
enum hash_algo hash,
const void *table,
unsigned long table_len)
{
struct public_key_signature *pks = NULL;
struct crypto_shash *tfm;
struct shash_desc *desc;
size_t digest_size, desc_size;
int ret;
/* Allocate the hashing algorithm we're going to need and find out how
* big the hash operational data will be.
*/
tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
digest_size = crypto_shash_digestsize(tfm);
/* We allocate the hash operational data storage on the end of out
* context data and the digest output buffer on the end of that.
*/
ret = -ENOMEM;
pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL);
if (!pks)
goto error;
pks->pkey_hash_algo = hash;
pks->digest = (u8 *)pks + sizeof(*pks) + desc_size;
pks->digest_size = digest_size;
desc = (struct shash_desc *)(pks + 1);
desc->tfm = tfm;
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_init(desc);
if (ret < 0)
goto error;
ret = crypto_shash_finup(desc, table, table_len, pks->digest);
if (ret < 0)
goto error;
crypto_free_shash(tfm);
return pks;
error:
kfree(pks);
crypto_free_shash(tfm);
return ERR_PTR(ret);
}
static int read_block_dev(struct bio_read *payload, struct block_device *bdev,
sector_t offset, int length)
{
@@ -207,6 +139,7 @@ static int read_block_dev(struct bio_read *payload, struct block_device *bdev,
bio->bi_bdev = bdev;
bio->bi_iter.bi_sector = offset;
bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
payload->page_io = kzalloc(sizeof(struct page *) *
payload->number_of_pages, GFP_KERNEL);
@@ -230,7 +163,7 @@ static int read_block_dev(struct bio_read *payload, struct block_device *bdev,
}
}
if (!submit_bio_wait(READ, bio))
if (!submit_bio_wait(bio))
/* success */
goto free_bio;
DMERR("bio read failed");
@@ -567,51 +500,6 @@ static int verity_mode(void)
return DM_VERITY_MODE_EIO;
}
static int verify_verity_signature(char *key_id,
struct android_metadata *metadata)
{
key_ref_t key_ref;
struct key *key;
struct public_key_signature *pks = NULL;
int retval = -EINVAL;
key_ref = keyring_search(make_key_ref(system_trusted_keyring, 1),
&key_type_asymmetric, key_id);
if (IS_ERR(key_ref)) {
DMERR("keyring: key not found");
return -ENOKEY;
}
key = key_ref_to_ptr(key_ref);
pks = table_make_digest(HASH_ALGO_SHA256,
(const void *)metadata->verity_table,
le32_to_cpu(metadata->header->table_length));
if (IS_ERR(pks)) {
DMERR("hashing failed");
retval = PTR_ERR(pks);
pks = NULL;
goto error;
}
retval = table_extract_mpi_array(pks, &metadata->header->signature[0],
RSANUMBYTES);
if (retval < 0) {
DMERR("Error extracting mpi %d", retval);
goto error;
}
retval = verify_signature(key, pks);
mpi_free(pks->rsa.s);
error:
kfree(pks);
key_put(key);
return retval;
}
static void handle_error(void)
{
int mode = verity_mode();
@@ -623,6 +511,95 @@ static void handle_error(void)
}
}
static struct public_key_signature *table_make_digest(
enum hash_algo hash,
const void *table,
unsigned long table_len)
{
struct public_key_signature *pks = NULL;
struct crypto_shash *tfm;
struct shash_desc *desc;
size_t digest_size, desc_size;
int ret;
/* Allocate the hashing algorithm we're going to need and find out how
* big the hash operational data will be.
*/
tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
digest_size = crypto_shash_digestsize(tfm);
* We allocate the hash operational data storage on the end of our
* context data and the digest output buffer on the end of that.
*/
ret = -ENOMEM;
pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL);
if (!pks)
goto error;
pks->pkey_algo = "rsa";
pks->hash_algo = hash_algo_name[hash];
pks->digest = (u8 *)pks + sizeof(*pks) + desc_size;
pks->digest_size = digest_size;
desc = (struct shash_desc *)(pks + 1);
desc->tfm = tfm;
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_init(desc);
if (ret < 0)
goto error;
ret = crypto_shash_finup(desc, table, table_len, pks->digest);
if (ret < 0)
goto error;
crypto_free_shash(tfm);
return pks;
error:
kfree(pks);
crypto_free_shash(tfm);
return ERR_PTR(ret);
}
static int verify_verity_signature(char *key_id,
struct android_metadata *metadata)
{
struct public_key_signature *pks = NULL;
int retval = -EINVAL;
if (!key_id)
goto error;
pks = table_make_digest(HASH_ALGO_SHA256,
(const void *)metadata->verity_table,
le32_to_cpu(metadata->header->table_length));
if (IS_ERR(pks)) {
DMERR("hashing failed");
retval = PTR_ERR(pks);
pks = NULL;
goto error;
}
pks->s = kmemdup(&metadata->header->signature[0], RSANUMBYTES, GFP_KERNEL);
if (!pks->s) {
DMERR("Error allocating memory for signature");
goto error;
}
pks->s_size = RSANUMBYTES;
retval = verify_signature_one(pks, NULL, key_id);
kfree(pks->s);
error:
kfree(pks);
return retval;
}
static inline bool test_mult_overflow(sector_t a, u32 b)
{
sector_t r = (sector_t)~0ULL;
@@ -694,8 +671,8 @@ static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
dev_t uninitialized_var(dev);
struct android_metadata *metadata = NULL;
int err = 0, i, mode;
char *key_id, *table_ptr, dummy, *target_device,
*verity_table_args[VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS];
char *key_id = NULL, *table_ptr, dummy, *target_device;
char *verity_table_args[VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS];
/* One for specifying number of opt args and one for mode */
sector_t data_sectors;
u32 data_block_size;
@@ -714,16 +691,16 @@ static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
handle_error();
return -EINVAL;
}
} else if (argc == 2)
key_id = argv[1];
else {
target_device = argv[0];
} else if (argc == 2) {
key_id = argv[0];
target_device = argv[1];
} else {
DMERR("Incorrect number of arguments");
handle_error();
return -EINVAL;
}
target_device = argv[0];
dev = name_to_dev_t(target_device);
if (!dev) {
DMERR("no dev found for %s", target_device);
@@ -877,12 +854,11 @@ static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
err = verity_ctr(ti, no_of_args, verity_table_args);
if (err)
DMERR("android-verity failed to mount as verity target");
else {
if (err) {
DMERR("android-verity failed to create a verity target");
} else {
target_added = true;
DMINFO("android-verity mounted as verity target");
DMINFO("android-verity created as verity target");
}
free_metadata:


@@ -115,6 +115,10 @@ struct iv_tcw_private {
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
enum cipher_flags {
CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */
};
/*
* The fields in here must be read only after initialization.
*/
@@ -150,11 +154,14 @@ struct crypt_config {
} iv_gen_private;
sector_t iv_offset;
unsigned int iv_size;
unsigned short int sector_size;
unsigned char sector_shift;
/* ESSIV: struct crypto_cipher *essiv_tfm */
void *iv_private;
struct crypto_skcipher **tfms;
unsigned tfms_count;
unsigned long cipher_flags;
/*
* Layout of each crypto request:
@@ -484,6 +491,11 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
{
struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
if (cc->sector_size != (1 << SECTOR_SHIFT)) {
ti->error = "Unsupported sector size for LMK";
return -EINVAL;
}
lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
if (IS_ERR(lmk->hash_tfm)) {
ti->error = "Error initializing LMK hash";
@@ -633,6 +645,11 @@ static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
{
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
if (cc->sector_size != (1 << SECTOR_SHIFT)) {
ti->error = "Unsupported sector size for TCW";
return -EINVAL;
}
if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
ti->error = "Wrong key size for TCW";
return -EINVAL;
@@ -846,21 +863,27 @@ static int crypt_convert_block(struct crypt_config *cc,
u8 *iv;
int r;
/* Reject unexpected unaligned bio. */
if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
return -EIO;
dmreq = dmreq_of_req(cc, req);
iv = iv_of_dmreq(cc, dmreq);
dmreq->iv_sector = ctx->cc_sector;
if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
dmreq->iv_sector >>= cc->sector_shift;
dmreq->ctx = ctx;
sg_init_table(&dmreq->sg_in, 1);
sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
sg_set_page(&dmreq->sg_in, bv_in.bv_page, cc->sector_size,
bv_in.bv_offset);
sg_init_table(&dmreq->sg_out, 1);
sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
sg_set_page(&dmreq->sg_out, bv_out.bv_page, cc->sector_size,
bv_out.bv_offset);
bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
if (cc->iv_gen_ops) {
r = cc->iv_gen_ops->generator(cc, iv, dmreq);
@@ -869,7 +892,7 @@ static int crypt_convert_block(struct crypt_config *cc,
}
skcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
1 << SECTOR_SHIFT, iv);
cc->sector_size, iv);
if (bio_data_dir(ctx->bio_in) == WRITE)
r = crypto_skcipher_encrypt(req);
@@ -919,6 +942,7 @@ static void crypt_free_req(struct crypt_config *cc,
static int crypt_convert(struct crypt_config *cc,
struct convert_context *ctx)
{
unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
int r;
atomic_set(&ctx->cc_pending, 1);
@@ -926,7 +950,6 @@ static int crypt_convert(struct crypt_config *cc,
while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
crypt_alloc_req(cc, ctx);
atomic_inc(&ctx->cc_pending);
r = crypt_convert_block(cc, ctx, ctx->req);
@@ -946,14 +969,14 @@ static int crypt_convert(struct crypt_config *cc,
*/
case -EINPROGRESS:
ctx->req = NULL;
ctx->cc_sector++;
ctx->cc_sector += sector_step;
continue;
/*
* The request was already processed (synchronously).
*/
case 0:
atomic_dec(&ctx->cc_pending);
ctx->cc_sector++;
ctx->cc_sector += sector_step;
cond_resched();
continue;
@@ -1468,6 +1491,13 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
}
}
/*
* dm-crypt performance can vary greatly depending on which crypto
* algorithm implementation is used. Help people debug performance
* problems by logging the ->cra_driver_name.
*/
DMINFO("%s using implementation \"%s\"", ciphermode,
crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
return 0;
}
@@ -1743,7 +1773,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
char dummy;
static struct dm_arg _args[] = {
{0, 3, "Invalid number of feature args"},
{0, 5, "Invalid number of feature args"},
};
if (argc < 5) {
@@ -1759,6 +1789,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -ENOMEM;
}
cc->key_size = key_size;
cc->sector_size = (1 << SECTOR_SHIFT);
cc->sector_shift = 0;
ti->private = cc;
ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
@@ -1810,7 +1842,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
mutex_init(&cc->bio_alloc_lock);
ret = -EINVAL;
if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
(tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
ti->error = "Invalid iv_offset sector";
goto bad;
}
@@ -1858,6 +1891,21 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
else if (sscanf(opt_string, "sector_size:%hu%c",
&cc->sector_size, &dummy) == 1) {
if (cc->sector_size < (1 << SECTOR_SHIFT) ||
cc->sector_size > 4096 ||
(cc->sector_size & (cc->sector_size - 1))) {
ti->error = "Invalid feature value for sector_size";
goto bad;
}
if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
ti->error = "Device size is not multiple of sector_size feature";
goto bad;
}
cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
} else if (!strcasecmp(opt_string, "iv_large_sectors"))
set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
else {
ti->error = "Invalid feature arguments";
goto bad;
@@ -1938,6 +1986,16 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
bio_data_dir(bio) == WRITE)
dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
/*
* Ensure that bio is a multiple of internal sector encryption size
* and is aligned to this size as defined in IO hints.
*/
if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
return -EIO;
if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
return -EIO;
io = dm_per_bio_data(bio, cc->per_bio_data_size);
crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
io->ctx.req = (struct skcipher_request *)(io + 1);
@@ -1978,6 +2036,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
num_feature_args += !!ti->num_discard_bios;
num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
if (num_feature_args) {
DMEMIT(" %d", num_feature_args);
if (ti->num_discard_bios)
@@ -1986,6 +2046,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
DMEMIT(" same_cpu_crypt");
if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
DMEMIT(" submit_from_crypt_cpus");
if (cc->sector_size != (1 << SECTOR_SHIFT))
DMEMIT(" sector_size:%d", cc->sector_size);
if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
DMEMIT(" iv_large_sectors");
}
break;
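
To make the new shift arithmetic concrete, a small userspace sketch (not kernel code; __builtin_ctz() stands in for the kernel's __ffs(), which is equivalent on a power of two):

#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte units, as in the kernel */

static unsigned int sector_shift(unsigned int sector_size)
{
	/* mirrors: cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT */
	return __builtin_ctz(sector_size) - SECTOR_SHIFT;
}

int main(void)
{
	unsigned int shift = sector_shift(4096);	/* 12 - 9 = 3 */
	unsigned long long sector512 = 8;	/* second 4096-byte sector */

	/* plain64 without iv_large_sectors sees sector 8; with the flag
	 * the IV sector is shifted down: 8 >> 3 = 1, matching the
	 * Documentation example earlier in this commit. */
	printf("shift=%u, iv_sector=%llu\n", shift, sector512 >> shift);
	return 0;
}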
@@ -2068,6 +2132,8 @@ static int crypt_iterate_devices(struct dm_target *ti,
static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct crypt_config *cc = ti->private;
/*
* Unfortunate constraint that is required to avoid the potential
* for exceeding underlying device's max_segments limits -- due to
@@ -2075,11 +2141,17 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
* bio that are not as physically contiguous as the original bio.
*/
limits->max_segment_size = PAGE_SIZE;
limits->logical_block_size =
max_t(unsigned short, limits->logical_block_size, cc->sector_size);
limits->physical_block_size =
max_t(unsigned, limits->physical_block_size, cc->sector_size);
limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
}
static struct target_type crypt_target = {
.name = "crypt",
.version = {1, 14, 1},
.version = {1, 17, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,

drivers/md/dm-verity-avb.c (new file, 229 lines)

@@ -0,0 +1,229 @@
/*
* Copyright (C) 2017 Google.
*
* This file is released under the GPLv2.
*
* Based on drivers/md/dm-verity-chromeos.c
*/
#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/mount.h>
#define DM_MSG_PREFIX "verity-avb"
/* Set via module parameters. */
static char avb_vbmeta_device[64];
static char avb_invalidate_on_error[4];
static void invalidate_vbmeta_endio(struct bio *bio)
{
if (bio->bi_error)
DMERR("invalidate_vbmeta_endio: error %d", bio->bi_error);
complete(bio->bi_private);
}
static int invalidate_vbmeta_submit(struct bio *bio,
struct block_device *bdev,
int op, int access_last_sector,
struct page *page)
{
DECLARE_COMPLETION_ONSTACK(wait);
bio->bi_private = &wait;
bio->bi_end_io = invalidate_vbmeta_endio;
bio->bi_bdev = bdev;
bio_set_op_attrs(bio, op, REQ_SYNC | REQ_NOIDLE);
bio->bi_iter.bi_sector = 0;
if (access_last_sector) {
sector_t last_sector;
last_sector = (i_size_read(bdev->bd_inode)>>SECTOR_SHIFT) - 1;
bio->bi_iter.bi_sector = last_sector;
}
if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
DMERR("invalidate_vbmeta_submit: bio_add_page error");
return -EIO;
}
submit_bio(bio);
/* Wait up to 2 seconds for completion or fail. */
if (!wait_for_completion_timeout(&wait, msecs_to_jiffies(2000)))
return -EIO;
return 0;
}
static int invalidate_vbmeta(dev_t vbmeta_devt)
{
int ret = 0;
struct block_device *bdev;
struct bio *bio;
struct page *page;
fmode_t dev_mode;
/* Ensure we do synchronous unblocked I/O. We may also need
* sync_bdev() on completion, but it really shouldn't be needed.
*/
int access_last_sector = 0;
DMINFO("invalidate_vbmeta: acting on device %d:%d",
MAJOR(vbmeta_devt), MINOR(vbmeta_devt));
/* First we open the device for reading. */
dev_mode = FMODE_READ | FMODE_EXCL;
bdev = blkdev_get_by_dev(vbmeta_devt, dev_mode,
invalidate_vbmeta);
if (IS_ERR(bdev)) {
DMERR("invalidate_kernel: could not open device for reading");
dev_mode = 0;
ret = -ENOENT;
goto failed_to_read;
}
bio = bio_alloc(GFP_NOIO, 1);
if (!bio) {
ret = -ENOMEM;
goto failed_bio_alloc;
}
page = alloc_page(GFP_NOIO);
if (!page) {
ret = -ENOMEM;
goto failed_to_alloc_page;
}
access_last_sector = 0;
ret = invalidate_vbmeta_submit(bio, bdev, REQ_OP_READ,
access_last_sector, page);
if (ret) {
DMERR("invalidate_vbmeta: error reading");
goto failed_to_submit_read;
}
/* We have a page. Let's make sure it looks right. */
if (memcmp("AVB0", page_address(page), 4) == 0) {
/* Stamp it. */
memcpy(page_address(page), "AVE0", 4);
DMINFO("invalidate_vbmeta: found vbmeta partition");
} else {
/* This could instead be an AVB footer; check. Also, since the
* AVB footer is in the last 64 bytes, adjust for the fact that
* we're dealing with 512-byte sectors.
*/
size_t offset = (1<<SECTOR_SHIFT) - 64;
access_last_sector = 1;
ret = invalidate_vbmeta_submit(bio, bdev, REQ_OP_READ,
access_last_sector, page);
if (ret) {
DMERR("invalidate_vbmeta: error reading");
goto failed_to_submit_read;
}
if (memcmp("AVBf", page_address(page) + offset, 4) != 0) {
DMERR("invalidate_vbmeta on non-vbmeta partition");
ret = -EINVAL;
goto invalid_header;
}
/* Stamp it. */
memcpy(page_address(page) + offset, "AVE0", 4);
DMINFO("invalidate_vbmeta: found vbmeta footer partition");
}
/* Now rewrite the changed page: the block dev was opened for
* reading, so reopen it for writing.
*/
blkdev_put(bdev, dev_mode);
dev_mode = FMODE_WRITE | FMODE_EXCL;
bdev = blkdev_get_by_dev(vbmeta_devt, dev_mode,
invalidate_vbmeta);
if (IS_ERR(bdev)) {
DMERR("invalidate_vbmeta: could not open device for writing");
dev_mode = 0;
ret = -ENOENT;
goto failed_to_write;
}
/* We re-use the same bio to do the write after the read. Need to reset
* it to initialize bio->bi_remaining.
*/
bio_reset(bio);
ret = invalidate_vbmeta_submit(bio, bdev, REQ_OP_WRITE,
access_last_sector, page);
if (ret) {
DMERR("invalidate_vbmeta: error writing");
goto failed_to_submit_write;
}
DMERR("invalidate_vbmeta: completed.");
ret = 0;
failed_to_submit_write:
failed_to_write:
invalid_header:
__free_page(page);
failed_to_submit_read:
/* Technically, we'll leak a page with the pending bio, but
* we're about to reboot anyway.
*/
failed_to_alloc_page:
bio_put(bio);
failed_bio_alloc:
if (dev_mode)
blkdev_put(bdev, dev_mode);
failed_to_read:
return ret;
}
void dm_verity_avb_error_handler(void)
{
dev_t dev;
DMINFO("AVB error handler called for %s", avb_vbmeta_device);
if (strcmp(avb_invalidate_on_error, "yes") != 0) {
DMINFO("Not configured to invalidate");
return;
}
if (avb_vbmeta_device[0] == '\0') {
DMERR("avb_vbmeta_device parameter not set");
goto fail_no_dev;
}
dev = name_to_dev_t(avb_vbmeta_device);
if (!dev) {
DMERR("No matching partition for device: %s",
avb_vbmeta_device);
goto fail_no_dev;
}
invalidate_vbmeta(dev);
fail_no_dev:
;
}
static int __init dm_verity_avb_init(void)
{
DMINFO("AVB error handler initialized with vbmeta device: %s",
avb_vbmeta_device);
return 0;
}
static void __exit dm_verity_avb_exit(void)
{
}
module_init(dm_verity_avb_init);
module_exit(dm_verity_avb_exit);
MODULE_AUTHOR("David Zeuthen <zeuthen@google.com>");
MODULE_DESCRIPTION("AVB-specific error handler for dm-verity");
MODULE_LICENSE("GPL");
/* Declare parameter with no module prefix */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "androidboot.vbmeta."
module_param_string(device, avb_vbmeta_device, sizeof(avb_vbmeta_device), 0);
module_param_string(invalidate_on_error, avb_invalidate_on_error,
sizeof(avb_invalidate_on_error), 0);
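
Because this code is built in and MODULE_PARAM_PREFIX is overridden, the bootloader supplies both values on the kernel command line, e.g. androidboot.vbmeta.device=PARTUUID=<uuid> androidboot.vbmeta.invalidate_on_error=yes (the PARTUUID value is illustrative; any name that name_to_dev_t() can resolve works).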


@@ -235,8 +235,12 @@ out:
if (v->mode == DM_VERITY_MODE_LOGGING)
return 0;
if (v->mode == DM_VERITY_MODE_RESTART)
if (v->mode == DM_VERITY_MODE_RESTART) {
#ifdef CONFIG_DM_VERITY_AVB
dm_verity_avb_error_handler();
#endif
kernel_restart("dm-verity device corrupted");
}
return 1;
}
@@ -991,6 +995,15 @@ int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
v->tfm = NULL;
goto bad;
}
/*
* dm-verity performance can vary greatly depending on which hash
* algorithm implementation is used. Help people debug performance
* problems by logging the ->cra_driver_name.
*/
DMINFO("%s using implementation \"%s\"", v->alg_name,
crypto_shash_alg(v->tfm)->base.cra_driver_name);
v->digest_size = crypto_shash_digestsize(v->tfm);
if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
ti->error = "Digest size too big";
@@ -1042,6 +1055,14 @@ int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
#ifdef CONFIG_DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED
if (!v->validated_blocks) {
r = verity_alloc_most_once(v);
if (r)
goto bad;
}
#endif
v->hash_per_block_bits =
__fls((1 << v->hash_dev_block_bits) / v->digest_size);


@@ -137,4 +137,5 @@ extern void verity_io_hints(struct dm_target *ti, struct queue_limits *limits);
extern void verity_dtr(struct dm_target *ti);
extern int verity_ctr(struct dm_target *ti, unsigned argc, char **argv);
extern int verity_map(struct dm_target *ti, struct bio *bio);
extern void dm_verity_avb_error_handler(void);
#endif /* DM_VERITY_H */


@@ -23,6 +23,7 @@ void lkdtm_ATOMIC_UNDERFLOW(void);
void lkdtm_ATOMIC_OVERFLOW(void);
void lkdtm_CORRUPT_LIST_ADD(void);
void lkdtm_CORRUPT_LIST_DEL(void);
void lkdtm_CORRUPT_USER_DS(void);
/* lkdtm_heap.c */
void lkdtm_OVERWRITE_ALLOCATION(void);


@@ -7,6 +7,7 @@
#include "lkdtm.h"
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
struct lkdtm_list {
struct list_head node;
@@ -220,3 +221,12 @@ void lkdtm_CORRUPT_LIST_DEL(void)
else
pr_err("list_del() corruption not detected!\n");
}
void lkdtm_CORRUPT_USER_DS(void)
{
pr_info("setting bad task size limit\n");
set_fs(KERNEL_DS);
/* Make sure we do not keep running with a KERNEL_DS! */
force_sig(SIGKILL, current);
}


@@ -199,6 +199,7 @@ struct crashtype crashtypes[] = {
CRASHTYPE(OVERFLOW),
CRASHTYPE(CORRUPT_LIST_ADD),
CRASHTYPE(CORRUPT_LIST_DEL),
CRASHTYPE(CORRUPT_USER_DS),
CRASHTYPE(CORRUPT_STACK),
CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
CRASHTYPE(OVERWRITE_ALLOCATION),


@@ -130,7 +130,7 @@ static void get_full_task_comm(struct task_entry *task_entry,
struct mm_struct *mm = task->mm;
/* fill the first TASK_COMM_LEN bytes with thread name */
get_task_comm(task_entry->comm, task);
__get_task_comm(task_entry->comm, TASK_COMM_LEN, task);
i = strlen(task_entry->comm);
while (i < TASK_COMM_LEN)
task_entry->comm[i++] = ' ';


@@ -100,6 +100,13 @@ config USB_NET_RNDIS_WLAN
If you choose to build a module, it'll be called rndis_wlan.
config VIRT_WIFI
tristate "Wifi wrapper for ethernet drivers"
depends on CFG80211
---help---
This option adds support for ethernet connections to appear as if they
are wifi connections through a special rtnetlink device.
config WCNSS_MEM_PRE_ALLOC
tristate "WCNSS pre-alloc memory support"
---help---


@@ -26,6 +26,7 @@ obj-$(CONFIG_USB_NET_RNDIS_WLAN) += rndis_wlan.o
obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
obj-$(CONFIG_VIRT_WIFI) += virt_wifi.o
obj-$(CONFIG_CNSS2) += cnss2/
obj-$(CONFIG_CNSS) += cnss/
obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/


@@ -2776,7 +2776,6 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
struct brcmf_bss_info_le *bi)
{
struct wiphy *wiphy = cfg_to_wiphy(cfg);
struct ieee80211_channel *notify_channel;
struct cfg80211_bss *bss;
struct ieee80211_supported_band *band;
struct brcmu_chan ch;
@@ -2786,7 +2785,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
u16 notify_interval;
u8 *notify_ie;
size_t notify_ielen;
s32 notify_signal;
struct cfg80211_inform_bss bss_data = {};
if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) {
brcmf_err("Bss info is larger than buffer. Discarding\n");
@@ -2806,27 +2805,28 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
band = wiphy->bands[NL80211_BAND_5GHZ];
freq = ieee80211_channel_to_frequency(channel, band->band);
notify_channel = ieee80211_get_channel(wiphy, freq);
bss_data.chan = ieee80211_get_channel(wiphy, freq);
bss_data.scan_width = NL80211_BSS_CHAN_WIDTH_20;
bss_data.boottime_ns = ktime_to_ns(ktime_get_boottime());
notify_capability = le16_to_cpu(bi->capability);
notify_interval = le16_to_cpu(bi->beacon_period);
notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset);
notify_ielen = le32_to_cpu(bi->ie_length);
notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
bss_data.signal = (s16)le16_to_cpu(bi->RSSI) * 100;
brcmf_dbg(CONN, "bssid: %pM\n", bi->BSSID);
brcmf_dbg(CONN, "Channel: %d(%d)\n", channel, freq);
brcmf_dbg(CONN, "Capability: %X\n", notify_capability);
brcmf_dbg(CONN, "Beacon interval: %d\n", notify_interval);
brcmf_dbg(CONN, "Signal: %d\n", notify_signal);
brcmf_dbg(CONN, "Signal: %d\n", bss_data.signal);
bss = cfg80211_inform_bss(wiphy, notify_channel,
CFG80211_BSS_FTYPE_UNKNOWN,
(const u8 *)bi->BSSID,
0, notify_capability,
notify_interval, notify_ie,
notify_ielen, notify_signal,
GFP_KERNEL);
bss = cfg80211_inform_bss_data(wiphy, &bss_data,
CFG80211_BSS_FTYPE_UNKNOWN,
(const u8 *)bi->BSSID,
0, notify_capability,
notify_interval, notify_ie,
notify_ielen, GFP_KERNEL);
if (!bss)
return -ENOMEM;

View File

@@ -0,0 +1,631 @@
// SPDX-License-Identifier: GPL-2.0
/* drivers/net/wireless/virt_wifi.c
*
* A fake implementation of cfg80211_ops that can be tacked on to an ethernet
* net_device to make it appear as a wireless connection.
*
* Copyright (C) 2018 Google, Inc.
*
* Author: schuffelen@google.com
*/
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
static struct wiphy *common_wiphy;
struct virt_wifi_wiphy_priv {
struct delayed_work scan_result;
struct cfg80211_scan_request *scan_request;
bool being_deleted;
};
static struct ieee80211_channel channel_2ghz = {
.band = NL80211_BAND_2GHZ,
.center_freq = 2432,
.hw_value = 2432,
.max_power = 20,
};
static struct ieee80211_rate bitrates_2ghz[] = {
{ .bitrate = 10 },
{ .bitrate = 20 },
{ .bitrate = 55 },
{ .bitrate = 110 },
{ .bitrate = 60 },
{ .bitrate = 120 },
{ .bitrate = 240 },
};
static struct ieee80211_supported_band band_2ghz = {
.channels = &channel_2ghz,
.bitrates = bitrates_2ghz,
.band = NL80211_BAND_2GHZ,
.n_channels = 1,
.n_bitrates = ARRAY_SIZE(bitrates_2ghz),
.ht_cap = {
.ht_supported = true,
.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_DSSSCCK40,
.ampdu_factor = 0x3,
.ampdu_density = 0x6,
.mcs = {
.rx_mask = {0xff, 0xff},
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
},
},
};
static struct ieee80211_channel channel_5ghz = {
.band = NL80211_BAND_5GHZ,
.center_freq = 5240,
.hw_value = 5240,
.max_power = 20,
};
static struct ieee80211_rate bitrates_5ghz[] = {
{ .bitrate = 60 },
{ .bitrate = 120 },
{ .bitrate = 240 },
};
#define RX_MCS_MAP (IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | \
IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | \
IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | \
IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 | \
IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 | \
IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 | \
IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 | \
IEEE80211_VHT_MCS_SUPPORT_0_9 << 14)
#define TX_MCS_MAP (IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | \
IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | \
IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | \
IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 | \
IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 | \
IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 | \
IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 | \
IEEE80211_VHT_MCS_SUPPORT_0_9 << 14)
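
For reference, IEEE80211_VHT_MCS_SUPPORT_0_9 is a 2-bit field value (2 in the mainline ieee80211.h enum, inlined below as an assumption), packed once per spatial stream, so both maps work out to 0xaaaa. A standalone sanity check:

/* Standalone check of the MCS map packing above. The value 2 for
 * IEEE80211_VHT_MCS_SUPPORT_0_9 is inlined as an assumption. */
#include <assert.h>
#include <stdint.h>

#define MCS_0_9 2

int main(void)
{
        uint16_t map = 0;
        int ss;

        for (ss = 0; ss < 8; ss++)  /* eight spatial streams, 2 bits each */
                map |= (uint16_t)(MCS_0_9 << (2 * ss));
        assert(map == 0xaaaa);
        return 0;
}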
static struct ieee80211_supported_band band_5ghz = {
.channels = &channel_5ghz,
.bitrates = bitrates_5ghz,
.band = NL80211_BAND_5GHZ,
.n_channels = 1,
.n_bitrates = ARRAY_SIZE(bitrates_5ghz),
.ht_cap = {
.ht_supported = true,
.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_DSSSCCK40,
.ampdu_factor = 0x3,
.ampdu_density = 0x6,
.mcs = {
.rx_mask = {0xff, 0xff},
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
},
},
.vht_cap = {
.vht_supported = true,
.cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_SHORT_GI_160 |
IEEE80211_VHT_CAP_TXSTBC |
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_RXSTBC_2 |
IEEE80211_VHT_CAP_RXSTBC_3 |
IEEE80211_VHT_CAP_RXSTBC_4 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
.vht_mcs = {
.rx_mcs_map = cpu_to_le16(RX_MCS_MAP),
.tx_mcs_map = cpu_to_le16(TX_MCS_MAP),
}
},
};
/* Assigned at module init. Guaranteed locally-administered and unicast. */
static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {};
/* Called with the rtnl lock held. */
static int virt_wifi_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
{
struct virt_wifi_wiphy_priv *priv = wiphy_priv(wiphy);
wiphy_debug(wiphy, "scan\n");
if (priv->scan_request || priv->being_deleted)
return -EBUSY;
priv->scan_request = request;
schedule_delayed_work(&priv->scan_result, HZ * 2);
return 0;
}
/* Acquires and releases the rdev BSS lock. */
static void virt_wifi_scan_result(struct work_struct *work)
{
struct {
u8 tag;
u8 len;
u8 ssid[8];
} __packed ssid = {
.tag = WLAN_EID_SSID, .len = 8, .ssid = "VirtWifi",
};
struct cfg80211_bss *informed_bss;
struct virt_wifi_wiphy_priv *priv =
container_of(work, struct virt_wifi_wiphy_priv,
scan_result.work);
struct wiphy *wiphy = priv_to_wiphy(priv);
struct cfg80211_scan_info scan_info = { .aborted = false };
informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
CFG80211_BSS_FTYPE_PRESP,
fake_router_bssid,
ktime_get_boot_ns(),
WLAN_CAPABILITY_ESS, 0,
(void *)&ssid, sizeof(ssid),
DBM_TO_MBM(-50), GFP_KERNEL);
cfg80211_put_bss(wiphy, informed_bss);
/* Schedules work which acquires and releases the rtnl lock. */
cfg80211_scan_done(priv->scan_request, &scan_info);
priv->scan_request = NULL;
}
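
The __packed struct above hands cfg80211 a raw 802.11 SSID information element (tag, length, value). Spelled out as bytes, assuming the standard WLAN_EID_SSID value of 0:

/* Byte layout of the fake probe-response IE built above; WLAN_EID_SSID
 * is 0 in the 802.11 element numbering (inlined as an assumption). */
static const unsigned char virtwifi_ssid_ie[] = {
        0x00,                           /* element ID: SSID */
        0x08,                           /* length */
        'V', 'i', 'r', 't', 'W', 'i', 'f', 'i',
};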
/* May acquire and release the rdev BSS lock. */
static void virt_wifi_cancel_scan(struct wiphy *wiphy)
{
struct virt_wifi_wiphy_priv *priv = wiphy_priv(wiphy);
cancel_delayed_work_sync(&priv->scan_result);
/* Clean up dangling callbacks if necessary. */
if (priv->scan_request) {
struct cfg80211_scan_info scan_info = { .aborted = true };
/* Schedules work which acquires and releases the rtnl lock. */
cfg80211_scan_done(priv->scan_request, &scan_info);
priv->scan_request = NULL;
}
}
struct virt_wifi_netdev_priv {
struct delayed_work connect;
struct net_device *lowerdev;
struct net_device *upperdev;
u32 tx_packets;
u32 tx_failed;
u8 connect_requested_bss[ETH_ALEN];
bool is_up;
bool is_connected;
bool being_deleted;
};
/* Called with the rtnl lock held. */
static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_connect_params *sme)
{
struct virt_wifi_netdev_priv *priv = netdev_priv(netdev);
bool could_schedule;
if (priv->being_deleted || !priv->is_up)
return -EBUSY;
could_schedule = schedule_delayed_work(&priv->connect, HZ * 2);
if (!could_schedule)
return -EBUSY;
if (sme->bssid)
ether_addr_copy(priv->connect_requested_bss, sme->bssid);
else
eth_zero_addr(priv->connect_requested_bss);
wiphy_debug(wiphy, "connect\n");
return 0;
}
/* Acquires and releases the rdev event lock. */
static void virt_wifi_connect_complete(struct work_struct *work)
{
struct virt_wifi_netdev_priv *priv =
container_of(work, struct virt_wifi_netdev_priv, connect.work);
u8 *requested_bss = priv->connect_requested_bss;
bool has_addr = !is_zero_ether_addr(requested_bss);
bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid);
u16 status = WLAN_STATUS_SUCCESS;
if (!priv->is_up || (has_addr && !right_addr))
status = WLAN_STATUS_UNSPECIFIED_FAILURE;
else
priv->is_connected = true;
/* Schedules an event that acquires the rtnl lock. */
cfg80211_connect_result(priv->upperdev, requested_bss, NULL, 0, NULL, 0,
status, GFP_KERNEL);
netif_carrier_on(priv->upperdev);
}
/* May acquire and release the rdev event lock. */
static void virt_wifi_cancel_connect(struct net_device *netdev)
{
struct virt_wifi_netdev_priv *priv = netdev_priv(netdev);
/* If there is work pending, clean up dangling callbacks. */
if (cancel_delayed_work_sync(&priv->connect)) {
/* Schedules an event that acquires the rtnl lock. */
cfg80211_connect_result(priv->upperdev,
priv->connect_requested_bss, NULL, 0,
NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
}
}
/* Called with the rtnl lock held. Acquires the rdev event lock. */
static int virt_wifi_disconnect(struct wiphy *wiphy, struct net_device *netdev,
u16 reason_code)
{
struct virt_wifi_netdev_priv *priv = netdev_priv(netdev);
if (priv->being_deleted)
return -EBUSY;
wiphy_debug(wiphy, "disconnect\n");
virt_wifi_cancel_connect(netdev);
cfg80211_disconnected(netdev, reason_code, NULL, 0, true, GFP_KERNEL);
priv->is_connected = false;
netif_carrier_off(netdev);
return 0;
}
/* Called with the rtnl lock held. */
static int virt_wifi_get_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_info *sinfo)
{
struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
wiphy_debug(wiphy, "get_station\n");
if (!priv->is_connected || !ether_addr_equal(mac, fake_router_bssid))
return -ENOENT;
sinfo->filled = BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
BIT_ULL(NL80211_STA_INFO_TX_FAILED) |
BIT_ULL(NL80211_STA_INFO_SIGNAL) |
BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
sinfo->tx_packets = priv->tx_packets;
sinfo->tx_failed = priv->tx_failed;
/* For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_ */
sinfo->signal = -50;
sinfo->txrate = (struct rate_info) {
.legacy = 10, /* units are 100kbit/s */
};
return 0;
}
/* Called with the rtnl lock held. */
static int virt_wifi_dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo)
{
struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
wiphy_debug(wiphy, "dump_station\n");
if (idx != 0 || !priv->is_connected)
return -ENOENT;
ether_addr_copy(mac, fake_router_bssid);
return virt_wifi_get_station(wiphy, dev, fake_router_bssid, sinfo);
}
static const struct cfg80211_ops virt_wifi_cfg80211_ops = {
.scan = virt_wifi_scan,
.connect = virt_wifi_connect,
.disconnect = virt_wifi_disconnect,
.get_station = virt_wifi_get_station,
.dump_station = virt_wifi_dump_station,
};
/* Acquires and releases the rtnl lock. */
static struct wiphy *virt_wifi_make_wiphy(void)
{
struct wiphy *wiphy;
struct virt_wifi_wiphy_priv *priv;
int err;
wiphy = wiphy_new(&virt_wifi_cfg80211_ops, sizeof(*priv));
if (!wiphy)
return NULL;
wiphy->max_scan_ssids = 4;
wiphy->max_scan_ie_len = 1000;
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wiphy->bands[NL80211_BAND_2GHZ] = &band_2ghz;
wiphy->bands[NL80211_BAND_5GHZ] = &band_5ghz;
wiphy->bands[NL80211_BAND_60GHZ] = NULL;
wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
priv = wiphy_priv(wiphy);
priv->being_deleted = false;
priv->scan_request = NULL;
INIT_DELAYED_WORK(&priv->scan_result, virt_wifi_scan_result);
err = wiphy_register(wiphy);
if (err < 0) {
wiphy_free(wiphy);
return NULL;
}
return wiphy;
}
/* Acquires and releases the rtnl lock. */
static void virt_wifi_destroy_wiphy(struct wiphy *wiphy)
{
struct virt_wifi_wiphy_priv *priv;
WARN(!wiphy, "%s called with null wiphy", __func__);
if (!wiphy)
return;
priv = wiphy_priv(wiphy);
priv->being_deleted = true;
virt_wifi_cancel_scan(wiphy);
if (wiphy->registered)
wiphy_unregister(wiphy);
wiphy_free(wiphy);
}
/* Enters and exits a RCU-bh critical section. */
static netdev_tx_t virt_wifi_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
priv->tx_packets++;
if (!priv->is_connected) {
priv->tx_failed++;
return NET_XMIT_DROP;
}
skb->dev = priv->lowerdev;
return dev_queue_xmit(skb);
}
/* Called with rtnl lock held. */
static int virt_wifi_net_device_open(struct net_device *dev)
{
struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
priv->is_up = true;
return 0;
}
/* Called with rtnl lock held. */
static int virt_wifi_net_device_stop(struct net_device *dev)
{
struct virt_wifi_netdev_priv *n_priv = netdev_priv(dev);
struct virt_wifi_wiphy_priv *w_priv;
n_priv->is_up = false;
if (!dev->ieee80211_ptr)
return 0;
w_priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
virt_wifi_cancel_scan(dev->ieee80211_ptr->wiphy);
virt_wifi_cancel_connect(dev);
netif_carrier_off(dev);
return 0;
}
static const struct net_device_ops virt_wifi_ops = {
.ndo_start_xmit = virt_wifi_start_xmit,
.ndo_open = virt_wifi_net_device_open,
.ndo_stop = virt_wifi_net_device_stop,
};
/* Invoked as part of rtnl lock release. */
static void virt_wifi_net_device_destructor(struct net_device *dev)
{
/* Delayed past dellink to allow nl80211 to react to the device being
* deleted.
*/
kfree(dev->ieee80211_ptr);
dev->ieee80211_ptr = NULL;
free_netdev(dev);
}
/* No lock interaction. */
static void virt_wifi_setup(struct net_device *dev)
{
ether_setup(dev);
dev->netdev_ops = &virt_wifi_ops;
dev->destructor = virt_wifi_net_device_destructor;
}
/* Called in a RCU read critical section from netif_receive_skb */
static rx_handler_result_t virt_wifi_rx_handler(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct virt_wifi_netdev_priv *priv =
rcu_dereference(skb->dev->rx_handler_data);
if (!priv->is_connected)
return RX_HANDLER_PASS;
/* GFP_ATOMIC because this is a packet interrupt handler. */
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb) {
dev_err(&priv->upperdev->dev, "can't skb_share_check\n");
return RX_HANDLER_CONSUMED;
}
*pskb = skb;
skb->dev = priv->upperdev;
skb->pkt_type = PACKET_HOST;
return RX_HANDLER_ANOTHER;
}
/* Called with rtnl lock held. */
static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
int err;
if (!tb[IFLA_LINK])
return -EINVAL;
netif_carrier_off(dev);
priv->upperdev = dev;
priv->lowerdev = __dev_get_by_index(src_net,
nla_get_u32(tb[IFLA_LINK]));
if (!priv->lowerdev)
return -ENODEV;
if (!tb[IFLA_MTU])
dev->mtu = priv->lowerdev->mtu;
else if (dev->mtu > priv->lowerdev->mtu)
return -EINVAL;
err = netdev_rx_handler_register(priv->lowerdev, virt_wifi_rx_handler,
priv);
if (err) {
dev_err(&priv->lowerdev->dev,
"can't netdev_rx_handler_register: %d\n", err);
return err;
}
eth_hw_addr_inherit(dev, priv->lowerdev);
netif_stacked_transfer_operstate(priv->lowerdev, dev);
SET_NETDEV_DEV(dev, &priv->lowerdev->dev);
dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL);
if (!dev->ieee80211_ptr)
goto remove_handler;
dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
dev->ieee80211_ptr->wiphy = common_wiphy;
err = register_netdevice(dev);
if (err) {
dev_err(&priv->lowerdev->dev, "can't register_netdevice: %d\n",
err);
goto free_wireless_dev;
}
err = netdev_upper_dev_link(priv->lowerdev, dev);
if (err) {
dev_err(&priv->lowerdev->dev, "can't netdev_upper_dev_link: %d\n",
err);
goto unregister_netdev;
}
priv->being_deleted = false;
priv->is_connected = false;
priv->is_up = false;
INIT_DELAYED_WORK(&priv->connect, virt_wifi_connect_complete);
return 0;
unregister_netdev:
unregister_netdevice(dev);
free_wireless_dev:
kfree(dev->ieee80211_ptr);
dev->ieee80211_ptr = NULL;
remove_handler:
netdev_rx_handler_unregister(priv->lowerdev);
return err;
}
/* Called with rtnl lock held. */
static void virt_wifi_dellink(struct net_device *dev,
struct list_head *head)
{
struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
if (dev->ieee80211_ptr)
virt_wifi_cancel_scan(dev->ieee80211_ptr->wiphy);
priv->being_deleted = true;
virt_wifi_cancel_connect(dev);
netif_carrier_off(dev);
netdev_rx_handler_unregister(priv->lowerdev);
netdev_upper_dev_unlink(priv->lowerdev, dev);
unregister_netdevice_queue(dev, head);
/* Deleting the wiphy is handled in the module destructor. */
}
static struct rtnl_link_ops virt_wifi_link_ops = {
.kind = "virt_wifi",
.setup = virt_wifi_setup,
.newlink = virt_wifi_newlink,
.dellink = virt_wifi_dellink,
.priv_size = sizeof(struct virt_wifi_netdev_priv),
};
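
Since the ops register kind "virt_wifi" and newlink requires IFLA_LINK, an instance is created over an existing ethernet device through ordinary rtnetlink. A hedged userspace sketch using the libnl3 route API; the interface names are placeholders and error handling is abbreviated:

/* Sketch: wrap an existing ethernet device ("eth0", a placeholder) in
 * a virt_wifi device named "wlan0". Build with -lnl-3 -lnl-route-3. */
#include <netlink/netlink.h>
#include <netlink/route/link.h>

int main(void)
{
        struct nl_sock *sk = nl_socket_alloc();
        struct rtnl_link *link;
        int lower, err;

        nl_connect(sk, NETLINK_ROUTE);
        rtnl_link_get_kernel(sk, 0, "eth0", &link);     /* lower device */
        lower = rtnl_link_get_ifindex(link);
        rtnl_link_put(link);

        link = rtnl_link_alloc();
        rtnl_link_set_type(link, "virt_wifi");  /* matches .kind above */
        rtnl_link_set_link(link, lower);        /* IFLA_LINK, required */
        rtnl_link_set_name(link, "wlan0");
        err = rtnl_link_add(sk, link, NLM_F_CREATE);
        rtnl_link_put(link);
        nl_socket_free(sk);
        return err ? 1 : 0;
}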
/* Acquires and releases the rtnl lock. */
static int __init virt_wifi_init_module(void)
{
int err;
/* Guaranteed to be locally-administered and not multicast. */
eth_random_addr(fake_router_bssid);
common_wiphy = virt_wifi_make_wiphy();
if (!common_wiphy)
return -ENOMEM;
err = rtnl_link_register(&virt_wifi_link_ops);
if (err)
virt_wifi_destroy_wiphy(common_wiphy);
return err;
}
/* Acquires and releases the rtnl lock. */
static void __exit virt_wifi_cleanup_module(void)
{
/* Will delete any devices that depend on the wiphy. */
rtnl_link_unregister(&virt_wifi_link_ops);
virt_wifi_destroy_wiphy(common_wiphy);
}
module_init(virt_wifi_init_module);
module_exit(virt_wifi_cleanup_module);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Cody Schuffelen <schuffelen@google.com>");
MODULE_DESCRIPTION("Driver for a wireless wrapper of ethernet devices");
MODULE_ALIAS_RTNL_LINK("virt_wifi");

View File

@@ -39,7 +39,6 @@ vsoc.c, uapi/vsoc_shm.h
waiting threads. We should eventually use multiple queues and select the
queue based on the region.
- Add debugfs support for examining the permissions of regions.
- Use ioremap_wc instead of ioremap_nocache.
- Remove VSOC_WAIT_FOR_INCOMING_INTERRUPT ioctl. This functionality has been
superseded by the futex and is there for legacy reasons.

View File

@@ -1229,9 +1229,6 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
int i;
pr_debug("%s: syncing for device %s\n", __func__,
dev ? dev_name(dev) : "null");
if (!ion_buffer_fault_user_mappings(buffer))
return;
@@ -1293,7 +1290,6 @@ static void ion_vm_close(struct vm_area_struct *vma)
struct ion_buffer *buffer = vma->vm_private_data;
struct ion_vma_list *vma_list, *tmp;
pr_debug("%s\n", __func__);
mutex_lock(&buffer->lock);
list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
if (vma_list->vma != vma)
@@ -1696,7 +1692,6 @@ static int ion_release(struct inode *inode, struct file *file)
{
struct ion_client *client = file->private_data;
pr_debug("%s: %d\n", __func__, __LINE__);
ion_client_destroy(client);
return 0;
}
@@ -1708,7 +1703,6 @@ static int ion_open(struct inode *inode, struct file *file)
struct ion_client *client;
char debug_name[64];
pr_debug("%s: %d\n", __func__, __LINE__);
snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
client = ion_client_create(dev, debug_name);
if (IS_ERR(client))

View File

@@ -81,8 +81,8 @@ struct vsoc_region_data {
atomic_t *incoming_signalled;
/* Flag indicating the guest has signalled the host. */
atomic_t *outgoing_signalled;
int irq_requested;
int device_created;
bool irq_requested;
bool device_created;
};
struct vsoc_device {
@@ -91,7 +91,7 @@ struct vsoc_device {
/* Physical address of SHARED_MEMORY_BAR. */
phys_addr_t shm_phys_start;
/* Kernel virtual address of SHARED_MEMORY_BAR. */
void *kernel_mapped_shm;
void __iomem *kernel_mapped_shm;
/* Size of the entire shared memory window in bytes. */
size_t shm_size;
/*
@@ -116,22 +116,23 @@ struct vsoc_device {
* vsoc_region_data because the kernel deals with them as an array.
*/
struct msix_entry *msix_entries;
/*
* Flags that indicate what we've initialzied. These are used to do an
* orderly cleanup of the device.
*/
char enabled_device;
char requested_regions;
char cdev_added;
char class_added;
char msix_enabled;
/* Mutex that protects the permission list */
struct mutex mtx;
/* Major number assigned by the kernel */
int major;
/* Character device assigned by the kernel */
struct cdev cdev;
/* Device class assigned by the kernel */
struct class *class;
/*
* Flags that indicate what we've initialized. These are used to do an
* orderly cleanup of the device.
*/
bool enabled_device;
bool requested_regions;
bool cdev_added;
bool class_added;
bool msix_enabled;
};
static struct vsoc_device vsoc_dev;
@@ -153,13 +154,13 @@ static long vsoc_ioctl(struct file *, unsigned int, unsigned long);
static int vsoc_mmap(struct file *, struct vm_area_struct *);
static int vsoc_open(struct inode *, struct file *);
static int vsoc_release(struct inode *, struct file *);
static ssize_t vsoc_read(struct file *, char *, size_t, loff_t *);
static ssize_t vsoc_write(struct file *, const char *, size_t, loff_t *);
static ssize_t vsoc_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t vsoc_write(struct file *, const char __user *, size_t, loff_t *);
static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin);
static int do_create_fd_scoped_permission(
struct vsoc_device_region *region_p,
struct fd_scoped_permission_node *np,
struct fd_scoped_permission_arg *__user arg);
struct fd_scoped_permission_arg __user *arg);
static void do_destroy_fd_scoped_permission(
struct vsoc_device_region *owner_region_p,
struct fd_scoped_permission *perm);
@@ -198,7 +199,7 @@ inline int vsoc_validate_filep(struct file *filp)
/* Converts from shared memory offset to virtual address */
static inline void *shm_off_to_virtual_addr(__u32 offset)
{
return vsoc_dev.kernel_mapped_shm + offset;
return (void __force *)vsoc_dev.kernel_mapped_shm + offset;
}
/* Converts from shared memory offset to physical address */
@@ -261,7 +262,7 @@ static struct pci_driver vsoc_pci_driver = {
static int do_create_fd_scoped_permission(
struct vsoc_device_region *region_p,
struct fd_scoped_permission_node *np,
struct fd_scoped_permission_arg *__user arg)
struct fd_scoped_permission_arg __user *arg)
{
struct file *managed_filp;
s32 managed_fd;
@@ -632,11 +633,11 @@ static long vsoc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return 0;
}
static ssize_t vsoc_read(struct file *filp, char *buffer, size_t len,
static ssize_t vsoc_read(struct file *filp, char __user *buffer, size_t len,
loff_t *poffset)
{
__u32 area_off;
void *area_p;
const void *area_p;
ssize_t area_len;
int retval = vsoc_validate_filep(filp);
@@ -706,7 +707,7 @@ static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin)
return offset;
}
static ssize_t vsoc_write(struct file *filp, const char *buffer,
static ssize_t vsoc_write(struct file *filp, const char __user *buffer,
size_t len, loff_t *poffset)
{
__u32 area_off;
@@ -772,14 +773,14 @@ static int vsoc_probe_device(struct pci_dev *pdev,
pci_name(pdev), result);
return result;
}
vsoc_dev.enabled_device = 1;
vsoc_dev.enabled_device = true;
result = pci_request_regions(pdev, "vsoc");
if (result < 0) {
dev_err(&pdev->dev, "pci_request_regions failed\n");
vsoc_remove_device(pdev);
return -EBUSY;
}
vsoc_dev.requested_regions = 1;
vsoc_dev.requested_regions = true;
/* Set up the control registers in BAR 0 */
reg_size = pci_resource_len(pdev, REGISTER_BAR);
if (reg_size > MAX_REGISTER_BAR_LEN)
@@ -790,7 +791,7 @@ static int vsoc_probe_device(struct pci_dev *pdev,
if (!vsoc_dev.regs) {
dev_err(&pdev->dev,
"cannot ioremap registers of size %zu\n",
"cannot map registers of size %zu\n",
(size_t)reg_size);
vsoc_remove_device(pdev);
return -EBUSY;
@@ -800,19 +801,17 @@ static int vsoc_probe_device(struct pci_dev *pdev,
vsoc_dev.shm_phys_start = pci_resource_start(pdev, SHARED_MEMORY_BAR);
vsoc_dev.shm_size = pci_resource_len(pdev, SHARED_MEMORY_BAR);
dev_info(&pdev->dev, "shared memory @ DMA %p size=0x%zx\n",
(void *)vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
/* TODO(ghartman): ioremap_wc should work here */
vsoc_dev.kernel_mapped_shm = ioremap_nocache(
vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
dev_info(&pdev->dev, "shared memory @ DMA %pa size=0x%zx\n",
&vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
vsoc_dev.kernel_mapped_shm = pci_iomap_wc(pdev, SHARED_MEMORY_BAR, 0);
if (!vsoc_dev.kernel_mapped_shm) {
dev_err(&vsoc_dev.dev->dev, "cannot iomap region\n");
vsoc_remove_device(pdev);
return -EBUSY;
}
vsoc_dev.layout =
(struct vsoc_shm_layout_descriptor *)vsoc_dev.kernel_mapped_shm;
vsoc_dev.layout = (struct vsoc_shm_layout_descriptor __force *)
vsoc_dev.kernel_mapped_shm;
dev_info(&pdev->dev, "major_version: %d\n",
vsoc_dev.layout->major_version);
dev_info(&pdev->dev, "minor_version: %d\n",
@@ -843,16 +842,16 @@ static int vsoc_probe_device(struct pci_dev *pdev,
vsoc_remove_device(pdev);
return -EBUSY;
}
vsoc_dev.cdev_added = 1;
vsoc_dev.cdev_added = true;
vsoc_dev.class = class_create(THIS_MODULE, VSOC_DEV_NAME);
if (IS_ERR(vsoc_dev.class)) {
dev_err(&vsoc_dev.dev->dev, "class_create failed\n");
vsoc_remove_device(pdev);
return PTR_ERR(vsoc_dev.class);
}
vsoc_dev.class_added = 1;
vsoc_dev.regions = (struct vsoc_device_region *)
(vsoc_dev.kernel_mapped_shm +
vsoc_dev.class_added = true;
vsoc_dev.regions = (struct vsoc_device_region __force *)
((void *)vsoc_dev.layout +
vsoc_dev.layout->vsoc_region_desc_offset);
vsoc_dev.msix_entries = kcalloc(
vsoc_dev.layout->region_count,
@@ -912,7 +911,7 @@ static int vsoc_probe_device(struct pci_dev *pdev,
return -EFAULT;
}
}
vsoc_dev.msix_enabled = 1;
vsoc_dev.msix_enabled = true;
for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
const struct vsoc_device_region *region = vsoc_dev.regions + i;
size_t name_sz = sizeof(vsoc_dev.regions_data[i].name) - 1;
@@ -930,14 +929,11 @@ static int vsoc_probe_device(struct pci_dev *pdev,
&vsoc_dev.regions_data[i].interrupt_wait_queue);
init_waitqueue_head(&vsoc_dev.regions_data[i].futex_wait_queue);
vsoc_dev.regions_data[i].incoming_signalled =
vsoc_dev.kernel_mapped_shm +
region->region_begin_offset +
shm_off_to_virtual_addr(region->region_begin_offset) +
h_to_g_signal_table->interrupt_signalled_offset;
vsoc_dev.regions_data[i].outgoing_signalled =
vsoc_dev.kernel_mapped_shm +
region->region_begin_offset +
shm_off_to_virtual_addr(region->region_begin_offset) +
g_to_h_signal_table->interrupt_signalled_offset;
result = request_irq(
vsoc_dev.msix_entries[i].vector,
vsoc_interrupt, 0,
@@ -950,7 +946,7 @@ static int vsoc_probe_device(struct pci_dev *pdev,
vsoc_remove_device(pdev);
return -ENOSPC;
}
vsoc_dev.regions_data[i].irq_requested = 1;
vsoc_dev.regions_data[i].irq_requested = true;
if (!device_create(vsoc_dev.class, NULL,
MKDEV(vsoc_dev.major, i),
NULL, vsoc_dev.regions_data[i].name)) {
@@ -958,7 +954,7 @@ static int vsoc_probe_device(struct pci_dev *pdev,
vsoc_remove_device(pdev);
return -EBUSY;
}
vsoc_dev.regions_data[i].device_created = 1;
vsoc_dev.regions_data[i].device_created = true;
}
return 0;
}
@@ -990,51 +986,51 @@ static void vsoc_remove_device(struct pci_dev *pdev)
if (vsoc_dev.regions_data[i].device_created) {
device_destroy(vsoc_dev.class,
MKDEV(vsoc_dev.major, i));
vsoc_dev.regions_data[i].device_created = 0;
vsoc_dev.regions_data[i].device_created = false;
}
if (vsoc_dev.regions_data[i].irq_requested)
free_irq(vsoc_dev.msix_entries[i].vector, NULL);
vsoc_dev.regions_data[i].irq_requested = 0;
vsoc_dev.regions_data[i].irq_requested = false;
}
kfree(vsoc_dev.regions_data);
vsoc_dev.regions_data = 0;
vsoc_dev.regions_data = NULL;
}
if (vsoc_dev.msix_enabled) {
pci_disable_msix(pdev);
vsoc_dev.msix_enabled = 0;
vsoc_dev.msix_enabled = false;
}
kfree(vsoc_dev.msix_entries);
vsoc_dev.msix_entries = 0;
vsoc_dev.regions = 0;
vsoc_dev.msix_entries = NULL;
vsoc_dev.regions = NULL;
if (vsoc_dev.class_added) {
class_destroy(vsoc_dev.class);
vsoc_dev.class_added = 0;
vsoc_dev.class_added = false;
}
if (vsoc_dev.cdev_added) {
cdev_del(&vsoc_dev.cdev);
vsoc_dev.cdev_added = 0;
vsoc_dev.cdev_added = false;
}
if (vsoc_dev.major && vsoc_dev.layout) {
unregister_chrdev_region(MKDEV(vsoc_dev.major, 0),
vsoc_dev.layout->region_count);
vsoc_dev.major = 0;
}
vsoc_dev.layout = 0;
vsoc_dev.layout = NULL;
if (vsoc_dev.kernel_mapped_shm) {
pci_iounmap(pdev, vsoc_dev.kernel_mapped_shm);
vsoc_dev.kernel_mapped_shm = 0;
vsoc_dev.kernel_mapped_shm = NULL;
}
if (vsoc_dev.regs) {
pci_iounmap(pdev, vsoc_dev.regs);
vsoc_dev.regs = 0;
vsoc_dev.regs = NULL;
}
if (vsoc_dev.requested_regions) {
pci_release_regions(pdev);
vsoc_dev.requested_regions = 0;
vsoc_dev.requested_regions = false;
}
if (vsoc_dev.enabled_device) {
pci_disable_device(pdev);
vsoc_dev.enabled_device = 0;
vsoc_dev.enabled_device = false;
}
/* Do this last: it indicates that the device is not initialized. */
vsoc_dev.dev = NULL;
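
Most of the vsoc churn above is sparse annotation work: MMIO mappings gain __iomem, userspace pointers gain __user, flag chars become bools, and deliberate address-space crossings are marked __force. A minimal sketch of the pattern, not vsoc code itself:

/* Illustrative only: the sparse annotation pattern used above. Plain,
 * __iomem and __user pointers must not mix; __force documents an
 * intentional conversion where the memory is known to be plain RAM. */
#include <linux/io.h>
#include <linux/uaccess.h>

static void __iomem *shm;       /* e.g. from pci_iomap_wc() */

static int copy_word_to_user(u32 __user *ubuf, u32 offset)
{
        u32 *p = (u32 __force *)(shm + offset);

        return put_user(*p, ubuf);
}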

View File

@@ -820,6 +820,11 @@ static void send_file_work(struct work_struct *data)
offset = dev->xfer_file_offset;
count = dev->xfer_file_length;
if (count < 0) {
dev->xfer_result = -EINVAL;
return;
}
DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
if (dev->xfer_send_header) {
@@ -936,6 +941,11 @@ static void receive_file_work(struct work_struct *data)
offset = dev->xfer_file_offset;
count = dev->xfer_file_length;
if (count < 0) {
dev->xfer_result = -EINVAL;
return;
}
DBG(cdev, "receive_file_work(%lld)\n", count);
if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,

View File

@@ -166,6 +166,8 @@ static bool f2fs_bio_post_read_required(struct bio *bio)
static void f2fs_read_end_io(struct bio *bio)
{
struct page *first_page = bio->bi_io_vec[0].bv_page;
if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_READ_IO)) {
f2fs_show_injection_info(FAULT_READ_IO);
bio->bi_error = -EIO;
@@ -179,6 +181,13 @@ static void f2fs_read_end_io(struct bio *bio)
return;
}
if (first_page != NULL &&
__read_io_type(first_page) == F2FS_RD_DATA) {
trace_android_fs_dataread_end(first_page->mapping->host,
page_offset(first_page),
bio->bi_iter.bi_size);
}
__read_end_io(bio);
}
@@ -345,6 +354,32 @@ submit_io:
submit_bio(bio);
}
static void __f2fs_submit_read_bio(struct f2fs_sb_info *sbi,
struct bio *bio, enum page_type type)
{
if (trace_android_fs_dataread_start_enabled() && (type == DATA)) {
struct page *first_page = bio->bi_io_vec[0].bv_page;
if (first_page != NULL &&
__read_io_type(first_page) == F2FS_RD_DATA) {
char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
path = android_fstrace_get_pathname(pathbuf,
MAX_TRACE_PATHBUF_LEN,
first_page->mapping->host);
trace_android_fs_dataread_start(
first_page->mapping->host,
page_offset(first_page),
bio->bi_iter.bi_size,
current->pid,
path,
current->comm);
}
}
__submit_bio(sbi, bio, type);
}
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
struct f2fs_io_info *fio = &io->fio;
@@ -497,7 +532,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
inc_page_count(fio->sbi, is_read_io(fio->op) ?
__read_io_type(page): WB_DATA_TYPE(fio->page));
__submit_bio(fio->sbi, bio, fio->type);
__f2fs_submit_read_bio(fio->sbi, bio, fio->type);
return 0;
}
@@ -649,7 +684,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
}
ClearPageError(page);
inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
__submit_bio(F2FS_I_SB(inode), bio, DATA);
__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
return 0;
}
@@ -1656,7 +1691,7 @@ got_it:
if (bio && (last_block_in_bio != block_nr - 1 ||
!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
__submit_bio(F2FS_I_SB(inode), bio, DATA);
__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
@@ -1698,7 +1733,7 @@ set_error_page:
goto next_page;
confused:
if (bio) {
__submit_bio(F2FS_I_SB(inode), bio, DATA);
__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
unlock_page(page);
@@ -1708,7 +1743,7 @@ next_page:
}
BUG_ON(pages && !list_empty(pages));
if (bio)
__submit_bio(F2FS_I_SB(inode), bio, DATA);
__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
return 0;
}
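
With every read path funneled through __f2fs_submit_read_bio(), android_fs_dataread_start/end now bracket each data read bio. A hedged sketch for observing them; the tracefs path and the "android_fs" event group are inferred from the trace_android_fs_* calls above, not confirmed by this diff:

/* Hypothetical: enable the android_fs tracepoints through tracefs.
 * Assumes tracing is mounted at /sys/kernel/debug/tracing and the
 * event group is named "android_fs". */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/sys/kernel/debug/tracing/events/android_fs/enable",
                      O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        write(fd, "1", 1);      /* results appear in .../trace */
        close(fd);
        return 0;
}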

View File

@@ -174,7 +174,7 @@ static int proc_uid_base_readdir(struct file *file, struct dir_context *ctx)
return 0;
for (u = uid_base_stuff + (ctx->pos - 2);
u <= uid_base_stuff + nents - 1; u++) {
u < uid_base_stuff + nents; u++) {
if (!proc_fill_cache(file, ctx, u->name, u->len,
proc_uident_instantiate, NULL, u))
break;

View File

@@ -51,7 +51,6 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
* whether the base obbpath has been changed or not
*/
if (is_obbpath_invalid(dentry)) {
d_drop(dentry);
return 0;
}
@@ -65,7 +64,6 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
if ((lower_dentry->d_flags & DCACHE_OP_REVALIDATE)) {
err = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
if (err == 0) {
d_drop(dentry);
goto out;
}
}
@@ -73,14 +71,12 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
spin_lock(&lower_dentry->d_lock);
if (d_unhashed(lower_dentry)) {
spin_unlock(&lower_dentry->d_lock);
d_drop(dentry);
err = 0;
goto out;
}
spin_unlock(&lower_dentry->d_lock);
if (parent_lower_dentry != lower_cur_parent_dentry) {
d_drop(dentry);
err = 0;
goto out;
}
@@ -94,7 +90,6 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
}
if (!qstr_case_eq(&dentry->d_name, &lower_dentry->d_name)) {
__d_drop(dentry);
err = 0;
}
@@ -113,7 +108,6 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (inode) {
data = top_data_get(SDCARDFS_I(inode));
if (!data || data->abandoned) {
d_drop(dentry);
err = 0;
}
if (data)
@@ -129,8 +123,16 @@ out:
return err;
}
/* 1 = delete, 0 = cache */
static int sdcardfs_d_delete(const struct dentry *d)
{
return SDCARDFS_SB(d->d_sb)->options.nocache ? 1 : 0;
}
static void sdcardfs_d_release(struct dentry *dentry)
{
if (!dentry || !dentry->d_fsdata)
return;
/* release and reset the lower paths */
if (has_graft_path(dentry))
sdcardfs_put_reset_orig_path(dentry);
@@ -185,6 +187,7 @@ static void sdcardfs_canonical_path(const struct path *path,
const struct dentry_operations sdcardfs_ci_dops = {
.d_revalidate = sdcardfs_d_revalidate,
.d_delete = sdcardfs_d_delete,
.d_release = sdcardfs_d_release,
.d_hash = sdcardfs_hash_ci,
.d_compare = sdcardfs_cmp_ci,

View File

@@ -62,6 +62,7 @@ void get_derived_permission_new(struct dentry *parent, struct dentry *dentry,
int err;
struct qstr q_Android = QSTR_LITERAL("Android");
struct qstr q_data = QSTR_LITERAL("data");
struct qstr q_sandbox = QSTR_LITERAL("sandbox");
struct qstr q_obb = QSTR_LITERAL("obb");
struct qstr q_media = QSTR_LITERAL("media");
struct qstr q_cache = QSTR_LITERAL("cache");
@@ -110,6 +111,9 @@ void get_derived_permission_new(struct dentry *parent, struct dentry *dentry,
if (qstr_case_eq(name, &q_data)) {
/* App-specific directories inside; let anyone traverse */
info->data->perm = PERM_ANDROID_DATA;
} else if (qstr_case_eq(name, &q_sandbox)) {
/* App-specific directories inside; let anyone traverse */
info->data->perm = PERM_ANDROID_DATA;
} else if (qstr_case_eq(name, &q_obb)) {
/* App-specific directories inside; let anyone traverse */
info->data->perm = PERM_ANDROID_OBB;
@@ -356,7 +360,8 @@ int need_graft_path(struct dentry *dentry)
struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
struct qstr obb = QSTR_LITERAL("obb");
if (parent_info->data->perm == PERM_ANDROID &&
if (!sbi->options.unshared_obb &&
parent_info->data->perm == PERM_ANDROID &&
qstr_case_eq(&dentry->d_name, &obb)) {
/* /Android/obb is the base obbpath of DERIVED_UNIFIED */

View File

@@ -205,6 +205,7 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
struct dentry *lower_dentry;
struct vfsmount *lower_mnt;
struct dentry *lower_parent_dentry = NULL;
struct dentry *parent_dentry = NULL;
struct path lower_path;
struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
const struct cred *saved_cred = NULL;
@@ -227,11 +228,14 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
return -ENOMEM;
/* check disk space */
if (!check_min_free_space(dentry, 0, 1)) {
parent_dentry = dget_parent(dentry);
if (!check_min_free_space(parent_dentry, 0, 1)) {
pr_err("sdcardfs: No minimum free space.\n");
err = -ENOSPC;
dput(parent_dentry);
goto out_revert;
}
dput(parent_dentry);
/* the lower_dentry is negative here */
sdcardfs_get_lower_path(dentry, &lower_path);

View File

@@ -43,8 +43,6 @@ void sdcardfs_destroy_dentry_cache(void)
void free_dentry_private_data(struct dentry *dentry)
{
if (!dentry || !dentry->d_fsdata)
return;
kmem_cache_free(sdcardfs_dentry_cachep, dentry->d_fsdata);
dentry->d_fsdata = NULL;
}

Some files were not shown because too many files have changed in this diff.