Changes in 4.4.160: (114 commits)
crypto: skcipher - Fix -Wstringop-truncation warnings
tsl2550: fix lux1_input error in low light
vmci: type promotion bug in qp_host_get_user_memory()
x86/numa_emulation: Fix emulated-to-physical node mapping
staging: rts5208: fix missing error check on call to rtsx_write_register
uwb: hwa-rc: fix memory leak at probe
power: vexpress: fix corruption in notifier registration
Bluetooth: Add a new Realtek 8723DE ID 0bda:b009
USB: serial: kobil_sct: fix modem-status error handling
6lowpan: iphc: reset mac_header after decompress to fix panic
md-cluster: clear another node's suspend_area after the copy is finished
media: exynos4-is: Prevent NULL pointer dereference in __isp_video_try_fmt()
powerpc/kdump: Handle crashkernel memory reservation failure
media: fsl-viu: fix error handling in viu_of_probe()
x86/tsc: Add missing header to tsc_msr.c
x86/entry/64: Add two more instruction suffixes
scsi: target/iscsi: Make iscsit_ta_authentication() respect the output buffer size
scsi: klist: Make it safe to use klists in atomic context
scsi: ibmvscsi: Improve strings handling
usb: wusbcore: security: cast sizeof to int for comparison
powerpc/powernv/ioda2: Reduce upper limit for DMA window size
alarmtimer: Prevent overflow for relative nanosleep
s390/extmem: fix gcc 8 stringop-overflow warning
ALSA: snd-aoa: add of_node_put() in error path
media: s3c-camif: ignore -ENOIOCTLCMD from v4l2_subdev_call for s_power
media: soc_camera: ov772x: correct setting of banding filter
media: omap3isp: zero-initialize the isp cam_xclk{a,b} initial data
staging: android: ashmem: Fix mmap size validation
drivers/tty: add error handling for pcmcia_loop_config
media: tm6000: add error handling for dvb_register_adapter
ALSA: hda: Add AZX_DCAPS_PM_RUNTIME for AMD Raven Ridge
ath10k: protect ath10k_htt_rx_ring_free with rx_ring.lock
rndis_wlan: potential buffer overflow in rndis_wlan_auth_indication()
wlcore: Add missing PM call for wlcore_cmd_wait_for_event_or_timeout()
ARM: mvebu: declare asm symbols as character arrays in pmsu.c
HID: hid-ntrig: add error handling for sysfs_create_group
scsi: bnx2i: add error handling for ioremap_nocache
EDAC, i7core: Fix memleaks and use-after-free on probe and remove
ASoC: dapm: Fix potential DAI widget pointer deref when linking DAIs
module: exclude SHN_UNDEF symbols from kallsyms api
nfsd: fix corrupted reply to badly ordered compound
ARM: dts: dra7: fix DCAN node addresses
floppy: Do not copy a kernel pointer to user memory in FDGETPRM ioctl
serial: cpm_uart: return immediately from console poll
spi: tegra20-slink: explicitly enable/disable clock
spi: sh-msiof: Fix invalid SPI use during system suspend
spi: sh-msiof: Fix handling of write value for SISTR register
spi: rspi: Fix invalid SPI use during system suspend
spi: rspi: Fix interrupted DMA transfers
USB: fix error handling in usb_driver_claim_interface()
USB: handle NULL config in usb_find_alt_setting()
slub: make ->cpu_partial unsigned int
media: uvcvideo: Support realtek's UVC 1.5 device
USB: usbdevfs: sanitize flags more
USB: usbdevfs: restore warning for nonsensical flags
Revert "usb: cdc-wdm: Fix a sleep-in-atomic-context bug in service_outstanding_interrupt()"
USB: remove LPM management from usb_driver_claim_interface()
Input: elantech - enable middle button of touchpad on ThinkPad P72
IB/srp: Avoid that sg_reset -d ${srp_device} triggers an infinite loop
scsi: target: iscsi: Use bin2hex instead of a re-implementation
serial: imx: restore handshaking irq for imx1
arm64: KVM: Tighten guest core register access from userspace
ext4: never move the system.data xattr out of the inode body
thermal: of-thermal: disable passive polling when thermal zone is disabled
net: hns: fix length and page_offset overflow when CONFIG_ARM64_64K_PAGES
e1000: check on netif_running() before calling e1000_up()
e1000: ensure to free old tx/rx rings in set_ringparam()
hwmon: (ina2xx) fix sysfs shunt resistor read access
hwmon: (adt7475) Make adt7475_read_word() return errors
i2c: i801: Allow ACPI AML access I/O ports not reserved for SMBus
arm64: cpufeature: Track 32bit EL0 support
arm64: KVM: Sanitize PSTATE.M when being set from userspace
media: v4l: event: Prevent freeing event subscriptions while accessed
KVM: PPC: Book3S HV: Don't truncate HPTE index in xlate function
mac80211: correct use of IEEE80211_VHT_CAP_RXSTBC_X
mac80211_hwsim: correct use of IEEE80211_VHT_CAP_RXSTBC_X
gpio: adp5588: Fix sleep-in-atomic-context bug
mac80211: mesh: fix HWMP sequence numbering to follow standard
cfg80211: nl80211_update_ft_ies() to validate NL80211_ATTR_IE
RAID10 BUG_ON in raise_barrier when force is true and conf->barrier is 0
i2c: uniphier: issue STOP only for last message or I2C_M_STOP
i2c: uniphier-f: issue STOP only for last message or I2C_M_STOP
net: cadence: Fix a sleep-in-atomic-context bug in macb_halt_tx()
fs/cifs: don't translate SFM_SLASH (U+F026) to backslash
cfg80211: fix a type issue in ieee80211_chandef_to_operating_class()
mac80211: fix a race between restart and CSA flows
mac80211: Fix station bandwidth setting after channel switch
mac80211: shorten the IBSS debug messages
tools/vm/slabinfo.c: fix sign-compare warning
tools/vm/page-types.c: fix "defined but not used" warning
mm: madvise(MADV_DODUMP): allow hugetlbfs pages
usb: gadget: fotg210-udc: Fix memory leak of fotg210->ep[i]
perf probe powerpc: Ignore SyS symbols irrespective of endianness
RDMA/ucma: check fd type in ucma_migrate_id()
USB: yurex: Check for truncation in yurex_read()
drm/nouveau/TBDdevinit: don't fail when PMU/PRE_OS is missing from VBIOS
fs/cifs: suppress a string overflow warning
dm thin metadata: try to avoid ever aborting transactions
arch/hexagon: fix kernel/dma.c build warning
hexagon: modify ffs() and fls() to return int
arm64: jump_label.h: use asm_volatile_goto macro instead of "asm goto"
r8169: Clear RTL_FLAG_TASK_*_PENDING when clearing RTL_FLAG_TASK_ENABLED
s390/qeth: don't dump past end of unknown HW header
cifs: read overflow in is_valid_oplock_break()
xen/manage: don't complain about an empty value in control/sysrq node
xen: avoid crash in disable_hotplug_cpu
xen: fix GCC warning and remove duplicate EVTCHN_ROW/EVTCHN_COL usage
smb2: fix missing files in root share directory listing
ALSA: hda/realtek - Cannot adjust speaker's volume on Dell XPS 27 7760
crypto: mxs-dcp - Fix wait logic on chan threads
proc: restrict kernel stack dumps to root
ocfs2: fix locking for res->tracking and dlm->tracking_list
dm thin metadata: fix __udivdi3 undefined on 32-bit
Linux 4.4.160
Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
Conflicts:
arch/arm64/include/asm/cpufeature.h
arch/arm64/kernel/cpufeature.c
/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};
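
/*
 * Note (added): the walk flags above are set per chunk.  PHYS means the
 * caller wants page/offset pairs instead of mapped virtual addresses,
 * SLOW means a bounce buffer is in use for a block that straddles a
 * scatterlist entry, COPY means data is staged through a whole aligned
 * page, and DIFF means source and destination were mapped separately.
 */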

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}
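
/*
 * Example (added): with 4 KiB pages, blkcipher_get_spot() called with
 * start == 0xffc and len == 8 computes end_page == 0x1000 (the page
 * holding the last byte, 0x1003), which is greater than start, so the
 * spot is pushed to the next page; a region that already fits within one
 * page returns start unchanged.
 */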

static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
				       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
}

static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
				       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
		blkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		blkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
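
/*
 * Example (added, hypothetical): a minimal sketch of how a cipher
 * implementation typically drives the walk API.  The loop below uses a
 * do-nothing "copy" transform in place of a real block cipher; the walk
 * machinery takes care of mapping, alignment and page-boundary handling.
 */
static int __maybe_unused example_walk_copy(struct blkcipher_desc *desc,
					    struct scatterlist *dst,
					    struct scatterlist *src,
					    unsigned int nbytes)
{
	unsigned int bsize = crypto_blkcipher_blocksize(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while (walk.nbytes) {
		/* Process all full blocks available in this chunk. */
		unsigned int n = walk.nbytes - (walk.nbytes % bsize);

		/* memmove, since src and dst may alias for in-place ops. */
		memmove(walk.dst.virt.addr, walk.src.virt.addr, n);

		/* Report the unprocessed residue; the walk then advances. */
		err = blkcipher_walk_done(desc, &walk, walk.nbytes - n);
	}

	return err;
}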

static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}
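
/*
 * Note (added): the fast path maps the source once and aliases the
 * destination to it.  Only when source and destination differ in page or
 * offset (diff != 0) is the destination mapped separately and the DIFF
 * flag set, so blkcipher_done_fast() knows to unmap both.
 */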

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}
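
/*
 * Note (added): the size computed above reserves room for two aligned
 * block-sized scratch areas plus the IV itself, and the repeated
 * blkcipher_get_spot() calls place each region so that none of them
 * straddles a page boundary before the IV is finally copied into place.
 */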

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
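
/*
 * Note (added): the two entry points above differ only in the PHYS flag.
 * A virtual walk hands the caller mapped addresses in walk->src/dst.virt,
 * while a physical walk skips the mapping and reports page/offset pairs
 * in walk->src/dst.phys instead.
 */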

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

/*
 * This function allows ablkcipher algorithms to use the blkcipher_walk API to
 * walk over their data.  The specified crypto_ablkcipher tfm is used to
 * initialize the struct blkcipher_walk, and the crypto_blkcipher specified in
 * desc->tfm is never used so it can be left NULL.  (Yes, this design is ugly,
 * but it parallels blkcipher_aead_walk_virt_block() above.  In the 4.10 kernel
 * this is starting to be cleaned up...)
 */
int blkcipher_ablkcipher_walk_virt(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_ablkcipher *tfm)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_ablkcipher_blocksize(tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_ablkcipher_ivsize(tfm);
	walk->alignmask = crypto_ablkcipher_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_ablkcipher_walk_virt);

static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}
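
/*
 * Note (added): the bounce buffer above exists only to satisfy the
 * algorithm's alignment mask; the key copy is zeroed with memset() before
 * the buffer is freed so that no key material is left behind in the heap.
 */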

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}
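
/*
 * Note (added): for a synchronous tfm the IV is not carried in a request
 * but appended to the tfm context at an aligned offset; the extra length
 * reserved here is what crypto_init_blkcipher_ops_sync() later points
 * crt->iv at.
 */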

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));
	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
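
/*
 * Example (added, hypothetical): a minimal sketch of the crypto_alg an
 * implementation would register against crypto_blkcipher_type above.
 * The no-op callbacks only illustrate the expected signatures; a real
 * driver would install its actual setkey/encrypt/decrypt handlers and
 * call crypto_register_alg(&example_blkcipher_alg) from module init.
 */
static int example_stub_setkey(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int keylen)
{
	return 0;
}

static int example_stub_crypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src,
			      unsigned int nbytes)
{
	return 0;
}

static struct crypto_alg __maybe_unused example_blkcipher_alg = {
	.cra_name		= "example-noop",
	.cra_driver_name	= "example-noop-generic",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 16,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= 16,
			.max_keysize	= 32,
			.ivsize		= 16,
			.setkey		= example_stub_setkey,
			.encrypt	= example_stub_crypt,
			.decrypt	= example_stub_crypt,
		},
	},
};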

static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
				 const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}

struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask)
{
	struct {
		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen);
		int (*encrypt)(struct ablkcipher_request *req);
		int (*decrypt)(struct ablkcipher_request *req);

		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;

		const char *geniv;
	} balg;
	const char *name;
	struct crypto_skcipher_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return ERR_CAST(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_nivcipher(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER) {
		balg.ivsize = alg->cra_blkcipher.ivsize;
		balg.min_keysize = alg->cra_blkcipher.min_keysize;
		balg.max_keysize = alg->cra_blkcipher.max_keysize;

		balg.setkey = async_setkey;
		balg.encrypt = async_encrypt;
		balg.decrypt = async_decrypt;

		balg.geniv = alg->cra_blkcipher.geniv;
	} else {
		balg.ivsize = alg->cra_ablkcipher.ivsize;
		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
		balg.max_keysize = alg->cra_ablkcipher.max_keysize;

		balg.setkey = alg->cra_ablkcipher.setkey;
		balg.encrypt = alg->cra_ablkcipher.encrypt;
		balg.decrypt = alg->cra_ablkcipher.decrypt;

		balg.geniv = alg->cra_ablkcipher.geniv;
	}

	err = -EINVAL;
	if (!balg.ivsize)
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!balg.geniv)
			balg.geniv = crypto_default_geniv(alg);
		err = -EAGAIN;
		if (strcmp(tmpl->name, balg.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_givcipher_type;

	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
	inst->alg.cra_ablkcipher.geniv = balg.geniv;

	inst->alg.cra_ablkcipher.setkey = balg.setkey;
	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
	return inst;

err_drop_alg:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_skcipher(crypto_instance_ctx(inst));
	kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);

int skcipher_geniv_init(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_ablkcipher *cipher;

	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tfm->crt_ablkcipher.base = cipher;
	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

	return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);

void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);
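
/*
 * Note (added): the four geniv helpers above form the template lifecycle:
 * skcipher_geniv_alloc() grabs the underlying cipher and builds the
 * instance, skcipher_geniv_init()/skcipher_geniv_exit() create and free
 * the per-tfm inner ablkcipher, and skcipher_geniv_free() drops the spawn
 * when the instance is unregistered.
 */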

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");