android_kernel_oneplus_msm8998/crypto/shash.c
Greg Kroah-Hartman 0ca3fcabdc This is the 4.4.163 stable release

Merge 4.4.163 into android-4.4

Changes in 4.4.163
	xfrm: Validate address prefix lengths in the xfrm selector.
	xfrm6: call kfree_skb when skb is toobig
	mac80211: Always report TX status
	cfg80211: reg: Init wiphy_idx in regulatory_hint_core()
	ARM: 8799/1: mm: fix pci_ioremap_io() offset check
	xfrm: validate template mode
	mac80211_hwsim: do not omit multicast announce of first added radio
	Bluetooth: SMP: fix crash in unpairing
	pxa168fb: prepare the clock
	asix: Check for supported Wake-on-LAN modes
	ax88179_178a: Check for supported Wake-on-LAN modes
	lan78xx: Check for supported Wake-on-LAN modes
	sr9800: Check for supported Wake-on-LAN modes
	r8152: Check for supported Wake-on-LAN Modes
	smsc75xx: Check for Wake-on-LAN modes
	smsc95xx: Check for Wake-on-LAN modes
	perf/ring_buffer: Prevent concurent ring buffer access
	net: cxgb3_main: fix a missing-check bug
	KEYS: put keyring if install_session_keyring_to_cred() fails
	ipv6: suppress sparse warnings in IP6_ECN_set_ce()
	net: drop write-only stack variable
	ser_gigaset: use container_of() instead of detour
	tracing: Skip more functions when doing stack tracing of events
	ARM: dts: apq8064: add ahci ports-implemented mask
	x86/mm/pat: Prevent hang during boot when mapping pages
	radix-tree: fix radix_tree_iter_retry() for tagged iterators.
	af_iucv: Move sockaddr length checks to before accessing sa_family in bind and connect handlers
	net/mlx4_en: Resolve dividing by zero in 32-bit system
	ipv6: orphan skbs in reassembly unit
	um: Avoid longjmp/setjmp symbol clashes with libpthread.a
	sched/cgroup: Fix cgroup entity load tracking tear-down
	btrfs: don't create or leak aliased root while cleaning up orphans
	thermal: allow spear-thermal driver to be a module
	thermal: allow u8500-thermal driver to be a module
	tpm: fix: return rc when devm_add_action() fails
	x86/PCI: Mark Broadwell-EP Home Agent 1 as having non-compliant BARs
	aacraid: Start adapter after updating number of MSIX vectors
	perf/core: Don't leak event in the syscall error path
	usbvision: revert commit 588afcc1
	MIPS: Fix FCSR Cause bit handling for correct SIGFPE issue
	ASoC: ak4613: Enable cache usage to fix crashes on resume
	ASoC: wm8940: Enable cache usage to fix crashes on resume
	CIFS: handle guest access errors to Windows shares
	arm64: Fix potential race with hardware DBM in ptep_set_access_flags()
	xfrm: Clear sk_dst_cache when applying per-socket policy.
	scsi: Add STARGET_CREATED_REMOVE state to scsi_target_state
	sparc/pci: Refactor dev_archdata initialization into pci_init_dev_archdata
	sch_red: update backlog as well
	usb-storage: fix bogus hardware error messages for ATA pass-thru devices
	bpf: generally move prog destruction to RCU deferral
	drm/nouveau/fbcon: fix oops without fbdev emulation
	fuse: Dont call set_page_dirty_lock() for ITER_BVEC pages for async_dio
	ixgbevf: Fix handling of NAPI budget when multiple queues are enabled per vector
	net/mlx5e: Fix LRO modify
	net/mlx5e: Correctly handle RSS indirection table when changing number of channels
	ixgbe: fix RSS limit for X550
	ixgbe: Correct X550EM_x revision check
	ALSA: timer: Fix zero-division by continue of uninitialized instance
	vti6: flush x-netns xfrm cache when vti interface is removed
	gro: Allow tunnel stacking in the case of FOU/GUE
	brcmfmac: Fix glom_skb leak in brcmf_sdiod_recv_chain
	l2tp: hold socket before dropping lock in l2tp_ip{, 6}_recv()
	tty: serial: sprd: fix error return code in sprd_probe()
	video: fbdev: pxa3xx_gcu: fix error return code in pxa3xx_gcu_probe()
	sparc64 mm: Fix more TSB sizing issues
	gpu: host1x: fix error return code in host1x_probe()
	sparc64: Fix exception handling in UltraSPARC-III memcpy.
	gpio: msic: fix error return code in platform_msic_gpio_probe()
	usb: imx21-hcd: fix error return code in imx21_probe()
	usb: ehci-omap: fix error return code in ehci_hcd_omap_probe()
	usb: dwc3: omap: fix error return code in dwc3_omap_probe()
	spi/bcm63xx-hspi: fix error return code in bcm63xx_hsspi_probe()
	MIPS: Handle non word sized instructions when examining frame
	spi/bcm63xx: fix error return code in bcm63xx_spi_probe()
	spi: xlp: fix error return code in xlp_spi_probe()
	ASoC: spear: fix error return code in spdif_in_probe()
	PM / devfreq: tegra: fix error return code in tegra_devfreq_probe()
	bonding: avoid defaulting hard_header_len to ETH_HLEN on slave removal
	scsi: aacraid: Fix typo in blink status
	MIPS: microMIPS: Fix decoding of swsp16 instruction
	igb: Remove superfluous reset to PHY and page 0 selection
	MIPS: DEC: Fix an int-handler.S CPU_DADDI_WORKAROUNDS regression
	ARM: dts: imx53-qsb: disable 1.2GHz OPP
	fs/fat/fatent.c: add cond_resched() to fat_count_free_clusters()
	mtd: spi-nor: Add support for is25wp series chips
	perf tools: Disable parallelism for 'make clean'
	bridge: do not add port to router list when receives query with source 0.0.0.0
	net: bridge: remove ipv6 zero address check in mcast queries
	ipv6: mcast: fix a use-after-free in inet6_mc_check
	ipv6/ndisc: Preserve IPv6 control buffer if protocol error handlers are called
	net/ipv6: Fix index counter for unicast addresses in in6_dump_addrs
	net: sched: gred: pass the right attribute to gred_change_table_def()
	net: socket: fix a missing-check bug
	net: stmmac: Fix stmmac_mdio_reset() when building stmmac as modules
	r8169: fix NAPI handling under high load
	sctp: fix race on sctp_id2asoc
	net: drop skb on failure in ip_check_defrag()
	vhost: Fix Spectre V1 vulnerability
	rtnetlink: Disallow FDB configuration for non-Ethernet device
	mremap: properly flush TLB before releasing the page
	crypto: shash - Fix a sleep-in-atomic bug in shash_setkey_unaligned
	ahci: don't ignore result code of ahci_reset_controller()
	cachefiles: fix the race between cachefiles_bury_object() and rmdir(2)
	ptp: fix Spectre v1 vulnerability
	RDMA/ucma: Fix Spectre v1 vulnerability
	IB/ucm: Fix Spectre v1 vulnerability
	cdc-acm: correct counting of UART states in serial state notification
	usb: gadget: storage: Fix Spectre v1 vulnerability
	USB: fix the usbfs flag sanitization for control transfers
	Input: elan_i2c - add ACPI ID for Lenovo IdeaPad 330-15IGM
	sched/fair: Fix throttle_list starvation with low CFS quota
	x86/percpu: Fix this_cpu_read()
	cpuidle: Do not access cpuidle_devices when !CONFIG_CPU_IDLE
	l2tp: hold tunnel socket when handling control frames in l2tp_ip and l2tp_ip6
	x86/time: Correct the attribute on jiffies' definition
	Linux 4.4.163

Change-Id: Idb0efd175853886145a1fb7eaaf18797c39e5f6f
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
2018-11-10 07:54:26 -08:00

/*
 * Synchronous Cryptographic Hash operations.
 *
 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

static const struct crypto_type crypto_shash_type;

int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
		    unsigned int keylen)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(shash_no_setkey);
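
/*
 * Bounce the key through a temporary buffer so that ->setkey() always
 * sees a key aligned to the algorithm's alignmask.  The allocation uses
 * GFP_ATOMIC rather than GFP_KERNEL because setkey can be reached from
 * atomic context; see the "sleep-in-atomic bug in
 * shash_setkey_unaligned" fix in the 4.4.163 changelog above.
 */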
static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct shash_alg *shash = crypto_shash_alg(tfm);
	unsigned long alignmask = crypto_shash_alignmask(tfm);
	unsigned long absize;
	u8 *buffer, *alignbuffer;
	int err;

	absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	err = shash->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return err;
}
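
/*
 * Dispatch to the algorithm's ->setkey(), taking the bounce-buffer path
 * above whenever the caller's key pointer is not suitably aligned.
 */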
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct shash_alg *shash = crypto_shash_alg(tfm);
	unsigned long alignmask = crypto_shash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return shash_setkey_unaligned(tfm, key, keylen);

	return shash->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);

static inline unsigned int shash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	typedef u8 __attribute__ ((aligned)) u8_aligned;
	return len + (mask & ~(__alignof__(u8_aligned) - 1));
}
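
/*
 * Hash the unaligned head of the data through an aligned on-stack
 * bounce buffer, then feed the now-aligned remainder directly to
 * ->update().
 */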
static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
				  unsigned int len)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);
	unsigned long alignmask = crypto_shash_alignmask(tfm);
	unsigned int unaligned_len = alignmask + 1 -
				     ((unsigned long)data & alignmask);
	u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)]
		__attribute__ ((aligned));
	u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
	int err;

	if (unaligned_len > len)
		unaligned_len = len;

	memcpy(buf, data, unaligned_len);
	err = shash->update(desc, buf, unaligned_len);
	memset(buf, 0, unaligned_len);

	return err ?:
	       shash->update(desc, data + unaligned_len, len - unaligned_len);
}

int crypto_shash_update(struct shash_desc *desc, const u8 *data,
			unsigned int len)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);
	unsigned long alignmask = crypto_shash_alignmask(tfm);

	if ((unsigned long)data & alignmask)
		return shash_update_unaligned(desc, data, len);

	return shash->update(desc, data, len);
}
EXPORT_SYMBOL_GPL(crypto_shash_update);
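
/*
 * Produce the digest into an aligned on-stack buffer and copy it out,
 * for callers whose result buffer is not aligned to the alignmask.
 */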
static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
{
	struct crypto_shash *tfm = desc->tfm;
	unsigned long alignmask = crypto_shash_alignmask(tfm);
	struct shash_alg *shash = crypto_shash_alg(tfm);
	unsigned int ds = crypto_shash_digestsize(tfm);
	u8 ubuf[shash_align_buffer_size(ds, alignmask)]
		__attribute__ ((aligned));
	u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
	int err;

	err = shash->final(desc, buf);
	if (err)
		goto out;

	memcpy(out, buf, ds);

out:
	memset(buf, 0, ds);
	return err;
}

int crypto_shash_final(struct shash_desc *desc, u8 *out)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);
	unsigned long alignmask = crypto_shash_alignmask(tfm);

	if ((unsigned long)out & alignmask)
		return shash_final_unaligned(desc, out);

	return shash->final(desc, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_final);

static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data,
				 unsigned int len, u8 *out)
{
	return crypto_shash_update(desc, data, len) ?:
	       crypto_shash_final(desc, out);
}

int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
		       unsigned int len, u8 *out)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);
	unsigned long alignmask = crypto_shash_alignmask(tfm);

	if (((unsigned long)data | (unsigned long)out) & alignmask)
		return shash_finup_unaligned(desc, data, len, out);

	return shash->finup(desc, data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_finup);

static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data,
				  unsigned int len, u8 *out)
{
	return crypto_shash_init(desc) ?:
	       crypto_shash_finup(desc, data, len, out);
}

int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
			unsigned int len, u8 *out)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);
	unsigned long alignmask = crypto_shash_alignmask(tfm);

	if (((unsigned long)data | (unsigned long)out) & alignmask)
		return shash_digest_unaligned(desc, data, len, out);

	return shash->digest(desc, data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);

static int shash_default_export(struct shash_desc *desc, void *out)
{
	memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm));
	return 0;
}

static int shash_default_import(struct shash_desc *desc, const void *in)
{
	memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm));
	return 0;
}

static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct crypto_shash **ctx = crypto_ahash_ctx(tfm);

	return crypto_shash_setkey(*ctx, key, keylen);
}

static int shash_async_init(struct ahash_request *req)
{
	struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct shash_desc *desc = ahash_request_ctx(req);

	desc->tfm = *ctx;
	desc->flags = req->base.flags;

	return crypto_shash_init(desc);
}
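
/*
 * Walk the request's scatterlist chunk by chunk and feed each mapped
 * region to crypto_shash_update().  This is how an ahash request is
 * serviced by a synchronous shash implementation.
 */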
int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
{
	struct crypto_hash_walk walk;
	int nbytes;

	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, nbytes))
		nbytes = crypto_shash_update(desc, walk.data, nbytes);

	return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_update);

static int shash_async_update(struct ahash_request *req)
{
	return shash_ahash_update(req, ahash_request_ctx(req));
}

static int shash_async_final(struct ahash_request *req)
{
	return crypto_shash_final(ahash_request_ctx(req), req->result);
}
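
/*
 * Like shash_ahash_update(), but the last chunk of the walk goes
 * through ->finup() so the digest is produced in the same pass.
 */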
int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
{
	struct crypto_hash_walk walk;
	int nbytes;

	nbytes = crypto_hash_walk_first(req, &walk);
	if (!nbytes)
		return crypto_shash_final(desc, req->result);

	do {
		nbytes = crypto_hash_walk_last(&walk) ?
			 crypto_shash_finup(desc, walk.data, nbytes,
					    req->result) :
			 crypto_shash_update(desc, walk.data, nbytes);
		nbytes = crypto_hash_walk_done(&walk, nbytes);
	} while (nbytes > 0);

	return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_finup);

static int shash_async_finup(struct ahash_request *req)
{
	struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct shash_desc *desc = ahash_request_ctx(req);

	desc->tfm = *ctx;
	desc->flags = req->base.flags;

	return shash_ahash_finup(req, desc);
}
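
/*
 * Fast path: if the data fits within a single page, map it once with
 * kmap_atomic() and hash it in one ->digest() call; otherwise fall
 * back to the generic init + finup walk.
 */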
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
	unsigned int nbytes = req->nbytes;
	struct scatterlist *sg;
	unsigned int offset;
	int err;

	if (nbytes &&
	    (sg = req->src, offset = sg->offset,
	     nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
		void *data;

		data = kmap_atomic(sg_page(sg));
		err = crypto_shash_digest(desc, data + offset, nbytes,
					  req->result);
		kunmap_atomic(data);
		crypto_yield(desc->flags);
	} else
		err = crypto_shash_init(desc) ?:
		      shash_ahash_finup(req, desc);

	return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_digest);

static int shash_async_digest(struct ahash_request *req)
{
	struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct shash_desc *desc = ahash_request_ctx(req);

	desc->tfm = *ctx;
	desc->flags = req->base.flags;

	return shash_ahash_digest(req, desc);
}

static int shash_async_export(struct ahash_request *req, void *out)
{
	return crypto_shash_export(ahash_request_ctx(req), out);
}

static int shash_async_import(struct ahash_request *req, const void *in)
{
	struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct shash_desc *desc = ahash_request_ctx(req);

	desc->tfm = *ctx;
	desc->flags = req->base.flags;

	return crypto_shash_import(desc, in);
}

static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_shash **ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(*ctx);
}
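
/*
 * Wire up an ahash tfm so that all of its operations are backed by a
 * freshly allocated shash tfm; export/import are only provided when
 * the underlying algorithm supports them.
 */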
int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct shash_alg *alg = __crypto_shash_alg(calg);
	struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
	struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *shash;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	shash = crypto_create_tfm(calg, &crypto_shash_type);
	if (IS_ERR(shash)) {
		crypto_mod_put(calg);
		return PTR_ERR(shash);
	}

	*ctx = shash;
	tfm->exit = crypto_exit_shash_ops_async;

	crt->init = shash_async_init;
	crt->update = shash_async_update;
	crt->final = shash_async_final;
	crt->finup = shash_async_finup;
	crt->digest = shash_async_digest;
	crt->setkey = shash_async_setkey;
	crt->has_setkey = alg->setkey != shash_no_setkey;

	if (alg->export)
		crt->export = shash_async_export;
	if (alg->import)
		crt->import = shash_async_import;

	crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);

	return 0;
}

static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct shash_desc **descp = crypto_hash_ctx(tfm);
	struct shash_desc *desc = *descp;

	return crypto_shash_setkey(desc->tfm, key, keylen);
}

static int shash_compat_init(struct hash_desc *hdesc)
{
	struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
	struct shash_desc *desc = *descp;

	desc->flags = hdesc->flags;

	return crypto_shash_init(desc);
}

static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg,
			       unsigned int len)
{
	struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
	struct shash_desc *desc = *descp;
	struct crypto_hash_walk walk;
	int nbytes;

	for (nbytes = crypto_hash_walk_first_compat(hdesc, &walk, sg, len);
	     nbytes > 0; nbytes = crypto_hash_walk_done(&walk, nbytes))
		nbytes = crypto_shash_update(desc, walk.data, nbytes);

	return nbytes;
}

static int shash_compat_final(struct hash_desc *hdesc, u8 *out)
{
	struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);

	return crypto_shash_final(*descp, out);
}

static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
			       unsigned int nbytes, u8 *out)
{
	unsigned int offset = sg->offset;
	int err;

	if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
		struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
		struct shash_desc *desc = *descp;
		void *data;

		desc->flags = hdesc->flags;

		data = kmap_atomic(sg_page(sg));
		err = crypto_shash_digest(desc, data + offset, nbytes, out);
		kunmap_atomic(data);
		crypto_yield(desc->flags);
		goto out;
	}

	err = shash_compat_init(hdesc);
	if (err)
		goto out;

	err = shash_compat_update(hdesc, sg, nbytes);
	if (err)
		goto out;

	err = shash_compat_final(hdesc, out);

out:
	return err;
}

static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm)
{
	struct shash_desc **descp = crypto_tfm_ctx(tfm);
	struct shash_desc *desc = *descp;

	crypto_free_shash(desc->tfm);
	kzfree(desc);
}
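
/*
 * Legacy crypto_hash compatibility layer: allocate a shash tfm plus a
 * heap-allocated shash_desc and route the old hash_tfm entry points
 * through them.
 */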
static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
{
	struct hash_tfm *crt = &tfm->crt_hash;
	struct crypto_alg *calg = tfm->__crt_alg;
	struct shash_alg *alg = __crypto_shash_alg(calg);
	struct shash_desc **descp = crypto_tfm_ctx(tfm);
	struct crypto_shash *shash;
	struct shash_desc *desc;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	shash = crypto_create_tfm(calg, &crypto_shash_type);
	if (IS_ERR(shash)) {
		crypto_mod_put(calg);
		return PTR_ERR(shash);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash),
		       GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(shash);
		return -ENOMEM;
	}

	*descp = desc;
	desc->tfm = shash;
	tfm->exit = crypto_exit_shash_ops_compat;

	crt->init = shash_compat_init;
	crt->update = shash_compat_update;
	crt->final = shash_compat_final;
	crt->digest = shash_compat_digest;
	crt->setkey = shash_compat_setkey;

	crt->digestsize = alg->digestsize;

	return 0;
}

static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	switch (mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_HASH_MASK:
		return crypto_init_shash_ops_compat(tfm);
	}

	return -EINVAL;
}

static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type,
					 u32 mask)
{
	switch (mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_HASH_MASK:
		return sizeof(struct shash_desc *);
	}

	return 0;
}

static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_shash *hash = __crypto_shash_cast(tfm);

	hash->descsize = crypto_shash_alg(hash)->descsize;
	return 0;
}

#ifdef CONFIG_NET
static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;
	struct shash_alg *salg = __crypto_shash_alg(alg);

	strncpy(rhash.type, "shash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = salg->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct shash_alg *salg = __crypto_shash_alg(alg);

	seq_printf(m, "type         : shash\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n", salg->digestsize);
}

static const struct crypto_type crypto_shash_type = {
	.ctxsize = crypto_shash_ctxsize,
	.extsize = crypto_alg_extsize,
	.init = crypto_init_shash_ops,
	.init_tfm = crypto_shash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_shash_show,
#endif
	.report = crypto_shash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SHASH,
	.tfmsize = offsetof(struct crypto_shash, base),
};

struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_shash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_shash);
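
/*
 * Validate size limits and fill in default operations (finup, digest,
 * export/import, setkey) for algorithms that do not provide their own.
 */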
static int shash_prepare_alg(struct shash_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->digestsize > PAGE_SIZE / 8 ||
	    alg->descsize > PAGE_SIZE / 8 ||
	    alg->statesize > PAGE_SIZE / 8)
		return -EINVAL;

	base->cra_type = &crypto_shash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;

	if (!alg->finup)
		alg->finup = shash_finup_unaligned;
	if (!alg->digest)
		alg->digest = shash_digest_unaligned;
	if (!alg->export) {
		alg->export = shash_default_export;
		alg->import = shash_default_import;
		alg->statesize = alg->descsize;
	}
	if (!alg->setkey)
		alg->setkey = shash_no_setkey;

	return 0;
}

int crypto_register_shash(struct shash_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = shash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_shash);

int crypto_unregister_shash(struct shash_alg *alg)
{
	return crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_shash);

int crypto_register_shashes(struct shash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_shash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_shash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_shashes);

int crypto_unregister_shashes(struct shash_alg *algs, int count)
{
	int i, ret;

	for (i = count - 1; i >= 0; --i) {
		ret = crypto_unregister_shash(&algs[i]);
		if (ret)
			pr_err("Failed to unregister %s %s: %d\n",
			       algs[i].base.cra_driver_name,
			       algs[i].base.cra_name, ret);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_unregister_shashes);

int shash_register_instance(struct crypto_template *tmpl,
			    struct shash_instance *inst)
{
	int err;

	err = shash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, shash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(shash_register_instance);

void shash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(shash_instance(inst));
}
EXPORT_SYMBOL_GPL(shash_free_instance);

int crypto_grab_shash(struct crypto_shash_spawn *spawn,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_shash_type;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_shash);

int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
			    struct shash_alg *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_shash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_shash_spawn);

struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_shash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) :
	       container_of(alg, struct shash_alg, base);
}
EXPORT_SYMBOL_GPL(shash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous cryptographic hash type");
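
/*
 * Example: one-shot hashing with the shash API.  This is an
 * illustrative sketch, not part of the original file; the algorithm
 * name "sha256", the buffer names and the error handling are the
 * caller's choice.
 *
 *	struct crypto_shash *tfm;
 *	u8 digest[32];
 *	int err;
 *
 *	tfm = crypto_alloc_shash("sha256", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	{
 *		SHASH_DESC_ON_STACK(desc, tfm);
 *
 *		desc->tfm = tfm;
 *		desc->flags = 0;
 *		err = crypto_shash_digest(desc, data, len, digest);
 *	}
 *
 *	crypto_free_shash(tfm);
 */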