diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-18 12:11:14 -0700 | 
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-18 12:11:14 -0700 | 
| commit | 8b53c76533aa4356602aea98f98a2f3b4051464c (patch) | |
| tree | ab10ba58e21501407f8108a6bb9003daa2176962 /crypto/pcrypt.c | |
| parent | 6cfae0c26b21dce323fe8799b66cf4bc996e3565 (diff) | |
| parent | 9575d1a5c0780ea26ff8dd29c94a32be32ce3c85 (diff) | |
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Add the ability to abort a skcipher walk.
  Algorithms:
   - Fix XTS to actually do the stealing.
   - Add library helpers for AES and DES for single-block users.
   - Add library helpers for SHA256.
   - Add new DES key verification helper.
   - Add surrounding bits for ESSIV generator.
   - Add accelerations for aegis128.
   - Add test vectors for lzo-rle.
  Drivers:
   - Add i.MX8MQ support to caam.
   - Add gcm/ccm/cfb/ofb aes support in inside-secure.
   - Add ofb/cfb aes support in media-tek.
   - Add HiSilicon ZIP accelerator support.
  Others:
   - Fix potential race condition in padata.
   - Use unbound workqueues in padata"
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (311 commits)
  crypto: caam - Cast to long first before pointer conversion
  crypto: ccree - enable CTS support in AES-XTS
  crypto: inside-secure - Probe transform record cache RAM sizes
  crypto: inside-secure - Base RD fetchcount on actual RD FIFO size
  crypto: inside-secure - Base CD fetchcount on actual CD FIFO size
  crypto: inside-secure - Enable extended algorithms on newer HW
  crypto: inside-secure: Corrected configuration of EIP96_TOKEN_CTRL
  crypto: inside-secure - Add EIP97/EIP197 and endianness detection
  padata: remove cpu_index from the parallel_queue
  padata: unbind parallel jobs from specific CPUs
  padata: use separate workqueues for parallel and serial work
  padata, pcrypt: take CPU hotplug lock internally in padata_alloc_possible
  crypto: pcrypt - remove padata cpumask notifier
  padata: make padata_do_parallel find alternate callback CPU
  workqueue: require CPU hotplug read exclusion for apply_workqueue_attrs
  workqueue: unconfine alloc/apply/free_workqueue_attrs()
  padata: allocate workqueue internally
  arm64: dts: imx8mq: Add CAAM node
  random: Use wait_event_freezable() in add_hwgenerator_randomness()
  crypto: ux500 - Fix COMPILE_TEST warnings
  ...
Diffstat (limited to 'crypto/pcrypt.c')
| -rw-r--r-- | crypto/pcrypt.c | 167 | 
1 file changed, 18 insertions, 149 deletions
| diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 0edf5b54fc77..543792e0ebf0 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -18,34 +18,8 @@  #include <linux/cpu.h>  #include <crypto/pcrypt.h> -struct padata_pcrypt { -	struct padata_instance *pinst; -	struct workqueue_struct *wq; - -	/* -	 * Cpumask for callback CPUs. It should be -	 * equal to serial cpumask of corresponding padata instance, -	 * so it is updated when padata notifies us about serial -	 * cpumask change. -	 * -	 * cb_cpumask is protected by RCU. This fact prevents us from -	 * using cpumask_var_t directly because the actual type of -	 * cpumsak_var_t depends on kernel configuration(particularly on -	 * CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration -	 * cpumask_var_t may be either a pointer to the struct cpumask -	 * or a variable allocated on the stack. Thus we can not safely use -	 * cpumask_var_t with RCU operations such as rcu_assign_pointer or -	 * rcu_dereference. So cpumask_var_t is wrapped with struct -	 * pcrypt_cpumask which makes possible to use it with RCU. 
-	 */ -	struct pcrypt_cpumask { -		cpumask_var_t mask; -	} *cb_cpumask; -	struct notifier_block nblock; -}; - -static struct padata_pcrypt pencrypt; -static struct padata_pcrypt pdecrypt; +static struct padata_instance *pencrypt; +static struct padata_instance *pdecrypt;  static struct kset           *pcrypt_kset;  struct pcrypt_instance_ctx { @@ -58,35 +32,6 @@ struct pcrypt_aead_ctx {  	unsigned int cb_cpu;  }; -static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu, -			      struct padata_pcrypt *pcrypt) -{ -	unsigned int cpu_index, cpu, i; -	struct pcrypt_cpumask *cpumask; - -	cpu = *cb_cpu; - -	rcu_read_lock_bh(); -	cpumask = rcu_dereference_bh(pcrypt->cb_cpumask); -	if (cpumask_test_cpu(cpu, cpumask->mask)) -			goto out; - -	if (!cpumask_weight(cpumask->mask)) -			goto out; - -	cpu_index = cpu % cpumask_weight(cpumask->mask); - -	cpu = cpumask_first(cpumask->mask); -	for (i = 0; i < cpu_index; i++) -		cpu = cpumask_next(cpu, cpumask->mask); - -	*cb_cpu = cpu; - -out: -	rcu_read_unlock_bh(); -	return padata_do_parallel(pcrypt->pinst, padata, cpu); -} -  static int pcrypt_aead_setkey(struct crypto_aead *parent,  			      const u8 *key, unsigned int keylen)  { @@ -158,7 +103,7 @@ static int pcrypt_aead_encrypt(struct aead_request *req)  			       req->cryptlen, req->iv);  	aead_request_set_ad(creq, req->assoclen); -	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); +	err = padata_do_parallel(pencrypt, padata, &ctx->cb_cpu);  	if (!err)  		return -EINPROGRESS; @@ -200,7 +145,7 @@ static int pcrypt_aead_decrypt(struct aead_request *req)  			       req->cryptlen, req->iv);  	aead_request_set_ad(creq, req->assoclen); -	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); +	err = padata_do_parallel(pdecrypt, padata, &ctx->cb_cpu);  	if (!err)  		return -EINPROGRESS; @@ -347,36 +292,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)  	return -EINVAL;  } -static int pcrypt_cpumask_change_notify(struct 
notifier_block *self, -					unsigned long val, void *data) -{ -	struct padata_pcrypt *pcrypt; -	struct pcrypt_cpumask *new_mask, *old_mask; -	struct padata_cpumask *cpumask = (struct padata_cpumask *)data; - -	if (!(val & PADATA_CPU_SERIAL)) -		return 0; - -	pcrypt = container_of(self, struct padata_pcrypt, nblock); -	new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL); -	if (!new_mask) -		return -ENOMEM; -	if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) { -		kfree(new_mask); -		return -ENOMEM; -	} - -	old_mask = pcrypt->cb_cpumask; - -	cpumask_copy(new_mask->mask, cpumask->cbcpu); -	rcu_assign_pointer(pcrypt->cb_cpumask, new_mask); -	synchronize_rcu(); - -	free_cpumask_var(old_mask->mask); -	kfree(old_mask); -	return 0; -} -  static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)  {  	int ret; @@ -389,71 +304,25 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)  	return ret;  } -static int pcrypt_init_padata(struct padata_pcrypt *pcrypt, -			      const char *name) +static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)  {  	int ret = -ENOMEM; -	struct pcrypt_cpumask *mask; -	get_online_cpus(); - -	pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, -				     1, name); -	if (!pcrypt->wq) -		goto err; - -	pcrypt->pinst = padata_alloc_possible(pcrypt->wq); -	if (!pcrypt->pinst) -		goto err_destroy_workqueue; - -	mask = kmalloc(sizeof(*mask), GFP_KERNEL); -	if (!mask) -		goto err_free_padata; -	if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) { -		kfree(mask); -		goto err_free_padata; -	} +	*pinst = padata_alloc_possible(name); +	if (!*pinst) +		return ret; -	cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask); -	rcu_assign_pointer(pcrypt->cb_cpumask, mask); - -	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify; -	ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); -	if (ret) -		goto err_free_cpumask; - -	ret = 
pcrypt_sysfs_add(pcrypt->pinst, name); +	ret = pcrypt_sysfs_add(*pinst, name);  	if (ret) -		goto err_unregister_notifier; - -	put_online_cpus(); - -	return ret; - -err_unregister_notifier: -	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); -err_free_cpumask: -	free_cpumask_var(mask->mask); -	kfree(mask); -err_free_padata: -	padata_free(pcrypt->pinst); -err_destroy_workqueue: -	destroy_workqueue(pcrypt->wq); -err: -	put_online_cpus(); +		padata_free(*pinst);  	return ret;  } -static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) +static void pcrypt_fini_padata(struct padata_instance *pinst)  { -	free_cpumask_var(pcrypt->cb_cpumask->mask); -	kfree(pcrypt->cb_cpumask); - -	padata_stop(pcrypt->pinst); -	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); -	destroy_workqueue(pcrypt->wq); -	padata_free(pcrypt->pinst); +	padata_stop(pinst); +	padata_free(pinst);  }  static struct crypto_template pcrypt_tmpl = { @@ -478,13 +347,13 @@ static int __init pcrypt_init(void)  	if (err)  		goto err_deinit_pencrypt; -	padata_start(pencrypt.pinst); -	padata_start(pdecrypt.pinst); +	padata_start(pencrypt); +	padata_start(pdecrypt);  	return crypto_register_template(&pcrypt_tmpl);  err_deinit_pencrypt: -	pcrypt_fini_padata(&pencrypt); +	pcrypt_fini_padata(pencrypt);  err_unreg_kset:  	kset_unregister(pcrypt_kset);  err: @@ -493,8 +362,8 @@ err:  static void __exit pcrypt_exit(void)  { -	pcrypt_fini_padata(&pencrypt); -	pcrypt_fini_padata(&pdecrypt); +	pcrypt_fini_padata(pencrypt); +	pcrypt_fini_padata(pdecrypt);  	kset_unregister(pcrypt_kset);  	crypto_unregister_template(&pcrypt_tmpl); | 
