// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <malloc.h>
#include <memalign.h>
#include "jobdesc.h"
#include "desc.h"
#include "jr.h"
#include "fsl_hash.h"
#include <hw_sha.h>
#include <linux/errno.h>

#define CRYPTO_MAX_ALG_NAME	80
#define SHA1_DIGEST_SIZE	20
#define SHA256_DIGEST_SIZE	32

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	unsigned int digestsize;
	u32 alg_type;
};

enum caam_hash_algos {
	SHA1 = 0,
	SHA256
};

static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.digestsize = SHA1_DIGEST_SIZE,
		.alg_type = OP_ALG_ALGSEL_SHA1,
	},
	{
		.name = "sha256",
		.digestsize = SHA256_DIGEST_SIZE,
		.alg_type = OP_ALG_ALGSEL_SHA256,
	},
};

/*
 * Map a generic hash_algo to the CAAM algorithm enum. Any name other
 * than "sha1" falls through to SHA256, matching the two entries in
 * driver_hash[] above.
 */
static enum caam_hash_algos get_hash_type(struct hash_algo *algo)
{
	if (!strcmp(algo->name, driver_hash[SHA1].name))
		return SHA1;
	else
		return SHA256;
}

/*
 * Create the context for progressive hashing using h/w acceleration.
 *
 * @ctxp: Pointer to the pointer of the context for hashing
 * @caam_algo: Enum for SHA1 or SHA256
 * @return 0 if ok, -ENOMEM on error
 */
static int caam_hash_init(void **ctxp, enum caam_hash_algos caam_algo)
{
	*ctxp = calloc(1, sizeof(struct sha_ctx));
	if (*ctxp == NULL) {
		debug("Cannot allocate memory for context\n");
		return -ENOMEM;
	}
	return 0;
}

/*
 * Update the scatter/gather table for progressive hashing using h/w
 * acceleration.
 *
 * The context is freed by this function if an error occurs.
 * At most 32 scatter/gather entries are supported.
 *
 * @hash_ctx: Pointer to the context for hashing
 * @buf: Pointer to the buffer being hashed
 * @size: Size of the buffer being hashed
 * @is_last: 1 if this is the last update; 0 otherwise
 * @caam_algo: Enum for SHA1 or SHA256
 * @return 0 if ok, -EINVAL on error
 */
static int caam_hash_update(void *hash_ctx, const void *buf,
			    unsigned int size, int is_last,
			    enum caam_hash_algos caam_algo)
{
	uint32_t final = 0;
	phys_addr_t addr = virt_to_phys((void *)buf);
	struct sha_ctx *ctx = hash_ctx;

	if (ctx->sg_num >= MAX_SG_32) {
		free(ctx);
		return -EINVAL;
	}

#ifdef CONFIG_PHYS_64BIT
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, (uint32_t)(addr >> 32));
#else
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, 0x0);
#endif
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_lo, (uint32_t)addr);

	sec_out32(&ctx->sg_tbl[ctx->sg_num].len_flag,
		  (size & SG_ENTRY_LENGTH_MASK));

	ctx->sg_num++;

	/* Mark the final entry so the CAAM knows where the chain ends */
	if (is_last) {
		final = sec_in32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag) |
			SG_ENTRY_FINAL_BIT;
		sec_out32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag, final);
	}

	return 0;
}

/*
 * Perform progressive hashing on the given buffer and copy the hash
 * to the destination buffer.
 *
 * The context is freed after completion of the hash operation.
 *
 * @hash_ctx: Pointer to the context for hashing
 * @dest_buf: Pointer to the destination buffer where the hash is to be copied
 * @size: Size of the destination buffer
 * @caam_algo: Enum for SHA1 or SHA256
 * @return 0 if ok, -EINVAL on error
 */
static int caam_hash_finish(void *hash_ctx, void *dest_buf,
			    int size, enum caam_hash_algos caam_algo)
{
	uint32_t len = 0;
	struct sha_ctx *ctx = hash_ctx;
	int i = 0, ret = 0;

	if (size < driver_hash[caam_algo].digestsize) {
		free(ctx);
		return -EINVAL;
	}

	for (i = 0; i < ctx->sg_num; i++)
		len += (sec_in32(&ctx->sg_tbl[i].len_flag) &
			SG_ENTRY_LENGTH_MASK);

	inline_cnstr_jobdesc_hash(ctx->sha_desc, (uint8_t *)ctx->sg_tbl, len,
				  ctx->hash,
				  driver_hash[caam_algo].alg_type,
				  driver_hash[caam_algo].digestsize,
				  1);

	ret = run_descriptor_jr(ctx->sha_desc);

	if (ret) {
		debug("Error %x\n", ret);
	} else {
		/*
		 * Copy only digestsize bytes: ctx->hash is sized for the
		 * largest supported digest, so sizeof(ctx->hash) could
		 * overrun a SHA1 caller's 20-byte buffer.
		 */
		memcpy(dest_buf, ctx->hash,
		       driver_hash[caam_algo].digestsize);
	}

	free(ctx);
	return ret;
}
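
/*
 * Illustrative sketch (not part of the driver): how the progressive
 * API above is meant to be chained. chunk0/chunk1/len0/len1 are
 * hypothetical caller data; error handling follows the contracts
 * documented above (update and finish free the context on failure,
 * so the caller must not free it again).
 *
 *	void *ctx;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	if (caam_hash_init(&ctx, SHA256))
 *		return;			// -ENOMEM, no context to free
 *	if (caam_hash_update(ctx, chunk0, len0, 0, SHA256))
 *		return;			// ctx already freed on error
 *	if (caam_hash_update(ctx, chunk1, len1, 1, SHA256))
 *		return;			// is_last = 1 closes the sg table
 *	caam_hash_finish(ctx, digest, sizeof(digest), SHA256);
 *					// ctx freed by finish
 */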

int caam_hash(const unsigned char *pbuf, unsigned int buf_len,
	      unsigned char *pout, enum caam_hash_algos algo)
{
	int ret = 0;
	uint32_t *desc;
	unsigned int size;

	/* Validate alignment first so a failure cannot leak the descriptor */
	if (!IS_ALIGNED((uintptr_t)pbuf, ARCH_DMA_MINALIGN) ||
	    !IS_ALIGNED((uintptr_t)pout, ARCH_DMA_MINALIGN)) {
		puts("Error: Address arguments are not aligned\n");
		return -EINVAL;
	}

	desc = malloc_cache_aligned(sizeof(int) * MAX_CAAM_DESCSIZE);
	if (!desc) {
		debug("Not enough memory for descriptor allocation\n");
		return -ENOMEM;
	}

	size = ALIGN(buf_len, ARCH_DMA_MINALIGN);
	flush_dcache_range((unsigned long)pbuf, (unsigned long)pbuf + size);

	inline_cnstr_jobdesc_hash(desc, pbuf, buf_len, pout,
				  driver_hash[algo].alg_type,
				  driver_hash[algo].digestsize,
				  0);

	size = ALIGN(sizeof(int) * MAX_CAAM_DESCSIZE, ARCH_DMA_MINALIGN);
	flush_dcache_range((unsigned long)desc, (unsigned long)desc + size);

	ret = run_descriptor_jr(desc);

	size = ALIGN(driver_hash[algo].digestsize, ARCH_DMA_MINALIGN);
	invalidate_dcache_range((unsigned long)pout,
				(unsigned long)pout + size);

	free(desc);
	return ret;
}
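
/*
 * Illustrative sketch (not part of the driver): a one-shot SHA256 of
 * an arbitrary buffer. data/len are hypothetical. Both input and
 * output addresses must be ARCH_DMA_MINALIGN-aligned, hence
 * malloc_cache_aligned(); both sizes are rounded up so the cache
 * flush/invalidate in caam_hash() stays within the allocations.
 *
 *	u8 *in = malloc_cache_aligned(ALIGN(len, ARCH_DMA_MINALIGN));
 *	u8 *out = malloc_cache_aligned(ALIGN(SHA256_DIGEST_SIZE,
 *					     ARCH_DMA_MINALIGN));
 *
 *	memcpy(in, data, len);
 *	if (caam_hash(in, len, out, SHA256))
 *		printf("hash failed\n");
 *	// digest now in out[0..SHA256_DIGEST_SIZE-1]
 */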

/* chunk_size is ignored: the whole buffer is hashed in a single CAAM job */
void hw_sha256(const unsigned char *pbuf, unsigned int buf_len,
			unsigned char *pout, unsigned int chunk_size)
{
	if (caam_hash(pbuf, buf_len, pout, SHA256))
		printf("CAAM was not set up properly or it is faulty\n");
}

void hw_sha1(const unsigned char *pbuf, unsigned int buf_len,
			unsigned char *pout, unsigned int chunk_size)
{
	if (caam_hash(pbuf, buf_len, pout, SHA1))
		printf("CAAM was not set up properly or it is faulty\n");
}

int hw_sha_init(struct hash_algo *algo, void **ctxp)
{
	return caam_hash_init(ctxp, get_hash_type(algo));
}

int hw_sha_update(struct hash_algo *algo, void *ctx, const void *buf,
			    unsigned int size, int is_last)
{
	return caam_hash_update(ctx, buf, size, is_last, get_hash_type(algo));
}

int hw_sha_finish(struct hash_algo *algo, void *ctx, void *dest_buf,
		     int size)
{
	return caam_hash_finish(ctx, dest_buf, size, get_hash_type(algo));
}
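
/*
 * Illustrative sketch (not part of the driver): the hw_sha_* hooks
 * above are the hardware-acceleration entry points that U-Boot's
 * generic hashing layer (common/hash.c) plugs into its struct
 * hash_algo table when CONFIG_SHA_HW_ACCEL is enabled, roughly:
 *
 *	{
 *		.name		= "sha256",
 *		.digest_size	= SHA256_SUM_LEN,
 *		.chunk_size	= CHUNKSZ_SHA256,
 *		.hash_func_ws	= hw_sha256,
 *		.hash_init	= hw_sha_init,
 *		.hash_update	= hw_sha_update,
 *		.hash_finish	= hw_sha_finish,
 *	},
 *
 * Field names follow common/hash.c of the same era as this file and
 * may differ across U-Boot versions.
 */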
    231