/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Common header for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#ifndef _VIRTIO_CRYPTO_COMMON_H
#define _VIRTIO_CRYPTO_COMMON_H

#include <linux/virtio.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
#include <uapi/linux/virtio_crypto.h>


/* Internal representation of a data virtqueue */
struct data_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* To protect the vq operations for the dataq */
	spinlock_t lock;

	/* Name of the tx queue: dataq.$index */
	char name[32];

	struct crypto_engine *engine;
	struct tasklet_struct done_task;
};

struct virtio_crypto {
	struct virtio_device *vdev;
	struct virtqueue *ctrl_vq;
	struct data_queue *data_vq;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* To protect the vq operations for the controlq */
	spinlock_t ctrl_lock;

	/* Maximum number of data queues supported by the device */
	u32 max_data_queues;

	/* Number of queues currently used by the driver */
	u32 curr_queue;

	/*
	 * Specifies the mask of services which the device supports,
	 * see VIRTIO_CRYPTO_SERVICE_*
	 */
	u32 crypto_services;

	/* Detailed algorithms mask */
	u32 cipher_algo_l;
	u32 cipher_algo_h;
	u32 hash_algo;
	u32 mac_algo_l;
	u32 mac_algo_h;
	u32 aead_algo;
	u32 akcipher_algo;

	/* Maximum length of cipher key */
	u32 max_cipher_key_len;
	/* Maximum length of authentication key */
	u32 max_auth_key_len;
	/* Maximum size of a single request */
	u64 max_size;

	unsigned long status;
	atomic_t ref_count;
	struct list_head list;
	struct module *owner;
	uint8_t dev_id;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;
};

struct virtio_crypto_sym_session_info {
	/* Backend session id, which comes from the host side */
	__u64 session_id;
};

/*
 * Note: there are padding fields in the request; clear them to zero before
 * sending it to the host to avoid divulging any information.
 * E.g. virtio_crypto_ctrl_request::ctrl::u::destroy_session::padding[48]
 */
struct virtio_crypto_ctrl_request {
	struct virtio_crypto_op_ctrl_req ctrl;
	struct virtio_crypto_session_input input;
	struct virtio_crypto_inhdr ctrl_status;
	struct completion compl;
};
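
/*
 * Illustrative only (not part of the driver API): a minimal sketch of how a
 * caller could prepare a control request with every padding byte cleared, as
 * the note above requires.  kzalloc() zeroes the whole structure, including
 * e.g. ctrl.u.destroy_session.padding[48], so nothing can leak to the host.
 * The opcode and session_id below are placeholders picked for the example.
 *
 *	struct virtio_crypto_ctrl_request *vc_ctrl_req;
 *
 *	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
 *	if (!vc_ctrl_req)
 *		return -ENOMEM;
 *
 *	vc_ctrl_req->ctrl.header.opcode =
 *		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
 *	vc_ctrl_req->ctrl.u.destroy_session.session_id =
 *		cpu_to_le64(session_id);
 *
 * The filled request is then handed to virtio_crypto_ctrl_vq_request()
 * (declared below) together with scatterlists describing the ctrl, input
 * and ctrl_status fields.
 */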

struct virtio_crypto_request;
typedef void (*virtio_crypto_data_callback)
		(struct virtio_crypto_request *vc_req, int len);

struct virtio_crypto_request {
	uint8_t status;
	struct virtio_crypto_op_data_req *req_data;
	struct scatterlist **sgs;
	struct data_queue *dataq;
	virtio_crypto_data_callback alg_cb;
};

int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
struct list_head *virtcrypto_devmgr_get_head(void);
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
struct virtio_crypto *virtcrypto_devmgr_get_first(void);
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto_dev,
				  uint32_t service,
				  uint32_t algo);
struct virtio_crypto *virtcrypto_get_dev_node(int node,
					      uint32_t service,
					      uint32_t algo);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
int virtio_crypto_skcipher_crypt_req(
	struct crypto_engine *engine, void *vreq);

void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req);

static inline int virtio_crypto_get_current_node(void)
{
	int cpu, node;

	cpu = get_cpu();
	node = topology_physical_package_id(cpu);
	put_cpu();

	return node;
}

int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto);
int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto);
int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
				  unsigned int out_sgs, unsigned int in_sgs,
				  struct virtio_crypto_ctrl_request *vc_ctrl_req);

#endif /* _VIRTIO_CRYPTO_COMMON_H */
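
/*
 * Illustrative only (not part of the driver API): a minimal sketch, assuming
 * the caller wants AES-CBC, of how an algorithm implementation might use the
 * declarations above to pick a virtio crypto device close to the current CPU
 * and hold a reference on it while it is in use.
 *
 *	struct virtio_crypto *vcrypto;
 *
 *	vcrypto = virtcrypto_get_dev_node(virtio_crypto_get_current_node(),
 *					  VIRTIO_CRYPTO_SERVICE_CIPHER,
 *					  VIRTIO_CRYPTO_CIPHER_AES_CBC);
 *	if (!vcrypto)
 *		return -ENODEV;
 *
 *	... submit requests on vcrypto's data queues ...
 *
 *	virtcrypto_dev_put(vcrypto);
 */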