ib_verbs.h, compared between commits 99db9494035f5b9fbb1d579f89c6fa1beba6dbb7 (old) and 0bbb3b7496eabb6779962a998a8a91f4a8e589ff (new). Lines prefixed with "-" exist only in the old version, lines prefixed with "+" only in the new one; unprefixed lines are common to both.
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.

[... 1770 unchanged lines hidden ...]

        rwlock_t                lock;
        struct ib_event_handler event_handler;
        struct ib_pkey_cache  **pkey_cache;
        struct ib_gid_table   **gid_cache;
        u8                     *lmc_cache;
        enum ib_port_state     *port_state_cache;
};
-struct ib_dma_mapping_ops {
-        int             (*mapping_error)(struct ib_device *dev,
-                                         u64 dma_addr);
-        u64             (*map_single)(struct ib_device *dev,
-                                      void *ptr, size_t size,
-                                      enum dma_data_direction direction);
-        void            (*unmap_single)(struct ib_device *dev,
-                                        u64 addr, size_t size,
-                                        enum dma_data_direction direction);
-        u64             (*map_page)(struct ib_device *dev,
-                                    struct page *page, unsigned long offset,
-                                    size_t size,
-                                    enum dma_data_direction direction);
-        void            (*unmap_page)(struct ib_device *dev,
-                                      u64 addr, size_t size,
-                                      enum dma_data_direction direction);
-        int             (*map_sg)(struct ib_device *dev,
-                                  struct scatterlist *sg, int nents,
-                                  enum dma_data_direction direction);
-        void            (*unmap_sg)(struct ib_device *dev,
-                                    struct scatterlist *sg, int nents,
-                                    enum dma_data_direction direction);
-        int             (*map_sg_attrs)(struct ib_device *dev,
-                                        struct scatterlist *sg, int nents,
-                                        enum dma_data_direction direction,
-                                        unsigned long attrs);
-        void            (*unmap_sg_attrs)(struct ib_device *dev,
-                                          struct scatterlist *sg, int nents,
-                                          enum dma_data_direction direction,
-                                          unsigned long attrs);
-        void            (*sync_single_for_cpu)(struct ib_device *dev,
-                                                u64 dma_handle,
-                                                size_t size,
-                                                enum dma_data_direction dir);
-        void            (*sync_single_for_device)(struct ib_device *dev,
-                                                   u64 dma_handle,
-                                                   size_t size,
-                                                   enum dma_data_direction dir);
-        void            *(*alloc_coherent)(struct ib_device *dev,
-                                           size_t size,
-                                           u64 *dma_handle,
-                                           gfp_t flag);
-        void            (*free_coherent)(struct ib_device *dev,
-                                         size_t size, void *cpu_addr,
-                                         u64 dma_handle);
-};
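The struct removed above was the hook that let an individual ib_device interpose its own DMA callbacks between the ib_dma_*() wrappers and the generic DMA API. As a rough, hypothetical sketch of how that hook was used (the foo_* names below are invented for illustration and do not come from any in-tree driver), a provider with no real DMA hardware could fill in the table and point its device's dma_ops field at it. In the new version of the header this indirection is gone: the wrappers further down always call the generic dma_*() helpers on &dev->dev, so a software provider would presumably have to express the same behaviour through the struct device's own DMA ops instead (for example via something like the generic dma_virt_ops helper).

```c
#include <rdma/ib_verbs.h>	/* old version of the header, which still has the ops table */

/* Hypothetical, illustration-only callbacks for a software device. */
static int foo_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_addr == 0;		/* a zero cookie means the mapping failed */
}

static u64 foo_map_single(struct ib_device *dev, void *ptr, size_t size,
			  enum dma_data_direction direction)
{
	return (u64)(uintptr_t)ptr;	/* identity "mapping": reuse the kernel VA */
}

static void foo_unmap_single(struct ib_device *dev, u64 addr, size_t size,
			     enum dma_data_direction direction)
{
	/* nothing to undo for an identity mapping */
}

static struct ib_dma_mapping_ops foo_dma_mapping_ops = {
	.mapping_error	= foo_mapping_error,
	.map_single	= foo_map_single,
	.unmap_single	= foo_unmap_single,
	/* ...the remaining callbacks would be filled in the same way... */
};

/* Old-style registration: ibdev->dma_ops = &foo_dma_mapping_ops; */
```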
struct iw_cm_verbs;

struct ib_port_immutable {
        int                     pkey_tbl_len;
        int                     gid_tbl_len;
        u32                     core_cap_flags;
        u32                     max_mad_size;
};

[... 243 unchanged lines hidden ...]

        int (*modify_wq)(struct ib_wq *wq,
                         struct ib_wq_attr *attr,
                         u32 wq_attr_mask,
                         struct ib_udata *udata);
        struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device,
                                                           struct ib_rwq_ind_table_init_attr *init_attr,
                                                           struct ib_udata *udata);
        int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
-        struct ib_dma_mapping_ops   *dma_ops;

        struct module               *owner;
        struct device                dev;
        struct kobject              *ports_parent;
        struct list_head             port_list;

        enum {
                IB_DEV_UNINITIALIZED,

[... 858 unchanged lines hidden ...]

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
-        if (dev->dma_ops)
-                return dev->dma_ops->mapping_error(dev, dma_addr);
        return dma_mapping_error(&dev->dev, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
                                    void *cpu_addr, size_t size,
                                    enum dma_data_direction direction)
{
-        if (dev->dma_ops)
-                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
        return dma_map_single(&dev->dev, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
                                       u64 addr, size_t size,
                                       enum dma_data_direction direction)
{
-        if (dev->dma_ops)
-                dev->dma_ops->unmap_single(dev, addr, size, direction);
-        else
-                dma_unmap_single(&dev->dev, addr, size, direction);
+        dma_unmap_single(&dev->dev, addr, size, direction);
}
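To make the calling convention concrete, here is a minimal, hypothetical caller of the three wrappers above (my_map_buffer and my_unmap_buffer are invented names; only the ib_dma_*() calls come from this header). The pattern is the same before and after the change: map, check the returned cookie with ib_dma_mapping_error(), and later unmap with the same size and direction.

```c
#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helpers: map a driver-private buffer the HCA will read. */
static int my_map_buffer(struct ib_device *ibdev, void *buf, size_t len,
			 u64 *dma_addr)
{
	*dma_addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, *dma_addr))
		return -ENOMEM;		/* mapping failed, nothing to unmap */
	return 0;
}

static void my_unmap_buffer(struct ib_device *ibdev, u64 dma_addr, size_t len)
{
	/* must match the size and direction used when mapping */
	ib_dma_unmap_single(ibdev, dma_addr, len, DMA_TO_DEVICE);
}
```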
/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
                                  struct page *page,
                                  unsigned long offset,
                                  size_t size,
                                  enum dma_data_direction direction)
{
-        if (dev->dma_ops)
-                return dev->dma_ops->map_page(dev, page, offset, size, direction);
        return dma_map_page(&dev->dev, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
                                     u64 addr, size_t size,
                                     enum dma_data_direction direction)
{
-        if (dev->dma_ops)
-                dev->dma_ops->unmap_page(dev, addr, size, direction);
-        else
-                dma_unmap_page(&dev->dev, addr, size, direction);
+        dma_unmap_page(&dev->dev, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
                                struct scatterlist *sg, int nents,
                                enum dma_data_direction direction)
{
-        if (dev->dma_ops)
-                return dev->dma_ops->map_sg(dev, sg, nents, direction);
        return dma_map_sg(&dev->dev, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction direction)
{
-        if (dev->dma_ops)
-                dev->dma_ops->unmap_sg(dev, sg, nents, direction);
-        else
-                dma_unmap_sg(&dev->dev, sg, nents, direction);
+        dma_unmap_sg(&dev->dev, sg, nents, direction);
}
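A hedged sketch of the scatter/gather wrappers above (my_map_sg and the pr_debug() line are illustrative only): ib_dma_map_sg() returns the number of mapped entries (0 on failure), the mapped addresses are read back with sg_dma_address() and sg_dma_len(), which matches the obsolescence note on ib_sg_dma_address() further down, and ib_dma_unmap_sg() must be given the original nents, not the returned count. The _attrs variants that follow take the same arguments plus a DMA attribute mask.

```c
#include <linux/printk.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: map a scatterlist for a receive and walk the result. */
static int my_map_sg(struct ib_device *ibdev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = ib_dma_map_sg(ibdev, sgl, nents, DMA_FROM_DEVICE);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i) {
		dma_addr_t addr = sg_dma_address(sg);

		pr_debug("sg[%d]: addr %pad len %u\n", i, &addr, sg_dma_len(sg));
		/* ...program addr/len into the HCA's data descriptors here... */
	}

	/* unmap with the original nents, not the mapped count */
	ib_dma_unmap_sg(ibdev, sgl, nents, DMA_FROM_DEVICE);
	return 0;
}
```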
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
                                      struct scatterlist *sg, int nents,
                                      enum dma_data_direction direction,
                                      unsigned long dma_attrs)
{
-        if (dev->dma_ops)
-                return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
-                                                  dma_attrs);
        return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
                                         struct scatterlist *sg, int nents,
                                         enum dma_data_direction direction,
                                         unsigned long dma_attrs)
{
-        if (dev->dma_ops)
-                return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
-                                                    dma_attrs);
-        else
-                dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
+        dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
}
/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete. To do: change all occurrences of
 * ib_sg_dma_address() into sg_dma_address().

[... 25 unchanged lines hidden ...]

 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
                                              u64 addr,
                                              size_t size,
                                              enum dma_data_direction dir)
{
-        if (dev->dma_ops)
-                dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
-        else
-                dma_sync_single_for_cpu(&dev->dev, addr, size, dir);
+        dma_sync_single_for_cpu(&dev->dev, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
                                                 u64 addr,
                                                 size_t size,
                                                 enum dma_data_direction dir)
{
-        if (dev->dma_ops)
-                dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
-        else
-                dma_sync_single_for_device(&dev->dev, addr, size, dir);
+        dma_sync_single_for_device(&dev->dev, addr, size, dir);
}
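The two sync helpers above bracket CPU accesses to a streaming mapping. A small, hypothetical example (the function and parameter names are invented): before the CPU reads a buffer the device has written into, hand it to the CPU; when done, hand it back to the device.

```c
#include <rdma/ib_verbs.h>

/* Hypothetical helper: let the CPU look at a DMA_FROM_DEVICE buffer in place. */
static void my_inspect_rx_buffer(struct ib_device *ibdev, u64 dma_addr,
				 const void *cpu_addr, size_t len)
{
	/* make the device's writes visible to the CPU */
	ib_dma_sync_single_for_cpu(ibdev, dma_addr, len, DMA_FROM_DEVICE);

	/* ...parse the data at cpu_addr here... */

	/* give the buffer back to the device for the next transfer */
	ib_dma_sync_single_for_device(ibdev, dma_addr, len, DMA_FROM_DEVICE);
}
```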
/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
                                          size_t size,
                                          dma_addr_t *dma_handle,
                                          gfp_t flag)
{
-        if (dev->dma_ops) {
-                u64 handle;
-                void *ret;
-
-                ret = dev->dma_ops->alloc_coherent(dev, size, &handle, flag);
-                *dma_handle = handle;
-                return ret;
-        }
        return dma_alloc_coherent(&dev->dev, size, dma_handle, flag);
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
                                        size_t size, void *cpu_addr,
                                        dma_addr_t dma_handle)
{
-        if (dev->dma_ops)
-                dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-        else
-                dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle);
+        dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle);
}

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail, if the memory region has memory windows bound to it.

[... 173 unchanged lines hidden ...]
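Finally, a minimal sketch of the coherent allocation pair shown above (the "ring" naming is invented): ib_dma_alloc_coherent() hands back both a CPU pointer and a dma_addr_t, and ib_dma_free_coherent() must later be called with the same size and both values.

```c
#include <rdma/ib_verbs.h>

/* Hypothetical helpers for a CPU- and HCA-visible ring buffer. */
static void *my_alloc_ring(struct ib_device *ibdev, size_t size,
			   dma_addr_t *dma_handle)
{
	return ib_dma_alloc_coherent(ibdev, size, dma_handle, GFP_KERNEL);
}

static void my_free_ring(struct ib_device *ibdev, size_t size, void *ring,
			 dma_addr_t dma_handle)
{
	ib_dma_free_coherent(ibdev, size, ring, dma_handle);
}
```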