/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2025, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ice_common.h"
#include "ice_hw_autogen.h"
#include "ice_vf_mbx.h"

/**
 * ice_aq_send_msg_to_vf
 * @hw: pointer to the hardware structure
 * @vfid: VF ID to send msg
 * @v_opcode: opcodes for VF-PF communication
 * @v_retval: return error code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @cd: pointer to command details
 *
 * Send a message to the VF driver (0x0802) using the mailbox queue. The
 * message is sent asynchronously via ice_sq_send_cmd().
 */
int
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
		      u8 *msg, u16 msglen, struct ice_sq_cd *cd)
{
	struct ice_aqc_pf_vf_msg *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);

	cmd = &desc.params.virt;
	cmd->id = CPU_TO_LE32(vfid);

	desc.cookie_high = CPU_TO_LE32(v_opcode);
	desc.cookie_low = CPU_TO_LE32(v_retval);

	if (msglen)
		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}

/**
 * ice_aq_send_msg_to_pf
 * @hw: pointer to the hardware structure
 * @v_opcode: opcodes for VF-PF communication
 * @v_retval: return error code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @cd: pointer to command details
 *
 * Send a message to the PF driver using the mailbox queue. By default the
 * message is sent asynchronously, i.e. ice_sq_send_cmd() does not wait for
 * completion before returning.
 */
int
ice_aq_send_msg_to_pf(struct ice_hw *hw, enum virtchnl_ops v_opcode,
		      int v_retval, u8 *msg, u16 msglen,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_pf);
	desc.cookie_high = CPU_TO_LE32(v_opcode);
	desc.cookie_low = CPU_TO_LE32(v_retval);

	if (msglen)
		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
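
/* Illustrative usage sketch only, not driver code: one way a PF-side caller
 * might answer a VF's VIRTCHNL_OP_VERSION request with ice_aq_send_msg_to_vf().
 * The helper name example_pf_reply_version() is hypothetical, and the sketch
 * assumes the virtchnl definitions (struct virtchnl_version_info,
 * VIRTCHNL_OP_VERSION, VIRTCHNL_STATUS_SUCCESS) are visible through the
 * headers included above.
 *
 *	static int example_pf_reply_version(struct ice_hw *hw, u16 vfid)
 *	{
 *		struct virtchnl_version_info ver = {
 *			VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
 *		};
 *
 *		return ice_aq_send_msg_to_vf(hw, vfid, VIRTCHNL_OP_VERSION,
 *					     VIRTCHNL_STATUS_SUCCESS,
 *					     (u8 *)&ver, sizeof(ver), NULL);
 *	}
 */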
static const u32 ice_legacy_aq_to_vc_speed[] = {
	VIRTCHNL_LINK_SPEED_100MB,	/* BIT(0) */
	VIRTCHNL_LINK_SPEED_100MB,
	VIRTCHNL_LINK_SPEED_1GB,
	VIRTCHNL_LINK_SPEED_1GB,
	VIRTCHNL_LINK_SPEED_1GB,
	VIRTCHNL_LINK_SPEED_10GB,
	VIRTCHNL_LINK_SPEED_20GB,
	VIRTCHNL_LINK_SPEED_25GB,
	VIRTCHNL_LINK_SPEED_40GB,
	VIRTCHNL_LINK_SPEED_40GB,
	VIRTCHNL_LINK_SPEED_40GB,
};

/**
 * ice_conv_link_speed_to_virtchnl
 * @adv_link_support: determines the format of the returned link speed
 * @link_speed: variable containing the link_speed to be converted
 *
 * Convert link speed supported by HW to link speed supported by virtchnl.
 * If adv_link_support is true, then return link speed in Mbps. Else return
 * link speed as a VIRTCHNL_LINK_SPEED_* cast to a u32. Note that the caller
 * needs to cast back to an enum virtchnl_link_speed in the case where
 * adv_link_support is false, but when adv_link_support is true the caller can
 * expect the speed in Mbps.
 */
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
	/* convert a BIT() value into an array index */
	u16 index = (u16)(ice_fls(link_speed) - 1);

	if (adv_link_support)
		return ice_get_link_speed(index);
	else if (index < ARRAY_SIZE(ice_legacy_aq_to_vc_speed))
		/* Virtchnl speeds are not defined for every speed supported by
		 * the hardware. To maintain compatibility with older AVF
		 * drivers, newer speed values are resolved to the closest
		 * known virtchnl speed when the speed is reported.
		 */
		return ice_legacy_aq_to_vc_speed[index];

	return VIRTCHNL_LINK_SPEED_UNKNOWN;
}

/* The mailbox overflow detection algorithm helps to check if there
 * is a possibility of a malicious VF transmitting too many MBX messages to the
 * PF.
 * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
 * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
 * The struct ice_mbx_snapshot helps to track and traverse a static window of
 * messages within the mailbox queue while looking for a malicious VF.
 *
 * 2. When the caller starts processing its mailbox queue in response to an
 * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
 * the algorithm can be run for the first time for that interrupt. This
 * requires calling ice_mbx_reset_snapshot() as well as calling
 * ice_mbx_reset_vf_info() for each VF tracking structure.
 *
 * 3. For every message read by the caller from the MBX Queue, the caller must
 * call the detection algorithm's entry function ice_mbx_vf_state_handler().
 * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
 * filled as it is required to be passed to the algorithm.
 *
 * 4. Every time a message is read from the MBX queue, a tracking structure
 * for the VF must be passed to the state handler. The boolean output
 * report_malvf from ice_mbx_vf_state_handler() serves as an indicator to the
 * caller whether it must report this VF as malicious or not.
 *
 * 5. When a VF is identified to be malicious, the caller can send a message
 * to the system administrator.
 *
 * 6. The PF is responsible for maintaining the struct ice_mbx_vf_info
 * structure for each VF. The PF should clear the VF tracking structure if the
 * VF is reset. When a VF is shut down and brought back up, the new VF is
 * assumed not to be malicious and will only be reported again if malicious
 * behavior is detected again.
 *
 * 7. The function ice_mbx_reset_snapshot() is called to reset the information
 * in ice_mbx_snapshot for every new mailbox interrupt handled.
 *
 * An illustrative caller-side sketch of steps 3-5 follows the defines below.
 */
#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
/* The highest unsigned 16-bit value, 0xFFFF, indicates that the max messages
 * check must be ignored by the algorithm.
 */
#define ICE_IGNORE_MAX_MSG_CNT	0xFFFF
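
/* Illustrative caller-side sketch only (steps 3-5 above); it is not part of
 * this file. The helper names example_pf_handle_mbx_msg() and
 * example_report_malicious_vf(), and the choice of watermark, are assumptions
 * for illustration; only struct ice_mbx_data, struct ice_mbx_vf_info and
 * ice_mbx_vf_state_handler() come from this code.
 *
 *	static void example_pf_handle_mbx_msg(struct ice_hw *hw,
 *					      struct ice_mbx_vf_info *vf_info,
 *					      u16 num_msg_proc,
 *					      u16 num_pending_arq)
 *	{
 *		struct ice_mbx_data mbx_data = { 0 };
 *		bool report_malvf = false;
 *
 *		mbx_data.num_msg_proc = num_msg_proc;
 *		mbx_data.num_pending_arq = num_pending_arq;
 *		mbx_data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
 *		mbx_data.async_watermark_val = ICE_ASYNC_VF_MSG_THRESHOLD;
 *
 *		if (!ice_mbx_vf_state_handler(hw, &mbx_data, vf_info,
 *					      &report_malvf) && report_malvf)
 *			example_report_malicious_vf(hw, vf_info);
 *	}
 *
 * A real caller would derive num_msg_proc and num_pending_arq from its own
 * mailbox servicing loop, and may pick a watermark anywhere between
 * ICE_ASYNC_VF_MSG_THRESHOLD and the mailbox queue depth.
 */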

/**
 * ice_mbx_reset_snapshot - Initialize mailbox snapshot structure
 * @snap: pointer to the mailbox snapshot
 */
static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
{
	struct ice_mbx_vf_info *vf_info;

	/* Clear mbx_buf in the mailbox snapshot structure and set the
	 * mailbox snapshot state to a new capture.
	 */
	ice_memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf), ICE_NONDMA_MEM);
	snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;

	/* Reset message counts for all VFs to zero */
	LIST_FOR_EACH_ENTRY(vf_info, &snap->mbx_vf, ice_mbx_vf_info, list_entry)
		vf_info->msg_count = 0;
}

/**
 * ice_mbx_traverse - Pass through mailbox snapshot
 * @hw: pointer to the HW struct
 * @new_state: new algorithm state
 *
 * Traverse the static mailbox snapshot without checking
 * for malicious VFs.
 */
static void
ice_mbx_traverse(struct ice_hw *hw,
		 enum ice_mbx_snapshot_state *new_state)
{
	struct ice_mbx_snap_buffer_data *snap_buf;
	u32 num_iterations;

	snap_buf = &hw->mbx_snapshot.mbx_buf;

	/* As the mailbox buffer is circular, apply a mask to the
	 * incremented iteration count.
	 */
	num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);

	/* Exit snapshot traversal when either of the conditions below is met:
	 * Condition-1: The iteration count equals the mailbox head, which
	 * indicates that the end of the static snapshot has been reached.
	 * Condition-2: If the maximum number of messages serviced in the
	 * mailbox for a given interrupt is the highest possible value, there
	 * is no need to check the number of messages processed against it.
	 * Otherwise, check whether the number of messages processed is greater
	 * than or equal to the maximum number of mailbox entries serviced in
	 * the current work item.
	 */
	if (num_iterations == snap_buf->head ||
	    (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
	     ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
		*new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
}

/**
 * ice_mbx_detect_malvf - Detect malicious VF in snapshot
 * @hw: pointer to the HW struct
 * @vf_info: mailbox tracking structure for a VF
 * @new_state: new algorithm state
 * @is_malvf: boolean output to indicate if VF is malicious
 *
 * This function tracks the number of asynchronous messages
 * sent per VF and marks the VF as malicious if it exceeds
 * the permissible number of messages to send.
 */
static int
ice_mbx_detect_malvf(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info,
		     enum ice_mbx_snapshot_state *new_state,
		     bool *is_malvf)
{
	/* increment the message count for this VF */
	vf_info->msg_count++;

	if (vf_info->msg_count >= ICE_ASYNC_VF_MSG_THRESHOLD)
		*is_malvf = true;

	/* continue to iterate through the mailbox snapshot */
	ice_mbx_traverse(hw, new_state);

	return 0;
}

/**
 * ice_e830_mbx_vf_dec_trig - Decrement the VF mailbox queue counter
 * @hw: pointer to the HW struct
 * @event: pointer to the control queue receive event
 *
 * This function triggers a decrement of the counter
 * MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT when the driver replenishes
 * the buffers at the PF mailbox queue.
 */
void ice_e830_mbx_vf_dec_trig(struct ice_hw *hw,
			      struct ice_rq_event_info *event)
{
	u16 vfid = LE16_TO_CPU(event->desc.retval);

	wr32(hw, E830_MBX_VF_DEC_TRIG(vfid), 1);
}

/**
 * ice_mbx_vf_clear_cnt_e830 - Clear the VF mailbox queue count
 * @hw: pointer to the HW struct
 * @vf_id: VF ID in the PF space
 *
 * This function clears the counter MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT, and should
 * be called when a VF is created and on VF reset.
 */
void ice_mbx_vf_clear_cnt_e830(struct ice_hw *hw, u16 vf_id)
{
	u32 reg = rd32(hw, E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(vf_id));

	wr32(hw, E830_MBX_VF_DEC_TRIG(vf_id), reg);
}
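
/* Illustrative sketch only: how a caller might keep the E830 in-flight
 * message counter balanced. The surrounding receive-path and reset-path code,
 * and the example_hw_is_e830() gating helper, are assumptions for
 * illustration; only the two functions above are real.
 *
 *	After a VF message dequeued into "event" has been handled and its
 *	mailbox buffer returned to hardware:
 *
 *		if (example_hw_is_e830(hw))
 *			ice_e830_mbx_vf_dec_trig(hw, &event);
 *
 *	And when a VF is created or reset:
 *
 *		if (example_hw_is_e830(hw))
 *			ice_mbx_vf_clear_cnt_e830(hw, vf_id);
 */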

/**
 * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
 * @hw: pointer to the HW struct
 * @mbx_data: pointer to structure containing mailbox data
 * @vf_info: mailbox tracking structure for the VF in question
 * @report_malvf: boolean output to indicate whether VF should be reported
 *
 * The function serves as an entry point for the malicious VF
 * detection algorithm by handling the different states and state
 * transitions of the algorithm:
 * New snapshot: This state is entered when creating a new static
 * snapshot. The data from any previous mailbox snapshot is
 * cleared and a new capture of the mailbox head and tail is
 * logged. This will be the new static snapshot to detect
 * asynchronous messages sent by VFs. After the snapshot is captured,
 * the state machine enters the traverse or detect state depending on
 * whether the number of pending messages in the snapshot exceeds the
 * watermark value.
 * Traverse: If the pending message count is below the watermark,
 * iterate through the snapshot without taking any action on the VF.
 * Detect: If the pending message count exceeds the watermark, traverse
 * the static snapshot and look for a malicious VF.
 */
int
ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
			 struct ice_mbx_vf_info *vf_info, bool *report_malvf)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
	struct ice_mbx_snap_buffer_data *snap_buf;
	struct ice_ctl_q_info *cq = &hw->mailboxq;
	enum ice_mbx_snapshot_state new_state;
	int status = 0;
	bool is_malvf = false;

	if (!report_malvf || !mbx_data || !vf_info)
		return ICE_ERR_BAD_PTR;

	/* When entering the mailbox state machine assume that the VF
	 * is not malicious until detected.
	 */
	*report_malvf = false;

	/* Check that the maximum number of messages allowed to be processed
	 * while servicing the current interrupt exceeds the defined AVF
	 * message threshold.
	 */
	if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
		return ICE_ERR_INVAL_SIZE;

	/* The watermark value should not be less than the threshold set for
	 * the number of asynchronous messages a VF can send to the mailbox,
	 * nor should it be greater than the maximum number of messages in the
	 * mailbox serviced in the current interrupt.
	 */
	if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
	    mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
		return ICE_ERR_PARAM;

	new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
	snap_buf = &snap->mbx_buf;

	switch (snap_buf->state) {
	case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
		/* Clear any previously held data in the mailbox snapshot
		 * structure.
		 */
		ice_mbx_reset_snapshot(snap);

		/* Collect the pending ARQ count, the number of messages
		 * processed and the maximum number of messages allowed to be
		 * processed from the mailbox for the current interrupt.
		 */
		snap_buf->num_pending_arq = mbx_data->num_pending_arq;
		snap_buf->num_msg_proc = mbx_data->num_msg_proc;
		snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;

		/* Capture a new static snapshot of the mailbox by logging its
		 * head and tail, and set num_iterations to the tail value to
		 * mark the start of the iteration through the snapshot.
		 */
		snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
						  mbx_data->num_pending_arq);
		snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
		snap_buf->num_iterations = snap_buf->tail;

		/* The pending ARQ message count returned by ice_clean_rq_elem
		 * is the difference between the head and tail of the
		 * mailbox queue. Comparing this value against the watermark
		 * helps to check if we potentially have malicious VFs.
		 */
		if (snap_buf->num_pending_arq >=
		    mbx_data->async_watermark_val) {
			new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
			status = ice_mbx_detect_malvf(hw, vf_info, &new_state, &is_malvf);
		} else {
			new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
			ice_mbx_traverse(hw, &new_state);
		}
		break;

	case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
		new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
		ice_mbx_traverse(hw, &new_state);
		break;

	case ICE_MAL_VF_DETECT_STATE_DETECT:
		new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
		status = ice_mbx_detect_malvf(hw, vf_info, &new_state, &is_malvf);
		break;

	default:
		new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
		status = ICE_ERR_CFG;
	}

	snap_buf->state = new_state;

	/* Only report a VF as malicious the first time it is detected */
	if (is_malvf && !vf_info->malicious) {
		vf_info->malicious = 1;
		*report_malvf = true;
	}

	return status;
}

/**
 * ice_mbx_clear_malvf - Clear VF mailbox info
 * @vf_info: the mailbox tracking structure for a VF
 *
 * In case of a VF reset, this function shall be called to clear the VF's
 * current mailbox tracking state.
 */
void ice_mbx_clear_malvf(struct ice_mbx_vf_info *vf_info)
{
	vf_info->malicious = 0;
	vf_info->msg_count = 0;
}

/**
 * ice_mbx_init_vf_info - Initialize a new VF mailbox tracking info
 * @hw: pointer to the hardware structure
 * @vf_info: the mailbox tracking info structure for a VF
 *
 * Initialize a VF mailbox tracking info structure and insert it into the
 * snapshot list.
 *
 * When a VF is removed, the associated VF info structure must also be deleted
 * from the linked list.
 */
void ice_mbx_init_vf_info(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;

	ice_mbx_clear_malvf(vf_info);
	LIST_ADD(&vf_info->list_entry, &snap->mbx_vf);
}

/**
 * ice_mbx_init_snapshot - Initialize mailbox snapshot data
 * @hw: pointer to the hardware structure
 *
 * Clear the mailbox snapshot structure and initialize the VF mailbox list.
 */
void ice_mbx_init_snapshot(struct ice_hw *hw)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;

	INIT_LIST_HEAD(&snap->mbx_vf);
	ice_mbx_reset_snapshot(snap);
}
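
/* Illustrative lifecycle summary only, based on the comments above; the
 * vf->mbx_info embedding and the removal step are assumptions about how a
 * caller might organize its per-VF state, not definitions from this file.
 *
 *	driver init (ice_init_hw)  ->  ice_mbx_init_snapshot(hw)
 *	VF creation / VF reset     ->  ice_mbx_init_vf_info(hw, &vf->mbx_info)
 *	                               at creation, ice_mbx_clear_malvf() on
 *	                               reset, plus ice_mbx_vf_clear_cnt_e830()
 *	                               on E830 hardware
 *	VF removal                 ->  remove &vf->mbx_info from the snapshot
 *	                               list, per the ice_mbx_init_vf_info() note
 *	mailbox interrupt          ->  ice_mbx_vf_state_handler() per message
 */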