/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define BW_PERCENT	100

static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
				     struct ieee_ets *ets)
{
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_SP;
			hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_DWRR;
			hdev->tm_info.pg_info[0].tc_dwrr[i] =
				ets->tc_tx_bw[i];
			break;
		default:
			/* Hardware only supports SP (strict priority)
			 * or ETS (enhanced transmission selection)
			 * algorithms; if dcbnl passes any other value,
			 * return an error.
			 */
			return -EINVAL;
		}
	}

	return hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
}

static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
				      struct ieee_ets *ets)
{
	u32 i;

	memset(ets, 0, sizeof(*ets));
	ets->willing = 1;
	ets->ets_cap = hdev->tc_max;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
		ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];

		if (hdev->tm_info.tc_info[i].tc_sch_mode ==
		    HCLGE_SCH_MODE_SP)
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
		else
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
	}
}

/* IEEE std */
static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	hclge_tm_info_to_ieee_ets(hdev, ets);

	return 0;
}

/* Validate an ETS config against hardware limits: priorities must map to
 * valid TCs, only SP and ETS (DWRR) scheduling are accepted, and the ETS
 * bandwidths must sum to 100 percent. *tc and *changed report the resulting
 * TC count and whether the mapping differs from the current one.
 */
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
{
	u32 total_ets_bw = 0;
	u8 max_tc = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (ets->prio_tc[i] >= hdev->tc_max ||
		    i >= hdev->tc_max)
			return -EINVAL;

		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
			*changed = true;

		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];

		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_SP)
				*changed = true;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_DWRR)
				*changed = true;

			total_ets_bw += ets->tc_tx_bw[i];
			break;
		default:
			return -EINVAL;
		}
	}

	if (total_ets_bw != BW_PERCENT)
		return -EINVAL;

	*tc = max_tc + 1;
	if (*tc != hdev->tm_info.num_tc)
		*changed = true;

	return 0;
}

static int hclge_map_update(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_schd_mode_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		return ret;

	hclge_rss_indir_init_cfg(hdev);

	return hclge_rss_init_hw(hdev);
}

static int hclge_client_setup_tc(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_client *client;
	struct hnae3_handle *handle;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		handle = &vport[i].nic;
		client = handle->client;

		if (!client || !client->ops || !client->ops->setup_tc)
			continue;

		ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;
	u8 num_tc = 0;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
	if (ret)
		return ret;

	hclge_tm_schd_info_update(hdev, num_tc);

	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
	if (ret)
		return ret;

	if (map_changed) {
		ret = hclge_client_setup_tc(hdev);
		if (ret)
			return ret;
	}

	return hclge_tm_dwrr_cfg(hdev);
}

static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;
	int ret;

	memset(pfc, 0, sizeof(*pfc));
	pfc->pfc_cap = hdev->pfc_max;
	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = hdev->tm_info.hw_pfc_map;

	/* Pfc setting is based on TC */
	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
				pfc->pfc_en |= BIT(j);
		}
	}

	ret = hclge_pfc_tx_stats_get(hdev, requests);
	if (ret)
		return ret;

	ret = hclge_pfc_rx_stats_get(hdev, indications);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		pfc->requests[i] = requests[i];
		pfc->indications[i] = indications[i];
	}
	return 0;
}

static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = 0;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
				pfc_map |= BIT(i);
				break;
			}
		}
	}

	if (pfc_map == hdev->tm_info.hw_pfc_map)
		return 0;

	hdev->tm_info.hw_pfc_map = pfc_map;

	return hclge_pause_setup_hw(hdev);
}

/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return 0;

	return hdev->dcbx_cap;
}

static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	/* No support for LLD_MANAGED modes or CEE */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    (mode & DCB_CAP_DCBX_VER_CEE) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	hdev->dcbx_cap = mode;

	return 0;
}

/* Set up TC for hardware offloaded mqprio in channel mode */
static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
		return -EINVAL;

	if (tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev,
			"setup tc failed, tc(%u) > tc_max(%u)\n",
			tc, hdev->tc_max);
		return -EINVAL;
	}

	hclge_tm_schd_info_update(hdev, tc);

	ret = hclge_tm_prio_tc_info_update(hdev, prio_tc);
	if (ret)
		return ret;

	ret = hclge_tm_init_hw(hdev);
	if (ret)
		return ret;

	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	if (tc > 1)
		hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;

	return 0;
}

static const struct hnae3_dcb_ops hns3_dcb_ops = {
	.ieee_getets	= hclge_ieee_getets,
	.ieee_setets	= hclge_ieee_setets,
	.ieee_getpfc	= hclge_ieee_getpfc,
	.ieee_setpfc	= hclge_ieee_setpfc,
	.getdcbx	= hclge_getdcbx,
	.setdcbx	= hclge_setdcbx,
	.map_update	= hclge_map_update,
	.setup_tc	= hclge_setup_tc,
};

void hclge_dcb_ops_set(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;

	/* If hdev does not support DCB or the vport is not the PF,
	 * dcb_ops is not set.
	 */
	if (!hnae3_dev_dcb_supported(hdev) ||
	    vport->vport_id != 0)
		return;

	kinfo = &vport->nic.kinfo;
	kinfo->dcb_ops = &hns3_dcb_ops;
	hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
}