// SPDX-License-Identifier: GPL-2.0-only

#include <net/netdev_queues.h>

#include "netlink.h"
#include "common.h"

struct rings_req_info {
	struct ethnl_req_info		base;
};

struct rings_reply_data {
	struct ethnl_reply_data		base;
	struct ethtool_ringparam	ringparam;
	struct kernel_ethtool_ringparam	kernel_ringparam;
	u32				supported_ring_params;
};

#define RINGS_REPDATA(__reply_base) \
	container_of(__reply_base, struct rings_reply_data, base)

const struct nla_policy ethnl_rings_get_policy[] = {
	[ETHTOOL_A_RINGS_HEADER] =
		NLA_POLICY_NESTED(ethnl_header_policy),
};

static int rings_prepare_data(const struct ethnl_req_info *req_base,
			      struct ethnl_reply_data *reply_base,
			      const struct genl_info *info)
{
	struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	struct net_device *dev = reply_base->dev;
	int ret;

	if (!dev->ethtool_ops->get_ringparam)
		return -EOPNOTSUPP;

	data->supported_ring_params = dev->ethtool_ops->supported_ring_params;
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		return ret;

	data->kernel_ringparam.tcp_data_split = dev->cfg->hds_config;
	data->kernel_ringparam.hds_thresh = dev->cfg->hds_thresh;

	dev->ethtool_ops->get_ringparam(dev, &data->ringparam,
					&data->kernel_ringparam, info->extack);
	ethnl_ops_complete(dev);

	return 0;
}

static int rings_reply_size(const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	return nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_BUF_LEN */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_TCP_DATA_SPLIT */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_CQE_SIZE */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_TX_PUSH */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_RX_PUSH */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_PUSH_BUF_LEN */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_PUSH_BUF_LEN_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_HDS_THRESH */
	       nla_total_size(sizeof(u32));	/* _RINGS_HDS_THRESH_MAX */
}
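/* Fill a RINGS_GET reply. Ring sizes are emitted only for ring types the
 * device actually has (non-zero max), and driver-opt-in attributes only
 * when the corresponding supported_ring_params bit is set; any failed
 * nla_put_*() means the skb ran out of space, reported as -EMSGSIZE.
 */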
static int rings_fill_reply(struct sk_buff *skb,
			    const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	const struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	const struct kernel_ethtool_ringparam *kr = &data->kernel_ringparam;
	const struct ethtool_ringparam *ringparam = &data->ringparam;
	u32 supported_ring_params = data->supported_ring_params;

	WARN_ON(kr->tcp_data_split > ETHTOOL_TCP_DATA_SPLIT_ENABLED);

	if ((ringparam->rx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX,
			  ringparam->rx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX,
			  ringparam->rx_pending))) ||
	    (ringparam->rx_mini_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI_MAX,
			  ringparam->rx_mini_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI,
			  ringparam->rx_mini_pending))) ||
	    (ringparam->rx_jumbo_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO_MAX,
			  ringparam->rx_jumbo_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO,
			  ringparam->rx_jumbo_pending))) ||
	    (ringparam->tx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX,
			  ringparam->tx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX,
			  ringparam->tx_pending))) ||
	    (kr->rx_buf_len &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) ||
	    (kr->tcp_data_split &&
	     (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT,
			 kr->tcp_data_split))) ||
	    (kr->cqe_size &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_RX_PUSH, !!kr->rx_push) ||
	    ((supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN) &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX,
			  kr->tx_push_buf_max_len) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN,
			  kr->tx_push_buf_len))) ||
	    ((supported_ring_params & ETHTOOL_RING_USE_HDS_THRS) &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_HDS_THRESH,
			  kr->hds_thresh) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_HDS_THRESH_MAX,
			  kr->hds_thresh_max))))
		return -EMSGSIZE;

	return 0;
}

/* RINGS_SET */

const struct nla_policy ethnl_rings_set_policy[] = {
	[ETHTOOL_A_RINGS_HEADER] =
		NLA_POLICY_NESTED(ethnl_header_policy),
	[ETHTOOL_A_RINGS_RX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_MINI]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_JUMBO]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_TX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_BUF_LEN]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TCP_DATA_SPLIT] =
		NLA_POLICY_MAX(NLA_U8, ETHTOOL_TCP_DATA_SPLIT_ENABLED),
	[ETHTOOL_A_RINGS_CQE_SIZE]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_RX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN]	= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_HDS_THRESH]		= { .type = NLA_U32 },
};
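/* A driver opts into the optional ring parameters via
 * ethtool_ops::supported_ring_params; attributes whose bit is not set are
 * rejected below with -EOPNOTSUPP. A minimal sketch of such an opt-in
 * (the foo_* names are hypothetical):
 *
 *	static const struct ethtool_ops foo_ethtool_ops = {
 *		.supported_ring_params	= ETHTOOL_RING_USE_RX_BUF_LEN |
 *					  ETHTOOL_RING_USE_CQE_SIZE,
 *		.get_ringparam		= foo_get_ringparam,
 *		.set_ringparam		= foo_set_ringparam,
 *	};
 *
 * Returning 1 (rather than 0) from validation tells the ethnl core to
 * proceed with the SET operation.
 */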
static int
ethnl_set_rings_validate(struct ethnl_req_info *req_info,
			 struct genl_info *info)
{
	const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
	struct nlattr **tb = info->attrs;

	if (tb[ETHTOOL_A_RINGS_RX_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
				    "setting rx buf len not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TCP_DATA_SPLIT)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT],
				    "setting TCP data split is not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_HDS_THRESH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_HDS_THRS)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_HDS_THRESH],
				    "setting hds-thresh is not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_CQE_SIZE] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_CQE_SIZE],
				    "setting cqe size not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH],
				    "setting tx push not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_RX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_PUSH],
				    "setting rx push not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN],
				    "setting tx push buf len is not supported");
		return -EOPNOTSUPP;
	}

	return ops->get_ringparam && ops->set_ringparam ? 1 : -EOPNOTSUPP;
}
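/* SET follows a read-modify-write pattern: current settings are read back
 * from the driver, fields present in the request overwrite them (tracked
 * via "mod"), the merged result is checked against the driver-reported
 * maxima, and only then is ->set_ringparam() invoked. Returning 1 makes
 * the core emit an ETHTOOL_MSG_RINGS_NTF notification.
 */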
static int
ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
{
	struct kernel_ethtool_ringparam kernel_ringparam = {};
	struct ethtool_ringparam ringparam = {};
	struct net_device *dev = req_info->dev;
	struct nlattr **tb = info->attrs;
	const struct nlattr *err_attr;
	bool mod = false;
	int ret;

	dev->ethtool_ops->get_ringparam(dev, &ringparam,
					&kernel_ringparam, info->extack);
	kernel_ringparam.tcp_data_split = dev->cfg->hds_config;

	ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
	ethnl_update_u32(&ringparam.rx_mini_pending,
			 tb[ETHTOOL_A_RINGS_RX_MINI], &mod);
	ethnl_update_u32(&ringparam.rx_jumbo_pending,
			 tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod);
	ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
	ethnl_update_u32(&kernel_ringparam.rx_buf_len,
			 tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
	ethnl_update_u8(&kernel_ringparam.tcp_data_split,
			tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT], &mod);
	ethnl_update_u32(&kernel_ringparam.cqe_size,
			 tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
	ethnl_update_u8(&kernel_ringparam.tx_push,
			tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
	ethnl_update_u8(&kernel_ringparam.rx_push,
			tb[ETHTOOL_A_RINGS_RX_PUSH], &mod);
	ethnl_update_u32(&kernel_ringparam.tx_push_buf_len,
			 tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN], &mod);
	ethnl_update_u32(&kernel_ringparam.hds_thresh,
			 tb[ETHTOOL_A_RINGS_HDS_THRESH], &mod);
	if (!mod)
		return 0;

	if (kernel_ringparam.tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
	    dev_xdp_sb_prog_count(dev)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT],
				    "tcp-data-split can not be enabled with single buffer XDP");
		return -EINVAL;
	}

	if (dev_get_min_mp_channel_count(dev)) {
		if (kernel_ringparam.tcp_data_split !=
		    ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
			NL_SET_ERR_MSG(info->extack,
				       "can't disable tcp-data-split while device has memory provider enabled");
			return -EINVAL;
		} else if (kernel_ringparam.hds_thresh) {
			NL_SET_ERR_MSG(info->extack,
				       "can't set non-zero hds_thresh while device has memory provider enabled");
			return -EINVAL;
		}
	}

	/* ensure new ring parameters are within limits */
	if (ringparam.rx_pending > ringparam.rx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX];
	else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_MINI];
	else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO];
	else if (ringparam.tx_pending > ringparam.tx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_TX];
	else if (kernel_ringparam.hds_thresh > kernel_ringparam.hds_thresh_max)
		err_attr = tb[ETHTOOL_A_RINGS_HDS_THRESH];
	else
		err_attr = NULL;
	if (err_attr) {
		NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
				    "requested ring size exceeds maximum");
		return -EINVAL;
	}

	if (kernel_ringparam.tx_push_buf_len > kernel_ringparam.tx_push_buf_max_len) {
		NL_SET_ERR_MSG_ATTR_FMT(info->extack, tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN],
					"Requested TX push buffer exceeds the maximum of %u",
					kernel_ringparam.tx_push_buf_max_len);

		return -EINVAL;
	}

	dev->cfg_pending->hds_config = kernel_ringparam.tcp_data_split;
	dev->cfg_pending->hds_thresh = kernel_ringparam.hds_thresh;

	ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
					      &kernel_ringparam, info->extack);
	return ret < 0 ? ret : 1;
}

const struct ethnl_request_ops ethnl_rings_request_ops = {
	.request_cmd		= ETHTOOL_MSG_RINGS_GET,
	.reply_cmd		= ETHTOOL_MSG_RINGS_GET_REPLY,
	.hdr_attr		= ETHTOOL_A_RINGS_HEADER,
	.req_info_size		= sizeof(struct rings_req_info),
	.reply_data_size	= sizeof(struct rings_reply_data),

	.prepare_data		= rings_prepare_data,
	.reply_size		= rings_reply_size,
	.fill_reply		= rings_fill_reply,

	.set_validate		= ethnl_set_rings_validate,
	.set			= ethnl_set_rings,
	.set_ntf_cmd		= ETHTOOL_MSG_RINGS_NTF,
};
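/* Illustrative userspace interaction (assuming a netlink-aware ethtool
 * binary; the exact CLI syntax is not defined by this file):
 *
 *	ethtool -g eth0			# ETHTOOL_MSG_RINGS_GET
 *	ethtool -G eth0 rx 1024		# ETHTOOL_MSG_RINGS_SET
 *
 * A successful SET additionally triggers an ETHTOOL_MSG_RINGS_NTF
 * notification to multicast listeners, per .set_ntf_cmd above.
 */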