// SPDX-License-Identifier: GPL-2.0-only

#include "netlink.h"
#include "common.h"

struct rings_req_info {
	struct ethnl_req_info		base;
};

struct rings_reply_data {
	struct ethnl_reply_data		base;
	struct ethtool_ringparam	ringparam;
	struct kernel_ethtool_ringparam	kernel_ringparam;
	u32				supported_ring_params;
};

#define RINGS_REPDATA(__reply_base) \
	container_of(__reply_base, struct rings_reply_data, base)

const struct nla_policy ethnl_rings_get_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
};

static int rings_prepare_data(const struct ethnl_req_info *req_base,
			      struct ethnl_reply_data *reply_base,
			      const struct genl_info *info)
{
	struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	struct net_device *dev = reply_base->dev;
	int ret;

	if (!dev->ethtool_ops->get_ringparam)
		return -EOPNOTSUPP;

	data->supported_ring_params = dev->ethtool_ops->supported_ring_params;
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		return ret;
	dev->ethtool_ops->get_ringparam(dev, &data->ringparam,
					&data->kernel_ringparam, info->extack);
	ethnl_ops_complete(dev);

	return 0;
}

static int rings_reply_size(const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	return nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_BUF_LEN */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_TCP_DATA_SPLIT */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_CQE_SIZE */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_TX_PUSH */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_RX_PUSH */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_PUSH_BUF_LEN */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_PUSH_BUF_LEN_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_HDS_THRESH */
	       nla_total_size(sizeof(u32));	/* _RINGS_HDS_THRESH_MAX */
}
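/*
 * Each attribute put by rings_fill_reply() costs nla_total_size(payload):
 * the 4-byte netlink attribute header plus the payload padded to 4 bytes,
 * so a u32 attribute and a u8 attribute each account for 8 bytes. The sum
 * above is therefore an upper bound on the reply size; attributes skipped
 * at fill time simply leave part of the reserved skb tailroom unused.
 */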
static int rings_fill_reply(struct sk_buff *skb,
			    const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	const struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	const struct kernel_ethtool_ringparam *kr = &data->kernel_ringparam;
	const struct ethtool_ringparam *ringparam = &data->ringparam;
	u32 supported_ring_params = data->supported_ring_params;

	/* drivers may only report UNKNOWN, DISABLED or ENABLED here */
	WARN_ON(kr->tcp_data_split > ETHTOOL_TCP_DATA_SPLIT_ENABLED);

	if ((ringparam->rx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX,
			  ringparam->rx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX,
			  ringparam->rx_pending))) ||
	    (ringparam->rx_mini_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI_MAX,
			  ringparam->rx_mini_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI,
			  ringparam->rx_mini_pending))) ||
	    (ringparam->rx_jumbo_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO_MAX,
			  ringparam->rx_jumbo_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO,
			  ringparam->rx_jumbo_pending))) ||
	    (ringparam->tx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX,
			  ringparam->tx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX,
			  ringparam->tx_pending))) ||
	    (kr->rx_buf_len &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) ||
	    (kr->tcp_data_split &&
	     (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT,
			 kr->tcp_data_split))) ||
	    (kr->cqe_size &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_RX_PUSH, !!kr->rx_push) ||
	    ((supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN) &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX,
			  kr->tx_push_buf_max_len) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN,
			  kr->tx_push_buf_len))) ||
	    ((supported_ring_params & ETHTOOL_RING_USE_HDS_THRS) &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_HDS_THRESH,
			  kr->hds_thresh) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_HDS_THRESH_MAX,
			  kr->hds_thresh_max))))
		return -EMSGSIZE;

	return 0;
}

/* RINGS_SET */

const struct nla_policy ethnl_rings_set_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
	[ETHTOOL_A_RINGS_RX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_MINI]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_JUMBO]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_TX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_BUF_LEN]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TCP_DATA_SPLIT]	=
		NLA_POLICY_MAX(NLA_U8, ETHTOOL_TCP_DATA_SPLIT_ENABLED),
	[ETHTOOL_A_RINGS_CQE_SIZE]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_RX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN]	= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_HDS_THRESH]		= { .type = NLA_U32 },
};
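/*
 * The policy above only type- and range-checks individual attributes.
 * Whether the device can actually change each parameter is decided in
 * ethnl_set_rings_validate() below against ops->supported_ring_params,
 * so unsupported requests fail with an extack message before any device
 * state is touched. Per the ethnl convention (see ethnl_default_set_doit()
 * in netlink.c), the validate callback returns 1 to proceed to the set
 * callback, 0 to succeed with nothing to do, or a negative error.
 */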
static int
ethnl_set_rings_validate(struct ethnl_req_info *req_info,
			 struct genl_info *info)
{
	const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
	struct nlattr **tb = info->attrs;

	if (tb[ETHTOOL_A_RINGS_RX_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
				    "setting rx buf len not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TCP_DATA_SPLIT)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT],
				    "setting TCP data split is not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_HDS_THRESH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_HDS_THRS)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_HDS_THRESH],
				    "setting hds-thresh is not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_CQE_SIZE] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_CQE_SIZE],
				    "setting cqe size not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH],
				    "setting tx push not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_RX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_PUSH],
				    "setting rx push not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN],
				    "setting tx push buf len is not supported");
		return -EOPNOTSUPP;
	}

	return ops->get_ringparam && ops->set_ringparam ? 1 : -EOPNOTSUPP;
}

static int
ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
{
	struct kernel_ethtool_ringparam kernel_ringparam = {};
	struct ethtool_ringparam ringparam = {};
	struct net_device *dev = req_info->dev;
	struct nlattr **tb = info->attrs;
	const struct nlattr *err_attr;
	bool mod = false;
	int ret;

	dev->ethtool_ops->get_ringparam(dev, &ringparam,
					&kernel_ringparam, info->extack);
	/* base the request on the user-configured HDS state, not the
	 * driver-reported one, so a request without the attribute keeps
	 * the configuration unchanged
	 */
	kernel_ringparam.tcp_data_split = dev->ethtool->hds_config;

	ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
	ethnl_update_u32(&ringparam.rx_mini_pending,
			 tb[ETHTOOL_A_RINGS_RX_MINI], &mod);
	ethnl_update_u32(&ringparam.rx_jumbo_pending,
			 tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod);
	ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
	ethnl_update_u32(&kernel_ringparam.rx_buf_len,
			 tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
	ethnl_update_u8(&kernel_ringparam.tcp_data_split,
			tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT], &mod);
	ethnl_update_u32(&kernel_ringparam.cqe_size,
			 tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
	ethnl_update_u8(&kernel_ringparam.tx_push,
			tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
	ethnl_update_u8(&kernel_ringparam.rx_push,
			tb[ETHTOOL_A_RINGS_RX_PUSH], &mod);
	ethnl_update_u32(&kernel_ringparam.tx_push_buf_len,
			 tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN], &mod);
	ethnl_update_u32(&kernel_ringparam.hds_thresh,
			 tb[ETHTOOL_A_RINGS_HDS_THRESH], &mod);
	if (!mod)
		return 0;

	if (kernel_ringparam.tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
	    dev_xdp_sb_prog_count(dev)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT],
				    "tcp-data-split can not be enabled with single buffer XDP");
		return -EINVAL;
	}

	/* ensure new ring parameters are within limits */
	if (ringparam.rx_pending > ringparam.rx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX];
	else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_MINI];
	else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO];
	else if (ringparam.tx_pending > ringparam.tx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_TX];
	else if (kernel_ringparam.hds_thresh > kernel_ringparam.hds_thresh_max)
		err_attr = tb[ETHTOOL_A_RINGS_HDS_THRESH];
	else
		err_attr = NULL;
	if (err_attr) {
		NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
				    "requested ring size exceeds maximum");
		return -EINVAL;
	}

	if (kernel_ringparam.tx_push_buf_len > kernel_ringparam.tx_push_buf_max_len) {
		NL_SET_ERR_MSG_ATTR_FMT(info->extack,
					tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN],
					"Requested TX push buffer exceeds the maximum of %u",
					kernel_ringparam.tx_push_buf_max_len);

		return -EINVAL;
	}

	ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
					      &kernel_ringparam, info->extack);
	if (!ret) {
		dev->ethtool->hds_config = kernel_ringparam.tcp_data_split;
		dev->ethtool->hds_thresh = kernel_ringparam.hds_thresh;
	}

	return ret < 0 ? ret : 1;
}
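/*
 * A return of 1 from ethnl_set_rings() tells the ethnl core that device
 * state changed, which triggers an ETHTOOL_MSG_RINGS_NTF notification
 * (.set_ntf_cmd below); returning 0 (no attribute changed anything)
 * completes the request without a notification.
 */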
ret : 1; 290 } 291 292 const struct ethnl_request_ops ethnl_rings_request_ops = { 293 .request_cmd = ETHTOOL_MSG_RINGS_GET, 294 .reply_cmd = ETHTOOL_MSG_RINGS_GET_REPLY, 295 .hdr_attr = ETHTOOL_A_RINGS_HEADER, 296 .req_info_size = sizeof(struct rings_req_info), 297 .reply_data_size = sizeof(struct rings_reply_data), 298 299 .prepare_data = rings_prepare_data, 300 .reply_size = rings_reply_size, 301 .fill_reply = rings_fill_reply, 302 303 .set_validate = ethnl_set_rings_validate, 304 .set = ethnl_set_rings, 305 .set_ntf_cmd = ETHTOOL_MSG_RINGS_NTF, 306 }; 307