#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source.  A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright 2018, Joyent, Inc.
# Copyright 2023 The University of Queensland
# Copyright 2020 RackTop Systems, Inc.
#

#
# Driver.conf file for Mellanox ConnectX-4/5/6.
# See mlxcx(4D) for valid options.
#

#
# Sizing of event and completion queues.
#
# The number of entries on each queue will be (1 << *_size_shift) -- so
# a value of 10 would mean 1024 entries.
#
#eq_size_shift = 9;

# The default for devices with a maximum supported speed up to 10Gb/s
#cq_size_shift = 10;
#
# The default for devices with a maximum supported speed above 10Gb/s
#cq_size_shift = 12;

#
# Sizing of send and receive queues.
#
# Note that this determines the size of the RX and TX rings that mlxcx will
# advertise to MAC. It also determines how many packet buffers we will allocate
# when starting the interface.
#
# The defaults for devices with a maximum supported speed up to 10Gb/s
#sq_size_shift = 11;
#rq_size_shift = 10;
#
# The defaults for devices with a maximum supported speed above 10Gb/s
#sq_size_shift = 13;
#rq_size_shift = 12;

#
# Number and configuration of TX groups and rings.
#
#tx_ngroups = 1;
#tx_nrings_per_group = 64;

#
# Number and configuration of RX groups and rings.
#
#rx_ngroups_large = 2;
#rx_nrings_per_large_group = 16;
#rx_ngroups_small = 256;
#rx_nrings_per_small_group = 4;

#
# Number of flow table entries allocated to root flow tables.
#
# This places an upper limit on how many MAC addresses can be filtered into
# groups across the whole NIC. If you have a lot of VNICs you might want to
# raise this (and probably also rx_ngroups_small).
#
#ftbl_root_size_shift = 12;

#
# Number of flow table entries allocated to each L1 VLAN filtering table.
#
# This places a limit on the number of VLANs that one MAC address can be
# associated with before falling back to software classification. Two entries
# are always reserved for the non-VLAN catch-all and promisc entries.
#
# Note: illumos MAC today only supports giving a single VLAN per MAC address
# to hardware drivers anyway, so setting this higher is pointless until that
# changes.
#
#ftbl_vlan_size_shift = 4;

#
# Interrupt and completion moderation.
#
#cqemod_period_usec = 50;
#cqemod_count = <80% of cq_size>;
#intrmod_period_usec = 10;

#
# Minimum packet size before we use a ddi_dma_bind_addr() rather than bcopy()
# of the packet data. DMA binds are expensive and involve taking locks in the
# PCI nexus driver, so it's seldom worth doing them for small packets.
#
#tx_bind_threshold = 2048;

#
# Interval between periodic double-checks of queue status against hardware
# state. This is used to detect hardware stalls or errors, as well as guard
# against driver bugs.
#
# If set to too high a frequency, checks may impact NIC performance. Can be
# set to zero to disable periodic checking entirely.
#
#eq_check_interval_sec = 30;
#cq_check_interval_sec = 300;
#wq_check_interval_sec = 300;

#
# To provide some level of moderation and aid latencies, after
# "rx_limit_per_completion" packets are received in a single completion
# event, the interrupt handler will pass the chain up the receive stack.
#
#rx_limit_per_completion = 256;

#
# Minimum size of packet buffers allowed to be loaned to MAC when the ring
# has reached >=50% of its buffers already on loan. Packet buffers smaller than
# this will be copied. At >= 75% of buffers on loan, all packets will be
# copied instead of loaned.
#
# If your workload involves lots of very deep socket queues, you may find some
# performance gains in adjusting this.
#
#rx_p50_loan_min_size = 256;