xref: /linux/net/mac80211/nan.c (revision 91a4855d6c03e770e42f17c798a36a3c46e63de2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * NAN mode implementation
4  * Copyright(c) 2025-2026 Intel Corporation
5  */
6 #include <net/mac80211.h>
7 
8 #include "ieee80211_i.h"
9 #include "driver-ops.h"
10 #include "sta_info.h"
11 
12 static void
13 ieee80211_nan_init_channel(struct ieee80211_nan_channel *nan_channel,
14 			   struct cfg80211_nan_channel *cfg_nan_channel)
15 {
16 	memset(nan_channel, 0, sizeof(*nan_channel));
17 
18 	nan_channel->chanreq.oper = cfg_nan_channel->chandef;
19 	memcpy(nan_channel->channel_entry, cfg_nan_channel->channel_entry,
20 	       sizeof(nan_channel->channel_entry));
21 	nan_channel->needed_rx_chains = cfg_nan_channel->rx_nss;
22 }
23 
/*
 * Update an existing local NAN channel in place. Only the RX NSS may
 * change here; the chandef and channel entry must be identical to the
 * currently configured ones (anything else is a new channel).
 */
static void
ieee80211_nan_update_channel(struct ieee80211_local *local,
			     struct ieee80211_nan_channel *nan_channel,
			     struct cfg80211_nan_channel *cfg_nan_channel,
			     bool deferred)
{
	struct ieee80211_chanctx_conf *conf;
	bool reducing_nss;

	if (WARN_ON(!cfg80211_chandef_identical(&nan_channel->chanreq.oper,
						&cfg_nan_channel->chandef)))
		return;

	if (WARN_ON(memcmp(nan_channel->channel_entry,
			   cfg_nan_channel->channel_entry,
			   sizeof(nan_channel->channel_entry))))
		return;

	/* Nothing to do if the requested RX NSS is unchanged */
	if (nan_channel->needed_rx_chains == cfg_nan_channel->rx_nss)
		return;

	reducing_nss = nan_channel->needed_rx_chains > cfg_nan_channel->rx_nss;
	nan_channel->needed_rx_chains = cfg_nan_channel->rx_nss;

	conf = nan_channel->chanctx_conf;

	/*
	 * If we are adding NSSs, we need to be ready before notifying the peer,
	 * if we are reducing NSSs, we need to wait until the peer is notified.
	 */
	if (!conf || (deferred && reducing_nss))
		return;

	ieee80211_recalc_smps_chanctx(local, container_of(conf,
							  struct ieee80211_chanctx,
							  conf));
}
61 
/*
 * Acquire a (shared) channel context for a local NAN channel, reusing an
 * existing compatible context if one exists.
 *
 * Returns 0 on success; -EINVAL if the channel was never initialized,
 * -EBUSY if the interface combination does not allow it, or the error from
 * ieee80211_find_or_create_chanctx().
 */
static int
ieee80211_nan_use_chanctx(struct ieee80211_sub_if_data *sdata,
			  struct ieee80211_nan_channel *nan_channel,
			  bool assign_on_failure)
{
	struct ieee80211_chanctx *ctx;
	bool reused_ctx;

	/* The channel must have been initialized with a valid chandef */
	if (!nan_channel->chanreq.oper.chan)
		return -EINVAL;

	if (ieee80211_check_combinations(sdata, &nan_channel->chanreq.oper,
					 IEEE80211_CHANCTX_SHARED, 0, -1))
		return -EBUSY;

	ctx = ieee80211_find_or_create_chanctx(sdata, &nan_channel->chanreq,
					       IEEE80211_CHANCTX_SHARED,
					       assign_on_failure,
					       &reused_ctx);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	nan_channel->chanctx_conf = &ctx->conf;

	/*
	 * In case an existing channel context is being used, we marked it as
	 * will_be_used, now that it is assigned - clear this indication
	 */
	if (reused_ctx) {
		WARN_ON(!ctx->will_be_used);
		ctx->will_be_used = false;
	}
	ieee80211_recalc_chanctx_min_def(sdata->local, ctx);
	ieee80211_recalc_smps_chanctx(sdata->local, ctx);

	return 0;
}
99 
/*
 * Purge @removed_conf from the NAN schedule of every peer station on
 * @sdata: first NULL out all map slots pointing at channels using that
 * context, then compact the per-peer channels array (fixing up any map
 * slot pointers that reference moved entries), and finally tell the
 * driver about each peer whose schedule actually changed.
 */
static void
ieee80211_nan_update_peer_channels(struct ieee80211_sub_if_data *sdata,
				   struct ieee80211_chanctx_conf *removed_conf)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;

	lockdep_assert_wiphy(local->hw.wiphy);

	list_for_each_entry(sta, &local->sta_list, list) {
		struct ieee80211_nan_peer_sched *peer_sched;
		int write_idx = 0;
		bool updated = false;

		if (sta->sdata != sdata)
			continue;

		peer_sched = sta->sta.nan_sched;
		if (!peer_sched)
			continue;

		/* NULL out map slots for channels being removed */
		for (int i = 0; i < peer_sched->n_channels; i++) {
			if (peer_sched->channels[i].chanctx_conf != removed_conf)
				continue;

			for (int m = 0; m < CFG80211_NAN_MAX_PEER_MAPS; m++) {
				struct ieee80211_nan_peer_map *map =
					&peer_sched->maps[m];

				if (map->map_id == CFG80211_NAN_INVALID_MAP_ID)
					continue;

				for (int s = 0; s < ARRAY_SIZE(map->slots); s++)
					if (map->slots[s] == &peer_sched->channels[i])
						map->slots[s] = NULL;
			}
		}

		/* Compact channels array, removing those with removed_conf */
		for (int i = 0; i < peer_sched->n_channels; i++) {
			if (peer_sched->channels[i].chanctx_conf == removed_conf) {
				updated = true;
				continue;
			}

			if (write_idx != i) {
				/* Update map pointers before moving */
				for (int m = 0; m < CFG80211_NAN_MAX_PEER_MAPS; m++) {
					struct ieee80211_nan_peer_map *map =
						&peer_sched->maps[m];

					if (map->map_id == CFG80211_NAN_INVALID_MAP_ID)
						continue;

					for (int s = 0; s < ARRAY_SIZE(map->slots); s++)
						if (map->slots[s] == &peer_sched->channels[i])
							map->slots[s] = &peer_sched->channels[write_idx];
				}

				peer_sched->channels[write_idx] = peer_sched->channels[i];
			}
			write_idx++;
		}

		/* Clear any remaining entries at the end */
		for (int i = write_idx; i < peer_sched->n_channels; i++)
			memset(&peer_sched->channels[i], 0, sizeof(peer_sched->channels[i]));

		peer_sched->n_channels = write_idx;

		if (updated)
			drv_nan_peer_sched_changed(local, sdata, sta);
	}
}
175 
/*
 * Remove a local NAN channel: unlink it from the schedule slots, update any
 * peer schedules that reference its channel context, notify the driver, and
 * release the channel context once it is no longer used.
 */
static void
ieee80211_nan_remove_channel(struct ieee80211_sub_if_data *sdata,
			     struct ieee80211_nan_channel *nan_channel)
{
	struct ieee80211_chanctx_conf *conf;
	struct ieee80211_chanctx *ctx;
	struct ieee80211_nan_sched_cfg *sched_cfg = &sdata->vif.cfg.nan_sched;

	if (WARN_ON(!nan_channel))
		return;

	lockdep_assert_wiphy(sdata->local->hw.wiphy);

	/* Channel slot is not in use - nothing to remove */
	if (!nan_channel->chanreq.oper.chan)
		return;

	/* Drop all schedule slots still pointing at this channel */
	for (int slot = 0; slot < ARRAY_SIZE(sched_cfg->schedule); slot++)
		if (sched_cfg->schedule[slot] == nan_channel)
			sched_cfg->schedule[slot] = NULL;

	conf = nan_channel->chanctx_conf;

	/* If any peer nan schedule uses this chanctx, update them */
	if (conf)
		ieee80211_nan_update_peer_channels(sdata, conf);

	memset(nan_channel, 0, sizeof(*nan_channel));

	/* Update the driver before (possibly) releasing the channel context */
	drv_vif_cfg_changed(sdata->local, sdata, BSS_CHANGED_NAN_LOCAL_SCHED);

	/* Channel might not have a chanctx if it was ULWed */
	if (!conf)
		return;

	ctx = container_of(conf, struct ieee80211_chanctx, conf);

	/* Other users remain: recompute the context's combined requirements */
	if (ieee80211_chanctx_num_assigned(sdata->local, ctx) > 0) {
		ieee80211_recalc_chanctx_chantype(sdata->local, ctx);
		ieee80211_recalc_smps_chanctx(sdata->local, ctx);
		ieee80211_recalc_chanctx_min_def(sdata->local, ctx);
	}

	if (ieee80211_chanctx_refcount(sdata->local, ctx) == 0)
		ieee80211_free_chanctx(sdata->local, ctx, false);
}
222 
223 static void
224 ieee80211_nan_update_all_ndi_carriers(struct ieee80211_local *local)
225 {
226 	struct ieee80211_sub_if_data *sdata;
227 
228 	lockdep_assert_wiphy(local->hw.wiphy);
229 
230 	/* Iterate all interfaces and update carrier for NDI interfaces */
231 	list_for_each_entry(sdata, &local->interfaces, list) {
232 		if (!ieee80211_sdata_running(sdata) ||
233 		    sdata->vif.type != NL80211_IFTYPE_NAN_DATA)
234 			continue;
235 
236 		ieee80211_nan_update_ndi_carrier(sdata);
237 	}
238 }
239 
240 static struct ieee80211_nan_channel *
241 ieee80211_nan_find_free_channel(struct ieee80211_nan_sched_cfg *sched_cfg)
242 {
243 	for (int i = 0; i < ARRAY_SIZE(sched_cfg->channels); i++) {
244 		if (!sched_cfg->channels[i].chanreq.oper.chan)
245 			return &sched_cfg->channels[i];
246 	}
247 
248 	return NULL;
249 }
250 
/*
 * Apply a new local NAN schedule. This is done transactionally: the current
 * schedule is backed up first, then channels no longer needed are removed
 * (immediately, or later for a deferred update), existing channels are
 * updated in place and new ones are allocated. On any failure, the previous
 * schedule is restored from the backup and its channel contexts re-acquired.
 *
 * Returns 0 on success or a negative error code.
 */
int ieee80211_nan_set_local_sched(struct ieee80211_sub_if_data *sdata,
				  struct cfg80211_nan_local_sched *sched)
{
	/* Maps an index in sched->nan_channels to the sched_cfg channel used */
	struct ieee80211_nan_channel *sched_idx_to_chan[IEEE80211_NAN_MAX_CHANNELS] = {};
	struct ieee80211_nan_sched_cfg *sched_cfg = &sdata->vif.cfg.nan_sched;
	struct ieee80211_nan_sched_cfg backup_sched;
	int ret;

	if (sched->n_channels > IEEE80211_NAN_MAX_CHANNELS)
		return -EOPNOTSUPP;

	if (sched->nan_avail_blob_len > IEEE80211_NAN_AVAIL_BLOB_MAX_LEN)
		return -EINVAL;

	/*
	 * If a deferred schedule update is pending completion, new updates are
	 * not allowed. Only allow to configure an empty schedule so NAN can be
	 * stopped in the middle of a deferred update. This is fine because
	 * empty schedule means the local NAN device will not be available for
	 * peers anymore so there is no need to update peers about a new
	 * schedule.
	 */
	if (WARN_ON(sched_cfg->deferred && sched->n_channels))
		return -EBUSY;

	bitmap_zero(sdata->u.nan.removed_channels, IEEE80211_NAN_MAX_CHANNELS);

	/* Save the current schedule so it can be restored on failure */
	memcpy(backup_sched.schedule, sched_cfg->schedule,
	       sizeof(backup_sched.schedule));
	memcpy(backup_sched.channels, sched_cfg->channels,
	       sizeof(backup_sched.channels));
	memcpy(backup_sched.avail_blob, sched_cfg->avail_blob,
	       sizeof(backup_sched.avail_blob));
	backup_sched.avail_blob_len = sched_cfg->avail_blob_len;

	memcpy(sched_cfg->avail_blob, sched->nan_avail_blob,
	       sched->nan_avail_blob_len);
	sched_cfg->avail_blob_len = sched->nan_avail_blob_len;

	/*
	 * Remove channels that are no longer in the new schedule to free up
	 * resources before adding new channels. For deferred schedule, channels
	 * will be removed when the schedule is applied.
	 * Create a mapping from sched index to sched_cfg channel
	 */
	for (int i = 0; i < ARRAY_SIZE(sched_cfg->channels); i++) {
		bool still_needed = false;

		if (!sched_cfg->channels[i].chanreq.oper.chan)
			continue;

		for (int j = 0; j < sched->n_channels; j++) {
			if (cfg80211_chandef_identical(&sched_cfg->channels[i].chanreq.oper,
						       &sched->nan_channels[j].chandef)) {
				sched_idx_to_chan[j] =
					&sched_cfg->channels[i];
				still_needed = true;
				break;
			}
		}

		if (!still_needed) {
			__set_bit(i, sdata->u.nan.removed_channels);
			if (!sched->deferred)
				ieee80211_nan_remove_channel(sdata,
							     &sched_cfg->channels[i]);
		}
	}

	/* Update existing channels in place, allocate slots for new ones */
	for (int i = 0; i < sched->n_channels; i++) {
		struct ieee80211_nan_channel *chan = sched_idx_to_chan[i];

		if (chan) {
			ieee80211_nan_update_channel(sdata->local, chan,
						     &sched->nan_channels[i],
						     sched->deferred);
		} else {
			chan = ieee80211_nan_find_free_channel(sched_cfg);
			if (WARN_ON(!chan)) {
				ret = -EINVAL;
				goto err;
			}

			sched_idx_to_chan[i] = chan;
			ieee80211_nan_init_channel(chan,
						   &sched->nan_channels[i]);

			ret = ieee80211_nan_use_chanctx(sdata, chan, false);
			if (ret) {
				memset(chan, 0, sizeof(*chan));
				goto err;
			}
		}
	}

	/* Translate the per-slot channel indices into channel pointers */
	for (int s = 0; s < ARRAY_SIZE(sched_cfg->schedule); s++) {
		if (sched->schedule[s] < ARRAY_SIZE(sched_idx_to_chan))
			sched_cfg->schedule[s] =
				sched_idx_to_chan[sched->schedule[s]];
		else
			sched_cfg->schedule[s] = NULL;
	}

	sched_cfg->deferred = sched->deferred;

	drv_vif_cfg_changed(sdata->local, sdata, BSS_CHANGED_NAN_LOCAL_SCHED);

	/*
	 * For deferred update, don't update NDI carriers yet as the new
	 * schedule is not yet applied so common slots don't change. The NDI
	 * carrier will be updated once the driver notifies the new schedule is
	 * applied.
	 */
	if (sched_cfg->deferred)
		return 0;

	ieee80211_nan_update_all_ndi_carriers(sdata->local);
	bitmap_zero(sdata->u.nan.removed_channels, IEEE80211_NAN_MAX_CHANNELS);

	return 0;
err:
	/* Remove newly added channels */
	for (int i = 0; i < ARRAY_SIZE(sched_cfg->channels); i++) {
		struct cfg80211_chan_def *chan_def =
			&sched_cfg->channels[i].chanreq.oper;

		if (!chan_def->chan)
			continue;

		/* A channel not present in the backup was added by us */
		if (!cfg80211_chandef_identical(&backup_sched.channels[i].chanreq.oper,
						chan_def))
			ieee80211_nan_remove_channel(sdata,
						     &sched_cfg->channels[i]);
	}

	/* Re-add all backed up channels */
	for (int i = 0; i < ARRAY_SIZE(backup_sched.channels); i++) {
		struct ieee80211_nan_channel *chan = &sched_cfg->channels[i];

		*chan = backup_sched.channels[i];

		/*
		 * For deferred update, no channels were removed and the channel
		 * context didn't change, so nothing else to do.
		 */
		if (!chan->chanctx_conf || sched->deferred)
			continue;

		if (test_bit(i, sdata->u.nan.removed_channels)) {
			/* Clear the stale chanctx pointer */
			chan->chanctx_conf = NULL;
			/*
			 * We removed the newly added channels so we don't lack
			 * resources. So the only reason that this would fail
			 * is a FW error which we ignore. Therefore, this
			 * should never fail.
			 */
			WARN_ON(ieee80211_nan_use_chanctx(sdata, chan, true));
		} else {
			struct ieee80211_chanctx_conf *conf = chan->chanctx_conf;

			/* FIXME: detect no-op? */
			/* Channel was not removed but may have been updated */
			ieee80211_recalc_smps_chanctx(sdata->local,
						     container_of(conf,
								  struct ieee80211_chanctx,
								  conf));
		}
	}

	memcpy(sched_cfg->schedule, backup_sched.schedule,
	       sizeof(backup_sched.schedule));
	memcpy(sched_cfg->avail_blob, backup_sched.avail_blob,
	       sizeof(backup_sched.avail_blob));
	sched_cfg->avail_blob_len = backup_sched.avail_blob_len;
	sched_cfg->deferred = false;
	bitmap_zero(sdata->u.nan.removed_channels, IEEE80211_NAN_MAX_CHANNELS);

	drv_vif_cfg_changed(sdata->local, sdata, BSS_CHANGED_NAN_LOCAL_SCHED);
	ieee80211_nan_update_all_ndi_carriers(sdata->local);
	return ret;
}
433 
/*
 * Called by the driver when a deferred NAN schedule update has been applied.
 * Completes the update: refreshes NDI carriers, removes the channels that
 * were marked for removal when the deferred schedule was set, and notifies
 * cfg80211 that the update is done.
 */
void ieee80211_nan_sched_update_done(struct ieee80211_vif *vif)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_nan_sched_cfg *sched_cfg = &vif->cfg.nan_sched;
	unsigned int i;

	lockdep_assert_wiphy(sdata->local->hw.wiphy);

	/* Must only be called while a deferred update is pending */
	if (WARN_ON(!sched_cfg->deferred))
		return;

	ieee80211_nan_update_all_ndi_carriers(sdata->local);

	/*
	 * Clear the deferred flag before removing channels. Removing channels
	 * will trigger another schedule update to the driver, and there is no
	 * need for this update to be deferred since removed channels are not
	 * part of the schedule anymore, so no need to notify peers about
	 * removing them.
	 */
	sched_cfg->deferred = false;

	for (i = 0; i < ARRAY_SIZE(sched_cfg->channels); i++) {
		struct ieee80211_nan_channel *chan = &sched_cfg->channels[i];
		struct ieee80211_chanctx_conf *conf = chan->chanctx_conf;

		if (!chan->chanreq.oper.chan)
			continue;

		if (test_bit(i, sdata->u.nan.removed_channels))
			ieee80211_nan_remove_channel(sdata, chan);
		else if (conf)
			/*
			 * We might have called this already for some channels,
			 * but this knows to handle a no-op.
			 */
			ieee80211_recalc_smps_chanctx(sdata->local,
						      container_of(conf,
								   struct ieee80211_chanctx,
								   conf));
	}

	bitmap_zero(sdata->u.nan.removed_channels, IEEE80211_NAN_MAX_CHANNELS);
	cfg80211_nan_sched_update_done(ieee80211_vif_to_wdev(vif), true,
				       GFP_KERNEL);
}
EXPORT_SYMBOL(ieee80211_nan_sched_update_done);
481 
482 void ieee80211_nan_free_peer_sched(struct ieee80211_nan_peer_sched *sched)
483 {
484 	if (!sched)
485 		return;
486 
487 	kfree(sched->init_ulw);
488 	kfree(sched);
489 }
490 
/*
 * Initialize a peer NAN channel from its cfg80211 description by finding a
 * local channel whose chandef is compatible with it. The resulting channel
 * uses the narrower of the two chandefs, the minimum of the two RX NSS
 * values, and shares the local channel's context.
 *
 * Returns 0 on success, -EINVAL if no compatible local channel exists
 * (which nl80211 validation should have ruled out).
 */
static int
ieee80211_nan_init_peer_channel(struct ieee80211_sub_if_data *sdata,
				const struct sta_info *sta,
				const struct cfg80211_nan_channel *cfg_chan,
				struct ieee80211_nan_channel *new_chan)
{
	struct ieee80211_nan_sched_cfg *sched_cfg = &sdata->vif.cfg.nan_sched;

	/* Find compatible local channel */
	for (int j = 0; j < ARRAY_SIZE(sched_cfg->channels); j++) {
		struct ieee80211_nan_channel *local_chan =
			&sched_cfg->channels[j];
		const struct cfg80211_chan_def *compat;

		if (!local_chan->chanreq.oper.chan)
			continue;

		compat = cfg80211_chandef_compatible(&local_chan->chanreq.oper,
						     &cfg_chan->chandef);
		if (!compat)
			continue;

		/* compat is the wider chandef, and we want the narrower one */
		new_chan->chanreq.oper = compat == &local_chan->chanreq.oper ?
					 cfg_chan->chandef : local_chan->chanreq.oper;
		new_chan->needed_rx_chains = min(local_chan->needed_rx_chains,
						 cfg_chan->rx_nss);
		new_chan->chanctx_conf = local_chan->chanctx_conf;

		break;
	}

	/*
	 * nl80211 already validated that each peer channel is compatible
	 * with at least one local channel, so this should never happen.
	 */
	if (WARN_ON(!new_chan->chanreq.oper.chan))
		return -EINVAL;

	memcpy(new_chan->channel_entry, cfg_chan->channel_entry,
	       sizeof(new_chan->channel_entry));

	return 0;
}
535 
536 static void
537 ieee80211_nan_init_peer_map(struct ieee80211_nan_peer_sched *peer_sched,
538 			    const struct cfg80211_nan_peer_map *cfg_map,
539 			    struct ieee80211_nan_peer_map *new_map)
540 {
541 	new_map->map_id = cfg_map->map_id;
542 
543 	if (new_map->map_id == CFG80211_NAN_INVALID_MAP_ID)
544 		return;
545 
546 	/* Set up the slots array */
547 	for (int slot = 0; slot < ARRAY_SIZE(new_map->slots); slot++) {
548 		u8 chan_idx = cfg_map->schedule[slot];
549 
550 		if (chan_idx < peer_sched->n_channels)
551 			new_map->slots[slot] = &peer_sched->channels[chan_idx];
552 	}
553 }
554 
555 /*
556  * Check if the local schedule and a peer schedule have at least one common
557  * slot - a slot where both schedules are active on compatible channels.
558  */
559 static bool
560 ieee80211_nan_has_common_slots(struct ieee80211_sub_if_data *sdata,
561 			       struct ieee80211_nan_peer_sched *peer_sched)
562 {
563 	for (int slot = 0; slot < CFG80211_NAN_SCHED_NUM_TIME_SLOTS; slot++) {
564 		struct ieee80211_nan_channel *local_chan =
565 			sdata->vif.cfg.nan_sched.schedule[slot];
566 
567 		if (!local_chan || !local_chan->chanctx_conf)
568 			continue;
569 
570 		/* Check all peer maps for this slot */
571 		for (int m = 0; m < CFG80211_NAN_MAX_PEER_MAPS; m++) {
572 			struct ieee80211_nan_peer_map *map = &peer_sched->maps[m];
573 			struct ieee80211_nan_channel *peer_chan;
574 
575 			if (map->map_id == CFG80211_NAN_INVALID_MAP_ID)
576 				continue;
577 
578 			peer_chan = map->slots[slot];
579 			if (!peer_chan)
580 				continue;
581 
582 			if (local_chan->chanctx_conf == peer_chan->chanctx_conf)
583 				return true;
584 		}
585 	}
586 
587 	return false;
588 }
589 
/*
 * Update the netif carrier of an NDI interface: turn it on if at least one
 * authorized peer station has a NAN schedule sharing a common slot with the
 * local schedule of the associated NMI, off otherwise.
 */
void ieee80211_nan_update_ndi_carrier(struct ieee80211_sub_if_data *ndi_sdata)
{
	struct ieee80211_local *local = ndi_sdata->local;
	struct ieee80211_sub_if_data *nmi_sdata;
	struct sta_info *sta;

	lockdep_assert_wiphy(local->hw.wiphy);

	if (WARN_ON(ndi_sdata->vif.type != NL80211_IFTYPE_NAN_DATA ||
		    !ndi_sdata->dev) || !ieee80211_sdata_running(ndi_sdata))
		return;

	/* The NDI must be bound to an NMI interface */
	nmi_sdata = wiphy_dereference(local->hw.wiphy, ndi_sdata->u.nan_data.nmi);
	if (WARN_ON(!nmi_sdata))
		return;

	list_for_each_entry(sta, &local->sta_list, list) {
		struct ieee80211_sta *nmi_sta;

		/* Only consider authorized peers on this NDI */
		if (sta->sdata != ndi_sdata ||
		    !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
			continue;

		nmi_sta = wiphy_dereference(local->hw.wiphy, sta->sta.nmi);
		if (WARN_ON(!nmi_sta) || !nmi_sta->nan_sched)
			continue;

		/* One peer with a common slot is enough to have a carrier */
		if (ieee80211_nan_has_common_slots(nmi_sdata, nmi_sta->nan_sched)) {
			netif_carrier_on(ndi_sdata->dev);
			return;
		}
	}

	netif_carrier_off(ndi_sdata->dev);
}
625 
626 static void
627 ieee80211_nan_update_peer_ndis_carrier(struct ieee80211_local *local,
628 				       struct sta_info *nmi_sta)
629 {
630 	struct sta_info *sta;
631 
632 	lockdep_assert_wiphy(local->hw.wiphy);
633 
634 	list_for_each_entry(sta, &local->sta_list, list) {
635 		if (rcu_access_pointer(sta->sta.nmi) == &nmi_sta->sta)
636 			ieee80211_nan_update_ndi_carrier(sta->sdata);
637 	}
638 }
639 
/*
 * Install a new NAN schedule for a peer station. A fresh schedule object is
 * built from the cfg80211 description, installed on the station before the
 * driver is notified, and reverted if the driver rejects it. The previous
 * schedule is freed on success; the new one on any failure.
 *
 * Returns 0 on success or a negative error code (-EINVAL if NAN is not
 * started, -ENOENT if the peer is unknown, -ENOMEM on allocation failure,
 * or the driver's error).
 */
int ieee80211_nan_set_peer_sched(struct ieee80211_sub_if_data *sdata,
				 struct cfg80211_nan_peer_sched *sched)
{
	struct ieee80211_nan_peer_sched *new_sched, *old_sched, *to_free;
	struct sta_info *sta;
	int ret;

	lockdep_assert_wiphy(sdata->local->hw.wiphy);

	if (!sdata->u.nan.started)
		return -EINVAL;

	sta = sta_info_get(sdata, sched->peer_addr);
	if (!sta)
		return -ENOENT;

	new_sched = kzalloc(struct_size(new_sched, channels, sched->n_channels),
			    GFP_KERNEL);
	if (!new_sched)
		return -ENOMEM;

	/* Unless installation succeeds, the new schedule is what gets freed */
	to_free = new_sched;

	new_sched->seq_id = sched->seq_id;
	new_sched->committed_dw = sched->committed_dw;
	new_sched->max_chan_switch = sched->max_chan_switch;
	new_sched->n_channels = sched->n_channels;

	if (sched->ulw_size && sched->init_ulw) {
		new_sched->init_ulw = kmemdup(sched->init_ulw, sched->ulw_size,
					      GFP_KERNEL);
		if (!new_sched->init_ulw) {
			ret = -ENOMEM;
			goto out;
		}
		new_sched->ulw_size = sched->ulw_size;
	}

	for (int i = 0; i < sched->n_channels; i++) {
		ret = ieee80211_nan_init_peer_channel(sdata, sta,
						      &sched->nan_channels[i],
						      &new_sched->channels[i]);
		if (ret)
			goto out;
	}

	for (int m = 0; m < ARRAY_SIZE(sched->maps); m++)
		ieee80211_nan_init_peer_map(new_sched, &sched->maps[m],
					    &new_sched->maps[m]);

	/* Install the new schedule before calling the driver */
	old_sched = sta->sta.nan_sched;
	sta->sta.nan_sched = new_sched;

	ret = drv_nan_peer_sched_changed(sdata->local, sdata, sta);
	if (ret) {
		/* Revert to old schedule */
		sta->sta.nan_sched = old_sched;
		goto out;
	}

	ieee80211_nan_update_peer_ndis_carrier(sdata->local, sta);

	/* Success - free old schedule */
	to_free = old_sched;
	ret = 0;

out:
	ieee80211_nan_free_peer_sched(to_free);
	return ret;
}
711