xref: /linux/drivers/scsi/libsas/sas_init.c (revision 92d33063c081a82d25dd08a9cce03947c8ed9164)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Serial Attached SCSI (SAS) Transport Layer initialization
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <scsi/sas_ata.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>

#include "sas_internal.h"

#include "scsi_sas_internal.h"

static struct kmem_cache *sas_task_cache;
static struct kmem_cache *sas_event_cache;
struct sas_task *sas_alloc_task(gfp_t flags)
{
	struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);

	if (task) {
		spin_lock_init(&task->task_state_lock);
		task->task_state_flags = SAS_TASK_STATE_PENDING;
	}

	return task;
}

struct sas_task *sas_alloc_slow_task(gfp_t flags)
{
	struct sas_task *task = sas_alloc_task(flags);
	struct sas_task_slow *slow = kmalloc(sizeof(*slow), flags);

	if (!task || !slow) {
		if (task)
			kmem_cache_free(sas_task_cache, task);
		kfree(slow);
		return NULL;
	}

	task->slow_task = slow;
	slow->task = task;
	timer_setup(&slow->timer, NULL, 0);
	init_completion(&slow->completion);

	return task;
}

void sas_free_task(struct sas_task *task)
{
	if (task) {
		kfree(task->slow_task);
		kmem_cache_free(sas_task_cache, task);
	}
}
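
/*
 * Illustrative sketch (the example_* names, the 10 s timeout and the
 * elided LLDD submission step are assumptions, not part of this file):
 * the typical slow-task lifecycle -- arm the embedded timer, hand the
 * task to the LLDD, then sleep on the completion that the timeout or
 * completion path signals.
 */
static void example_task_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);

	/* A real handler also marks the task aborted under
	 * task_state_lock before waking the waiter. */
	complete(&slow->completion);
}

static __maybe_unused int example_run_slow_task(void)
{
	struct sas_task *task = sas_alloc_slow_task(GFP_KERNEL);

	if (!task)
		return -ENOMEM;

	task->slow_task->timer.function = example_task_timedout;
	task->slow_task->timer.expires = jiffies + 10 * HZ;
	add_timer(&task->slow_task->timer);

	/* ... submit the task through the LLDD here ... */

	wait_for_completion(&task->slow_task->completion);
	del_timer_sync(&task->slow_task->timer);
	sas_free_task(task);
	return 0;
}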

/*------------ SAS addr hash -----------*/
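/*
 * Compute the 3-byte hashed form of a 64-bit SAS address.  Per the SAS
 * specification this 24-bit hash appears on the wire (e.g. SSP frame
 * headers carry hashed source and destination addresses); it is a
 * bitwise CRC over the address, MSB first, reduced with the generator
 * polynomial 0x00DB2777.
 */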
void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
{
	const u32 poly = 0x00DB2777;
	u32 r = 0;
	int i;

	for (i = 0; i < SAS_ADDR_SIZE; i++) {
		int b;

		for (b = (SAS_ADDR_SIZE - 1); b >= 0; b--) {
			r <<= 1;
			if ((1 << b) & sas_addr[i]) {
				if (!(r & 0x01000000))
					r ^= poly;
			} else if (r & 0x01000000) {
				r ^= poly;
			}
		}
	}

	hashed[0] = (r >> 16) & 0xFF;
	hashed[1] = (r >> 8) & 0xFF;
	hashed[2] = r & 0xFF;
}

int sas_register_ha(struct sas_ha_struct *sas_ha)
{
	char name[64];
	int error = 0;

	mutex_init(&sas_ha->disco_mutex);
	spin_lock_init(&sas_ha->phy_port_lock);
	sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);

	set_bit(SAS_HA_REGISTERED, &sas_ha->state);
	spin_lock_init(&sas_ha->lock);
	mutex_init(&sas_ha->drain_mutex);
	init_waitqueue_head(&sas_ha->eh_wait_q);
	INIT_LIST_HEAD(&sas_ha->defer_q);
	INIT_LIST_HEAD(&sas_ha->eh_dev_q);

	sas_ha->event_thres = SAS_PHY_SHUTDOWN_THRES;

	error = sas_register_phys(sas_ha);
	if (error) {
		pr_notice("couldn't register sas phys:%d\n", error);
		return error;
	}

	error = sas_register_ports(sas_ha);
	if (error) {
		pr_notice("couldn't register sas ports:%d\n", error);
		goto Undo_phys;
	}

	error = -ENOMEM;
	snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
	sas_ha->event_q = create_singlethread_workqueue(name);
	if (!sas_ha->event_q)
		goto Undo_ports;

	snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev));
	sas_ha->disco_q = create_singlethread_workqueue(name);
	if (!sas_ha->disco_q)
		goto Undo_event_q;

	INIT_LIST_HEAD(&sas_ha->eh_done_q);
	INIT_LIST_HEAD(&sas_ha->eh_ata_q);

	return 0;

Undo_event_q:
	destroy_workqueue(sas_ha->event_q);
Undo_ports:
	sas_unregister_ports(sas_ha);
Undo_phys:
	return error;
}
EXPORT_SYMBOL_GPL(sas_register_ha);
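
/*
 * Illustrative attach sketch (hypothetical LLDD helper; error paths
 * trimmed): the SCSI host is registered first, and the LLDD is expected
 * to have filled in sas_addr, sas_phy[], sas_port[] and num_phys before
 * calling sas_register_ha().
 */
static __maybe_unused int example_lldd_attach(struct Scsi_Host *shost,
					      struct sas_ha_struct *sas_ha)
{
	int rc = scsi_add_host(shost, sas_ha->dev);

	if (rc)
		return rc;

	rc = sas_register_ha(sas_ha);
	if (rc)
		scsi_remove_host(shost);
	return rc;
}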

static void sas_disable_events(struct sas_ha_struct *sas_ha)
{
	/* Set the state to unregistered so that no further unchained
	 * events are queued, and flush any in-progress drainers.
	 */
	mutex_lock(&sas_ha->drain_mutex);
	spin_lock_irq(&sas_ha->lock);
	clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
	spin_unlock_irq(&sas_ha->lock);
	__sas_drain_work(sas_ha);
	mutex_unlock(&sas_ha->drain_mutex);
}

int sas_unregister_ha(struct sas_ha_struct *sas_ha)
{
	sas_disable_events(sas_ha);
	sas_unregister_ports(sas_ha);

	/* flush unregistration work */
	mutex_lock(&sas_ha->drain_mutex);
	__sas_drain_work(sas_ha);
	mutex_unlock(&sas_ha->drain_mutex);

	destroy_workqueue(sas_ha->disco_q);
	destroy_workqueue(sas_ha->event_q);

	return 0;
}
EXPORT_SYMBOL_GPL(sas_unregister_ha);
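
/*
 * Illustrative detach sketch (hypothetical LLDD helper): teardown mirrors
 * attach, with libsas unregistered before the SCSI host goes away.
 */
static __maybe_unused void example_lldd_detach(struct Scsi_Host *shost,
					       struct sas_ha_struct *sas_ha)
{
	sas_unregister_ha(sas_ha);
	scsi_remove_host(shost);
	scsi_host_put(shost);
}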

static int sas_get_linkerrors(struct sas_phy *phy)
{
	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		return i->dft->lldd_control_phy(asd_phy, PHY_FUNC_GET_EVENTS, NULL);
	}

	return sas_smp_get_phy_events(phy);
}

int sas_try_ata_reset(struct asd_sas_phy *asd_phy)
{
	struct domain_device *dev = NULL;

	/* try to route user-requested link resets through libata */
	if (asd_phy->port)
		dev = asd_phy->port->port_dev;

	/* validate that dev has been probed */
	if (dev)
		dev = sas_find_dev_by_rphy(dev->rphy);

	if (dev && dev_is_sata(dev)) {
		sas_ata_schedule_reset(dev);
		sas_ata_wait_eh(dev);
		return 0;
	}

	return -ENODEV;
}

/*
 * transport_sas_phy_reset - reset a phy and permit libata to manage the link
 *
 * Phy reset requests arrive via sysfs and run in host workqueue context,
 * so we know we can block on eh and safely traverse the domain_device
 * topology.
 */
static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	enum phy_func reset_type;

	if (hard_reset)
		reset_type = PHY_FUNC_HARD_RESET;
	else
		reset_type = PHY_FUNC_LINK_RESET;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		if (!hard_reset && sas_try_ata_reset(asd_phy) == 0)
			return 0;
		return i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
		struct domain_device *ata_dev = sas_ex_to_ata(ddev, phy->number);

		if (ata_dev && !hard_reset) {
			sas_ata_schedule_reset(ata_dev);
			sas_ata_wait_eh(ata_dev);
			return 0;
		} else {
			return sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
		}
	}
}

int sas_phy_enable(struct sas_phy *phy, int enable)
{
	int ret;
	enum phy_func cmd;

	if (enable)
		cmd = PHY_FUNC_LINK_RESET;
	else
		cmd = PHY_FUNC_DISABLE;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		if (enable)
			ret = transport_sas_phy_reset(phy, 0);
		else
			ret = i->dft->lldd_control_phy(asd_phy, cmd, NULL);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);

		if (enable)
			ret = transport_sas_phy_reset(phy, 0);
		else
			ret = sas_smp_phy_control(ddev, phy->number, cmd, NULL);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sas_phy_enable);

int sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	int ret;
	enum phy_func reset_type;

	if (!phy->enabled)
		return -ENODEV;

	if (hard_reset)
		reset_type = PHY_FUNC_HARD_RESET;
	else
		reset_type = PHY_FUNC_LINK_RESET;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
		ret = sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sas_phy_reset);
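
/*
 * Illustrative sketch (hypothetical helper; simplified): how an LLDD's
 * I_T nexus reset handler typically drives a hard reset through
 * sas_phy_reset(), using the local-phy helpers that the libsas SCSI glue
 * exports.  A real handler also coordinates with ata-eh for SATA devices.
 */
static __maybe_unused int example_I_T_nexus_reset(struct domain_device *dev)
{
	struct sas_phy *phy = sas_get_local_phy(dev);
	int rc = sas_phy_reset(phy, 1);	/* hard reset */

	sas_put_local_phy(phy);
	return rc;
}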

int sas_set_phy_speed(struct sas_phy *phy,
		      struct sas_phy_linkrates *rates)
{
	int ret;

	if ((rates->minimum_linkrate &&
	     rates->minimum_linkrate > phy->maximum_linkrate) ||
	    (rates->maximum_linkrate &&
	     rates->maximum_linkrate < phy->minimum_linkrate))
		return -EINVAL;

	if (rates->minimum_linkrate &&
	    rates->minimum_linkrate < phy->minimum_linkrate_hw)
		rates->minimum_linkrate = phy->minimum_linkrate_hw;

	if (rates->maximum_linkrate &&
	    rates->maximum_linkrate > phy->maximum_linkrate_hw)
		rates->maximum_linkrate = phy->maximum_linkrate_hw;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE,
					       rates);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
		ret = sas_smp_phy_control(ddev, phy->number,
					  PHY_FUNC_LINK_RESET, rates);
	}

	return ret;
}
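
/*
 * Usage note (an assumption about the calling path): this is reached from
 * the sas_phy class attributes in scsi_transport_sas, e.g. writing "3.0"
 * to a phy's minimum_linkrate or maximum_linkrate sysfs file, which the
 * transport class converts into the sas_phy_linkrates argument seen above.
 */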

void sas_prep_resume_ha(struct sas_ha_struct *ha)
{
	int i;

	set_bit(SAS_HA_REGISTERED, &ha->state);
	set_bit(SAS_HA_RESUMING, &ha->state);

	/* clear out any stale link events/data from the suspension path */
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
		phy->frame_rcvd_size = 0;
	}
}
EXPORT_SYMBOL(sas_prep_resume_ha);

static int phys_suspended(struct sas_ha_struct *ha)
{
	int i, rc = 0;

	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		if (phy->suspended)
			rc++;
	}

	return rc;
}

static void sas_resume_insert_broadcast_ha(struct sas_ha_struct *ha)
{
	int i;

	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];
		struct domain_device *dev = port->port_dev;

		if (dev && dev_is_expander(dev->dev_type)) {
			struct asd_sas_phy *first_phy;

			spin_lock(&port->phy_list_lock);
			first_phy = list_first_entry_or_null(
				&port->phy_list, struct asd_sas_phy,
				port_phy_el);
			spin_unlock(&port->phy_list_lock);

			if (first_phy)
				sas_notify_port_event(first_phy,
					PORTE_BROADCAST_RCVD, GFP_KERNEL);
		}
	}
}

static void _sas_resume_ha(struct sas_ha_struct *ha, bool drain)
{
	const unsigned long tmo = msecs_to_jiffies(25000);
	int i;

	/* Deform ports on phys that did not resume.  At this point we
	 * may be racing the phy coming back up (as posted by the LLDD),
	 * so we post the event and, once in libsas context, check that
	 * the phy is still suspended before tearing it down.
	 */
	i = phys_suspended(ha);
	if (i)
		dev_info(ha->dev, "waiting up to 25 seconds for %d phy%s to resume\n",
			 i, i > 1 ? "s" : "");
	wait_event_timeout(ha->eh_wait_q, phys_suspended(ha) == 0, tmo);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		if (phy->suspended) {
			dev_warn(&phy->phy->dev, "resume timeout\n");
			sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT,
					     GFP_KERNEL);
		}
	}

	/* All phys are back up or have timed out; turn on I/O so we can
	 * flush out disks that did not return.
	 */
	scsi_unblock_requests(ha->core.shost);
	if (drain)
		sas_drain_work(ha);
	clear_bit(SAS_HA_RESUMING, &ha->state);

	sas_queue_deferred_work(ha);
	/* Send PORTE_BROADCAST_RCVD so that disks newly inserted behind
	 * an expander are discovered.
	 */
	sas_resume_insert_broadcast_ha(ha);
}

void sas_resume_ha(struct sas_ha_struct *ha)
{
	_sas_resume_ha(ha, true);
}
EXPORT_SYMBOL(sas_resume_ha);

/* A no-sync variant, which does not call sas_drain_work(). */
void sas_resume_ha_no_sync(struct sas_ha_struct *ha)
{
	_sas_resume_ha(ha, false);
}
EXPORT_SYMBOL(sas_resume_ha_no_sync);

void sas_suspend_ha(struct sas_ha_struct *ha)
{
	int i;

	sas_disable_events(ha);
	scsi_block_requests(ha->core.shost);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];

		sas_discover_event(port, DISCE_SUSPEND);
	}

	/* flush suspend events while unregistered */
	mutex_lock(&ha->drain_mutex);
	__sas_drain_work(ha);
	mutex_unlock(&ha->drain_mutex);
}
EXPORT_SYMBOL(sas_suspend_ha);
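
/*
 * Illustrative power-management ordering sketch (hypothetical LLDD
 * callbacks; hardware-specific steps elided): suspend quiesces libsas
 * before the hardware goes down, resume re-arms libsas state before the
 * phys are brought back up.
 */
static __maybe_unused void example_lldd_suspend(struct sas_ha_struct *ha)
{
	sas_suspend_ha(ha);
	/* ... power down phys and controller here ... */
}

static __maybe_unused void example_lldd_resume(struct sas_ha_struct *ha)
{
	sas_prep_resume_ha(ha);
	/* ... re-init the controller and restart phys here ... */
	sas_resume_ha(ha);	/* waits up to 25 s for phys to return */
}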

static void sas_phy_release(struct sas_phy *phy)
{
	kfree(phy->hostdata);
	phy->hostdata = NULL;
}

static void phy_reset_work(struct work_struct *work)
{
	struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work);

	d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
}

static void phy_enable_work(struct work_struct *work)
{
	struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work);

	d->enable_result = sas_phy_enable(d->phy, d->enable);
}

static int sas_phy_setup(struct sas_phy *phy)
{
	struct sas_phy_data *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;

	mutex_init(&d->event_lock);
	INIT_SAS_WORK(&d->reset_work, phy_reset_work);
	INIT_SAS_WORK(&d->enable_work, phy_enable_work);
	d->phy = phy;
	phy->hostdata = d;

	return 0;
}

static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
{
	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_phy_data *d = phy->hostdata;
	int rc;

	if (!d)
		return -ENOMEM;

	pm_runtime_get_sync(ha->dev);
	/* libsas workqueue coordinates ata-eh reset with discovery */
	mutex_lock(&d->event_lock);
	d->reset_result = 0;
	d->hard_reset = hard_reset;

	spin_lock_irq(&ha->lock);
	sas_queue_work(ha, &d->reset_work);
	spin_unlock_irq(&ha->lock);

	rc = sas_drain_work(ha);
	if (rc == 0)
		rc = d->reset_result;
	mutex_unlock(&d->event_lock);
	pm_runtime_put_sync(ha->dev);

	return rc;
}

static int queue_phy_enable(struct sas_phy *phy, int enable)
{
	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_phy_data *d = phy->hostdata;
	int rc;

	if (!d)
		return -ENOMEM;

	pm_runtime_get_sync(ha->dev);
	/* libsas workqueue coordinates ata-eh reset with discovery */
	mutex_lock(&d->event_lock);
	d->enable_result = 0;
	d->enable = enable;

	spin_lock_irq(&ha->lock);
	sas_queue_work(ha, &d->enable_work);
	spin_unlock_irq(&ha->lock);

	rc = sas_drain_work(ha);
	if (rc == 0)
		rc = d->enable_result;
	mutex_unlock(&d->event_lock);
	pm_runtime_put_sync(ha->dev);

	return rc;
}

static struct sas_function_template sft = {
	.phy_enable = queue_phy_enable,
	.phy_reset = queue_phy_reset,
	.phy_setup = sas_phy_setup,
	.phy_release = sas_phy_release,
	.set_phy_speed = sas_set_phy_speed,
	.get_linkerrors = sas_get_linkerrors,
	.smp_handler = sas_smp_handler,
};

static inline ssize_t phy_event_threshold_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	return scnprintf(buf, PAGE_SIZE, "%u\n", sha->event_thres);
}

static inline ssize_t phy_event_threshold_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	sha->event_thres = simple_strtol(buf, NULL, 10);

	/* threshold cannot be set too small */
	if (sha->event_thres < 32)
		sha->event_thres = 32;

	return count;
}

DEVICE_ATTR(phy_event_threshold,
	S_IRUGO|S_IWUSR,
	phy_event_threshold_show,
	phy_event_threshold_store);
EXPORT_SYMBOL_GPL(dev_attr_phy_event_threshold);
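
/*
 * Usage note (paths are assumptions based on LLDDs that include this
 * attribute in their shost attribute groups): the threshold is visible
 * under the SCSI host, e.g.
 *
 *   cat /sys/class/scsi_host/host0/phy_event_threshold
 *   echo 64 > /sys/class/scsi_host/host0/phy_event_threshold
 *
 * where host0 is a placeholder and values below 32 are clamped to 32.
 */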

struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *dft)
{
	struct scsi_transport_template *stt = sas_attach_transport(&sft);
	struct sas_internal *i;

	if (!stt)
		return stt;

	i = to_sas_internal(stt);
	i->dft = dft;
	stt->create_work_queue = 1;
	stt->eh_strategy_handler = sas_scsi_recover_host;

	return stt;
}
EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
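
/*
 * Illustrative sketch (hypothetical LLDD module init; the example_* names
 * are assumptions): a driver fills a sas_domain_function_template with
 * its lldd_* hooks, attaches it once, and later assigns the returned
 * template to shost->transportt after scsi_host_alloc().
 */
static struct sas_domain_function_template example_dft = {
	/* .lldd_execute_task = ..., .lldd_control_phy = ..., etc. */
};

static struct scsi_transport_template *example_stt;

static __maybe_unused int example_lldd_init(void)
{
	example_stt = sas_domain_attach_transport(&example_dft);
	return example_stt ? 0 : -ENOMEM;
}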

struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy,
				      gfp_t gfp_flags)
{
	struct asd_sas_event *event;
	struct sas_ha_struct *sas_ha = phy->ha;
	struct sas_internal *i =
		to_sas_internal(sas_ha->core.shost->transportt);

	event = kmem_cache_zalloc(sas_event_cache, gfp_flags);
	if (!event)
		return NULL;

	atomic_inc(&phy->event_nr);

	if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
		if (i->dft->lldd_control_phy) {
			if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
				pr_notice("phy%d is bursting events, shutting it down\n",
					  phy->id);
				sas_notify_phy_event(phy, PHYE_SHUTDOWN,
						     gfp_flags);
			}
		} else {
			/* PHY control is not supported; stop allocating events */
			WARN_ONCE(1, "PHY control not supported.\n");
			kmem_cache_free(sas_event_cache, event);
			atomic_dec(&phy->event_nr);
			event = NULL;
		}
	}

	return event;
}

void sas_free_event(struct asd_sas_event *event)
{
	struct asd_sas_phy *phy = event->phy;

	kmem_cache_free(sas_event_cache, event);
	atomic_dec(&phy->event_nr);
}

/* ---------- SAS Class register/unregister ---------- */

static int __init sas_class_init(void)
{
	sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
	if (!sas_task_cache)
		goto out;

	sas_event_cache = KMEM_CACHE(asd_sas_event, SLAB_HWCACHE_ALIGN);
	if (!sas_event_cache)
		goto free_task_kmem;

	return 0;
free_task_kmem:
	kmem_cache_destroy(sas_task_cache);
out:
	return -ENOMEM;
}

static void __exit sas_class_exit(void)
{
	kmem_cache_destroy(sas_task_cache);
	kmem_cache_destroy(sas_event_cache);
}

MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
MODULE_DESCRIPTION("SAS Transport Layer");
MODULE_LICENSE("GPL v2");

module_init(sas_class_init);
module_exit(sas_class_exit);