path: root/kernel/drivers/scsi/pm8001/pm8001_sas.h
/*
 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
 *
 * Copyright (c) 2008-2009 USI Co., Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 */

#ifndef _PM8001_SAS_H_
#define _PM8001_SAS_H_

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <scsi/libsas.h>
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
#include <linux/atomic.h>
#include "pm8001_defs.h"

#define DRV_NAME		"pm80xx"
#define DRV_VERSION		"0.1.37"
#define PM8001_FAIL_LOGGING	0x01 /* Error message logging */
#define PM8001_INIT_LOGGING	0x02 /* driver init logging */
#define PM8001_DISC_LOGGING	0x04 /* discovery layer logging */
#define PM8001_IO_LOGGING	0x08 /* I/O path logging */
#define PM8001_EH_LOGGING	0x10 /* libsas EH function logging*/
#define PM8001_IOCTL_LOGGING	0x20 /* IOCTL message logging */
#define PM8001_MSG_LOGGING	0x40 /* misc message logging */
#define pm8001_printk(format, arg...)	printk(KERN_INFO "pm80xx %s %d:" \
			format, __func__, __LINE__, ## arg)
#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD)	\
do {						\
	if (unlikely(HBA->logging_level & LEVEL)) {	\
		CMD;				\
	}					\
} while (0)

#define PM8001_EH_DBG(HBA, CMD)			\
	PM8001_CHECK_LOGGING(HBA, PM8001_EH_LOGGING, CMD)

#define PM8001_INIT_DBG(HBA, CMD)		\
	PM8001_CHECK_LOGGING(HBA, PM8001_INIT_LOGGING, CMD)

#define PM8001_DISC_DBG(HBA, CMD)		\
	PM8001_CHECK_LOGGING(HBA, PM8001_DISC_LOGGING, CMD)

#define PM8001_IO_DBG(HBA, CMD)		\
	PM8001_CHECK_LOGGING(HBA, PM8001_IO_LOGGING, CMD)

#define PM8001_FAIL_DBG(HBA, CMD)		\
	PM8001_CHECK_LOGGING(HBA, PM8001_FAIL_LOGGING, CMD)

#define PM8001_IOCTL_DBG(HBA, CMD)		\
	PM8001_CHECK_LOGGING(HBA, PM8001_IOCTL_LOGGING, CMD)

#define PM8001_MSG_DBG(HBA, CMD)		\
	PM8001_CHECK_LOGGING(HBA, PM8001_MSG_LOGGING, CMD)
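
/*
 * Usage sketch (illustrative, not part of the driver interface): each *_DBG
 * macro wraps an arbitrary statement and executes it only when the matching
 * PM8001_*_LOGGING bit is set in pm8001_ha->logging_level, e.g.:
 *
 *	PM8001_FAIL_DBG(pm8001_ha,
 *		pm8001_printk("cmd timed out, ccb tag = %x\n", ccb_tag));
 */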


#define PM8001_USE_TASKLET
#define PM8001_USE_MSIX
#define PM8001_READ_VPD


#define DEV_IS_EXPANDER(type)	(((type) == SAS_EDGE_EXPANDER_DEVICE) || \
				((type) == SAS_FANOUT_EXPANDER_DEVICE))
#define IS_SPCV_12G(dev)	(((dev)->device == 0x8074)		\
				|| ((dev)->device == 0x8076)		\
				|| ((dev)->device == 0x8077))
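
/*
 * Illustrative call sites (hypothetical): DEV_IS_EXPANDER() takes a
 * sas_device_type value, IS_SPCV_12G() a struct pci_dev pointer, e.g.:
 *
 *	if (DEV_IS_EXPANDER(pm8001_dev->dev_type))
 *		...
 *	if (IS_SPCV_12G(pm8001_ha->pdev))
 *		...
 */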

#define PM8001_NAME_LENGTH		32 /* generic length of strings */
extern struct list_head hba_list;
extern const struct pm8001_dispatch pm8001_8001_dispatch;
extern const struct pm8001_dispatch pm8001_80xx_dispatch;

struct pm8001_hba_info;
struct pm8001_ccb_info;
struct pm8001_device;
/* define task management IU */
struct pm8001_tmf_task {
	u8	tmf;
	u32	tag_of_task_to_be_managed;
};
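
/*
 * Example TMF IU (a sketch, assuming the TMF_* opcodes from <scsi/scsi.h>):
 * a LUN reset targets no specific task, so the tag is left zero:
 *
 *	struct pm8001_tmf_task tmf_task = {
 *		.tmf = TMF_LU_RESET,
 *		.tag_of_task_to_be_managed = 0,
 *	};
 */
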
struct pm8001_ioctl_payload {
	u32	signature;
	u16	major_function;
	u16	minor_function;
	u16	length;
	u16	status;
	u16	offset;
	u16	id;
	u8	*func_specific;
};

#define MPI_FATAL_ERROR_TABLE_OFFSET_MASK 0xFFFFFF
#define MPI_FATAL_ERROR_TABLE_SIZE(value) ((0xFF000000 & (value)) >> SHIFT24)
#define MPI_FATAL_EDUMP_TABLE_LO_OFFSET            0x00     /* HNFBUFL */
#define MPI_FATAL_EDUMP_TABLE_HI_OFFSET            0x04     /* HNFBUFH */
#define MPI_FATAL_EDUMP_TABLE_LENGTH               0x08     /* HNFBLEN */
#define MPI_FATAL_EDUMP_TABLE_HANDSHAKE            0x0C     /* FDDHSHK */
#define MPI_FATAL_EDUMP_TABLE_STATUS               0x10     /* FDDTSTAT */
#define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN            0x14     /* ACCDDLEN */
#define MPI_FATAL_EDUMP_HANDSHAKE_RDY              0x1
#define MPI_FATAL_EDUMP_HANDSHAKE_BUSY             0x0
#define MPI_FATAL_EDUMP_TABLE_STAT_RSVD                 0x0
#define MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED           0x1
#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA 0x2
#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE      0x3
#define TYPE_GSM_SPACE        1
#define TYPE_QUEUE            2
#define TYPE_FATAL            3
#define TYPE_NON_FATAL        4
#define TYPE_INBOUND          1
#define TYPE_OUTBOUND         2
struct forensic_data {
	u32  data_type;
	union {
		struct {
			u32  direct_len;
			u32  direct_offset;
			void  *direct_data;
		} gsm_buf;
		struct {
			u16  queue_type;
			u16  queue_index;
			u32  direct_len;
			void  *direct_data;
		} queue_buf;
		struct {
			u32  direct_len;
			u32  direct_offset;
			u32  read_len;
			void  *direct_data;
		} data_buf;
	};
};
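
/*
 * data_type selects the active union member (a hedged reading of the TYPE_*
 * values above): TYPE_GSM_SPACE -> gsm_buf, TYPE_QUEUE -> queue_buf,
 * TYPE_FATAL/TYPE_NON_FATAL -> data_buf.
 */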

/* bit31-26 - mask bar */
#define SCRATCH_PAD0_BAR_MASK                    0xFC000000
/* bit25-0  - offset mask */
#define SCRATCH_PAD0_OFFSET_MASK                 0x03FFFFFF
/* if AAP error state */
#define SCRATCH_PAD0_AAPERR_MASK                 0xFFFFFFFF
/* Inbound doorbell bit7 */
#define SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP	 0x80
/* Inbound doorbell bit7 SPCV */
#define SPCV_MSGU_CFG_TABLE_TRANSFER_DEBUG_INFO  0x80
#define MAIN_MERRDCTO_MERRDCES		         0xA0 /* DWORD 0x28 */

struct pm8001_dispatch {
	char *name;
	int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
	int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha);
	void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
	int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
	void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
	irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec);
	u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
	int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec);
	void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
	void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
	void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
	int (*smp_req)(struct pm8001_hba_info *pm8001_ha,
		struct pm8001_ccb_info *ccb);
	int (*ssp_io_req)(struct pm8001_hba_info *pm8001_ha,
		struct pm8001_ccb_info *ccb);
	int (*sata_req)(struct pm8001_hba_info *pm8001_ha,
		struct pm8001_ccb_info *ccb);
	int (*phy_start_req)(struct pm8001_hba_info *pm8001_ha,	u8 phy_id);
	int (*phy_stop_req)(struct pm8001_hba_info *pm8001_ha, u8 phy_id);
	int (*reg_dev_req)(struct pm8001_hba_info *pm8001_ha,
		struct pm8001_device *pm8001_dev, u32 flag);
	int (*dereg_dev_req)(struct pm8001_hba_info *pm8001_ha, u32 device_id);
	int (*phy_ctl_req)(struct pm8001_hba_info *pm8001_ha,
		u32 phy_id, u32 phy_op);
	int (*task_abort)(struct pm8001_hba_info *pm8001_ha,
		struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag,
		u32 cmd_tag);
	int (*ssp_tm_req)(struct pm8001_hba_info *pm8001_ha,
		struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf);
	int (*get_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
	int (*set_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
	int (*fw_flash_update_req)(struct pm8001_hba_info *pm8001_ha,
		void *payload);
	int (*set_dev_state_req)(struct pm8001_hba_info *pm8001_ha,
		struct pm8001_device *pm8001_dev, u32 state);
	int (*sas_diag_start_end_req)(struct pm8001_hba_info *pm8001_ha,
		u32 state);
	int (*sas_diag_execute_req)(struct pm8001_hba_info *pm8001_ha,
		u32 state);
	int (*sas_re_init_req)(struct pm8001_hba_info *pm8001_ha);
};

struct pm8001_chip_info {
	u32     encrypt;
	u32	n_phy;
	const struct pm8001_dispatch	*dispatch;
};
#define PM8001_CHIP_DISP	(pm8001_ha->chip->dispatch)
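
/*
 * The dispatch table decouples generic driver logic from SPC vs SPCv/SPCve
 * register programming. A hedged example of the indirection (the macro
 * assumes a local pm8001_ha variable):
 *
 *	rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
 */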

struct pm8001_port {
	struct asd_sas_port	sas_port;
	u8			port_attached;
	u8			wide_port_phymap;
	u8			port_state;
	struct list_head	list;
};

struct pm8001_phy {
	struct pm8001_hba_info	*pm8001_ha;
	struct pm8001_port	*port;
	struct asd_sas_phy	sas_phy;
	struct sas_identify	identify;
	struct scsi_device	*sdev;
	u64			dev_sas_addr;
	u32			phy_type;
	struct completion	*enable_completion;
	u32			frame_rcvd_size;
	u8			frame_rcvd[32];
	u8			phy_attached;
	u8			phy_state;
	enum sas_linkrate	minimum_linkrate;
	enum sas_linkrate	maximum_linkrate;
};

struct pm8001_device {
	enum sas_device_type	dev_type;
	struct domain_device	*sas_device;
	u32			attached_phy;
	u32			id;
	struct completion	*dcompletion;
	struct completion	*setds_completion;
	u32			device_id;
	u32			running_req;
};

struct pm8001_prd_imt {
	__le32			len;
	__le32			e;
};

struct pm8001_prd {
	__le64			addr;		/* 64-bit buffer address */
	struct pm8001_prd_imt	im_len;		/* 64-bit length */
} __attribute__ ((packed));
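
/*
 * A minimal sketch of filling one PRD entry from a DMA-mapped scatterlist
 * element (what a chip-specific make_prd hook is expected to do; variable
 * names here are assumptions):
 *
 *	buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
 *	buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
 *	buf_prd->im_len.e = 0;
 */
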
/*
 * CCB (Command Control Block)
 */
struct pm8001_ccb_info {
	struct list_head	entry;
	struct sas_task		*task;
	u32			n_elem;
	u32			ccb_tag;
	dma_addr_t		ccb_dma_handle;
	struct pm8001_device	*device;
	struct pm8001_prd	buf_prd[PM8001_MAX_DMA_SG];
	struct fw_control_ex	*fw_control_context;
	u8			open_retry;
};

struct mpi_mem {
	void			*virt_ptr;
	dma_addr_t		phys_addr;
	u32			phys_addr_hi;
	u32			phys_addr_lo;
	u32			total_len;
	u32			num_elements;
	u32			element_size;
	u32			alignment;
};

struct mpi_mem_req {
	/* The number of elements in the mpiMemory array */
	u32			count;
	/* The array of structures that define memory regions */
	struct mpi_mem		region[USI_MAX_MEMCNT];
};

struct encrypt {
	u32	cipher_mode;
	u32	sec_mode;
	u32	status;
	u32	flag;
};

struct sas_phy_attribute_table {
	u32	phystart1_16[16];
	u32	outbound_hw_event_pid1_16[16];
};

union main_cfg_table {
	struct {
	u32			signature;
	u32			interface_rev;
	u32			firmware_rev;
	u32			max_out_io;
	u32			max_sgl;
	u32			ctrl_cap_flag;
	u32			gst_offset;
	u32			inbound_queue_offset;
	u32			outbound_queue_offset;
	u32			inbound_q_nppd_hppd;
	u32			outbound_hw_event_pid0_3;
	u32			outbound_hw_event_pid4_7;
	u32			outbound_ncq_event_pid0_3;
	u32			outbound_ncq_event_pid4_7;
	u32			outbound_tgt_ITNexus_event_pid0_3;
	u32			outbound_tgt_ITNexus_event_pid4_7;
	u32			outbound_tgt_ssp_event_pid0_3;
	u32			outbound_tgt_ssp_event_pid4_7;
	u32			outbound_tgt_smp_event_pid0_3;
	u32			outbound_tgt_smp_event_pid4_7;
	u32			upper_event_log_addr;
	u32			lower_event_log_addr;
	u32			event_log_size;
	u32			event_log_option;
	u32			upper_iop_event_log_addr;
	u32			lower_iop_event_log_addr;
	u32			iop_event_log_size;
	u32			iop_event_log_option;
	u32			fatal_err_interrupt;
	u32			fatal_err_dump_offset0;
	u32			fatal_err_dump_length0;
	u32			fatal_err_dump_offset1;
	u32			fatal_err_dump_length1;
	u32			hda_mode_flag;
	u32			anolog_setup_table_offset;
	u32			rsvd[4];
	} pm8001_tbl;

	struct {
	u32			signature;
	u32			interface_rev;
	u32			firmware_rev;
	u32			max_out_io;
	u32			max_sgl;
	u32			ctrl_cap_flag;
	u32			gst_offset;
	u32			inbound_queue_offset;
	u32			outbound_queue_offset;
	u32			inbound_q_nppd_hppd;
	u32			rsvd[8];
	u32			crc_core_dump;
	u32			rsvd1;
	u32			upper_event_log_addr;
	u32			lower_event_log_addr;
	u32			event_log_size;
	u32			event_log_severity;
	u32			upper_pcs_event_log_addr;
	u32			lower_pcs_event_log_addr;
	u32			pcs_event_log_size;
	u32			pcs_event_log_severity;
	u32			fatal_err_interrupt;
	u32			fatal_err_dump_offset0;
	u32			fatal_err_dump_length0;
	u32			fatal_err_dump_offset1;
	u32			fatal_err_dump_length1;
	u32			gpio_led_mapping;
	u32			analog_setup_table_offset;
	u32			int_vec_table_offset;
	u32			phy_attr_table_offset;
	u32			port_recovery_timer;
	u32			interrupt_reassertion_delay;
	u32			fatal_n_non_fatal_dump;	        /* 0x28 */
	} pm80xx_tbl;
};

union general_status_table {
	struct {
	u32			gst_len_mpistate;
	u32			iq_freeze_state0;
	u32			iq_freeze_state1;
	u32			msgu_tcnt;
	u32			iop_tcnt;
	u32			rsvd;
	u32			phy_state[8];
	u32			gpio_input_val;
	u32			rsvd1[2];
	u32			recover_err_info[8];
	} pm8001_tbl;
	struct {
	u32			gst_len_mpistate;
	u32			iq_freeze_state0;
	u32			iq_freeze_state1;
	u32			msgu_tcnt;
	u32			iop_tcnt;
	u32			rsvd[9];
	u32			gpio_input_val;
	u32			rsvd1[2];
	u32			recover_err_info[8];
	} pm80xx_tbl;
};
struct inbound_queue_table {
	u32			element_pri_size_cnt;
	u32			upper_base_addr;
	u32			lower_base_addr;
	u32			ci_upper_base_addr;
	u32			ci_lower_base_addr;
	u32			pi_pci_bar;
	u32			pi_offset;
	u32			total_length;
	void			*base_virt;
	void			*ci_virt;
	u32			reserved;
	__le32			consumer_index;
	u32			producer_idx;
};
struct outbound_queue_table {
	u32			element_size_cnt;
	u32			upper_base_addr;
	u32			lower_base_addr;
	void			*base_virt;
	u32			pi_upper_base_addr;
	u32			pi_lower_base_addr;
	u32			ci_pci_bar;
	u32			ci_offset;
	u32			total_length;
	void			*pi_virt;
	u32			interrup_vec_cnt_delay;
	u32			dinterrup_to_pci_offset;
	__le32			producer_index;
	u32			consumer_idx;
};
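
/*
 * Queue index convention (a summary inferred from the fields above, not
 * normative): for inbound queues the driver owns producer_idx and the
 * firmware posts the consumer index into host memory (ci_virt); outbound
 * queues mirror this, with producer_index updated by firmware and
 * consumer_idx written back by the driver.
 */
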
struct pm8001_hba_memspace {
	void __iomem  		*memvirtaddr;
	u64			membase;
	u32			memsize;
};
struct isr_param {
	struct pm8001_hba_info *drv_inst;
	u32 irq_id;
};
struct pm8001_hba_info {
	char			name[PM8001_NAME_LENGTH];
	struct list_head	list;
	unsigned long		flags;
	spinlock_t		lock;/* host-wide lock */
	spinlock_t		bitmap_lock;
	struct pci_dev		*pdev;/* our device */
	struct device		*dev;
	struct pm8001_hba_memspace io_mem[6];
	struct mpi_mem_req	memoryMap;
	struct encrypt		encrypt_info; /* support encryption */
	struct forensic_data	forensic_info;
	u32			fatal_bar_loc;
	u32			forensic_last_offset;
	u32			fatal_forensic_shift_offset;
	u32			forensic_fatal_step;
	u32			evtlog_ib_offset;
	u32			evtlog_ob_offset;
	void __iomem	*msg_unit_tbl_addr;/* Message Unit Table Addr */
	void __iomem	*main_cfg_tbl_addr;/* Main Config Table Addr */
	void __iomem	*general_stat_tbl_addr;/* General Status Table Addr */
	void __iomem	*inbnd_q_tbl_addr;/* Inbound Queue Config Table Addr */
	void __iomem	*outbnd_q_tbl_addr;/* Outbound Queue Config Table Addr */
	void __iomem	*pspa_q_tbl_addr;
			/* MPI SAS PHY attributes Queue Config Table Addr */
	void __iomem	*ivt_tbl_addr; /* MPI IVT Table Addr */
	void __iomem	*fatal_tbl_addr; /* MPI Fatal Error Table Addr */
	union main_cfg_table	main_cfg_tbl;
	union general_status_table	gs_tbl;
	struct inbound_queue_table	inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
	struct outbound_queue_table	outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM];
	struct sas_phy_attribute_table	phy_attr_table;
					/* MPI SAS PHY attributes */
	u8			sas_addr[SAS_ADDR_SIZE];
	struct sas_ha_struct	*sas;/* SCSI/SAS glue */
	struct Scsi_Host	*shost;
	u32			chip_id;
	const struct pm8001_chip_info	*chip;
	struct completion	*nvmd_completion;
	int			tags_num;
	unsigned long		*tags;
	struct pm8001_phy	phy[PM8001_MAX_PHYS];
	struct pm8001_port	port[PM8001_MAX_PHYS];
	u32			id;
	u32			irq;
	u32			iomb_size; /* SPC and SPCV IOMB size */
	struct pm8001_device	*devices;
	struct pm8001_ccb_info	*ccb_info;
#ifdef PM8001_USE_MSIX
	struct msix_entry	msix_entries[PM8001_MAX_MSIX_VEC];
					/*for msi-x interrupt*/
	int			number_of_intr;/*will be used in remove()*/
#endif
#ifdef PM8001_USE_TASKLET
	struct tasklet_struct	tasklet[PM8001_MAX_MSIX_VEC];
#endif
	u32			logging_level;
	u32			fw_status;
	u32			smp_exp_mode;
	const struct firmware 	*fw_image;
	struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
};

struct pm8001_work {
	struct work_struct work;
	struct pm8001_hba_info *pm8001_ha;
	void *data;
	int handler;
};

struct pm8001_fw_image_header {
	u8 vender_id[8];
	u8 product_id;
	u8 hardware_rev;
	u8 dest_partition;
	u8 reserved;
	u8 fw_rev[4];
	__be32  image_length;
	__be32 image_crc;
	__be32 startup_entry;
} __attribute__((packed, aligned(4)));


/*
 * FW Flash Update status values
 */
#define FLASH_UPDATE_COMPLETE_PENDING_REBOOT	0x00
#define FLASH_UPDATE_IN_PROGRESS		0x01
#define FLASH_UPDATE_HDR_ERR			0x02
#define FLASH_UPDATE_OFFSET_ERR			0x03
#define FLASH_UPDATE_CRC_ERR			0x04
#define FLASH_UPDATE_LENGTH_ERR			0x05
#define FLASH_UPDATE_HW_ERR			0x06
#define FLASH_UPDATE_DNLD_NOT_SUPPORTED		0x10
#define FLASH_UPDATE_DISABLED			0x11

#define	NCQ_READ_LOG_FLAG			0x80000000
#define	NCQ_ABORT_ALL_FLAG			0x40000000
#define	NCQ_2ND_RLE_FLAG			0x20000000
/*
 * Param structure for firmware flash update.
 */
struct fw_flash_updata_info {
	u32			cur_image_offset;
	u32			cur_image_len;
	u32			total_image_len;
	struct pm8001_prd	sgl;
};

struct fw_control_info {
	u32			retcode;/*ret code (status)*/
	u32			phase;/*ret code phase*/
	u32			phaseCmplt;/*percent complete for the current
	update phase */
	u32			version;/*Hex encoded firmware version number*/
	u32			offset;/*Used for downloading firmware	*/
	u32			len; /*len of buffer*/
	u32			size;/* Used in OS VPD and Trace get size
	operations.*/
	u32			reserved;/* padding required for 64 bit
	alignment */
	u8			buffer[1];/* Start of buffer */
};
struct fw_control_ex {
	struct fw_control_info *fw_control;
	void			*buffer;/* keep buffer pointer to be
	freed when the response comes*/
	void			*virtAddr;/* keep virtual address of the data */
	void			*usrAddr;/* keep virtual address of the
	user data */
	dma_addr_t		phys_addr;
	u32			len; /* len of buffer  */
	void			*payload; /* pointer to IOCTL Payload */
	u8			inProgress;/*if 1 - the IOCTL request is in
	progress */
	void			*param1;
	void			*param2;
	void			*param3;
};

/* pm8001 workqueue */
extern struct workqueue_struct *pm8001_wq;

/******************** function prototype *********************/
int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out);
void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha);
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag);
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx);
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
	void *funcdata);
void pm8001_scan_start(struct Scsi_Host *shost);
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time);
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags);
int pm8001_abort_task(struct sas_task *task);
int pm8001_abort_task_set(struct domain_device *dev, u8 *lun);
int pm8001_clear_aca(struct domain_device *dev, u8 *lun);
int pm8001_clear_task_set(struct domain_device *dev, u8 *lun);
int pm8001_dev_found(struct domain_device *dev);
void pm8001_dev_gone(struct domain_device *dev);
int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
int pm8001_I_T_nexus_reset(struct domain_device *dev);
int pm8001_I_T_nexus_event_handler(struct domain_device *dev);
int pm8001_query_task(struct sas_task *task);
void pm8001_open_reject_retry(
	struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task_to_close,
	struct pm8001_device *device_to_close);
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
	dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
	u32 mem_size, u32 align);

void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
			struct inbound_queue_table *circularQ,
			u32 opCode, void *payload, u32 responseQueue);
int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
				u16 messageSize, void **messagePtr);
u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
			struct outbound_queue_table *circularQ, u8 bc);
u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
			struct outbound_queue_table *circularQ,
			void **messagePtr1, u8 *pBC);
int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
			struct pm8001_device *pm8001_dev, u32 state);
int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
					void *payload);
int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
					void *fw_flash_updata_info, u32 tag);
int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
				struct pm8001_ccb_info *ccb,
				struct pm8001_tmf_task *tmf);
int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
				struct pm8001_device *pm8001_dev,
				u8 flag, u32 task_tag, u32 cmd_tag);
int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id);
void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd);
void pm8001_work_fn(struct work_struct *work);
int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha,
					void *data, int handler);
void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
							void *piomb);
void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
							void *piomb);
void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
							void *piomb);
int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha,
							void *piomb);
void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate);
void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr);
void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i);
int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
							void *piomb);
int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb);
int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
struct sas_task *pm8001_alloc_task(void);
void pm8001_task_done(struct sas_task *task);
void pm8001_free_task(struct sas_task *task);
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag);
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
					u32 device_id);
int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha);

int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
	u32 length, u8 *buf);
int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
ssize_t pm80xx_get_fatal_dump(struct device *cdev,
		struct device_attribute *attr, char *buf);
ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);
/* ctl shared API */
extern struct device_attribute *pm8001_host_attrs[];
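
/*
 * Illustrative tag lifecycle for the prototypes above (a sketch, assuming
 * pm8001_tag_alloc() returns 0 on success):
 *
 *	u32 tag;
 *
 *	if (pm8001_tag_alloc(pm8001_ha, &tag))
 *		return -SAS_QUEUE_FULL;
 *	... build and post the command using "tag" ...
 *	pm8001_tag_free(pm8001_ha, tag);
 */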

static inline void
pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
			struct sas_task *task, struct pm8001_ccb_info *ccb,
			u32 ccb_idx)
{
	pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx);
	smp_mb(); /* make the CCB free visible before completing the task */
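	/*
	 * Drop the host lock around task_done(): the completion callback may
	 * re-enter the LLDD and take pm8001_ha->lock itself (an assumption
	 * based on the unlock/relock pattern here).
	 */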
	spin_unlock(&pm8001_ha->lock);
	task->task_done(task);
	spin_lock(&pm8001_ha->lock);
}

#endif