path: root/kernel/net/sched/em_canid.c
blob: ddd883ca55b2c1f18bd24500d6af457db5b0cd4a (plain)
/*
 * em_canid.c  Ematch rule to match CAN frames according to their CAN IDs
 *
 *              This program is free software; you can distribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Idea:       Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
 * Copyright:  (c) 2011 Czech Technical University in Prague
 *             (c) 2011 Volkswagen Group Research
 * Authors:    Michal Sojka <sojkam1@fel.cvut.cz>
 *             Pavel Pisa <pisa@cmp.felk.cvut.cz>
 *             Rostislav Lisovy <lisovy@gmail.cz>
 * Funded by:  Volkswagen Group Research
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <linux/can.h>

#define EM_CAN_RULES_MAX 500

struct canid_match {
	/* For each SFF CAN ID (11 bit) there is one record in this bitfield */
	DECLARE_BITMAP(match_sff, (1 << CAN_SFF_ID_BITS));

	int rules_count;
	int sff_rules_count;
	int eff_rules_count;

	/*
	 * Raw rules copied from netlink message; Used for sending
	 * information to userspace (when 'tc filter show' is invoked)
	 * AND when matching EFF frames
	 */
	struct can_filter rules_raw[];
};

/**
 * em_canid_get_id() - Extracts the CAN ID out of the sk_buff structure.
 */
static canid_t em_canid_get_id(struct sk_buff *skb)
{
	/* CAN ID is stored within the data field */
	struct can_frame *cf = (struct can_frame *)skb->data;

	return cf->can_id;
}

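/**
 * em_canid_sff_match_add() - Expands one SFF rule into the match_sff bitmap.
 *
 * A rule with the full SFF mask sets a single bit, an all-zero mask fills
 * the whole bitmap, and any other mask walks all 2048 SFF IDs and sets the
 * bit for every ID that satisfies (id & can_mask) == can_id.
 */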
static void em_canid_sff_match_add(struct canid_match *cm, u32 can_id,
					u32 can_mask)
{
	int i;

	/*
	 * Limit can_mask and can_id to SFF range to
	 * protect against write after end of array
	 */
	can_mask &= CAN_SFF_MASK;
	can_id &= can_mask;

	/* Single frame */
	if (can_mask == CAN_SFF_MASK) {
		set_bit(can_id, cm->match_sff);
		return;
	}

	/* All frames */
	if (can_mask == 0) {
		bitmap_fill(cm->match_sff, (1 << CAN_SFF_ID_BITS));
		return;
	}

	/*
	 * Individual frame filter.
	 * Add a record (set the bit to 1) for each ID that
	 * conforms to this particular rule.
	 */
	for (i = 0; i < (1 << CAN_SFF_ID_BITS); i++) {
		if ((i & can_mask) == can_id)
			set_bit(i, cm->match_sff);
	}
}

static inline struct canid_match *em_canid_priv(struct tcf_ematch *m)
{
	return (struct canid_match *)m->data;
}

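/**
 * em_canid_match() - Matches one skb against the configured CAN ID rules.
 *
 * EFF frames are compared against the raw EFF rules with a linear scan;
 * SFF frames are looked up directly in the precomputed match_sff bitmap.
 */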
static int em_canid_match(struct sk_buff *skb, struct tcf_ematch *m,
			 struct tcf_pkt_info *info)
{
	struct canid_match *cm = em_canid_priv(m);
	canid_t can_id;
	int match = 0;
	int i;
	const struct can_filter *lp;

	can_id = em_canid_get_id(skb);

	if (can_id & CAN_EFF_FLAG) {
		for (i = 0, lp = cm->rules_raw;
		     i < cm->eff_rules_count; i++, lp++) {
			if (!(((lp->can_id ^ can_id) & lp->can_mask))) {
				match = 1;
				break;
			}
		}
	} else { /* SFF */
		can_id &= CAN_SFF_MASK;
		match = (test_bit(can_id, cm->match_sff) ? 1 : 0);
	}

	return match;
}

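/**
 * em_canid_change() - Validates and stores the ematch configuration.
 *
 * The netlink payload is expected to be an array of struct can_filter with
 * at most EM_CAN_RULES_MAX entries. EFF rules are copied to the front of
 * rules_raw, SFF rules are appended after them and additionally expanded
 * into the match_sff bitmap.
 */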
static int em_canid_change(struct net *net, void *data, int len,
			  struct tcf_ematch *m)
{
	struct can_filter *conf = data; /* Array with rules */
	struct canid_match *cm;
	int i;

	if (!len)
		return -EINVAL;

	if (len % sizeof(struct can_filter))
		return -EINVAL;

	if (len > sizeof(struct can_filter) * EM_CAN_RULES_MAX)
		return -EINVAL;

	cm = kzalloc(sizeof(struct canid_match) + len, GFP_KERNEL);
	if (!cm)
		return -ENOMEM;

	cm->rules_count = len / sizeof(struct can_filter);

	/*
	 * We need two for() loops for copying rules into two contiguous
	 * areas in rules_raw to process all eff rules with a simple loop.
	 * NB: The configuration interface supports sff and eff rules.
	 * We do not support filters here that match for the same can_id
	 * provided in a SFF and EFF frame (e.g. 0x123 / 0x80000123).
	 * For this (unusual case) two filters have to be specified. The
	 * SFF/EFF separation is done with the CAN_EFF_FLAG in the can_id.
	 */

	/* Fill rules_raw with EFF rules first */
	for (i = 0; i < cm->rules_count; i++) {
		if (conf[i].can_id & CAN_EFF_FLAG) {
			memcpy(cm->rules_raw + cm->eff_rules_count,
				&conf[i],
				sizeof(struct can_filter));

			cm->eff_rules_count++;
		}
	}

	/* append SFF frame rules */
	for (i = 0; i < cm->rules_count; i++) {
		if (!(conf[i].can_id & CAN_EFF_FLAG)) {
			memcpy(cm->rules_raw
				+ cm->eff_rules_count
				+ cm->sff_rules_count,
				&conf[i], sizeof(struct can_filter));

			cm->sff_rules_count++;

			em_canid_sff_match_add(cm,
				conf[i].can_id, conf[i].can_mask);
		}
	}

	m->datalen = sizeof(struct canid_match) + len;
	m->data = (unsigned long)cm;
	return 0;
}

static void em_canid_destroy(struct tcf_ematch *m)
{
	struct canid_match *cm = em_canid_priv(m);

	kfree(cm);
}

static int em_canid_dump(struct sk_buff *skb, struct tcf_ematch *m)
{
	struct canid_match *cm = em_canid_priv(m);

	/*
	 * When configuring this ematch, 'rules_count' is set so as not to
	 * exceed the 'rules_raw' array size
	 */
	if (nla_put_nohdr(skb, sizeof(struct can_filter) * cm->rules_count,
	    &cm->rules_raw) < 0)
		return -EMSGSIZE;

	return 0;
}

static struct tcf_ematch_ops em_canid_ops = {
	.kind	  = TCF_EM_CANID,
	.change	  = em_canid_change,
	.match	  = em_canid_match,
	.destroy  = em_canid_destroy,
	.dump	  = em_canid_dump,
	.owner	  = THIS_MODULE,
	.link	  = LIST_HEAD_INIT(em_canid_ops.link)
};

static int __init init_em_canid(void)
{
	return tcf_em_register(&em_canid_ops);
}

static void __exit exit_em_canid(void)
{
	tcf_em_unregister(&em_canid_ops);
}

MODULE_LICENSE("GPL");

module_init(init_em_canid);
module_exit(exit_em_canid);

MODULE_ALIAS_TCF_EMATCH(TCF_EM_CANID);
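
For reference, the payload that em_canid_change() parses is a flat array of
struct can_filter entries passed down via netlink by the configuring tool
(e.g. tc's 'canid' ematch). The following userspace-side sketch is not part
of the kernel file; it only illustrates, under the assumption that the tool
lays the rules out this way, how SFF and EFF rules could be encoded. The
concrete IDs and masks below are made-up examples.

#include <linux/can.h>

/* Hypothetical rule table handed to the kernel as the ematch data blob. */
static const struct can_filter canid_rules[] = {
	/* SFF rule: match exactly the 11-bit ID 0x123 */
	{ .can_id = 0x123, .can_mask = CAN_SFF_MASK },
	/* SFF rule: match the whole range 0x200..0x2ff */
	{ .can_id = 0x200, .can_mask = 0x700 },
	/* EFF rule: CAN_EFF_FLAG selects the EFF matching path */
	{ .can_id = 0x12345678 | CAN_EFF_FLAG,
	  .can_mask = CAN_EFF_MASK | CAN_EFF_FLAG },
};
/* sizeof(canid_rules) is the 'len' validated by em_canid_change(). */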
ss="n">i; if (cmd->tf_flags & IDE_TFLAG_WRITE) cmd->sg_dma_direction = DMA_TO_DEVICE; else cmd->sg_dma_direction = DMA_FROM_DEVICE; i = dma_map_sg(hwif->dev, sg, cmd->sg_nents, cmd->sg_dma_direction); if (i) { cmd->orig_sg_nents = cmd->sg_nents; cmd->sg_nents = i; } return i; } /** * ide_dma_unmap_sg - clean up DMA mapping * @drive: The drive to unmap * * Teardown mappings after DMA has completed. This must be called * after the completion of each use of ide_build_dmatable and before * the next use of ide_build_dmatable. Failure to do so will cause * an oops as only one mapping can be live for each target at a given * time. */ void ide_dma_unmap_sg(ide_drive_t *drive, struct ide_cmd *cmd) { ide_hwif_t *hwif = drive->hwif; dma_unmap_sg(hwif->dev, hwif->sg_table, cmd->orig_sg_nents, cmd->sg_dma_direction); } EXPORT_SYMBOL_GPL(ide_dma_unmap_sg); /** * ide_dma_off_quietly - Generic DMA kill * @drive: drive to control * * Turn off the current DMA on this IDE controller. */ void ide_dma_off_quietly(ide_drive_t *drive) { drive->dev_flags &= ~IDE_DFLAG_USING_DMA; ide_toggle_bounce(drive, 0); drive->hwif->dma_ops->dma_host_set(drive, 0); } EXPORT_SYMBOL(ide_dma_off_quietly); /** * ide_dma_off - disable DMA on a device * @drive: drive to disable DMA on * * Disable IDE DMA for a device on this IDE controller. * Inform the user that DMA has been disabled. */ void ide_dma_off(ide_drive_t *drive) { printk(KERN_INFO "%s: DMA disabled\n", drive->name); ide_dma_off_quietly(drive); } EXPORT_SYMBOL(ide_dma_off); /** * ide_dma_on - Enable DMA on a device * @drive: drive to enable DMA on * * Enable IDE DMA for a device on this IDE controller. */ void ide_dma_on(ide_drive_t *drive) { drive->dev_flags |= IDE_DFLAG_USING_DMA; ide_toggle_bounce(drive, 1); drive->hwif->dma_ops->dma_host_set(drive, 1); } int __ide_dma_bad_drive(ide_drive_t *drive) { u16 *id = drive->id; int blacklist = ide_in_drive_list(id, drive_blacklist); if (blacklist) { printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n", drive->name, (char *)&id[ATA_ID_PROD]); return blacklist; } return 0; } EXPORT_SYMBOL(__ide_dma_bad_drive); static const u8 xfer_mode_bases[] = { XFER_UDMA_0, XFER_MW_DMA_0, XFER_SW_DMA_0, }; static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode) { u16 *id = drive->id; ide_hwif_t *hwif = drive->hwif; const struct ide_port_ops *port_ops = hwif->port_ops; unsigned int mask = 0; switch (base) { case XFER_UDMA_0: if ((id[ATA_ID_FIELD_VALID] & 4) == 0) break; mask = id[ATA_ID_UDMA_MODES]; if (port_ops && port_ops->udma_filter) mask &= port_ops->udma_filter(drive); else mask &= hwif->ultra_mask; /* * avoid false cable warning from eighty_ninty_three() */ if (req_mode > XFER_UDMA_2) { if ((mask & 0x78) && (eighty_ninty_three(drive) == 0)) mask &= 0x07; } break; case XFER_MW_DMA_0: mask = id[ATA_ID_MWDMA_MODES]; /* Also look for the CF specific MWDMA modes... 
*/ if (ata_id_is_cfa(id) && (id[ATA_ID_CFA_MODES] & 0x38)) { u8 mode = ((id[ATA_ID_CFA_MODES] & 0x38) >> 3) - 1; mask |= ((2 << mode) - 1) << 3; } if (port_ops && port_ops->mdma_filter) mask &= port_ops->mdma_filter(drive); else mask &= hwif->mwdma_mask; break; case XFER_SW_DMA_0: mask = id[ATA_ID_SWDMA_MODES]; if (!(mask & ATA_SWDMA2) && (id[ATA_ID_OLD_DMA_MODES] >> 8)) { u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8; /* * if the mode is valid convert it to the mask * (the maximum allowed mode is XFER_SW_DMA_2) */ if (mode <= 2) mask = (2 << mode) - 1; } mask &= hwif->swdma_mask; break; default: BUG(); break; } return mask; } /** * ide_find_dma_mode - compute DMA speed * @drive: IDE device * @req_mode: requested mode * * Checks the drive/host capabilities and finds the speed to use for * the DMA transfer. The speed is then limited by the requested mode. * * Returns 0 if the drive/host combination is incapable of DMA transfers * or if the requested mode is not a DMA mode. */ u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode) { ide_hwif_t *hwif = drive->hwif; unsigned int mask; int x, i; u8 mode = 0; if (drive->media != ide_disk) { if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA) return 0; } for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) { if (req_mode < xfer_mode_bases[i]) continue; mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode); x = fls(mask) - 1; if (x >= 0) { mode = xfer_mode_bases[i] + x; break; } } if (hwif->chipset == ide_acorn && mode == 0) { /* * is this correct? */ if (ide_dma_good_drive(drive) && drive->id[ATA_ID_EIDE_DMA_TIME] < 150) mode = XFER_MW_DMA_1; } mode = min(mode, req_mode); printk(KERN_INFO "%s: %s mode selected\n", drive->name, mode ? ide_xfer_verbose(mode) : "no DMA"); return mode; } static int ide_tune_dma(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u8 speed; if (ata_id_has_dma(drive->id) == 0 || (drive->dev_flags & IDE_DFLAG_NODMA)) return 0; /* consult the list of known "bad" drives */ if (__ide_dma_bad_drive(drive)) return 0; if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA) return config_drive_for_dma(drive); speed = ide_max_dma_mode(drive); if (!speed) return 0; if (ide_set_dma_mode(drive, speed)) return 0; return 1; } static int ide_dma_check(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; if (ide_tune_dma(drive)) return 0; /* TODO: always do PIO fallback */ if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA) return -1; ide_set_max_pio(drive); return -1; } int ide_set_dma(ide_drive_t *drive) { int rc; /* * Force DMAing for the beginning of the check. * Some chipsets appear to do interesting * things, if not checked and cleared. * PARANOIA!!! */ ide_dma_off_quietly(drive); rc = ide_dma_check(drive); if (rc) return rc; ide_dma_on(drive); return 0; } void ide_check_dma_crc(ide_drive_t *drive) { u8 mode; ide_dma_off_quietly(drive); drive->crc_count = 0; mode = drive->current_speed; /* * Don't try non Ultra-DMA modes without iCRC's. Force the * device to PIO and make the user enable SWDMA/MWDMA modes. */ if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7) mode--; else mode = XFER_PIO_4; ide_set_xfer_rate(drive, mode); if (drive->current_speed >= XFER_SW_DMA_0) ide_dma_on(drive); } void ide_dma_lost_irq(ide_drive_t *drive) { printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name); } EXPORT_SYMBOL_GPL(ide_dma_lost_irq); /* * un-busy the port etc, and clear any pending DMA status. 
we want to * retry the current request in pio mode instead of risking tossing it * all away */ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { ide_hwif_t *hwif = drive->hwif; const struct ide_dma_ops *dma_ops = hwif->dma_ops; struct ide_cmd *cmd = &hwif->cmd; ide_startstop_t ret = ide_stopped; /* * end current dma transaction */ if (error < 0) { printk(KERN_WARNING "%s: DMA timeout error\n", drive->name); drive->waiting_for_dma = 0; (void)dma_ops->dma_end(drive); ide_dma_unmap_sg(drive, cmd); ret = ide_error(drive, "dma timeout error", hwif->tp_ops->read_status(hwif)); } else { printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name); if (dma_ops->dma_clear) dma_ops->dma_clear(drive); printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name); if (dma_ops->dma_test_irq(drive) == 0) { ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif)); drive->waiting_for_dma = 0; (void)dma_ops->dma_end(drive); ide_dma_unmap_sg(drive, cmd); } } /* * disable dma for now, but remember that we did so because of * a timeout -- we'll reenable after we finish this next request * (or rather the first chunk of it) in pio. */ drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY; drive->retry_pio++; ide_dma_off_quietly(drive); /* * make sure request is sane */ if (hwif->rq) hwif->rq->errors = 0; return ret; } void ide_release_dma_engine(ide_hwif_t *hwif) { if (hwif->dmatable_cpu) { int prd_size = hwif->prd_max_nents * hwif->prd_ent_size; dma_free_coherent(hwif->dev, prd_size, hwif->dmatable_cpu, hwif->dmatable_dma); hwif->dmatable_cpu = NULL; } } EXPORT_SYMBOL_GPL(ide_release_dma_engine); int ide_allocate_dma_engine(ide_hwif_t *hwif) { int prd_size; if (hwif->prd_max_nents == 0) hwif->prd_max_nents = PRD_ENTRIES; if (hwif->prd_ent_size == 0) hwif->prd_ent_size = PRD_BYTES; prd_size = hwif->prd_max_nents * hwif->prd_ent_size; hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size, &hwif->dmatable_dma, GFP_ATOMIC); if (hwif->dmatable_cpu == NULL) { printk(KERN_ERR "%s: unable to allocate PRD table\n", hwif->name); return -ENOMEM; } return 0; } EXPORT_SYMBOL_GPL(ide_allocate_dma_engine); int ide_dma_prepare(ide_drive_t *drive, struct ide_cmd *cmd) { const struct ide_dma_ops *dma_ops = drive->hwif->dma_ops; if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 || (dma_ops->dma_check && dma_ops->dma_check(drive, cmd))) goto out; ide_map_sg(drive, cmd); if (ide_dma_map_sg(drive, cmd) == 0) goto out_map; if (dma_ops->dma_setup(drive, cmd)) goto out_dma_unmap; drive->waiting_for_dma = 1; return 0; out_dma_unmap: ide_dma_unmap_sg(drive, cmd); out_map: ide_map_sg(drive, cmd); out: return 1; }