/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov,
 *		Jamal Hadi Salim, 990601
 *		- Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	q->gso_skb = skb;
	q->qstats.requeues++;
	q->q.qlen++;	/* it's still part of the queue */
	__netif_schedule(q);

	return 0;
}

/* Dequeue additional packets while the byte budget of the tx queue allows
 * it, chaining them via skb->next so the driver's tx lock is taken only
 * once for the whole batch.
 */
static void try_bulk_dequeue_skb(struct Qdisc *q,
				 struct sk_buff *skb,
				 const struct netdev_queue *txq,
				 int *packets)
{
	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

	while (bytelimit > 0) {
		struct sk_buff *nskb = q->dequeue(q);

		if (!nskb)
			break;

		bytelimit -= nskb->len; /* covers GSO len */
		skb->next = nskb;
		skb = nskb;
		(*packets)++; /* GSO counts as one pkt */
	}
	skb->next = NULL;
}

/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{
	struct sk_buff *skb = q->gso_skb;
	const struct netdev_queue *txq = q->dev_queue;

	*packets = 1;
	*validate = true;
	if (unlikely(skb)) {
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			q->gso_skb = NULL;
			q->q.qlen--;
		} else
			skb = NULL;
		/* skb in gso_skb were already validated */
		*validate = false;
	} else {
		if (!(q->flags & TCQ_F_ONETXQUEUE) ||
		    !netif_xmit_frozen_or_stopped(txq)) {
			skb = q->dequeue(q);
			if (skb && qdisc_may_bulk(q))
				try_bulk_dequeue_skb(q, skb, txq, packets);
		}
	}
	return skb;
}

static inline int handle_dev_cpu_collision(struct sk_buff *skb,
					   struct netdev_queue *dev_queue,
					   struct Qdisc *q)
{
	int ret;

	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
		/*
		 * Same CPU holding the lock. It may be a transient
		 * configuration error, when hard_start_xmit() recurses. We
		 * detect it by checking xmit owner and drop the packet when
		 * deadloop is detected. Return OK to try the next skb.
		 */
		kfree_skb_list(skb);
		net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
				     dev_queue->dev->name);
		ret = qdisc_qlen(q);
	} else {
		/*
		 * Another cpu is holding lock, requeue & delay xmits for
		 * some time.
		 */
		__this_cpu_inc(softnet_data.cpu_collision);
		ret = dev_requeue_skb(skb, q);
	}

	return ret;
}

/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Holding the __QDISC___STATE_RUNNING bit guarantees that
 * only one CPU can execute this function.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;

	/* And release qdisc */
	spin_unlock(root_lock);

	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev);

	if (skb) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);

		HARD_TX_UNLOCK(dev, txq);
	}
	spin_lock(root_lock);

	if (dev_xmit_complete(ret)) {
		/* Driver sent out skb successfully or skb was consumed */
		ret = qdisc_qlen(q);
	} else if (ret == NETDEV_TX_LOCKED) {
		/* Driver try lock failed */
		ret = handle_dev_cpu_collision(skb, txq, q);
	} else {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, q);
	}

	if (ret && netif_xmit_frozen_or_stopped(txq))
		ret = 0;

	return ret;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC___STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 * if one is grabbed, another must be free.
 *
 * Note, that this procedure can be called by a watchdog timer
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct Qdisc *q, int *packets)
{
	struct netdev_queue *txq;
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;
	bool validate;

	/* Dequeue packet */
	skb = dequeue_skb(q, &validate, packets);
	if (unlikely(!skb))
		return 0;

	root_lock = qdisc_lock(q);
	dev = qdisc_dev(q);
	txq = skb_get_tx_queue(dev, skb);

	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}

void __qdisc_run(struct Qdisc *q)
{
	int quota = weight_p;
	int packets;

	while (qdisc_restart(q, &packets)) {
		/*
		 * Ordered by possible occurrence: Postpone processing if
		 * 1. we've exceeded packet quota
		 * 2. another process needs the CPU;
		 */
		quota -= packets;
		if (quota <= 0 || need_resched()) {
			__netif_schedule(q);
			break;
		}
	}

	qdisc_run_end(q);
}

unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res;
	unsigned int i;

	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	res = dev->trans_start;
	for (i = 0; i < dev->num_tx_queues; i++) {
		val = n
/*
 * Set up the interrupt priorities
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/irq.h>
#include <asm/blackfin.h>

void __init program_IAR(void)
{
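	/*
	 * Each SIC_IARx register packs eight 4-bit fields, one per
	 * peripheral interrupt source.  A field value of N routes that
	 * source to core interrupt IVG(N + 7), so the configured Kconfig
	 * level (7..15) has 7 subtracted before being shifted into its
	 * *_POS slot.
	 */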
	/* Program the IAR0 Register with the configured priority */
	bfin_write_SIC_IAR0(((CONFIG_IRQ_PLL_WAKEUP - 7) << IRQ_PLL_WAKEUP_POS) |
			     ((CONFIG_IRQ_DMA1_ERROR - 7) << IRQ_DMA1_ERROR_POS) |
			     ((CONFIG_IRQ_DMA2_ERROR - 7) << IRQ_DMA2_ERROR_POS) |
			     ((CONFIG_IRQ_IMDMA_ERROR - 7) << IRQ_IMDMA_ERROR_POS) |
			     ((CONFIG_IRQ_PPI0_ERROR - 7) << IRQ_PPI0_ERROR_POS) |
			     ((CONFIG_IRQ_PPI1_ERROR - 7) << IRQ_PPI1_ERROR_POS) |
			     ((CONFIG_IRQ_SPORT0_ERROR - 7) << IRQ_SPORT0_ERROR_POS) |
			     ((CONFIG_IRQ_SPORT1_ERROR - 7) << IRQ_SPORT1_ERROR_POS));

	bfin_write_SIC_IAR1(((CONFIG_IRQ_SPI_ERROR - 7) << IRQ_SPI_ERROR_POS) |
			     ((CONFIG_IRQ_UART_ERROR - 7) << IRQ_UART_ERROR_POS) |
			     ((CONFIG_IRQ_RESERVED_ERROR - 7) << IRQ_RESERVED_ERROR_POS) |
			     ((CONFIG_IRQ_DMA1_0 - 7) << IRQ_DMA1_0_POS) |
			     ((CONFIG_IRQ_DMA1_1 - 7) << IRQ_DMA1_1_POS) |
			     ((CONFIG_IRQ_DMA1_2 - 7) << IRQ_DMA1_2_POS) |
			     ((CONFIG_IRQ_DMA1_3 - 7) << IRQ_DMA1_3_POS) |
			     ((CONFIG_IRQ_DMA1_4 - 7) << IRQ_DMA1_4_POS));

	bfin_write_SIC_IAR2(((CONFIG_IRQ_DMA1_5 - 7) << IRQ_DMA1_5_POS) |
			     ((CONFIG_IRQ_DMA1_6 - 7) << IRQ_DMA1_6_POS) |
			     ((CONFIG_IRQ_DMA1_7 - 7) << IRQ_DMA1_7_POS) |
			     ((CONFIG_IRQ_DMA1_8 - 7) << IRQ_DMA1_8_POS) |
			     ((CONFIG_IRQ_DMA1_9 - 7) << IRQ_DMA1_9_POS) |
			     ((CONFIG_IRQ_DMA1_10 - 7) << IRQ_DMA1_10_POS) |
			     ((CONFIG_IRQ_DMA1_11 - 7) << IRQ_DMA1_11_POS) |
			     ((CONFIG_IRQ_DMA2_0 - 7) << IRQ_DMA2_0_POS));

	bfin_write_SIC_IAR3(((CONFIG_IRQ_DMA2_1 - 7) << IRQ_DMA2_1_POS) |
			     ((CONFIG_IRQ_DMA2_2 - 7) << IRQ_DMA2_2_POS) |
			     ((CONFIG_IRQ_DMA2_3 - 7) << IRQ_DMA2_3_POS) |
			     ((CONFIG_IRQ_DMA2_4 - 7) << IRQ_DMA2_4_POS) |
			     ((CONFIG_IRQ_DMA2_5 - 7) <