/*
// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#include <rte_cycles.h>
#include <rte_table_hash.h>
#include <rte_lpm.h>
#include <rte_version.h>
#include <rte_byteorder.h>
#include <rte_memcpy.h>

#include "prox_lua.h"
#include "prox_lua_types.h"
#include "handle_qinq_decap4.h"
#include "handle_qinq_encap4.h"
#include "stats.h"
#include "tx_pkt.h"
#include "defines.h"
#include "handle_routing.h"
#include "prox_assert.h"
#include "task_init.h"
#include "quit.h"
#include "pkt_prototypes.h"
#include "task_base.h"
#include "bng_pkts.h"
#include "prox_cksum.h"
#include "expire_cpe.h"
#include "prox_port_cfg.h"
#include "prefetch.h"
#include "prox_cfg.h"
#include "lconf.h"
#include "prox_shared.h"
#include "prox_compat.h"

struct task_qinq_decap4 {
	struct task_base        base;
	struct rte_table_hash   *cpe_table;
	struct rte_table_hash   *qinq_gre_table;
	struct qinq_gre_data    *qinq_gre_data;
	struct next_hop         *next_hops;
	struct rte_lpm          *ipv4_lpm;
	uint32_t                local_ipv4;
	uint16_t                qinq_tag;

	uint8_t                 runtime_flags;
	int                     offload_crc;

	uint64_t                keys[64];
	uint64_t                src_mac[PROX_MAX_PORTS];
	struct rte_mbuf         *fake_packets[64];

	struct expire_cpe       expire_cpe;
	uint64_t                cpe_timeout;
	uint8_t                 mapping[PROX_MAX_PORTS];
};

static uint8_t handle_qinq_decap4(struct task_qinq_decap4 *task, struct rte_mbuf *mbuf, struct qinq_gre_data *entry);

/* Convert IPv4 packets to GRE and optionally store QinQ Tags */
static void arp_update(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
static void arp_msg(struct task_base *tbase, void **data, uint16_t n_msgs);

static void init_task_qinq_decap4(struct task_base *tbase, struct task_args *targ)
{
	struct task_qinq_decap4 *task = (struct task_qinq_decap4 *)tbase;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
	struct lpm4 *lpm;

	task->cpe_table = targ->cpe_table;
	task->cpe_timeout = msec_to_tsc(targ->cpe_table_timeout_ms);

	PROX_PANIC(!strcmp(targ->route_table, ""), "route table not specified\n");
	lpm = prox_sh_find_socket(socket_id, targ->route_table);
	if (!lpm) {
		int ret = lua_to_lpm4(prox_lua(), GLOBAL, targ->route_table, socket_id, &lpm);
		PROX_PANIC(ret, "Failed to load IPv4 LPM:\n%s\n", get_lua_to_errors());
		prox_sh_add_socket(socket_id, targ->route_table, lpm);
	}
	task->ipv4_lpm = lpm->rte_lpm;
	task->next_hops = lpm->next_hops;

	task->qinq_tag = targ->qinq_tag;
	task->local_ipv4 = targ->local_ipv4;
	task->runtime_flags = targ->runtime_flags;
	if (strcmp(targ->task_init->sub_mode_str, "pe"))
		PROX_PANIC(targ->qinq_gre_table == NULL, "can't set up qinq gre\n");

	task->qinq_gre_table = targ->qinq_gre_table;

	if (targ->cpe_table_timeout_ms) {
		targ->lconf->period_func = check_expire_cpe;
		task->expire_cpe.cpe_table = task->cpe_table;
		targ->lconf->period_data = &task->expire_cpe;
		targ->lconf->period_timeout = msec_to_tsc(500) / NUM_VCPES;
	}

	/* Each fake mbuf pointer is placed sizeof(struct rte_mbuf) bytes
	   before task->keys[i], so a table lookup op that reads the key
	   at a fixed offset from the mbuf pointer finds the precomputed
	   key instead of real packet data. */
	for (uint32_t i = 0; i < 64; ++i) {
		task->fake_packets[i] = (struct rte_mbuf *)((uint8_t *)&task->keys[i] - sizeof(struct rte_mbuf));
	}

	if (task->runtime_flags & TASK_ROUTING) {
		if (targ->nb_txrings) {
			struct task_args *dtarg;
			struct core_task ct;

			for (uint32_t i = 0; i < targ->nb_txrings; ++i) {
				ct = targ->core_task_set[0].core_task[i];
				dtarg = core_targ_get(ct.core, ct.task);
				dtarg = find_reachable_task_sending_to_port(dtarg);
				PROX_PANIC(dtarg == NULL, "Error finding destination port through other tasks for outgoing ring %u\n", i);
				task->src_mac[i] = *(uint64_t *)&prox_port_cfg[dtarg->tx_port_queue[0].port].eth_addr;
			}
		} else {
			for (uint32_t i = 0; i < targ->nb_txports; ++i) {
				task->src_mac[i] = *(uint64_t *)&prox_port_cfg[targ->tx_port_queue[i].port].eth_addr;
			}
		}
	}

	if (targ->runtime_flags & TASK_CTRL_HANDLE_ARP) {
		targ->lconf->ctrl_func_p[targ->task] = arp_update;
	}

	/* Copy the mapping from a sibling task which is configured with
	   mode encap4. The mapping is constant, so it is faster to apply
	   it when entries are added (least common case) instead of
	   re-applying it for every packet (most common case). */
	for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) {
		enum task_mode smode = targ->lconf->targs[task_id].mode;
		if (QINQ_ENCAP4 == smode) {
			for (uint8_t i = 0; i < PROX_MAX_PORTS; ++i) {
				task->mapping[i] = targ->lconf->targs[task_id].mapping[i];
			}
		}
	}

	/* Use the checksum offload capabilities of the port this task
	   eventually transmits to when computing IP checksums. */
	struct prox_port_cfg *port = find_reachable_port(targ);
	if (port) {
		task->offload_crc = port->capabilities.tx_offload_cksum;
	}
}
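/* Sketch (an assumption, not code from this file) of how the fake mbufs
 * built in init_task_qinq_decap4() are typically consumed: a DPDK
 * rte_table lookup op reads each lookup key at a fixed offset from the
 * mbuf pointer, so pointing a fake mbuf just before task->keys[i] makes
 * the op read that precomputed key. With the generic lookup prototype
 * from rte_table.h, a bulk lookup would look roughly like:
 *
 *   uint64_t lookup_hit_mask;
 *   struct qinq_gre_data *entries[64];
 *   ops->f.lookup(task->qinq_gre_table, task->fake_packets, pkts_mask,
 *                 &lookup_hit_mask, (void **)entries);
 *
 * where ops is whichever rte_table_ops the qinq_gre table was created
 * with, and pkts_mask has one bit set per valid key. */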
#!/bin/bash

set -ex

wget_opts="-N --tries=1 --connect-timeout=30"

cat << EOF | wget ${wget_opts} -i - -P "${1:-/home/opnfv/functest/images}"
http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img
https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2
http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-lxc.tar.gz
http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-disk.img
http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-initramfs
http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-kernel
https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-arm64-uefi1.img
http://cloud.centos.org/altarch/7/images/aarch64/CentOS-7-aarch64-GenericCloud.qcow2.xz
https://sourceforge.net/projects/ool-opnfv/files/vyos-1.1.7.img
http://marketplace.openbaton.org:8080/api/v1/images/52e2ccc0-1dce-4663-894d-28aab49323aa/img
EOF

xz --decompress --force --keep "${1:-/home/opnfv/functest/images}/CentOS-7-aarch64-GenericCloud.qcow2.xz"
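
# Example invocation (illustrative; assumes this script is saved as
# download_images.sh): fetch everything into a custom directory instead
# of the default /home/opnfv/functest/images used above:
#   ./download_images.sh /tmp/functest/images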
	packet->tunnel_ip_hdr.src_addr = src_ipv4;
	packet->tunnel_ip_hdr.dst_addr = task->next_hops[next_hop_index].ip_dst;
	if (task->runtime_flags & TASK_TX_CRC) {
#ifdef MPLS_ROUTING
		prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(struct ether_hdr) + sizeof(struct mpls_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
#else
		prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
#endif
	}

	/* Add GRE Header values */
	rte_memcpy(&packet->gre_hdr, &gre_hdr_proto, sizeof(struct gre_hdr));
	packet->gre_hdr.gre_id = rte_be_to_cpu_32(gre_id);

	return port_id;
}

static void extract_key_data(struct rte_mbuf *mbuf, struct cpe_key *key, struct cpe_data *data, const struct qinq_gre_data *entry, uint64_t cpe_timeout, uint8_t *mapping)
{
	struct cpe_pkt *packet = rte_pktmbuf_mtod(mbuf, struct cpe_pkt *);
	uint8_t port_id;

#ifndef USE_QINQ
	const uint32_t tmp = rte_bswap32(packet->ipv4_hdr.src_addr) & 0x00FFFFFF;
	const uint32_t svlan = rte_bswap16(tmp >> 12);
	const uint32_t cvlan = rte_bswap16(tmp & 0x0FFF);
#endif

#ifdef USE_QINQ
	key->ip = packet->ipv4_hdr.src_addr;
#else
	key->ip = 0;
#endif
	key->gre_id = entry->gre_id;

#ifdef USE_QINQ
	data->mac_port_8bytes = *((const uint64_t *)(&packet->qinq_hdr.s_addr));
	/* The mask keeps the 12-bit VLAN ID and clears PCP/DEI;
	   vlan_tci is in network byte order. */
	data->qinq_svlan = packet->qinq_hdr.svlan.vlan_tci & 0xFF0F;
	data->qinq_cvlan = packet->qinq_hdr.cvlan.vlan_tci & 0xFF0F;
#else
	data->mac_port_8bytes = *((const uint64_t *)(&packet->ether_hdr.s_addr));
	data->qinq_svlan = svlan;
	data->qinq_cvlan = cvlan;
#endif

#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
	port_id = mbuf->port;
#else
	port_id = mbuf->pkt.in_port;
#endif
	uint8_t mapped = mapping[port_id];

	if (unlikely(mapped == 255)) {
		/* This error only occurs if the system is configured incorrectly */
		plog_warn("Failed adding packet: unknown mapping for port %d", port_id);
		data->mac_port.out_idx = 0;
	} else {
		data->mac_port.out_idx = mapped;
	}

	data->user = entry->user;
	data->tsc = rte_rdtsc() + cpe_timeout;
}

static uint8_t handle_qinq_decap4(struct task_qinq_decap4 *task, struct rte_mbuf *mbuf, struct qinq_gre_data *entry)
{
	if (!(task->runtime_flags & (TASK_CTRL_HANDLE_ARP | TASK_FP_HANDLE_ARP))) {
		// We learn CPE MAC addresses on every packet
		struct cpe_key key;
		struct cpe_data data;

		extract_key_data(mbuf, &key, &data, entry, task->cpe_timeout, task->mapping);
		//plogx_err("Adding key ip=%x/gre_id=%x data (svlan|cvlan)=%x|%x, rss=%x, gre_id=%x\n", key.ip, key.gre_id, data.qinq_svlan, data.qinq_cvlan, mbuf->hash.rss, entry->gre_id);
		if (add_cpe_entry(task->cpe_table, &key, &data)) {
			plog_warn("Failed to add ARP entry\n");
			return OUT_DISCARD;
		}
	}
	if (task->runtime_flags & TASK_FP_HANDLE_ARP) {
		// We learn CPE MAC addresses on ARP packets in the fast path
#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
		if (mbuf->packet_type == 0xB) { /* hardware-classified ARP */
			struct cpe_key key;
			struct cpe_data data;

			extract_key_data_arp(mbuf, &key, &data, entry, task->cpe_timeout, task->mapping);
			if (add_cpe_entry(task->cpe_table, &key, &data)) {
				plog_warn("Failed to add ARP entry\n");
				return OUT_DISCARD;
			}
			return OUT_HANDLED;
		} else
#endif
		{
#ifdef USE_QINQ
			struct cpe_pkt *packet = rte_pktmbuf_mtod(mbuf, struct cpe_pkt *);

			if (packet->qinq_hdr.svlan.eth_proto == task->qinq_tag &&
			    packet->qinq_hdr.ether_type == ETYPE_ARP) {
				struct cpe_key key;
				struct cpe_data data;

				extract_key_data_arp(mbuf, &key, &data, entry, task->cpe_timeout, task->mapping);
				if (add_cpe_entry(task->cpe_table, &key, &data)) {
					plog_warn("Failed to add ARP entry\n");
					return OUT_DISCARD;
				}
				return OUT_HANDLED;
			}
#endif
		}
	}

	if (task->runtime_flags & TASK_ROUTING) {
		uint8_t tx_portid;
		tx_portid = gre_encap_route(task->local_ipv4, mbuf, entry->gre_id, task);

		return tx_portid == ROUTE_ERR ? OUT_DISCARD : tx_portid;
	} else {
		gre_encap(task, task->local_ipv4, mbuf, entry->gre_id);
		return 0;
	}
}

static void flow_iter_next(struct flow_iter *iter, struct task_args *targ)
{
	do {
		iter->idx++;
	} while (iter->idx < (int)get_qinq_gre_map(targ)->count &&
		 get_qinq_gre_map(targ)->entries[iter->idx].gre_id % targ->nb_slave_threads != targ->worker_thread_id);
}

static void flow_iter_beg(struct flow_iter *iter, struct task_args *targ)
{
	iter->idx = -1;
	flow_iter_next(iter, targ);
}

static int flow_iter_is_end(struct flow_iter *iter, struct task_args *targ)
{
	return iter->idx == (int)get_qinq_gre_map(targ)->count;
}

static uint16_t flow_iter_get_svlan(struct flow_iter *iter, struct task_args *targ)
{
	return get_qinq_gre_map(targ)->entries[iter->idx].svlan;
}

static uint16_t flow_iter_get_cvlan(struct flow_iter *iter, struct task_args *targ)
{
	return get_qinq_gre_map(targ)->entries[iter->idx].cvlan;
}

static struct task_init task_init_qinq_decapv4_table = {
	.mode = QINQ_DECAP4,
	.mode_str = "qinqdecapv4",
	.early_init = early_init_table,
	.init = init_task_qinq_decap4,
	.handle = handle_qinq_decap4_bulk,
	.flag_features = TASK_FEATURE_ROUTING,
	.flow_iter = {
		.beg = flow_iter_beg,
		.is_end = flow_iter_is_end,
		.next = flow_iter_next,
		.get_svlan = flow_iter_get_svlan,
		.get_cvlan = flow_iter_get_cvlan,
	},
	.size = sizeof(struct task_qinq_decap4)
};

__attribute__((constructor)) static void reg_task_qinq_decap4(void)
{
	reg_task(&task_init_qinq_decapv4_table);
}
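
/* Illustrative configuration sketch (an assumption, not taken from this
 * repository's shipped configs): a PROX core section binding a task to
 * the "qinqdecapv4" mode registered above. The core number, task name,
 * port name and table name below are hypothetical placeholders.
 *
 *   [core 1]
 *   name=dec4
 *   task=0
 *   mode=qinqdecapv4
 *   rx port=cpe0
 *   route table=lpm4
 *   local ipv4=21.22.23.24
 */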