path: root/VNFs/DPPD-PROX/prox_ipv6.c
blob: 90538230c9c52d426debca79be43fcfe3f087885
/*
// Copyright (c) 2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#include "task_base.h"
#include "handle_master.h"
#include "prox_cfg.h"
#include "prox_ipv6.h"

struct ipv6_addr null_addr = {{0}};
char ip6_str[40]; // 8 groups of 4 hex chars + 7 ":" separators + terminating NUL

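// Map an IPv6 multicast address to its Ethernet multicast MAC (RFC 2464):
// the MAC is 33:33 followed by the last 32 bits of the IPv6 address.
// For example, ff02::1 maps to 33:33:00:00:00:01.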
void set_mcast_mac_from_ipv6(prox_rte_ether_addr *mac, struct ipv6_addr *ipv6_addr)
{
	mac->addr_bytes[0] = 0x33;
	mac->addr_bytes[1] = 0x33;
	memcpy(((uint32_t *)&mac->addr_bytes[2]), (uint32_t *)(&ipv6_addr->bytes[12]), sizeof(uint32_t));
}

// Note that this function is not thread-safe and returns garbage if called simultaneously from multiple threads.
// It is however only used for debugging and error printing.
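// Canonical form per RFC 5952: lowercase hex, leading zeros suppressed, and the
// longest run of zero fields (the first such run on a tie) compressed to "::",
// e.g. 2001:0db8:0000:0000:0000:0000:0002:0001 is rendered as "2001:db8::2:1".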
char *IP6_Canonical(struct ipv6_addr *addr)
{
	uint8_t *a = (uint8_t *)addr;
	char *ptr = ip6_str;
	int field = -1, len = 0, stored_field = 0, stored_len = 0;

	// Find longest run of consecutive 16-bit 0 fields
	for (int i = 0; i < 8; i++) {
		if (((int)a[i * 2] == 0) && ((int)a[i * 2 + 1] == 0)) {
			len++;
			if (field == -1)
				field = i;	// Store where the first 0 field started
		} else {
			if (len > stored_len) {
				// the longest run of consecutive 16-bit 0 fields MUST be shortened
				stored_len = len;
				stored_field = field;
			}
			len = 0;
			field = -1;
		}
	}
	if (len > stored_len) {
		// the longest run of consecutive 16-bit 0 fields MUST be shortened
		stored_len = len;
		stored_field = field;
	}
	if (stored_len <= 1) {
		// The symbol "::" MUST NOT be used to shorten just one 16-bit 0 field.
		stored_len = 0;
		stored_field = -1;
	}
	for (int i = 0; i < 8; i++) {
		if (i == stored_field) {
			sprintf(ptr, ":");
			ptr++;
			if (i == 0) {
				sprintf(ptr, ":");
				ptr++;
			}
			i += stored_len - 1;	// ++ done in for loop
			continue;
		}
		if ((int)a[i * 2] & 0xF0) {
			sprintf(ptr, "%02x%02x", (int)a[i * 2], (int)a[i * 2 + 1]);
			ptr+=4;
		} else if ((int)a[i * 2] & 0x0F) {
			sprintf(ptr, "%x%02x", (int)a[i * 2] & 0x0F, (int)a[i * 2 + 1]);	// 3 digits: low nibble of high byte, then low byte
			ptr+=3;
		} else if ((int)a[i * 2 + 1] & 0xF0) {
			sprintf(ptr, "%02x", (int)a[i * 2 + 1]);
			ptr+=2;
		} else {
			sprintf(ptr, "%x", ((int)a[i * 2 + 1]) & 0xF);
			ptr++;
		}
		if (i != 7) {
			sprintf(ptr, ":");
			ptr++;
		}
	}
	return ip6_str;
}

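// Write the fe80 link-local prefix into the first two bytes of the address.
// The caller is expected to zero the remaining prefix bytes and to fill in the
// interface identifier in the lower 64 bits, e.g. via set_EUI().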
void set_link_local(struct ipv6_addr *ipv6_addr)
{
	ipv6_addr->bytes[0] = 0xfe;
	ipv6_addr->bytes[1] = 0x80;
}

// Create a modified Extended Unique Identifier (EUI-64, RFC 2373)
// and store it in the lower 64 bits of the IPv6 address
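// Example: MAC 00:1b:21:12:34:56 yields interface identifier 02:1b:21:ff:fe:12:34:56,
// i.e. an address ending in ...:021b:21ff:fe12:3456.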
void set_EUI(struct ipv6_addr *ipv6_addr, prox_rte_ether_addr *mac)
{
	memcpy(&ipv6_addr->bytes[8], mac, 3);						// Copy first 3 bytes of MAC
	ipv6_addr->bytes[8] = ipv6_addr->bytes[8] ^ 0x02; 				// Invert Universal/local bit
	ipv6_addr->bytes[11] = 0xff;							// Next 2 bytes are 0xfffe
	ipv6_addr->bytes[12] = 0xfe;
	memcpy(&ipv6_addr->bytes[13], &mac->addr_bytes[3], 3);				// Copy last 3 bytes
	// plog_info("mac = "MAC_BYTES_FMT", eui = "IPv6_BYTES_FMT"\n", MAC_BYTES(mac->addr_bytes), IPv6_BYTES(ipv6_addr->bytes));
}

void create_mac_from_EUI(struct ipv6_addr *ipv6_addr, prox_rte_ether_addr *mac)
{
	memcpy(mac, &ipv6_addr->bytes[8], 3);
	mac->addr_bytes[0] = mac->addr_bytes[0] ^ 0x02;
	memcpy(&mac->addr_bytes[3], &ipv6_addr->bytes[13], 3);
}

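// Set the EtherType (and an optional 802.1Q tag) and return a pointer to where
// the IPv6 header starts: Ether|IPv6 without a VLAN, Ether|VLAN|IPv6 with one.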
static inline prox_rte_ipv6_hdr *prox_set_vlan_ipv6(prox_rte_ether_hdr *peth, uint16_t vlan)
{
	prox_rte_ipv6_hdr *ipv6_hdr;

	if (vlan) {
		prox_rte_vlan_hdr *vlan_hdr = (prox_rte_vlan_hdr *)(peth + 1);
		ipv6_hdr = (prox_rte_ipv6_hdr *)(vlan_hdr + 1);
		peth->ether_type = ETYPE_VLAN;
		vlan_hdr->eth_proto = ETYPE_IPv6;
		vlan_hdr->vlan_tci = rte_cpu_to_be_16(vlan);
	} else {
		ipv6_hdr = (prox_rte_ipv6_hdr *)(peth + 1);
		peth->ether_type = ETYPE_IPv6;
	}
	return ipv6_hdr;
}

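// Build an ICMPv6 Router Advertisement (RFC 4861) addressed to the all-nodes
// multicast group. The resulting packet layout is:
//	Ether | [VLAN] | IPv6 | RA | source link-layer address option | prefix information option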
void build_router_advertisement(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *ipv6_s_addr, struct ipv6_addr *router_prefix, uint16_t vlan)
{
	prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
	init_mbuf_seg(mbuf);
	mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM);  // Software calculates the checksum

	memcpy(peth->d_addr.addr_bytes, &prox_cfg.all_nodes_mac_addr, sizeof(prox_rte_ether_addr));
	memcpy(peth->s_addr.addr_bytes, s_addr, sizeof(prox_rte_ether_addr));

	prox_rte_ipv6_hdr *ipv6_hdr = prox_set_vlan_ipv6(peth, vlan);
	ipv6_hdr->vtc_flow = 0x00000060;	// version 6, traffic class 0, flow label 0 (network byte order)
	ipv6_hdr->payload_len = rte_cpu_to_be_16(sizeof(struct icmpv6_RA) + sizeof(struct icmpv6_prefix_option));
	ipv6_hdr->proto = ICMPv6;
	ipv6_hdr->hop_limits = 255;
	memcpy(ipv6_hdr->src_addr, ipv6_s_addr, sizeof(struct ipv6_addr));	// 0 = "Unspecified address" if unknown
	memcpy(ipv6_hdr->dst_addr, &prox_cfg.all_nodes_ipv6_mcast_addr, sizeof(struct ipv6_addr));

	struct icmpv6_RA *router_advertisement = (struct icmpv6_RA *)(ipv6_hdr + 1);
	router_advertisement->type = ICMPv6_RA;
	router_advertisement->code = 0;
	router_advertisement->hop_limit = 255;
	router_advertisement->bits = 0;	// M and O bits cleared => no DHCPv6
	router_advertisement->router_lifespan = rte_cpu_to_be_16(9000);		// 9000 sec
	router_advertisement->reachable_timeout = rte_cpu_to_be_32(30000);	// 30000 ms = 30 sec
	router_advertisement->retrans_timeout = rte_cpu_to_be_32(1000);       // 1000 ms = 1 sec

	struct icmpv6_option *option = &router_advertisement->options;
	option->type = ICMPv6_source_link_layer_address;
	option->length = 1;	// 8 bytes
	memcpy(&option->data, s_addr, sizeof(prox_rte_ether_addr));

	struct icmpv6_prefix_option *prefix_option = (struct icmpv6_prefix_option *)(option + 1);
	prefix_option->type = ICMPv6_prefix_information;
	prefix_option->length = 4;		// 32 bytes
	prefix_option->prefix_length = 64;	// 64 bits in prefix
	prefix_option->flag = 0xc0;		// on-link flag & autonomous address-configuration flag are set
	prefix_option->valid_lifetime = rte_cpu_to_be_32(86400);	// 1 day
	prefix_option->preferred_lifetime = rte_cpu_to_be_32(43200);	// 12 hours
	prefix_option->reserved = 0;
	memcpy(&prefix_option->prefix, router_prefix, sizeof(struct ipv6_addr));
	// An MTU option could be added here
	router_advertisement->checksum = 0;
	router_advertisement->checksum = rte_ipv6_udptcp_cksum(ipv6_hdr, router_advertisement);

	uint16_t pktlen = rte_be_to_cpu_16(ipv6_hdr->payload_len) + sizeof(prox_rte_ipv6_hdr) + sizeof(prox_rte_ether_hdr);
	rte_pktmbuf_pkt_len(mbuf) = pktlen + (vlan ? 4 : 0);
	rte_pktmbuf_data_len(mbuf) = pktlen + (vlan ? 4 : 0);
}

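// Build an ICMPv6 Router Solicitation (RFC 4861) addressed to the all-routers
// multicast group, carrying a source link-layer address option.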
void build_router_sollicitation(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *ipv6_s_addr, uint16_t vlan)
{
	prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);

	init_mbuf_seg(mbuf);
	mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM);  // Software calculates the checksum

	memcpy(peth->d_addr.addr_bytes, &prox_cfg.all_routers_mac_addr, sizeof(prox_rte_ether_addr));
	memcpy(peth->s_addr.addr_bytes, s_addr, sizeof(prox_rte_ether_addr));

	prox_rte_ipv6_hdr *ipv6_hdr = prox_set_vlan_ipv6(peth, vlan);
	ipv6_hdr->vtc_flow = 0x00000060;
	ipv6_hdr->payload_len = rte_cpu_to_be_16(sizeof(struct icmpv6_RS));
	ipv6_hdr->proto = ICMPv6;
	ipv6_hdr->hop_limits = 255;
	memcpy(ipv6_hdr->src_addr, ipv6_s_addr, sizeof(struct ipv6_addr));	// 0 = "Unspecified address" if unknown
	memcpy(ipv6_hdr->dst_addr, &prox_cfg.all_routers_ipv6_mcast_addr, sizeof(struct ipv6_addr));

	struct icmpv6_RS *router_sollicitation = (struct icmpv6_RS *)(ipv6_hdr + 1);
	router_sollicitation->type = ICMPv6_RS;
	router_sollicitation->code = 0;
	router_sollicitation->options.type = ICMPv6_source_link_layer_address;
	router_sollicitation->options.length = 1;	// 8 bytes
	memcpy(&router_sollicitation->options.data, s_addr, sizeof(prox_rte_ether_addr));

	router_sollicitation->checksum = 0;
	router_sollicitation->checksum = rte_ipv6_udptcp_cksum(ipv6_hdr, router_sollicitation);
	uint16_t pktlen = rte_be_to_cpu_16(ipv6_hdr->payload_len) + sizeof(prox_rte_ipv6_hdr) + sizeof(prox_rte_ether_hdr);
	rte_pktmbuf_pkt_len(mbuf) = pktlen + (vlan ? 4 : 0);
	rte_pktmbuf_data_len(mbuf) = pktlen + (vlan ? 4 : 0);
}

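// Build an ICMPv6 Neighbour Solicitation (RFC 4861) for the target address in
// "dst"; the destination MAC is the multicast MAC derived from "dst"
// (33:33 + its last four bytes), as computed by set_mcast_mac_from_ipv6().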
void build_neighbour_sollicitation(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *dst, struct ipv6_addr *src, uint16_t vlan)
{
	prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
	prox_rte_ether_addr mac_dst;
	set_mcast_mac_from_ipv6(&mac_dst, dst);

	init_mbuf_seg(mbuf);
	mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM);  // Software calculates the checksum

	memcpy(peth->d_addr.addr_bytes, &mac_dst, sizeof(prox_rte_ether_addr));
	memcpy(peth->s_addr.addr_bytes, s_addr, sizeof(prox_rte_ether_addr));

	prox_rte_ipv6_hdr *ipv6_hdr = prox_set_vlan_ipv6(peth, vlan);

	ipv6_hdr->vtc_flow = 0x00000060;
	ipv6_hdr->payload_len = rte_cpu_to_be_16(sizeof(struct icmpv6_NS));
	ipv6_hdr->proto = ICMPv6;
	ipv6_hdr->hop_limits = 255;
	memcpy(ipv6_hdr->src_addr, src, 16);
	memcpy(ipv6_hdr->dst_addr, dst, 16);

	struct icmpv6_NS *neighbour_sollicitation = (struct icmpv6_NS *)(ipv6_hdr + 1);
	neighbour_sollicitation->type = ICMPv6_NS;
	neighbour_sollicitation->code = 0;
	neighbour_sollicitation->reserved = 0;
	memcpy(&neighbour_sollicitation->target_address, dst, sizeof(struct ipv6_addr));
	neighbour_sollicitation->options.type = ICMPv6_source_link_layer_address;
	neighbour_sollicitation->options.length = 1;	// 8 bytes
	memcpy(&neighbour_sollicitation->options.data, s_addr, sizeof(prox_rte_ether_addr));
	neighbour_sollicitation->checksum = 0;
	neighbour_sollicitation->checksum = rte_ipv6_udptcp_cksum(ipv6_hdr, neighbour_sollicitation);

	uint16_t pktlen = rte_be_to_cpu_16(ipv6_hdr->payload_len) + sizeof(prox_rte_ipv6_hdr) + sizeof(prox_rte_ether_hdr);
	rte_pktmbuf_pkt_len(mbuf) = pktlen + (vlan ? 4 : 0);
	rte_pktmbuf_data_len(mbuf) = pktlen + (vlan ? 4 : 0);
}

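// Build an ICMPv6 Neighbour Advertisement (RFC 4861): either the answer to a
// solicitation (sent back to the soliciting MAC/IPv6 source) or, when
// "sollicited" is 0, an unsolicited advertisement to the all-nodes group.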
void build_neighbour_advertisement(struct task_base *tbase, struct rte_mbuf *mbuf, prox_rte_ether_addr *target, struct ipv6_addr *src_ipv6_addr, int sollicited, uint16_t vlan)
{
	struct task_master *task = (struct task_master *)tbase;
	prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);

	uint8_t port_id = get_port(mbuf);

	init_mbuf_seg(mbuf);
	mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM);  // Software calculates the checksum

	prox_rte_ipv6_hdr *ipv6_hdr = prox_set_vlan_ipv6(peth, vlan);

	// For an unsolicited NA, or when the soliciting source MAC is null, address the all-nodes group.
	if ((!sollicited) || (memcmp(peth->s_addr.addr_bytes, &null_addr, sizeof(prox_rte_ether_addr)) == 0)) {
		memcpy(peth->d_addr.addr_bytes, &prox_cfg.all_nodes_mac_addr, sizeof(prox_rte_ether_addr));
		memcpy(ipv6_hdr->dst_addr, &prox_cfg.all_nodes_ipv6_mcast_addr, sizeof(struct ipv6_addr));
	} else {
		memcpy(peth->d_addr.addr_bytes, peth->s_addr.addr_bytes, sizeof(prox_rte_ether_addr));
		memcpy(ipv6_hdr->dst_addr, ipv6_hdr->src_addr, sizeof(struct ipv6_addr));
	}

	memcpy(peth->s_addr.addr_bytes, &task->internal_port_table[port_id].mac, sizeof(prox_rte_ether_addr));

	ipv6_hdr->vtc_flow = 0x00000060;
	ipv6_hdr->payload_len = rte_cpu_to_be_16(sizeof(struct icmpv6_NA));
	ipv6_hdr->proto = ICMPv6;
	ipv6_hdr->hop_limits = 255;
	memcpy(ipv6_hdr->src_addr, src_ipv6_addr, sizeof(struct ipv6_addr));

	struct icmpv6_NA *neighbour_advertisement = (struct icmpv6_NA *)(ipv6_hdr + 1);
	neighbour_advertisement->type = ICMPv6_NA;
	neighbour_advertisement->code = 0;
	neighbour_advertisement->reserved = 0;
	if (task->internal_port_table[port_id].flags & IPV6_ROUTER)
		neighbour_advertisement->bits = 0xC0; // R+S bit set
	else
		neighbour_advertisement->bits = 0x40; // S bit set
	if (!sollicited) {
		memcpy(&neighbour_advertisement->destination_address, src_ipv6_addr, sizeof(struct ipv6_addr));
		neighbour_advertisement->bits &= 0xBF; // Clear S (solicited) bit
		neighbour_advertisement->bits |= 0x20; // Set O (override) bit
	}
	// else neighbour_advertisement->destination_address is already set to neighbour_sollicitation->target_address

	struct icmpv6_option *option = &neighbour_advertisement->options;
	// The source link-layer address option is not required here, so it is left out:
	// option->type = ICMPv6_source_link_layer_address;
	// option->length = 1;	// 8 bytes
	// memcpy(&option->data, &task->internal_port_table[port_id].mac, sizeof(prox_rte_ether_addr));

	// option = option + 1;
	option->type = ICMPv6_target_link_layer_address;
	option->length = 1;	// 8 bytes
	memcpy(&option->data, target, sizeof(prox_rte_ether_addr));

	neighbour_advertisement->checksum = 0;
	neighbour_advertisement->checksum = rte_ipv6_udptcp_cksum(ipv6_hdr, neighbour_advertisement);
	uint16_t pktlen = rte_be_to_cpu_16(ipv6_hdr->payload_len) + sizeof(prox_rte_ipv6_hdr) + sizeof(prox_rte_ether_hdr);
	rte_pktmbuf_pkt_len(mbuf) = pktlen + (vlan ? 4 : 0);
	rte_pktmbuf_data_len(mbuf) = pktlen + (vlan ? 4 : 0);
}

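// Walk past any stacked VLAN tags (802.1Q/802.1ad) and return a pointer to the
// IPv6 header, or NULL if the frame does not carry IPv6.
// A usage sketch (hypothetical caller):
//	uint16_t vlan = 0;
//	prox_rte_ipv6_hdr *ip6 = prox_get_ipv6_hdr(peth, rte_pktmbuf_data_len(mbuf), &vlan);
//	if (ip6 != NULL) { /* vlan now holds the innermost VLAN ID, if any */ }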
prox_rte_ipv6_hdr *prox_get_ipv6_hdr(prox_rte_ether_hdr *hdr, uint16_t len, uint16_t *vlan)
{
	prox_rte_vlan_hdr *vlan_hdr;
	prox_rte_ipv6_hdr *ipv6_hdr;
	uint16_t ether_type = hdr->ether_type;
	uint16_t l2_len = sizeof(prox_rte_ether_hdr);
	ipv6_hdr = (prox_rte_ipv6_hdr *)(hdr + 1);

	while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + sizeof(prox_rte_vlan_hdr) < len)) {
		vlan_hdr = (prox_rte_vlan_hdr *)((uint8_t *)hdr + l2_len);
		l2_len += sizeof(prox_rte_vlan_hdr);
		ether_type = vlan_hdr->eth_proto;
		*vlan = rte_be_to_cpu_16(vlan_hdr->vlan_tci & 0xFF0F);	// mask out PCP/DEI so only the 12-bit VLAN ID remains
		ipv6_hdr = (prox_rte_ipv6_hdr *)(vlan_hdr + 1);
	}
	if (ether_type == ETYPE_IPv6)
		return ipv6_hdr;
	else
		return NULL;
}