Diffstat (limited to 'utils/infra_setup')
 utils/infra_setup/heat_template/vstf_heat_template/bottleneck_vstf.yaml        | 248
 utils/infra_setup/heat_template/vstf_heat_template/launch_vstf.sh              | 114
 utils/infra_setup/heat_template/vstf_heat_template/scp.sh                      |  93
 utils/infra_setup/heat_template/vstf_heat_template/ssh.sh                      |  28
 utils/infra_setup/heat_template/vstf_heat_template/vstf_HOT_create_instance.sh | 172
 utils/infra_setup/heat_template/vstf_heat_template/vstf_test.sh                |  79
6 files changed, 734 insertions, 0 deletions
diff --git a/utils/infra_setup/heat_template/vstf_heat_template/bottleneck_vstf.yaml b/utils/infra_setup/heat_template/vstf_heat_template/bottleneck_vstf.yaml
new file mode 100644
index 00000000..70bc6b31
--- /dev/null
+++ b/utils/infra_setup/heat_template/vstf_heat_template/bottleneck_vstf.yaml
@@ -0,0 +1,248 @@
+heat_template_version: 2013-05-23
+description: >
+  This template creates a new environment on the OpenStack L release.
+  The deployment creates three virtual machines on the compute node: one manager
+  VM and two agent VMs. Each VM has a NIC on the control-plane network, and the
+  two agent VMs have an additional NIC on the data-plane (testing) network.
+parameters:
+  #nova keypair-list to query available key pairs
+  key_name:
+    type: string
+    description: Name of keypair to assign to servers
+    default: vstf-key
+  #nova image-list to query available images
+  image:
+    type: string
+    description: Name of image to use for servers
+    default: bottlenecks-trusty-server
+  #additional images for the actual vstf deployment
+  image_vstf_manager:
+    type: string
+    description: Name of image to use for servers
+    default: vstf-manager
+  image_vstf_agent:
+    type: string
+    description: Name of image to use for servers
+    default: vstf-agent
+  #nova flavor-list to query available flavors
+  flavor:
+    type: string
+    description: Flavor to use for servers
+    default: m1.large
+  #nova net-list to query available networks
+  public_net:
+    type: string
+    description: >
+      ID or name of public network for which floating IP addresses will be allocated
+    default: net04_ext
+
+  #private control-plane network
+  private_net_name:
+    type: string
+    description: Name of private network to be created
+    default: vstf-private
+  private_net_cidr:
+    type: string
+    description: Private network address (CIDR notation)
+    default: "10.0.11.0/24"
+  private_net_gateway:
+    type: string
+    description: Private network gateway address
+    default: "10.0.11.1"
+  private_net_pool_start:
+    type: string
+    description: Start of private network IP address allocation pool
+    default: "10.0.11.2"
+  private_net_pool_end:
+    type: string
+    description: End of private network IP address allocation pool
+    default: "10.0.11.199"
+
+  #testing data-plane network
+  testing_net_name:
+    type: string
+    description: Name of testing network to be created
+    default: bottlenecks-testing
+  testing_net_cidr:
+    type: string
+    description: Testing network address (CIDR notation)
+    default: "10.0.20.0/24"
+  testing_net_gateway:
+    type: string
+    description: Testing network gateway address
+    default: "10.0.20.1"
+  testing_net_pool_start:
+    type: string
+    description: Start of testing network IP address allocation pool
+    default: "10.0.20.2"
+  testing_net_pool_end:
+    type: string
+    description: End of testing network IP address allocation pool
+    default: "10.0.20.199"
+
+
+resources:
+  #control plane
+  private_net:
+    type: OS::Neutron::Net
+    properties:
+      name: { get_param: private_net_name }
+  private_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network_id: { get_resource: private_net }
+      cidr: { get_param: private_net_cidr }
+      gateway_ip: { get_param: private_net_gateway }
+      allocation_pools:
+        - start: { get_param: private_net_pool_start }
+          end: { get_param: private_net_pool_end }
+
+  #data plane
+  testing_net:
+    type: OS::Neutron::Net
+    properties:
+      name: { get_param: testing_net_name }
+  testing_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network_id: { get_resource: testing_net }
+      cidr: { get_param: testing_net_cidr }
+      gateway_ip: { get_param: testing_net_gateway }
+      allocation_pools:
+        - start: { get_param: testing_net_pool_start }
+          end: { get_param: testing_net_pool_end }
+
+  #router info
+  router:
+    type: OS::Neutron::Router
+    properties:
+      external_gateway_info:
+        network: { get_param: public_net }
+  router_interface:
+    type: OS::Neutron::RouterInterface
+    properties:
+      router_id: { get_resource: router }
+      subnet_id: { get_resource: private_subnet }
+
+  #security group
+  server_security_group:
+    type: OS::Neutron::SecurityGroup
+    properties:
+      description: security group for access to the vstf servers
+      name: vstf-security-group
+      rules: [
+        {remote_ip_prefix: 0.0.0.0/0,
+         protocol: tcp,
+         port_range_min: 1,
+         port_range_max: 65535},
+        {remote_ip_prefix: 0.0.0.0/0,
+         protocol: udp,
+         port_range_min: 1,
+         port_range_max: 65535},
+        {remote_ip_prefix: 0.0.0.0/0,
+         protocol: icmp}]
+
+  #nova server vstf manager definition
+  vstf-manager:
+    type: OS::Nova::Server
+    properties:
+      name: vstf-manager
+      image: { get_param: image_vstf_manager }
+      flavor: { get_param: flavor }
+      key_name: { get_param: key_name }
+      networks:
+        - port: { get_resource: manager_control_port }
+  manager_control_port:
+    type: OS::Neutron::Port
+    properties:
+      network_id: { get_resource: private_net }
+      fixed_ips:
+        - subnet_id: { get_resource: private_subnet }
+      security_groups: [{ get_resource: server_security_group }]
+  manager_control_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: { get_param: public_net }
+      port_id: { get_resource: manager_control_port }
+
+  #nova server vstf target definition
+  vstf-target:
+    type: OS::Nova::Server
+    properties:
+      name: vstf-target
+      image: { get_param: image_vstf_agent }
+      flavor: { get_param: flavor }
+      key_name: { get_param: key_name }
+      networks:
+        - port: { get_resource: target_control_port }
+        - port: { get_resource: target_testing_port }
+  target_control_port:
+    type: OS::Neutron::Port
+    properties:
+      network_id: { get_resource: private_net }
+      fixed_ips:
+        - subnet_id: { get_resource: private_subnet }
+      security_groups: [{ get_resource: server_security_group }]
+  target_testing_port:
+    type: OS::Neutron::Port
+    properties:
+      network_id: { get_resource: testing_net }
+      fixed_ips:
+        - subnet_id: { get_resource: testing_subnet }
+      security_groups: [{ get_resource: server_security_group }]
+  target_control_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: { get_param: public_net }
+      port_id: { get_resource: target_control_port }
+
+  #nova server vstf tester definition
+  vstf-tester:
+    type: OS::Nova::Server
+    properties:
+      name: vstf-tester
+      image: { get_param: image_vstf_agent }
+      flavor: { get_param: flavor }
+      key_name: { get_param: key_name }
+      networks:
+        - port: { get_resource: tester_control_port }
+        - port: { get_resource: tester_testing_port }
+  tester_control_port:
+    type: OS::Neutron::Port
+    properties:
+      network_id: { get_resource: private_net }
+      fixed_ips:
+        - subnet_id: { get_resource: private_subnet }
+      security_groups: [{ get_resource: server_security_group }]
+  tester_testing_port:
+    type: OS::Neutron::Port
+    properties:
+      network_id: { get_resource: testing_net }
+      fixed_ips:
+        - subnet_id: { get_resource: testing_subnet }
+      security_groups: [{ get_resource: server_security_group }]
+  tester_control_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: { get_param: public_net }
+      port_id: { get_resource: tester_control_port }
+
+outputs:
+  manager_control_private_ip:
+    description: IP address of vstf-manager in the private control-plane network
+    value: { get_attr: [ vstf-manager, first_address ] }
+  manager_control_public_ip:
+    description: Floating IP address of vstf-manager in the public network
+    value: { get_attr: [ manager_control_floating_ip, floating_ip_address ] }
+  target_control_private_ip:
+    description: IP address of vstf-target in the private control-plane network
+    value: { get_attr: [ vstf-target, first_address ] }
+  target_control_public_ip:
+    description: Floating IP address of vstf-target in the public network
+    value: { get_attr: [ target_control_floating_ip, floating_ip_address ] }
+  tester_control_private_ip:
+    description: IP address of vstf-tester in the private control-plane network
+    value: { get_attr: [ vstf-tester, first_address ] }
+  tester_control_public_ip:
+    description: Floating IP address of vstf-tester in the public network
+    value: { get_attr: [ tester_control_floating_ip, floating_ip_address ] }
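Note: vstf_HOT_create_instance.sh further down instantiates this template with default parameter values only (heat stack-create vstf -f bottleneck_vstf.yaml). As a rough sketch, not part of this patch, the defaults could be overridden on the command line; the parameter names come from the template above, the values shown are just placeholders:

    heat stack-create vstf -f bottleneck_vstf.yaml \
        -P key_name=vstf-key \
        -P image_vstf_manager=vstf-manager \
        -P image_vstf_agent=vstf-agent \
        -P public_net=net04_ext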
diff --git a/utils/infra_setup/heat_template/vstf_heat_template/launch_vstf.sh b/utils/infra_setup/heat_template/vstf_heat_template/launch_vstf.sh
new file mode 100644
index 00000000..4165e3b3
--- /dev/null
+++ b/utils/infra_setup/heat_template/vstf_heat_template/launch_vstf.sh
@@ -0,0 +1,114 @@
+#!/bin/bash
+
+set -x
+
+STACK_NAME="vstf"
+VM_MANAGER_USER="root"
+VM_MANAGER_PASSWD="root"
+VM_TARGET_USER="root"
+VM_TARGET_PASSWD="root"
+VM_TESTER_USER="root"
+VM_TESTER_PASSWD="root"
+RABBITMQ_PORT="5672"
+
+#load helper functions
+source ./ssh.sh
+source ./scp.sh
+
+function fn_parser_ipaddress(){
+    #parse the heat stack outputs to get the ip addresses
+    manager_control_private_ip=`heat output-show ${STACK_NAME} manager_control_private_ip | sed 's/\"//g'`
+    manager_control_public_ip=`heat output-show ${STACK_NAME} manager_control_public_ip | sed 's/\"//g'`
+    echo "manager_control_private_ip = ${manager_control_private_ip}"
+    ping -c 5 ${manager_control_private_ip}
+    echo "manager_control_public_ip = ${manager_control_public_ip}"
+    ping -c 5 ${manager_control_public_ip}
+    target_control_private_ip=`heat output-show ${STACK_NAME} target_control_private_ip | sed 's/\"//g'`
+    target_control_public_ip=`heat output-show ${STACK_NAME} target_control_public_ip | sed 's/\"//g'`
+    echo "target_control_private_ip = ${target_control_private_ip}"
+    ping -c 5 ${target_control_private_ip}
+    echo "target_control_public_ip = ${target_control_public_ip}"
+    ping -c 5 ${target_control_public_ip}
+    tester_control_private_ip=`heat output-show ${STACK_NAME} tester_control_private_ip | sed 's/\"//g'`
+    tester_control_public_ip=`heat output-show ${STACK_NAME} tester_control_public_ip | sed 's/\"//g'`
+    echo "tester_control_private_ip = ${tester_control_private_ip}"
+    ping -c 5 ${tester_control_private_ip}
+    echo "tester_control_public_ip = ${tester_control_public_ip}"
+    ping -c 5 ${tester_control_public_ip}
+
+    local ipaddr=""
+    for ipaddr in ${manager_control_private_ip} ${manager_control_public_ip} ${target_control_private_ip} \
+                  ${target_control_public_ip} ${tester_control_private_ip} ${tester_control_public_ip}
+    do
+        if [ "${ipaddr}x" == "x" ]
+        then
+            echo "[ERROR]The IP address is null; getting the IPs from the heat output failed"
+            exit 1
+        fi
+    done
+
+    return 0
+}
+
+function fn_generate_amqp(){
+    local node_type=$1
+    if [ "${node_type}" == "manager" ]
+    then
+        return 0
+    elif [ "${node_type}" == "target" -o "${node_type}" == "tester" ]
+    then
+        echo "[rabbit]" > ./vstf-${node_type}.ini
+        echo "user=guest" >> ./vstf-${node_type}.ini
+        echo "passwd=guest" >> ./vstf-${node_type}.ini
+        echo "host=${manager_control_private_ip}" >> ./vstf-${node_type}.ini
+        echo "port=${RABBITMQ_PORT}" >> ./vstf-${node_type}.ini
+        echo "id=\"${node_type}\"" >> ./vstf-${node_type}.ini
+    else
+        echo "[ERROR]node type ${node_type} does not exist"
+        exit 1
+    fi
+    return 0
+}
+
+function fn_provision_agent_file(){
+
+    apt-get -y install expect
+    #manager
+    fn_generate_amqp "manager"
+
+    #target
+    fn_generate_amqp "target"
+    #scp_cmd ${target_control_public_ip} ${VM_TARGET_USER} ${VM_TARGET_PASSWD} "./vstf-target.ini" "/etc/vstf/amqp/amqp.ini" "file"
+
+    #tester
+    fn_generate_amqp "tester"
+    #scp_cmd ${tester_control_public_ip} ${VM_TESTER_USER} ${VM_TESTER_PASSWD} "./vstf-tester.ini" "/etc/vstf/amqp/amqp.ini" "file"
+
+    return 0
+}
+
+function fn_launch_vstf_process(){
+
+    #launch manager
+    local manager_cmd="vstf-manager stop;pkill vstf-manager;rm -rf /opt/vstf/vstf-server.pid;vstf-manager start --monitor ${manager_control_private_ip} --port ${RABBITMQ_PORT}"
+    run_cmd ${manager_control_public_ip} ${VM_MANAGER_USER} ${VM_MANAGER_PASSWD} "${manager_cmd}"
+
+    #launch target agent
+    local target_cmd="vstf-agent stop;pkill vstf-agent;rm -rf /tmp/esp_rpc_client.pid;vstf-agent start --config_file=/etc/vstf/amqp/amqp.ini"
+    run_cmd ${target_control_public_ip} ${VM_TARGET_USER} ${VM_TARGET_PASSWD} "${target_cmd}"
+
+    #launch tester agent (reuses the same agent command as the target)
+    run_cmd ${tester_control_public_ip} ${VM_TESTER_USER} ${VM_TESTER_PASSWD} "${target_cmd}"
+
+    return 0
+}
+
+function main(){
+    fn_parser_ipaddress
+    fn_provision_agent_file
+    #fn_launch_vstf_process
+    return 0
+}
+
+main
+set +x
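Note: for the two agent nodes, fn_generate_amqp above writes a small RabbitMQ config that the commented-out scp_cmd calls would push to /etc/vstf/amqp/amqp.ini on the VMs. For the tester node the generated file would look roughly like this; the host value is a placeholder for the manager's private control-plane IP:

    # expected contents of ./vstf-tester.ini (sketch)
    [rabbit]
    user=guest
    passwd=guest
    host=<manager_control_private_ip>
    port=5672
    id="tester"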
diff --git a/utils/infra_setup/heat_template/vstf_heat_template/scp.sh b/utils/infra_setup/heat_template/vstf_heat_template/scp.sh
new file mode 100644
index 00000000..b3d6a27b
--- /dev/null
+++ b/utils/infra_setup/heat_template/vstf_heat_template/scp.sh
@@ -0,0 +1,93 @@
+function scp_cmd()
+{
+    local ip=$1
+    local usr=$2
+    local passwd=$3
+    srcfile=$4
+    desfile=$5
+    opt=$6
+    case $opt in
+        file)
+            expect -c "
+            spawn scp -r $srcfile $usr@$ip:$desfile
+            set timeout -1
+            expect {
+                \"*no)?\" {
+                    send \"yes\r\"
+                    exp_continue
+                }
+                \"*assword:*\" {
+                    send \"$passwd\r\"
+                    exp_continue
+                }
+            }
+            "
+            ;;
+        dir)
+            expect -c "
+            spawn scp -r $srcfile $usr@$ip:$desfile
+            set timeout -1
+            expect {
+                \"*no)?\" {
+                    send \"yes\r\"
+                    exp_continue
+                }
+                \"*assword:*\" {
+                    send \"$passwd\r\"
+                    exp_continue
+                }
+            }
+            "
+            ;;
+        *)
+            echo "[ERROR]unknown scp option: $opt"
+            ;;
+    esac
+}
+
+function remote_scp_cmd()
+{
+    local ip=$1
+    local usr=$2
+    local passwd=$3
+    srcfile=$4
+    desfile=$5
+    opt=$6
+    case $opt in
+        file)
+            expect -c "
+            spawn scp -r $usr@$ip:$srcfile $desfile
+            set timeout -1
+            expect {
+                \"*no)?\" {
+                    send \"yes\r\"
+                    exp_continue
+                }
+                \"*assword:*\" {
+                    send \"$passwd\r\"
+                    exp_continue
+                }
+            }
+            "
+            ;;
+        dir)
+            expect -c "
+            spawn scp -r $usr@$ip:$srcfile $desfile
+            set timeout -1
+            expect {
+                \"*no)?\" {
+                    send \"yes\r\"
+                    exp_continue
+                }
+                \"*assword:*\" {
+                    send \"$passwd\r\"
+                    exp_continue
+                }
+            }
+            "
+            ;;
+        *)
+            echo "[ERROR]unknown scp option: $opt"
+            ;;
+    esac
+}
diff --git a/utils/infra_setup/heat_template/vstf_heat_template/ssh.sh b/utils/infra_setup/heat_template/vstf_heat_template/ssh.sh
new file mode 100644
index 00000000..35600065
--- /dev/null
+++ b/utils/infra_setup/heat_template/vstf_heat_template/ssh.sh
@@ -0,0 +1,28 @@
+function run_cmd()
+{
+    local ip=$1
+    local usr=$2
+    local passwd=$3
+    local cmd=$4
+    expect -c "
+    spawn ssh $usr@$ip
+    set timeout -1
+    expect {
+        \"*no)?\" {
+            send \"yes\r\"
+            exp_continue
+        }
+        \"*assword:*\" {
+            send \"$passwd\r\"
+            exp_continue
+        }
+        \"*#\" {
+            send \"$cmd\r\"
+            exec sleep 1
+            send \"exit\r\"
+            expect eof
+        }
+    }
+    "
+}
+
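Note: scp.sh and ssh.sh are expect-based wrappers around password authentication, sourced by launch_vstf.sh and vstf_test.sh. A minimal usage sketch (the IP address, credentials and paths below are placeholders; the real calls appear in those scripts):

    source ./ssh.sh
    source ./scp.sh
    # run a command on a remote VM over ssh
    run_cmd "10.0.11.5" "root" "root" "vstf-agent start --config_file=/etc/vstf/amqp/amqp.ini"
    # push a local file to the remote VM
    scp_cmd "10.0.11.5" "root" "root" "./vstf-tester.ini" "/etc/vstf/amqp/amqp.ini" "file"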
diff --git a/utils/infra_setup/heat_template/vstf_heat_template/vstf_HOT_create_instance.sh b/utils/infra_setup/heat_template/vstf_heat_template/vstf_HOT_create_instance.sh
index 11df69b8..f608e120 100755
--- a/utils/infra_setup/heat_template/vstf_heat_template/vstf_HOT_create_instance.sh
+++ b/utils/infra_setup/heat_template/vstf_heat_template/vstf_HOT_create_instance.sh
@@ -26,7 +26,146 @@ bottlenecks_env_prepare()
     source $BOTTLENECKS_REPO_DIR/rubbos/rubbos_scripts/1-1-1/scripts/env_preparation.sh
 }
 
+vstf_cleanup()
+{
+    echo "[INFO]Begin to clean up the vstf heat stack, glance images and keypairs"
+    #heat stack-delete bottlenecks
+    sleep 30
+    if heat stack-list; then
+        for stack in $(heat stack-list | grep -e " vstf " | awk '{print $2}'); do
+            echo "[INFO]clean up stack $stack"
+            heat stack-delete $stack || true
+            sleep 30
+        done
+    fi
+
+    glance image-delete ${MANAGER_IMAGE_NAME};glance image-delete "${AGENT_IMAGE_NAME}"
+    if glance image-list; then
+        for image in $(glance image-list | grep -e "${MANAGER_IMAGE_NAME}" | awk '{print $2}'); do
+            echo "[INFO]clean up image $image"
+            glance image-delete $image || true
+        done
+        for image in $(glance image-list | grep -e "${AGENT_IMAGE_NAME}" | awk '{print $2}'); do
+            echo "[INFO]clean up image $image"
+            glance image-delete $image || true
+        done
+    fi
+
+    if nova keypair-list; then
+        for key in $(nova keypair-list | grep -e $KEY_NAME | awk '{print $2}'); do
+            echo "[INFO]clean up key $key"
+            nova keypair-delete $key || true
+        done
+    fi
+
+    #check that the default flavor m1.large exists
+    if nova flavor-list; then
+        flag=`nova flavor-list | grep "m1.large "`
+        echo "[INFO]the m1.large flavor entry is $flag"
+    fi
+    return 0
+}
+
+vstf_register()
+{
+    echo "[INFO]download vstf images"
+    #download the vstf-manager and vstf-agent images
+    #curl --connect-timeout 10 -o /tmp/vstf-manager.img $MANAGER_IMAGE_URL -v
+    #curl --connect-timeout 10 -o /tmp/vstf-agent.img $AGENT_IMAGE_URL -v
+    curl --connect-timeout 10 -o /tmp/vstf-manager.img $MANAGER_IMAGE_URL -v
+    curl --connect-timeout 10 -o /tmp/vstf-agent.img $AGENT_IMAGE_URL -v
+    #echo "begin to test downloading from vstf directory!!!!!!"
+    #curl --connect-timeout 10 -o /tmp/vstf-test.txt
+    #echo "begin to cat /tmp/vstf-test.txt vstf directory!!!!!!"
+    #cat /tmp/vstf-test.txt
+    #register the images in glance
+    echo "[INFO]register vstf manager and agent images"
+    result=$(glance image-create \
+        --name $MANAGER_IMAGE_NAME \
+        --disk-format qcow2 \
+        --container-format bare \
+        --file /tmp/vstf-manager.img)
+    echo "Manager image register result $result."
+
+    result=$(glance image-create \
+        --name $AGENT_IMAGE_NAME \
+        --disk-format qcow2 \
+        --container-format bare \
+        --file /tmp/vstf-agent.img)
+    echo "Agent image register result $result."
+
+    glance image-list
+
+    rm -rf /tmp/vstf-manager.img;rm -rf /tmp/vstf-agent.img
+}
+
 #vstf logic function here
+vstf_create_heat_template()
+{
+    echo "create vstf instances using the heat template"
+    echo "upload keypair"
+    nova keypair-add --pub_key $KEY_PATH/bottlenecks_key.pub $KEY_NAME
+    nova keypair-list
+    echo "use heat template to create stack"
+    cd ${HOT_PATH}
+    heat stack-create vstf -f ${TEMPLATE_NAME}
+
+}
+
+wait_heat_stack_complete()
+{
+    retry=0
+    while true
+    do
+        status=$(heat stack-list | grep vstf | awk '{print $6}')
+        if [ x$status = x"CREATE_COMPLETE" ]; then
+            echo "vstf stack create complete"
+            heat stack-show vstf
+            nova list | grep vstf-
+            break;
+        elif [ x$status = x"CREATE_FAILED" ]; then
+            echo "vstf stack create failed !!!"
+            heat stack-show vstf
+            exit 1
+        fi
+
+        if [ "$BOTTLENECKS_DEBUG" == "True" ]; then
+            heat stack-show vstf
+            nova list | grep vstf-
+            for i in $(nova list | grep "vstf-" | grep ERROR | awk '{print $2}')
+            do
+                nova show $i
+            done
+        fi
+        sleep 1
+        let retry+=1
+        if [[ $retry -ge $1 ]];then
+            echo "Heat vstf stack create timeout, status $status !!!"
+            exit 1
+        fi
+    done
+}
+
+
+vstf_check_instance_ok()
+{
+    wait_heat_stack_complete 120
+
+    return 0
+}
+
+vstf_launch()
+{
+    cd ${HOT_PATH}
+    bash -x ./launch_vstf.sh
+
+}
+
+vstf_test()
+{
+    cd ${HOT_PATH}
+    bash -x ./vstf_test.sh
+}
 
 main()
 {
@@ -35,9 +174,42 @@ main()
     BOTTLENECKS_REPO=https://gerrit.opnfv.org/gerrit/bottlenecks
     BOTTLENECKS_REPO_DIR=/tmp/opnfvrepo_vstf/bottlenecks
     #vstf parameter here
+    MANAGER_IMAGE_URL=http://artifacts.opnfv.org/bottlenecks/vstf/vstf-manager.img
+    AGENT_IMAGE_URL=http://artifacts.opnfv.org/bottlenecks/vstf/vstf-agent.img
+    MANAGER_IMAGE_URL=http://artifacts.opnfv.org/bottlenecks/rubbos/bottlenecks-trusty-server.img
+    AGENT_IMAGE_URL=http://artifacts.opnfv.org/bottlenecks/rubbos/bottlenecks-trusty-server.img
+    MANAGER_IMAGE_NAME="vstf-manager"
+    AGENT_IMAGE_NAME="vstf-agent"
+    KEY_PATH=$BOTTLENECKS_REPO_DIR/utils/infra_setup/bottlenecks_key
+    HOT_PATH=$BOTTLENECKS_REPO_DIR/utils/infra_setup/heat_template/vstf_heat_template
+    KEY_NAME=vstf-key
+    #use the default openstack flavor m1.large
+    FLAVOR_NAME="m1.large"
+    TEMPLATE_NAME=bottleneck_vstf.yaml
+    PUBLIC_NET_NAME=net04_ext
+
     #load adminrc
     bottlenecks_env_prepare
 
+    #vstf function here
+    vstf_cleanup
+    vstf_register
+    vstf_create_heat_template
+    vstf_check_instance_ok
+    heat stack-list
+    nova list
+    sleep 100
+    vstf_launch
+    #sleep 30
+    vstf_test
+    #sleep 10
+    #echo "[INFO]bottleneck vstf testsuite done, results in the directory ${HOT_PATH}/result"
+    echo "[INFO]Begin to clean up the vstf heat stack and images"
+    vstf_cleanup
+    sleep 30
+    heat stack-list
+    nova list
+
 }
 
 main
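Note: wait_heat_stack_complete polls heat stack-list about once per second, so the 120 passed in by vstf_check_instance_ok gives roughly a two-minute timeout. As a sketch, one polling step reduces to the following (column 6 of the matching stack-list row is the stack status):

    # one polling iteration, as inside wait_heat_stack_complete above (sketch)
    status=$(heat stack-list | grep vstf | awk '{print $6}')
    [ x$status = x"CREATE_COMPLETE" ] && echo "vstf stack is ready"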
diff --git a/utils/infra_setup/heat_template/vstf_heat_template/vstf_test.sh b/utils/infra_setup/heat_template/vstf_heat_template/vstf_test.sh
new file mode 100644
index 00000000..5f5aa36f
--- /dev/null
+++ b/utils/infra_setup/heat_template/vstf_heat_template/vstf_test.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+set -x
+
+VM_MANAGER_USER="root"
+VM_MANAGER_PASSWD="root"
+STACK_NAME="vstf"
+#load helper functions
+source ./ssh.sh
+source ./scp.sh
+
+function fn_parser_ipaddress(){
+    #parse the heat stack outputs to get the ip addresses
+    manager_control_private_ip=`heat output-show ${STACK_NAME} manager_control_private_ip | sed 's/\"//g'`
+    manager_control_public_ip=`heat output-show ${STACK_NAME} manager_control_public_ip | sed 's/\"//g'`
+
+    local ipaddr=""
+    for ipaddr in ${manager_control_private_ip} ${manager_control_public_ip}
+    do
+        if [ "${ipaddr}x" == "x" ]
+        then
+            echo "[ERROR]The IP address is null; getting the IPs from the heat output failed"
+            exit 1
+        fi
+    done
+
+    return 0
+}
+
+function fn_vstf_test_config(){
+    #get the testing (data-plane) ip addresses
+    tester_testing_ip=`nova list | grep "vstf-tester" | grep "bottlenecks-testing" | awk -F'bottlenecks-testing=' '{print $2}' | awk '{print $1}'`
+    target_testing_ip=`nova list | grep "vstf-target" | grep "bottlenecks-testing" | awk -F'bottlenecks-testing=' '{print $2}' | awk '{print $1}'`
+    echo "tester_testing_ip = ${tester_testing_ip}"
+    echo "target_testing_ip = ${target_testing_ip}"
+    #set the testing ip addresses (double quotes so that the variables expand)
+    local cmd="vstfadm settings ${tester_testing_ip} ${target_testing_ip}"
+    echo "$cmd"
+    #run_cmd ${manager_control_public_ip} ${VM_MANAGER_USER} ${VM_MANAGER_PASSWD} "${cmd}"
+
+    return 0
+}
+
+function fn_testing_scenario(){
+    local head_cmd="vstfadm perf-test "
+    local test_length_list="64"
+    local test_scenario_list="Tu-1 Tu-3"
+    local test_tool="netperf"
+    local protocol="udp"
+    local test_type="frameloss"
+    for scene in ${test_scenario_list}
+    do
+        local cmd="${head_cmd} ${scene} ${test_tool} ${protocol} ${test_type} \"${test_length_list}\" > /root/${scene}-result.txt"
+        echo ${cmd}
+        run_cmd ${manager_control_public_ip} ${VM_MANAGER_USER} ${VM_MANAGER_PASSWD} "${cmd}"
+    done
+    return 0
+}
+
+function fn_result(){
+    local test_scenario_list="Tu-1 Tu-3"
+    mkdir -p ./result
+    rm -rf ./result/*
+    for scene in ${test_scenario_list}
+    do
+        remote_scp_cmd ${manager_control_public_ip} ${VM_MANAGER_USER} ${VM_MANAGER_PASSWD} "/root/${scene}-result.txt" "./result/${scene}" "file"
+    done
+    return 0
+}
+
+function main(){
+    fn_parser_ipaddress
+    fn_vstf_test_config
+    #fn_testing_scenario
+    #fn_result
+    return 0
+}
+
+main
+set +x
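Note: with the values hard-coded above, the remote commands that fn_vstf_test_config and fn_testing_scenario hand to run_cmd on the manager VM reduce to roughly the following; the two testing IPs are placeholders read from the bottlenecks-testing network via nova list:

    vstfadm settings <tester_testing_ip> <target_testing_ip>
    vstfadm perf-test Tu-1 netperf udp frameloss "64" > /root/Tu-1-result.txt
    vstfadm perf-test Tu-3 netperf udp frameloss "64" > /root/Tu-3-result.txt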