summaryrefslogtreecommitdiffstats
path: root/core/vnf_controller.py
blob: 937cd5ccbd2fb03b4ecaaa64a7c95f05f3774001 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
# Copyright 2015-2016 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" VNF Controller interface
"""

import logging
import pexpect
from conf import settings
from vnfs.vnf.vnf import IVnf

class VnfController(object):
    """VNF controller class

    Used to set-up and control VNFs for specified scenario

    Attributes:
        _vnf_class: A class object representing the VNF to be used.
        _deployment: A string describing the scenario to set-up in the
            constructor.
        _vnfs: A list of vnfs controlled by the controller.
    """

    def __init__(self, deployment, vnf_class, extra_vnfs):
        """Sets up the VNF infrastructure based on deployment scenario

        :param deployment: A string with the deployment scenario name,
            e.g. 'pvp', 'pvvp', 'pvvp3'; matched case-insensitively.
        :param vnf_class: The VNF class to be used.
        :param extra_vnfs: The number of VNFs not involved in given
            deployment scenario. It will be used to correctly expand
            configuration values and initialize shared dirs. This parameter
            is used in case, that additional VNFs are executed by TestSteps.
        """
        # reset VNF ID counter for each testcase
        IVnf.reset_vnf_counter()

        # setup controller with requested number of VNFs
        self._logger = logging.getLogger(__name__)
        self._vnf_class = vnf_class
        self._deployment = deployment.lower()
        self._vnfs = []
        vm_number = self._get_vm_number()

        if vm_number + extra_vnfs > 0:
            self._logger.debug('Check configuration for %s guests.', vm_number + extra_vnfs)
            settings.check_vm_settings(vm_number + extra_vnfs)
            # enforce that GUEST_NICS_NR is 1 or an even number of NICs;
            # odd counts above 1 are rounded down to the nearest even number
            updated = False
            nics_nr = settings.getValue('GUEST_NICS_NR')
            for index, nics in enumerate(nics_nr):
                if nics > 1 and nics % 2:
                    updated = True
                    nics_nr[index] = nics // 2 * 2
            if updated:
                settings.setValue('GUEST_NICS_NR', nics_nr)
                self._logger.warning('Odd number of NICs was detected. Configuration '
                                     'was updated to GUEST_NICS_NR = %s',
                                     settings.getValue('GUEST_NICS_NR'))

        if vm_number:
            self._vnfs = [vnf_class() for _ in range(vm_number)]

            self._logger.debug('__init__ %d VNF[s] with %s',
                               len(self._vnfs), ' '.join(map(str, self._vnfs)))

    def _get_vm_number(self):
        """Returns the number of VNFs implied by the deployment scenario.

        'pvp' means one VNF; 'pvvp'/'pvpv' mean two unless an explicit
        count is appended (e.g. 'pvvp3'); anything else means zero.
        """
        if self._deployment == 'pvp':
            return 1
        if (self._deployment.startswith('pvvp') or
                self._deployment.startswith('pvpv')):
            # an explicit VNF count may follow the 4-char prefix
            if len(self._deployment) > 4:
                return int(self._deployment[4:])
            return 2
        # VnfController is created for all deployments, including deployments
        # without VNFs like p2p
        return 0

    def get_vnfs(self):
        """Returns a list of vnfs controlled by this controller.
        """
        self._logger.debug('get_vnfs %d VNF[s] with %s',
                           len(self._vnfs), ' '.join(map(str, self._vnfs)))
        return self._vnfs

    def get_vnfs_number(self):
        """Returns a number of vnfs controlled by this controller.
        """
        self._logger.debug('get_vnfs_number %d VNF[s]', len(self._vnfs))
        return len(self._vnfs)

    def start(self):
        """Boots all VNFs set-up by __init__.

        This is a blocking function.

        :raises pexpect.TIMEOUT: If a VNF fails to boot in time; already
            started VNFs are stopped before the exception is re-raised.
        """
        self._logger.debug('start %d VNF[s] with %s',
                           len(self._vnfs), ' '.join(map(str, self._vnfs)))
        try:
            for vnf in self._vnfs:
                vnf.start()
        except pexpect.TIMEOUT:
            # roll back any VNFs that did start before propagating the error
            self.stop()
            raise

    def stop(self):
        """Stops all VNFs set-up by __init__.

        This is a blocking function.
        """
        self._logger.debug('stop %d VNF[s] with %s',
                           len(self._vnfs), ' '.join(map(str, self._vnfs)))
        for vnf in self._vnfs:
            vnf.stop()

    def __enter__(self):
        # return self so "with VnfController(...) as ctrl:" binds the
        # controller instead of None
        self.start()
        return self

    def __exit__(self, type_, value, traceback):
        self.stop()
after fence_signal. To fight this, it is recommended * that before enable_signaling returns true an extra reference is * taken on the fence, to be released when the fence is signaled. * This will mean fence_signal will still be called twice, but * the second time will be a noop since it was already signaled. * * Notes on signaled: * May set fence->status if returning true. * * Notes on wait: * Must not be NULL, set to fence_default_wait for default implementation. * the fence_default_wait implementation should work for any fence, as long * as enable_signaling works correctly. * * Must return -ERESTARTSYS if the wait is intr = true and the wait was * interrupted, and remaining jiffies if fence has signaled, or 0 if wait * timed out. Can also return other error values on custom implementations, * which should be treated as if the fence is signaled. For example a hardware * lockup could be reported like that. * * Notes on release: * Can be NULL, this function allows additional commands to run on * destruction of the fence. Can be called from irq context. * If pointer is set to NULL, kfree will get called instead. 
*/ struct fence_ops { const char * (*get_driver_name)(struct fence *fence); const char * (*get_timeline_name)(struct fence *fence); bool (*enable_signaling)(struct fence *fence); bool (*signaled)(struct fence *fence); signed long (*wait)(struct fence *fence, bool intr, signed long timeout); void (*release)(struct fence *fence); int (*fill_driver_data)(struct fence *fence, void *data, int size); void (*fence_value_str)(struct fence *fence, char *str, int size); void (*timeline_value_str)(struct fence *fence, char *str, int size); }; void fence_init(struct fence *fence, const struct fence_ops *ops, spinlock_t *lock, unsigned context, unsigned seqno); void fence_release(struct kref *kref); void fence_free(struct fence *fence); /** * fence_get - increases refcount of the fence * @fence: [in] fence to increase refcount of * * Returns the same fence, with refcount increased by 1. */ static inline struct fence *fence_get(struct fence *fence) { if (fence) kref_get(&fence->refcount); return fence; } /** * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock * @fence: [in] fence to increase refcount of * * Function returns NULL if no refcount could be obtained, or the fence. 
*/ static inline struct fence *fence_get_rcu(struct fence *fence) { if (kref_get_unless_zero(&fence->refcount)) return fence; else return NULL; } /** * fence_put - decreases refcount of the fence * @fence: [in] fence to reduce refcount of */ static inline void fence_put(struct fence *fence) { if (fence) kref_put(&fence->refcount, fence_release); } int fence_signal(struct fence *fence); int fence_signal_locked(struct fence *fence); signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout); int fence_add_callback(struct fence *fence, struct fence_cb *cb, fence_func_t func); bool fence_remove_callback(struct fence *fence, struct fence_cb *cb); void fence_enable_sw_signaling(struct fence *fence); /** * fence_is_signaled_locked - Return an indication if the fence is signaled yet. * @fence: [in] the fence to check * * Returns true if the fence was already signaled, false if not. Since this * function doesn't enable signaling, it is not guaranteed to ever return * true if fence_add_callback, fence_wait or fence_enable_sw_signaling * haven't been called before. * * This function requires fence->lock to be held. */ static inline bool fence_is_signaled_locked(struct fence *fence) { if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return true; if (fence->ops->signaled && fence->ops->signaled(fence)) { fence_signal_locked(fence); return true; } return false; } /** * fence_is_signaled - Return an indication if the fence is signaled yet. * @fence: [in] the fence to check * * Returns true if the fence was already signaled, false if not. Since this * function doesn't enable signaling, it is not guaranteed to ever return * true if fence_add_callback, fence_wait or fence_enable_sw_signaling * haven't been called before. 
* * It's recommended for seqno fences to call fence_signal when the * operation is complete, it makes it possible to prevent issues from * wraparound between time of issue and time of use by checking the return * value of this function before calling hardware-specific wait instructions. */ static inline bool fence_is_signaled(struct fence *fence) { if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return true; if (fence->ops->signaled && fence->ops->signaled(fence)) { fence_signal(fence); return true; } return false; } /** * fence_is_later - return if f1 is chronologically later than f2 * @f1: [in] the first fence from the same context * @f2: [in] the second fence from the same context * * Returns true if f1 is chronologically later than f2. Both fences must be * from the same context, since a seqno is not re-used across contexts. */ static inline bool fence_is_later(struct fence *f1, struct fence *f2) { if (WARN_ON(f1->context != f2->context)) return false; return f1->seqno - f2->seqno < INT_MAX; } /** * fence_later - return the chronologically later fence * @f1: [in] the first fence from the same context * @f2: [in] the second fence from the same context * * Returns NULL if both fences are signaled, otherwise the fence that would be * signaled last. Both fences must be from the same context, since a seqno is * not re-used across contexts. */ static inline struct fence *fence_later(struct fence *f1, struct fence *f2) { if (WARN_ON(f1->context != f2->context)) return NULL; /* * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been * set if enable_signaling wasn't called, and enabling that here is * overkill. */ if (fence_is_later(f1, f2)) return fence_is_signaled(f1) ? NULL : f1; else return fence_is_signaled(f2) ? 
NULL : f2; } signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); signed long fence_wait_any_timeout(struct fence **fences, uint32_t count, bool intr, signed long timeout); /** * fence_wait - sleep until the fence gets signaled * @fence: [in] the fence to wait on * @intr: [in] if true, do an interruptible wait * * This function will return -ERESTARTSYS if interrupted by a signal, * or 0 if the fence was signaled. Other error values may be * returned on custom implementations. * * Performs a synchronous wait on this fence. It is assumed the caller * directly or indirectly holds a reference to the fence, otherwise the * fence might be freed before return, resulting in undefined behavior. */ static inline signed long fence_wait(struct fence *fence, bool intr) { signed long ret; /* Since fence_wait_timeout cannot timeout with * MAX_SCHEDULE_TIMEOUT, only valid return values are * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. */ ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); return ret < 0 ? ret : 0; } unsigned fence_context_alloc(unsigned num); #define FENCE_TRACE(f, fmt, args...) \ do { \ struct fence *__ff = (f); \ if (config_enabled(CONFIG_FENCE_TRACE)) \ pr_info("f %u#%u: " fmt, \ __ff->context, __ff->seqno, ##args); \ } while (0) #define FENCE_WARN(f, fmt, args...) \ do { \ struct fence *__ff = (f); \ pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \ ##args); \ } while (0) #define FENCE_ERR(f, fmt, args...) \ do { \ struct fence *__ff = (f); \ pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \ ##args); \ } while (0) #endif /* __LINUX_FENCE_H */