path: root/mcp/patches/0009-seedng-module-Sync-salt-version.patch
From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
Date: Mon, 21 Aug 2017 02:03:01 +0200
Subject: [PATCH] seedng: module: Sync salt version

The custom Salt Python module seedng.py should pin the same Salt version
as the master when preinstalling the minion for salt-controlled VMs via
the bootstrap script.

Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
---
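
For reference, a minimal sketch (not part of the patch; the version string
is hypothetical) of how the pinned bootstrap argument is derived from the
master's running Salt version:

    # stand-in for salt.version.__version__ on the master
    version = '2016.11.3'
    arg = 'stable {0}'.format('.'.join(version.split('.')[:2]))
    print(arg)  # -> 'stable 2016.11'

The chroot command then becomes roughly
'sh <tmppath>/bootstrap-salt.sh -c /tmp stable 2016.11', making
bootstrap-salt.sh install a minion from the same stable branch as the
master instead of the latest available release.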

diff --git a/_modules/seedng.py b/_modules/seedng.py
--- a/_modules/seedng.py
+++ b/_modules/seedng.py
@@ -256,8 +256,10 @@
     boot_, tmppath = (prep_bootstrap(mpt)
              or salt.syspaths.BOOTSTRAP)
     # Exec the chroot command
+    arg = 'stable {0}'.format('.'.join(salt.version.__version__.split('.')[:2]))
     cmd = 'if type salt-minion; then exit 0; '
-    cmd += 'else sh {0} -c /tmp; fi'.format(os.path.join(tmppath, 'bootstrap-salt.sh'))
+    cmd += 'else sh {0} -c /tmp {1}; fi'.format(
+        os.path.join(tmppath, 'bootstrap-salt.sh'), arg)
     return not __salt__['cmd.run_chroot'](mpt, cmd, python_shell=True)['retcode']
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#include <rte_mempool.h>
#include <rte_version.h>
#include <inttypes.h>

#include "prox_malloc.h"
#include "prox_port_cfg.h"
#include "stats_mempool.h"

struct stats_mempool_manager {
	uint32_t n_mempools;
	struct mempool_stats mempool_stats[0]; /* flexible array of per-pool stats */
};

static struct stats_mempool_manager *smm;

struct mempool_stats *stats_get_mempool_stats(uint32_t i)
{
	return &smm->mempool_stats[i];
}

int stats_get_n_mempools(void)
{
	return smm->n_mempools;
}

static struct stats_mempool_manager *alloc_stats_mempool_manager(void)
{
	const uint32_t socket_id = rte_lcore_to_socket_id(rte_lcore_id());
	uint32_t n_max_mempools = sizeof(prox_port_cfg[0].pool)/sizeof(prox_port_cfg[0].pool[0]);
	size_t mem_size = sizeof(struct stats_mempool_manager);

	/* Size the manager for one mempool_stats entry per configured
	   pool on each active port. */
	for (uint8_t i = 0; i < PROX_MAX_PORTS; ++i) {
		if (!prox_port_cfg[i].active)
			continue;
		for (uint8_t j = 0; j < n_max_mempools; ++j) {
			if (prox_port_cfg[i].pool[j] && prox_port_cfg[i].pool_size[j])
				mem_size += sizeof(struct mempool_stats);
		}
	}

	return prox_zmalloc(mem_size, socket_id);
}

void stats_mempool_init(void)
{
	uint32_t n_max_mempools = sizeof(prox_port_cfg[0].pool)/sizeof(prox_port_cfg[0].pool[0]);

	smm = alloc_stats_mempool_manager();
	for (uint8_t i = 0; i < PROX_MAX_PORTS; ++i) {
		if (!prox_port_cfg[i].active)
			continue;
		for (uint8_t j = 0; j < n_max_mempools; ++j) {
			if (prox_port_cfg[i].pool[j] && prox_port_cfg[i].pool_size[j]) {
				struct mempool_stats *ms = &smm->mempool_stats[smm->n_mempools];

				ms->pool = prox_port_cfg[i].pool[j];
				ms->port = i;
				ms->queue = j;
				ms->size = prox_port_cfg[i].pool_size[j];
				smm->n_mempools++;
			}
		}
	}
}

void stats_mempool_update(void)
{
	for (uint8_t mp_id = 0; mp_id < smm->n_mempools; ++mp_id) {
		/* Note: rte_mempool_free_count() returns the number of used
		   entries; the calls below return the number of free entries.
		   DPDK 17.05 renamed rte_mempool_count() to
		   rte_mempool_avail_count(). */
#if RTE_VERSION >= RTE_VERSION_NUM(17,5,0,0)
		smm->mempool_stats[mp_id].free = rte_mempool_avail_count(smm->mempool_stats[mp_id].pool);
#else
		smm->mempool_stats[mp_id].free = rte_mempool_count(smm->mempool_stats[mp_id].pool);
#endif
	}
}