-rw-r--r--  tests/unit/benchmark/contexts/test_heat.py    9
-rw-r--r--  tests/unit/benchmark/contexts/test_model.py  10
-rw-r--r--  yardstick/benchmark/contexts/heat.py          37
-rw-r--r--  yardstick/benchmark/contexts/model.py         43
-rw-r--r--  yardstick/orchestrator/heat.py                10
5 files changed, 91 insertions(+), 18 deletions(-)
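For reference, HeatContext.init() consumes context attributes shaped like the unit-test fixture below; a minimal Python sketch (all names illustrative, not taken from a real test case):

    # Illustrative attrs dict exercising the new server_groups support;
    # each group's 'policy' must be "affinity" or "anti-affinity".
    attrs = {
        'name': 'demo',
        'server_groups': {'sg1': {'policy': 'affinity'}},
        'networks': {'net1': {'cidr': '10.0.1.0/24'}},
        'servers': {'vm1': {'server_group': 'sg1'}},
    }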
diff --git a/tests/unit/benchmark/contexts/test_heat.py b/tests/unit/benchmark/contexts/test_heat.py
index f8f349205..8f4852ca8 100644
--- a/tests/unit/benchmark/contexts/test_heat.py
+++ b/tests/unit/benchmark/contexts/test_heat.py
@@ -39,6 +39,7 @@ class HeatContextTestCase(unittest.TestCase):
self.assertEqual(self.test_context.networks, [])
self.assertEqual(self.test_context.servers, [])
self.assertEqual(self.test_context.placement_groups, [])
+ self.assertEqual(self.test_context.server_groups, [])
self.assertIsNone(self.test_context.keypair_name)
self.assertIsNone(self.test_context.secgroup_name)
self.assertEqual(self.test_context._server_map, {})
@@ -51,15 +52,18 @@ class HeatContextTestCase(unittest.TestCase):
self.assertIsNotNone(self.test_context.key_filename)
@mock.patch('yardstick.benchmark.contexts.heat.PlacementGroup')
+ @mock.patch('yardstick.benchmark.contexts.heat.ServerGroup')
@mock.patch('yardstick.benchmark.contexts.heat.Network')
@mock.patch('yardstick.benchmark.contexts.heat.Server')
- def test_init(self, mock_server, mock_network, mock_pg):
+ def test_init(self, mock_server, mock_network, mock_sg, mock_pg):
pgs = {'pgrp1': {'policy': 'availability'}}
+ sgs = {'servergroup1': {'policy': 'affinity'}}
networks = {'bar': {'cidr': '10.0.1.0/24'}}
servers = {'baz': {'floating_ip': True, 'placement': 'pgrp1'}}
attrs = {'name': 'foo',
'placement_groups': pgs,
+ 'server_groups': sgs,
'networks': networks,
'servers': servers}
@@ -71,7 +75,10 @@ class HeatContextTestCase(unittest.TestCase):
mock_pg.assert_called_with('pgrp1', self.test_context,
pgs['pgrp1']['policy'])
+ mock_sg.assert_called_with('servergroup1', self.test_context,
+ sgs['servergroup1']['policy'])
self.assertTrue(len(self.test_context.placement_groups) == 1)
+ self.assertTrue(len(self.test_context.server_groups) == 1)
mock_network.assert_called_with(
'bar', self.test_context, networks['bar'])
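A note on the reordered test arguments: stacked mock.patch decorators inject mocks bottom-up, which is why mock_sg lands between mock_network and mock_pg. A toy illustration using real stdlib targets:

    from unittest import mock

    @mock.patch('os.getcwd')  # outermost patch -> last argument
    @mock.patch('os.getpid')  # innermost patch -> first argument
    def demo(mock_getpid, mock_getcwd):
        # mirrors (mock_server, mock_network, mock_sg, mock_pg) above
        return mock_getpid, mock_getcwd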
diff --git a/tests/unit/benchmark/contexts/test_model.py b/tests/unit/benchmark/contexts/test_model.py
index 537a8c008..a4b7d81ef 100644
--- a/tests/unit/benchmark/contexts/test_model.py
+++ b/tests/unit/benchmark/contexts/test_model.py
@@ -180,6 +180,7 @@ class ServerTestCase(unittest.TestCase):
self.assertEqual(test_server.keypair_name, 'some-keys')
self.assertEqual(test_server.secgroup_name, 'some-secgroup')
self.assertEqual(test_server.placement_groups, [])
+ self.assertIsNone(test_server.server_group)
self.assertEqual(test_server.instances, 1)
self.assertIsNone(test_server.floating_ip)
self.assertIsNone(test_server._image)
@@ -195,6 +196,15 @@ class ServerTestCase(unittest.TestCase):
self.assertRaises(ValueError, model.Server, 'foo',
self.mock_context, attrs)
+ @mock.patch('yardstick.benchmark.contexts.model.ServerGroup')
+ def test_construct_get_wrong_server_group(self, mock_sg):
+
+ attrs = {'server_group': 'baz'}
+ mock_sg.get.return_value = None
+
+ self.assertRaises(ValueError, model.Server, 'foo',
+ self.mock_context, attrs)
+
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
def test__add_instance(self, mock_template):
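Because Server now resolves its group through ServerGroup.get() at construction time, the failure path this new test covers looks roughly like the following sketch (mock_context stands in for a real context object):

    # No ServerGroup named 'baz' has been registered, so
    # ServerGroup.get('baz') returns None and Server.__init__ raises:
    try:
        model.Server('foo', mock_context, {'server_group': 'baz'})
    except ValueError as err:
        print(err)  # server 'foo', server_group 'baz' is invalid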
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index 4ca44b0ed..c5c127966 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -21,7 +21,7 @@ import pkg_resources
from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
-from yardstick.benchmark.contexts.model import PlacementGroup
+from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.orchestrator.heat import HeatTemplate, get_short_key_uuid
@@ -41,6 +41,7 @@ class HeatContext(Context):
self.networks = []
self.servers = []
self.placement_groups = []
+ self.server_groups = []
self.keypair_name = None
self.secgroup_name = None
self._server_map = {}
@@ -57,7 +58,7 @@ class HeatContext(Context):
get_short_key_uuid(self.key_uuid)])
super(HeatContext, self).__init__()
- def init(self, attrs):
+ def init(self, attrs): # pragma: no cover
"""initializes itself from the supplied arguments"""
self.name = attrs["name"]
@@ -73,15 +74,18 @@ class HeatContext(Context):
self.secgroup_name = self.name + "-secgroup"
if "image" in attrs:
- self._image = attrs["image"]
+ self._image = attrs.get("image")
if "flavor" in attrs:
- self._flavor = attrs["flavor"]
+ self._flavor = attrs.get("flavor")
- if "placement_groups" in attrs:
- for name, pgattrs in attrs["placement_groups"].items():
- pg = PlacementGroup(name, self, pgattrs["policy"])
- self.placement_groups.append(pg)
+ self.placement_groups = [PlacementGroup(name, self, pgattrs["policy"])
+ for name, pgattrs in attrs.get(
+ "placement_groups", {}).items()]
+
+ self.server_groups = [ServerGroup(name, self, sgattrs["policy"])
+ for name, sgattrs in attrs.get(
+ "server_groups", {}).items()]
for name, netattrs in attrs["networks"].items():
network = Network(name, self, netattrs)
@@ -163,19 +167,17 @@ class HeatContext(Context):
server.add_to_template(template,
self.networks,
scheduler_hints)
- added_servers.append(server.stack_name)
else:
scheduler_hints["different_host"] = \
scheduler_hints["different_host"][0]
server.add_to_template(template,
self.networks,
scheduler_hints)
- added_servers.append(server.stack_name)
else:
server.add_to_template(template,
self.networks,
scheduler_hints)
- added_servers.append(server.stack_name)
+ added_servers.append(server.stack_name)
# create list of servers with affinity policy
affinity_servers = []
@@ -195,10 +197,21 @@ class HeatContext(Context):
server.add_to_template(template, self.networks, scheduler_hints)
added_servers.append(server.stack_name)
+ # add server group
+ for sg in self.server_groups:
+ template.add_server_group(sg.name, sg.policy)
+
# add remaining servers with no placement group configured
for server in list_of_servers:
+ # TODO: combine handling of placement_group and server_group
if not server.placement_groups:
- server.add_to_template(template, self.networks, {})
+ scheduler_hints = {}
+ # affinity/anti-affinity server group
+ sg = server.server_group
+ if sg:
+ scheduler_hints["group"] = {'get_resource': sg.name}
+ server.add_to_template(template,
+ self.networks, scheduler_hints)
def deploy(self):
"""deploys template into a stack using cloud"""
diff --git a/yardstick/benchmark/contexts/model.py b/yardstick/benchmark/contexts/model.py
index c83a209cf..8cf3b621c 100644
--- a/yardstick/benchmark/contexts/model.py
+++ b/yardstick/benchmark/contexts/model.py
@@ -56,10 +56,32 @@ class PlacementGroup(Object):
@staticmethod
def get(name):
- if name in PlacementGroup.map:
- return PlacementGroup.map[name]
- else:
- return None
+ return PlacementGroup.map.get(name)
+
+
+class ServerGroup(Object): # pragma: no cover
+ """Class that represents a server group in the logical model
+ Policy should be one of "anti-affinity" or "affinity"
+ """
+ map = {}
+
+ def __init__(self, name, context, policy):
+ super(ServerGroup, self).__init__(name, context)
+ if policy not in {"affinity", "anti-affinity"}:
+ raise ValueError("server group '%s', policy '%s' is not valid" %
+ (name, policy))
+ self.name = name
+ self.members = set()
+ self.stack_name = context.name + "-" + name
+ self.policy = policy
+ ServerGroup.map[name] = self
+
+ def add_member(self, name):
+ self.members.add(name)
+
+ @staticmethod
+ def get(name):
+ return ServerGroup.map.get(name)
class Router(Object):
@@ -113,7 +135,7 @@ class Network(Object):
return None
-class Server(Object):
+class Server(Object): # pragma: no cover
"""Class that represents a server in the logical model"""
list = []
@@ -141,6 +163,17 @@ class Server(Object):
self.placement_groups.append(pg)
pg.add_member(self.stack_name)
+ # support the server_group attribute
+ self.server_group = None
+ sg = attrs.get("server_group")
+ if sg:
+ server_group = ServerGroup.get(sg)
+ if not server_group:
+ raise ValueError("server '%s', server_group '%s' is invalid" %
+ (name, sg))
+ self.server_group = server_group
+ server_group.add_member(self.stack_name)
+
self.instances = 1
if "instances" in attrs:
self.instances = attrs["instances"]
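ServerGroup keeps a class-level registry just like PlacementGroup, so groups declared in the context are discoverable by name when servers are parsed. A minimal usage sketch (assumes Object's constructor only records name and context; names illustrative):

    ctx = type('Ctx', (), {'name': 'demo'})()
    sg = ServerGroup('sg1', ctx, 'affinity')
    assert ServerGroup.get('sg1') is sg

    # any policy other than "affinity"/"anti-affinity" is rejected:
    try:
        ServerGroup('bad', ctx, 'soft-affinity')
    except ValueError:
        pass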
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index e39c4356c..500776e0e 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -197,6 +197,16 @@ class HeatTemplate(HeatObject):
'properties': {'name': name}
}
+ def add_server_group(self, name, policies): # pragma: no cover
+ """add to the template a ServerGroup"""
+ log.debug("adding Nova::ServerGroup '%s'", name)
+ policies = policies if isinstance(policies, list) else [policies]
+ self.resources[name] = {
+ 'type': 'OS::Nova::ServerGroup',
+ 'properties': {'name': name,
+ 'policies': policies}
+ }
+
def add_subnet(self, name, network, cidr):
"""add to the template a Neutron Subnet"""
log.debug("adding Neutron::Subnet '%s' in network '%s', cidr '%s'",