author    Justin Choquette <jchoquette@iol.unh.edu>    2023-06-08 12:46:53 -0400
committer Justin Choquette <jchoquette@iol.unh.edu>    2023-07-21 13:17:51 -0400
commit    a09db9f287a02873c0226759f8ea444bb304cd59 (patch)
tree      59e744e4b998973a808abbae2d21fbdd6201d829 /src/api
parent    8ddc7e820e120f1dde4e901d3cb6f1dd3f281e65 (diff)
LaaS 3.0 Almost MVP
Change-Id: Ided9a43cf3088bb58a233dc459711c03f43e11b8
Signed-off-by: Justin Choquette <jchoquette@iol.unh.edu>
Diffstat (limited to 'src/api')
-rw-r--r-- | src/api/admin.py | 21
-rw-r--r-- | src/api/migrations/0022_add_cifile_generated_field.py | 15
-rw-r--r-- | src/api/migrations/0022_merge_20230607_1948.py (renamed from src/api/migrations/0022_merge_20211102_2136.py) | 4
-rw-r--r-- | src/api/migrations/0023_auto_20230608_1913.py | 172
-rw-r--r-- | src/api/models.py | 1275
-rw-r--r-- | src/api/serializers/__init__.py | 8
-rw-r--r-- | src/api/serializers/booking_serializer.py | 173
-rw-r--r-- | src/api/serializers/old_serializers.py | 21
-rw-r--r-- | src/api/tests/__init__.py | 8
-rw-r--r-- | src/api/tests/test_models_unittest.py | 271
-rw-r--r-- | src/api/urls.py | 44
-rw-r--r-- | src/api/views.py | 756
12 files changed, 454 insertions, 2314 deletions
diff --git a/src/api/admin.py b/src/api/admin.py
index 1e243a0..74b023e 100644
--- a/src/api/admin.py
+++ b/src/api/admin.py
@@ -12,16 +12,6 @@ from django.apps import AppConfig
 from django.contrib import admin
 
 from api.models import (
-    Job,
-    OpnfvApiConfig,
-    HardwareConfig,
-    NetworkConfig,
-    SoftwareConfig,
-    AccessConfig,
-    AccessRelation,
-    SoftwareRelation,
-    HostHardwareRelation,
-    HostNetworkRelation,
     APILog
 )
@@ -29,15 +19,4 @@ from api.models import (
 class ApiConfig(AppConfig):
     name = 'apiJobs'
 
-
-admin.site.register(Job)
-admin.site.register(OpnfvApiConfig)
-admin.site.register(HardwareConfig)
-admin.site.register(NetworkConfig)
-admin.site.register(SoftwareConfig)
-admin.site.register(AccessConfig)
-admin.site.register(AccessRelation)
-admin.site.register(SoftwareRelation)
-admin.site.register(HostHardwareRelation)
-admin.site.register(HostNetworkRelation)
 admin.site.register(APILog)
diff --git a/src/api/migrations/0022_add_cifile_generated_field.py b/src/api/migrations/0022_add_cifile_generated_field.py
deleted file mode 100644
index f83a102..0000000
--- a/src/api/migrations/0022_add_cifile_generated_field.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-    dependencies = [
-        ('api', '0018_cloudinitfile'),
-    ]
-
-    operations = [
-        migrations.AddField(
-            model_name="CloudInitFile",
-            name="generated",
-            field=models.BooleanField(default=False)
-        ),
-    ]
diff --git a/src/api/migrations/0022_merge_20211102_2136.py b/src/api/migrations/0022_merge_20230607_1948.py
index bb27ae4..2c6fae5 100644
--- a/src/api/migrations/0022_merge_20211102_2136.py
+++ b/src/api/migrations/0022_merge_20230607_1948.py
@@ -1,4 +1,4 @@
-# Generated by Django 2.2 on 2021-11-02 21:36
+# Generated by Django 2.2 on 2023-06-07 19:48
 
 from django.db import migrations
 
@@ -6,8 +6,8 @@ from django.db import migrations
 class Migration(migrations.Migration):
 
     dependencies = [
-        ('api', '0019_auto_20210907_1448'),
         ('api', '0021_auto_20210405_1943'),
+        ('api', '0019_auto_20210907_1448'),
     ]
 
     operations = [
diff --git a/src/api/migrations/0023_auto_20230608_1913.py b/src/api/migrations/0023_auto_20230608_1913.py
new file mode 100644
index 0000000..2bc986c
--- /dev/null
+++ b/src/api/migrations/0023_auto_20230608_1913.py
@@ -0,0 +1,172 @@
+# Generated by Django 2.2 on 2023-06-08 19:13
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('api', '0022_merge_20230607_1948'),
+    ]
+
+    operations = [
+        migrations.RemoveField(
+            model_name='accessrelation',
+            name='config',
+        ),
+        migrations.RemoveField(
+            model_name='accessrelation',
+            name='job',
+        ),
+        migrations.RemoveField(
+            model_name='activeusersrelation',
+            name='config',
+        ),
+        migrations.RemoveField(
+            model_name='activeusersrelation',
+            name='job',
+        ),
+        migrations.RemoveField(
+            model_name='bridgeconfig',
+            name='interfaces',
+        ),
+        migrations.RemoveField(
+            model_name='bridgeconfig',
+            name='opnfv_config',
+        ),
+        migrations.RemoveField(
+            model_name='generatedcloudconfig',
+            name='booking',
+        ),
+        migrations.RemoveField(
+            model_name='generatedcloudconfig',
+            name='rconfig',
+        ),
+        migrations.RemoveField(
+            model_name='hardwareconfig',
+            name='taskconfig_ptr',
+        ),
+        migrations.RemoveField(
+            model_name='hosthardwarerelation',
+            name='config',
+        ),
+        migrations.RemoveField(
+            model_name='hosthardwarerelation',
+            name='job',
+        ),
+        migrations.RemoveField(
+            model_name='hostnetworkrelation',
+            name='config',
+        ),
+        migrations.RemoveField(
+            model_name='hostnetworkrelation',
+            name='job',
+        ),
+        migrations.RemoveField(
+            model_name='job',
+            name='booking',
+        ),
+        migrations.RemoveField(
+            model_name='networkconfig',
+            name='interfaces',
+        ),
+        migrations.RemoveField(
+            model_name='networkconfig',
+            name='taskconfig_ptr',
+        ),
+        migrations.RemoveField(
+            model_name='opnfvapiconfig',
+            name='bridge_config',
+        ),
+        migrations.RemoveField(
+            model_name='opnfvapiconfig',
+            name='opnfv_config',
+        ),
+        migrations.RemoveField(
+            model_name='opnfvapiconfig',
+            name='roles',
+        ),
+        migrations.RemoveField(
+            model_name='snapshotconfig',
+            name='taskconfig_ptr',
+        ),
+        migrations.RemoveField(
+            model_name='snapshotrelation',
+            name='config',
+        ),
+        migrations.RemoveField(
+            model_name='snapshotrelation',
+            name='job',
+        ),
+        migrations.RemoveField(
+            model_name='snapshotrelation',
+            name='snapshot',
+        ),
+        migrations.RemoveField(
+            model_name='softwareconfig',
+            name='opnfv',
+        ),
+        migrations.RemoveField(
+            model_name='softwareconfig',
+            name='taskconfig_ptr',
+        ),
+        migrations.RemoveField(
+            model_name='softwarerelation',
+            name='config',
+        ),
+        migrations.RemoveField(
+            model_name='softwarerelation',
+            name='job',
+        ),
+        migrations.DeleteModel(
+            name='AccessConfig',
+        ),
+        migrations.DeleteModel(
+            name='AccessRelation',
+        ),
+        migrations.DeleteModel(
+            name='ActiveUsersConfig',
+        ),
+        migrations.DeleteModel(
+            name='ActiveUsersRelation',
+        ),
+        migrations.DeleteModel(
+            name='BridgeConfig',
+        ),
+        migrations.DeleteModel(
+            name='GeneratedCloudConfig',
+        ),
+        migrations.DeleteModel(
+            name='HardwareConfig',
+        ),
+        migrations.DeleteModel(
+            name='HostHardwareRelation',
+        ),
+        migrations.DeleteModel(
+            name='HostNetworkRelation',
+        ),
+        migrations.DeleteModel(
+            name='Job',
+        ),
+        migrations.DeleteModel(
+            name='NetworkConfig',
+        ),
+        migrations.DeleteModel(
+            name='OpnfvApiConfig',
+        ),
+        migrations.DeleteModel(
+            name='SnapshotConfig',
+        ),
+        migrations.DeleteModel(
+            name='SnapshotRelation',
+        ),
+        migrations.DeleteModel(
+            name='SoftwareConfig',
+        ),
+        migrations.DeleteModel(
+            name='SoftwareRelation',
+        ),
+        migrations.DeleteModel(
+            name='TaskConfig',
+        ),
+    ]
diff --git a/src/api/models.py b/src/api/models.py
index 93168f5..ca33ed8 100644
--- a/src/api/models.py
+++ b/src/api/models.py
@@ -23,40 +23,10 @@ import yaml
 import re
 
 from booking.models import Booking
-from resource_inventory.models import (
-    Lab,
-    ResourceProfile,
-    Image,
-    Opsys,
-    Interface,
-    ResourceOPNFVConfig,
-    RemoteInfo,
-    OPNFVConfig,
-    ConfigState,
-    ResourceQuery,
-    ResourceConfiguration,
-    CloudInitFile
-)
-from resource_inventory.idf_templater import IDFTemplater
-from resource_inventory.pdf_templater import PDFTemplater
 from account.models import Downtime, UserProfile
 from dashboard.utils import AbstractModelQuery


-class JobStatus:
-    """
-    A poor man's enum for a job's status.
- - A job is NEW if it has not been started or recognized by the Lab - A job is CURRENT if it has been started by the lab but it is not yet completed - a job is DONE if all the tasks are complete and the booking is ready to use - """ - - NEW = 0 - CURRENT = 100 - DONE = 200 - ERROR = 300 - class LabManagerTracker: @@ -89,18 +59,6 @@ class LabManager: def __init__(self, lab): self.lab = lab - def get_opsyss(self): - return Opsys.objects.filter(from_lab=self.lab) - - def get_images(self): - return Image.objects.filter(from_lab=self.lab) - - def get_image(self, image_id): - return Image.objects.filter(from_lab=self.lab, lab_id=image_id) - - def get_opsys(self, opsys_id): - return Opsys.objects.filter(from_lab=self.lab, lab_id=opsys_id) - def get_downtime(self): return Downtime.objects.filter(start__lt=timezone.now(), end__gt=timezone.now(), lab=self.lab) @@ -135,50 +93,6 @@ class LabManager: ) return self.get_downtime_json() - def update_host_remote_info(self, data, res_id): - resource = ResourceQuery.filter(labid=res_id, lab=self.lab) - if len(resource) != 1: - return HttpResponseNotFound("Could not find single host with id " + str(res_id)) - resource = resource[0] - info = {} - try: - info['address'] = data['address'] - info['mac_address'] = data['mac_address'] - info['password'] = data['password'] - info['user'] = data['user'] - info['type'] = data['type'] - info['versions'] = json.dumps(data['versions']) - except Exception as e: - return {"error": "invalid arguement: " + str(e)} - remote_info = resource.remote_management - if "default" in remote_info.mac_address: - remote_info = RemoteInfo() - remote_info.address = info['address'] - remote_info.mac_address = info['mac_address'] - remote_info.password = info['password'] - remote_info.user = info['user'] - remote_info.type = info['type'] - remote_info.versions = info['versions'] - remote_info.save() - resource.remote_management = remote_info - resource.save() - booking = Booking.objects.get(resource=resource.bundle) - self.update_xdf(booking) - return {"status": "success"} - - def update_xdf(self, booking): - booking.pdf = PDFTemplater.makePDF(booking) - booking.idf = IDFTemplater().makeIDF(booking) - booking.save() - - def get_pdf(self, booking_id): - booking = get_object_or_404(Booking, pk=booking_id, lab=self.lab) - return booking.pdf - - def get_idf(self, booking_id): - booking = get_object_or_404(Booking, pk=booking_id, lab=self.lab) - return booking.idf - def get_profile(self): prof = {} prof['name'] = self.lab.name @@ -214,299 +128,12 @@ class LabManager: return json.dumps(self.format_user(profile)) - def get_inventory(self): - inventory = {} - resources = ResourceQuery.filter(lab=self.lab) - images = Image.objects.filter(from_lab=self.lab) - profiles = ResourceProfile.objects.filter(labs=self.lab) - inventory['resources'] = self.serialize_resources(resources) - inventory['images'] = self.serialize_images(images) - inventory['host_types'] = self.serialize_host_profiles(profiles) - return inventory - - def get_host(self, hostname): - resource = ResourceQuery.filter(labid=hostname, lab=self.lab) - if len(resource) != 1: - return HttpResponseNotFound("Could not find single host with id " + str(hostname)) - resource = resource[0] - return { - "booked": resource.booked, - "working": resource.working, - "type": resource.profile.name - } - - def update_host(self, hostname, data): - resource = ResourceQuery.filter(labid=hostname, lab=self.lab) - if len(resource) != 1: - return HttpResponseNotFound("Could not find single host with id " + 
str(hostname)) - resource = resource[0] - if "working" in data: - working = data['working'] == "true" - resource.working = working - resource.save() - return self.get_host(hostname) - def get_status(self): return {"status": self.lab.status} def set_status(self, payload): {} - def get_current_jobs(self): - jobs = Job.objects.filter(booking__lab=self.lab) - - return self.serialize_jobs(jobs, status=JobStatus.CURRENT) - - def get_new_jobs(self): - jobs = Job.objects.filter(booking__lab=self.lab) - - return self.serialize_jobs(jobs, status=JobStatus.NEW) - - def get_done_jobs(self): - jobs = Job.objects.filter(booking__lab=self.lab) - - return self.serialize_jobs(jobs, status=JobStatus.DONE) - - def get_analytics_job(self): - """ Get analytics job with status new """ - jobs = Job.objects.filter( - booking__lab=self.lab, - job_type='DATA' - ) - - return self.serialize_jobs(jobs, status=JobStatus.NEW) - - def get_job(self, jobid): - return Job.objects.get(pk=jobid).to_dict() - - def update_job(self, jobid, data): - {} - - def serialize_jobs(self, jobs, status=JobStatus.NEW): - job_ser = [] - for job in jobs: - jsonized_job = job.get_delta(status) - if len(jsonized_job['payload']) < 1: - continue - job_ser.append(jsonized_job) - - return job_ser - - def serialize_resources(self, resources): - # TODO: rewrite for Resource model - host_ser = [] - for res in resources: - r = { - 'interfaces': [], - 'hostname': res.name, - 'host_type': res.profile.name - } - for iface in res.get_interfaces(): - r['interfaces'].append({ - 'mac': iface.mac_address, - 'busaddr': iface.bus_address, - 'name': iface.name, - 'switchport': {"switch_name": iface.switch_name, "port_name": iface.port_name} - }) - return host_ser - - def serialize_images(self, images): - images_ser = [] - for image in images: - images_ser.append( - { - "name": image.name, - "lab_id": image.lab_id, - "dashboard_id": image.id - } - ) - return images_ser - - def serialize_resource_profiles(self, profiles): - profile_ser = [] - for profile in profiles: - p = {} - p['cpu'] = { - "cores": profile.cpuprofile.first().cores, - "arch": profile.cpuprofile.first().architecture, - "cpus": profile.cpuprofile.first().cpus, - } - p['disks'] = [] - for disk in profile.storageprofile.all(): - d = { - "size": disk.size, - "type": disk.media_type, - "name": disk.name - } - p['disks'].append(d) - p['description'] = profile.description - p['interfaces'] = [] - for iface in profile.interfaceprofile.all(): - p['interfaces'].append( - { - "speed": iface.speed, - "name": iface.name - } - ) - - p['ram'] = {"amount": profile.ramprofile.first().amount} - p['name'] = profile.name - profile_ser.append(p) - return profile_ser - - -class GeneratedCloudConfig(models.Model): - resource_id = models.CharField(max_length=200) - booking = models.ForeignKey(Booking, on_delete=models.CASCADE) - rconfig = models.ForeignKey(ResourceConfiguration, on_delete=models.CASCADE) - text = models.TextField(null=True, blank=True) - - def _normalize_username(self, username: str) -> str: - # TODO: make usernames posix compliant - s = re.sub(r'\W+', '', username) - return s - - def _get_ssh_string(self, username: str) -> str: - user = User.objects.get(username=username) - uprofile = user.userprofile - - ssh_file = uprofile.ssh_public_key - - escaped_file = ssh_file.open().read().decode(encoding="UTF-8").replace("\n", " ") - - return escaped_file - - def _serialize_users(self): - """ - returns the dictionary to be placed behind the `users` field of the toplevel c-i dict - """ - # conserves distro 
default user - user_array = ["default"] - - users = list(self.booking.collaborators.all()) - users.append(self.booking.owner) - for collaborator in users: - userdict = {} - - # TODO: validate if usernames are valid as linux usernames (and provide an override potentially) - userdict['name'] = self._normalize_username(collaborator.username) - - userdict['groups'] = "sudo" - userdict['sudo'] = "ALL=(ALL) NOPASSWD:ALL" - - userdict['ssh_authorized_keys'] = [self._get_ssh_string(collaborator.username)] - - user_array.append(userdict) - - # user_array.append({ - # "name": "opnfv", - # "passwd": "$6$k54L.vim1cLaEc4$5AyUIrufGlbtVBzuCWOlA1yV6QdD7Gr2MzwIs/WhuYR9ebSfh3Qlb7djkqzjwjxpnSAonK1YOabPP6NxUDccu.", - # "ssh_redirect_user": True, - # "sudo": "ALL=(ALL) NOPASSWD:ALL", - # "groups": "sudo", - # }) - - return user_array - - # TODO: make this configurable - def _serialize_sysinfo(self): - defuser = {} - defuser['name'] = 'opnfv' - defuser['plain_text_passwd'] = 'OPNFV_HOST' - defuser['home'] = '/home/opnfv' - defuser['shell'] = '/bin/bash' - defuser['lock_passwd'] = True - defuser['gecos'] = 'Lab Manager User' - defuser['groups'] = 'sudo' - - return {'default_user': defuser} - - # TODO: make this configurable - def _serialize_runcmds(self): - cmdlist = [] - - # have hosts run dhcp on boot - cmdlist.append(['sudo', 'dhclient', '-r']) - cmdlist.append(['sudo', 'dhclient']) - - return cmdlist - - def _serialize_netconf_v1(self): - # interfaces = {} # map from iface_name => dhcp_config - # vlans = {} # map from vlan_id => dhcp_config - - config_arr = [] - - for interface in self._resource().interfaces.all(): - interface_name = interface.profile.name - interface_mac = interface.mac_address - - iface_dict_entry = { - "type": "physical", - "name": interface_name, - "mac_address": interface_mac, - } - - for vlan in interface.config.all(): - if vlan.tagged: - vlan_dict_entry = {'type': 'vlan'} - vlan_dict_entry['name'] = str(interface_name) + "." 
+ str(vlan.vlan_id) - vlan_dict_entry['vlan_link'] = str(interface_name) - vlan_dict_entry['vlan_id'] = int(vlan.vlan_id) - vlan_dict_entry['mac_address'] = str(interface_mac) - if vlan.public: - vlan_dict_entry["subnets"] = [{"type": "dhcp"}] - config_arr.append(vlan_dict_entry) - if (not vlan.tagged) and vlan.public: - iface_dict_entry["subnets"] = [{"type": "dhcp"}] - - # vlan_dict_entry['mtu'] = # TODO, determine override MTU if needed - - config_arr.append(iface_dict_entry) - - ns_dict = { - 'type': 'nameserver', - 'address': ['10.64.0.1', '8.8.8.8'] - } - - config_arr.append(ns_dict) - - full_dict = {'version': 1, 'config': config_arr} - - return full_dict - - @classmethod - def get(cls, booking_id: int, resource_lab_id: str, file_id: int): - return GeneratedCloudConfig.objects.get(resource_id=resource_lab_id, booking__id=booking_id, file_id=file_id) - - def _resource(self): - return ResourceQuery.get(labid=self.resource_id, lab=self.booking.lab) - - # def _get_facts(self): - # resource = self._resource() - - # hostname = self.rconfig.name - # iface_configs = for_config.interface_configs.all() - - def _to_dict(self): - main_dict = {} - - main_dict['users'] = self._serialize_users() - main_dict['network'] = self._serialize_netconf_v1() - main_dict['hostname'] = self.rconfig.name - - # add first startup commands - main_dict['runcmd'] = self._serialize_runcmds() - - # configure distro default user - main_dict['system_info'] = self._serialize_sysinfo() - - return main_dict - - def serialize(self) -> str: - return yaml.dump(self._to_dict(), width=float("inf")) - - class APILog(models.Model): user = models.ForeignKey(User, on_delete=models.PROTECT) call_time = models.DateTimeField(auto_now=True) @@ -534,7 +161,6 @@ class AutomationAPIManager: sbook['end'] = booking.end sbook['lab'] = AutomationAPIManager.serialize_lab(booking.lab) sbook['purpose'] = booking.purpose - sbook['resourceBundle'] = AutomationAPIManager.serialize_bundle(booking.resource) return sbook @staticmethod @@ -545,909 +171,12 @@ class AutomationAPIManager: return slab @staticmethod - def serialize_bundle(bundle): - sbundle = {} - sbundle['id'] = bundle.pk - sbundle['resources'] = [ - AutomationAPIManager.serialize_server(server) - for server in bundle.get_resources()] - return sbundle - - @staticmethod - def serialize_server(server): - sserver = {} - sserver['id'] = server.pk - sserver['name'] = server.name - return sserver - - @staticmethod - def serialize_resource_profile(profile): - sprofile = {} - sprofile['id'] = profile.pk - sprofile['name'] = profile.name - return sprofile - - @staticmethod - def serialize_template(rec_temp_and_count): - template = rec_temp_and_count[0] - count = rec_temp_and_count[1] - - stemplate = {} - stemplate['id'] = template.pk - stemplate['name'] = template.name - stemplate['count_available'] = count - stemplate['resourceProfiles'] = [ - AutomationAPIManager.serialize_resource_profile(config.profile) - for config in template.getConfigs() - ] - return stemplate - - @staticmethod - def serialize_image(image): - simage = {} - simage['id'] = image.pk - simage['name'] = image.name - return simage - - @staticmethod def serialize_userprofile(up): sup = {} sup['id'] = up.pk sup['username'] = up.user.username return sup - -class Job(models.Model): - """ - A Job to be performed by the Lab. - - The API uses Jobs and Tasks to communicate actions that need to be taken to the Lab - that is hosting a booking. 
A booking from a user has an associated Job which tells - the lab how to configure the hardware, networking, etc to fulfill the booking - for the user. - This is the class that is serialized and put into the api - """ - - JOB_TYPES = ( - ('BOOK', 'Booking'), - ('DATA', 'Analytics') - ) - - booking = models.OneToOneField(Booking, on_delete=models.CASCADE, null=True) - status = models.IntegerField(default=JobStatus.NEW) - complete = models.BooleanField(default=False) - job_type = models.CharField( - max_length=4, - choices=JOB_TYPES, - default='BOOK' - ) - - def to_dict(self): - d = {} - for relation in self.get_tasklist(): - if relation.job_key not in d: - d[relation.job_key] = {} - d[relation.job_key][relation.task_id] = relation.config.to_dict() - - return {"id": self.id, "payload": d} - - def get_tasklist(self, status="all"): - if status != "all": - return JobTaskQuery.filter(job=self, status=status) - return JobTaskQuery.filter(job=self) - - def is_fulfilled(self): - """ - If a job has been completed by the lab. - - This method should return true if all of the job's tasks are done, - and false otherwise - """ - my_tasks = self.get_tasklist() - for task in my_tasks: - if task.status != JobStatus.DONE: - return False - return True - - def get_delta(self, status): - d = {} - for relation in self.get_tasklist(status=status): - if relation.job_key not in d: - d[relation.job_key] = {} - d[relation.job_key][relation.task_id] = relation.config.get_delta() - - return {"id": self.id, "payload": d} - - def to_json(self): - return json.dumps(self.to_dict()) - - -class TaskConfig(models.Model): - state = models.IntegerField(default=ConfigState.NEW) - - keys = set() # TODO: This needs to be an instance variable, not a class variable - delta_keys_list = models.CharField(max_length=200, default="[]") - - @property - def delta_keys(self): - return list(set(json.loads(self.delta_keys_list))) - - @delta_keys.setter - def delta_keys(self, keylist): - self.delta_keys_list = json.dumps(keylist) - - def to_dict(self): - raise NotImplementedError - - def get_delta(self): - raise NotImplementedError - - def format_delta(self, config, token): - delta = {k: config[k] for k in self.delta_keys} - delta['lab_token'] = token - return delta - - def to_json(self): - return json.dumps(self.to_dict()) - - def clear_delta(self): - self.delta_keys = [] - - def set(self, *args): - dkeys = self.delta_keys - for arg in args: - if arg in self.keys: - dkeys.append(arg) - self.delta_keys = dkeys - - -class BridgeConfig(models.Model): - """Displays mapping between jumphost interfaces and bridges.""" - - interfaces = models.ManyToManyField(Interface) - opnfv_config = models.ForeignKey(OPNFVConfig, on_delete=models.CASCADE) - - def to_dict(self): - d = {} - hid = ResourceQuery.get(interface__pk=self.interfaces.first().pk).labid - d[hid] = {} - for interface in self.interfaces.all(): - d[hid][interface.mac_address] = [] - for vlan in interface.config.all(): - network_role = self.opnfv_model.networks().filter(network=vlan.network) - bridge = IDFTemplater.bridge_names[network_role.name] - br_config = { - "vlan_id": vlan.vlan_id, - "tagged": vlan.tagged, - "bridge": bridge - } - d[hid][interface.mac_address].append(br_config) - return d - - def to_json(self): - return json.dumps(self.to_dict()) - - -class ActiveUsersConfig(models.Model): - """ - Task for getting active VPN users - - StackStorm needs no information to run this job - so this task is very bare, but neccessary to fit - job creation convention. 
- """ - - def clear_delta(self): - self.delta = '{}' - - def get_delta(self): - return json.loads(self.to_json()) - - def to_json(self): - return json.dumps(self.to_dict()) - - def to_dict(self): - return {} - - -class OpnfvApiConfig(models.Model): - - installer = models.CharField(max_length=200) - scenario = models.CharField(max_length=300) - roles = models.ManyToManyField(ResourceOPNFVConfig) - # pdf and idf are url endpoints, not the actual file - pdf = models.CharField(max_length=100) - idf = models.CharField(max_length=100) - bridge_config = models.OneToOneField(BridgeConfig, on_delete=models.CASCADE, null=True) - delta = models.TextField() - opnfv_config = models.ForeignKey(OPNFVConfig, null=True, on_delete=models.SET_NULL) - - def to_dict(self): - d = {} - if not self.opnfv_config: - return d - if self.installer: - d['installer'] = self.installer - if self.scenario: - d['scenario'] = self.scenario - if self.pdf: - d['pdf'] = self.pdf - if self.idf: - d['idf'] = self.idf - if self.bridge_config: - d['bridged_interfaces'] = self.bridge_config.to_dict() - - hosts = self.roles.all() - if hosts.exists(): - d['roles'] = [] - for host in hosts: - d['roles'].append({ - host.labid: self.opnfv_config.host_opnfv_config.get( - host_config__pk=host.config.pk - ).role.name - }) - - return d - - def to_json(self): - return json.dumps(self.to_dict()) - - def set_installer(self, installer): - self.installer = installer - d = json.loads(self.delta) - d['installer'] = installer - self.delta = json.dumps(d) - - def set_scenario(self, scenario): - self.scenario = scenario - d = json.loads(self.delta) - d['scenario'] = scenario - self.delta = json.dumps(d) - - def set_xdf(self, booking, update_delta=True): - kwargs = {'lab_name': booking.lab.name, 'booking_id': booking.id} - self.pdf = reverse('get-pdf', kwargs=kwargs) - self.idf = reverse('get-idf', kwargs=kwargs) - if update_delta: - d = json.loads(self.delta) - d['pdf'] = self.pdf - d['idf'] = self.idf - self.delta = json.dumps(d) - - def add_role(self, host): - self.roles.add(host) - d = json.loads(self.delta) - if 'role' not in d: - d['role'] = [] - d['roles'].append({host.labid: host.config.opnfvRole.name}) - self.delta = json.dumps(d) - - def clear_delta(self): - self.delta = '{}' - - def get_delta(self): - return json.loads(self.to_json()) - - -class AccessConfig(TaskConfig): - access_type = models.CharField(max_length=50) - user = models.ForeignKey(User, on_delete=models.CASCADE) - revoke = models.BooleanField(default=False) - context = models.TextField(default="") - delta = models.TextField(default="{}") - - def to_dict(self): - d = {} - d['access_type'] = self.access_type - d['user'] = self.user.id - d['revoke'] = self.revoke - try: - d['context'] = json.loads(self.context) - except Exception: - pass - return d - - def get_delta(self): - d = json.loads(self.to_json()) - d["lab_token"] = self.accessrelation.lab_token - - return d - - def to_json(self): - return json.dumps(self.to_dict()) - - def clear_delta(self): - d = {} - d["lab_token"] = self.accessrelation.lab_token - self.delta = json.dumps(d) - - def set_access_type(self, access_type): - self.access_type = access_type - d = json.loads(self.delta) - d['access_type'] = access_type - self.delta = json.dumps(d) - - def set_user(self, user): - self.user = user - d = json.loads(self.delta) - d['user'] = self.user.id - self.delta = json.dumps(d) - - def set_revoke(self, revoke): - self.revoke = revoke - d = json.loads(self.delta) - d['revoke'] = revoke - self.delta = json.dumps(d) - - def 
set_context(self, context): - self.context = json.dumps(context) - d = json.loads(self.delta) - d['context'] = context - self.delta = json.dumps(d) - - -class SoftwareConfig(TaskConfig): - """Handles software installations, such as OPNFV or ONAP.""" - - opnfv = models.ForeignKey(OpnfvApiConfig, on_delete=models.CASCADE) - - def to_dict(self): - d = {} - if self.opnfv: - d['opnfv'] = self.opnfv.to_dict() - - d["lab_token"] = self.softwarerelation.lab_token - self.delta = json.dumps(d) - - return d - - def get_delta(self): - d = {} - d['opnfv'] = self.opnfv.get_delta() - d['lab_token'] = self.softwarerelation.lab_token - - return d - - def clear_delta(self): - self.opnfv.clear_delta() - - def to_json(self): - return json.dumps(self.to_dict()) - - -class HardwareConfig(TaskConfig): - """Describes the desired configuration of the hardware.""" - - image = models.CharField(max_length=100, default="defimage") - power = models.CharField(max_length=100, default="off") - hostname = models.CharField(max_length=100, default="hostname") - ipmi_create = models.BooleanField(default=False) - delta = models.TextField() - - keys = set(["id", "image", "power", "hostname", "ipmi_create"]) - - def to_dict(self): - return self.get_delta() - - def get_delta(self): - # TODO: grab the GeneratedCloudConfig urls from self.hosthardwarerelation.get_resource() - return self.format_delta( - self.hosthardwarerelation.get_resource().get_configuration(self.state), - self.hosthardwarerelation.lab_token) - - -class NetworkConfig(TaskConfig): - """Handles network configuration.""" - - interfaces = models.ManyToManyField(Interface) - delta = models.TextField() - - def to_dict(self): - d = {} - hid = self.hostnetworkrelation.resource_id - d[hid] = {} - for interface in self.interfaces.all(): - d[hid][interface.mac_address] = [] - if self.state != ConfigState.CLEAN: - for vlan in interface.config.all(): - # TODO: should this come from the interface? - # e.g. will different interfaces for different resources need different configs? 
- d[hid][interface.mac_address].append({"vlan_id": vlan.vlan_id, "tagged": vlan.tagged}) - - return d - - def to_json(self): - return json.dumps(self.to_dict()) - - def get_delta(self): - d = json.loads(self.to_json()) - d['lab_token'] = self.hostnetworkrelation.lab_token - return d - - def clear_delta(self): - self.delta = json.dumps(self.to_dict()) - self.save() - - def add_interface(self, interface): - self.interfaces.add(interface) - d = json.loads(self.delta) - hid = self.hostnetworkrelation.resource_id - if hid not in d: - d[hid] = {} - d[hid][interface.mac_address] = [] - for vlan in interface.config.all(): - d[hid][interface.mac_address].append({"vlan_id": vlan.vlan_id, "tagged": vlan.tagged}) - self.delta = json.dumps(d) - - -class SnapshotConfig(TaskConfig): - - resource_id = models.CharField(max_length=200, default="default_id") - image = models.CharField(max_length=200, null=True) # cobbler ID - dashboard_id = models.IntegerField() - delta = models.TextField(default="{}") - - def to_dict(self): - d = {} - if self.host: - d['host'] = self.host.labid - if self.image: - d['image'] = self.image - d['dashboard_id'] = self.dashboard_id - return d - - def to_json(self): - return json.dumps(self.to_dict()) - - def get_delta(self): - d = json.loads(self.to_json()) - return d - - def clear_delta(self): - self.delta = json.dumps(self.to_dict()) - self.save() - - def set_host(self, host): - self.host = host - d = json.loads(self.delta) - d['host'] = host.labid - self.delta = json.dumps(d) - - def set_image(self, image): - self.image = image - d = json.loads(self.delta) - d['image'] = self.image - self.delta = json.dumps(d) - - def clear_image(self): - self.image = None - d = json.loads(self.delta) - d.pop("image", None) - self.delta = json.dumps(d) - - def set_dashboard_id(self, dash): - self.dashboard_id = dash - d = json.loads(self.delta) - d['dashboard_id'] = self.dashboard_id - self.delta = json.dumps(d) - - def save(self, *args, **kwargs): - if len(ResourceQuery.filter(labid=self.resource_id)) != 1: - raise ValidationError("resource_id " + str(self.resource_id) + " does not refer to a single resource") - super().save(*args, **kwargs) - - -def get_task(task_id): - for taskclass in [AccessRelation, SoftwareRelation, HostHardwareRelation, HostNetworkRelation, SnapshotRelation]: - try: - ret = taskclass.objects.get(task_id=task_id) - return ret - except taskclass.DoesNotExist: - pass - from django.core.exceptions import ObjectDoesNotExist - raise ObjectDoesNotExist("Could not find matching TaskRelation instance") - - +# Needs to exist for migrations def get_task_uuid(): - return str(uuid.uuid4()) - - -class TaskRelation(models.Model): - """ - Relates a Job to a TaskConfig. 
- - superclass that relates a Job to tasks anc maintains information - like status and messages from the lab - """ - - status = models.IntegerField(default=JobStatus.NEW) - job = models.ForeignKey(Job, on_delete=models.CASCADE) - config = models.OneToOneField(TaskConfig, on_delete=models.CASCADE) - task_id = models.CharField(default=get_task_uuid, max_length=37) - lab_token = models.CharField(default="null", max_length=50) - message = models.TextField(default="") - - job_key = None - - def delete(self, *args, **kwargs): - self.config.delete() - return super(self.__class__, self).delete(*args, **kwargs) - - def type_str(self): - return "Generic Task" - - class Meta: - abstract = True - - -class AccessRelation(TaskRelation): - config = models.OneToOneField(AccessConfig, on_delete=models.CASCADE) - job_key = "access" - - def type_str(self): - return "Access Task" - - def delete(self, *args, **kwargs): - self.config.delete() - return super(self.__class__, self).delete(*args, **kwargs) - - -class SoftwareRelation(TaskRelation): - config = models.OneToOneField(SoftwareConfig, on_delete=models.CASCADE) - job_key = "software" - - def type_str(self): - return "Software Configuration Task" - - def delete(self, *args, **kwargs): - self.config.delete() - return super(self.__class__, self).delete(*args, **kwargs) - - -class HostHardwareRelation(TaskRelation): - resource_id = models.CharField(max_length=200, default="default_id") - config = models.OneToOneField(HardwareConfig, on_delete=models.CASCADE) - job_key = "hardware" - - def type_str(self): - return "Hardware Configuration Task" - - def get_delta(self): - return self.config.to_dict() - - def delete(self, *args, **kwargs): - self.config.delete() - return super(self.__class__, self).delete(*args, **kwargs) - - def save(self, *args, **kwargs): - if len(ResourceQuery.filter(labid=self.resource_id)) != 1: - raise ValidationError("resource_id " + str(self.resource_id) + " does not refer to a single resource") - super().save(*args, **kwargs) - - def get_resource(self): - return ResourceQuery.get(labid=self.resource_id) - - -class HostNetworkRelation(TaskRelation): - resource_id = models.CharField(max_length=200, default="default_id") - config = models.OneToOneField(NetworkConfig, on_delete=models.CASCADE) - job_key = "network" - - def type_str(self): - return "Network Configuration Task" - - def delete(self, *args, **kwargs): - self.config.delete() - return super(self.__class__, self).delete(*args, **kwargs) - - def save(self, *args, **kwargs): - if len(ResourceQuery.filter(labid=self.resource_id)) != 1: - raise ValidationError("resource_id " + str(self.resource_id) + " does not refer to a single resource") - super().save(*args, **kwargs) - - def get_resource(self): - return ResourceQuery.get(labid=self.resource_id) - - -class SnapshotRelation(TaskRelation): - snapshot = models.ForeignKey(Image, on_delete=models.CASCADE) - config = models.OneToOneField(SnapshotConfig, on_delete=models.CASCADE) - job_key = "snapshot" - - def type_str(self): - return "Snapshot Task" - - def get_delta(self): - return self.config.to_dict() - - def delete(self, *args, **kwargs): - self.config.delete() - return super(self.__class__, self).delete(*args, **kwargs) - - -class ActiveUsersRelation(TaskRelation): - config = models.OneToOneField(ActiveUsersConfig, on_delete=models.CASCADE) - job_key = "active users task" - - def type_str(self): - return "Active Users Task" - - -class JobFactory(object): - """This class creates all the API models (jobs, tasks, etc) needed to fulfill 
a booking.""" - - @classmethod - def reimageHost(cls, new_image, booking, host): - """Modify an existing job to reimage the given host.""" - job = Job.objects.get(booking=booking) - # make hardware task new - hardware_relation = HostHardwareRelation.objects.get(resource_id=host, job=job) - hardware_relation.config.image = new_image.lab_id - hardware_relation.config.save() - hardware_relation.status = JobStatus.NEW - - # re-apply networking after host is reset - net_relation = HostNetworkRelation.objects.get(resource_id=host, job=job) - net_relation.status = JobStatus.NEW - - # re-apply ssh access after host is reset - for relation in AccessRelation.objects.filter(job=job, config__access_type="ssh"): - relation.status = JobStatus.NEW - relation.save() - - hardware_relation.save() - net_relation.save() - - @classmethod - def makeSnapshotTask(cls, image, booking, host): - relation = SnapshotRelation() - job = Job.objects.get(booking=booking) - config = SnapshotConfig.objects.create(dashboard_id=image.id) - - relation.job = job - relation.config = config - relation.config.save() - relation.config = relation.config - relation.snapshot = image - relation.save() - - config.clear_delta() - config.set_host(host) - config.save() - - @classmethod - def makeActiveUsersTask(cls): - """ Append active users task to analytics job """ - config = ActiveUsersConfig() - relation = ActiveUsersRelation() - job = Job.objects.get(job_type='DATA') - - job.status = JobStatus.NEW - - relation.job = job - relation.config = config - relation.config.save() - relation.config = relation.config - relation.save() - config.save() - - @classmethod - def makeAnalyticsJob(cls, booking): - """ - Create the analytics job - - This will only run once since there will only be one analytics job. - All analytics tasks get appended to analytics job. 
- """ - - if len(Job.objects.filter(job_type='DATA')) > 0: - raise Exception("Cannot have more than one analytics job") - - if booking.resource: - raise Exception("Booking is not marker for analytics job, has resoure") - - job = Job() - job.booking = booking - job.job_type = 'DATA' - job.save() - - cls.makeActiveUsersTask() - - @classmethod - def makeCompleteJob(cls, booking): - """Create everything that is needed to fulfill the given booking.""" - resources = booking.resource.get_resources() - job = None - try: - job = Job.objects.get(booking=booking) - except Exception: - job = Job.objects.create(status=JobStatus.NEW, booking=booking) - cls.makeHardwareConfigs( - resources=resources, - job=job - ) - cls.makeNetworkConfigs( - resources=resources, - job=job - ) - cls.makeSoftware( - booking=booking, - job=job - ) - cls.makeGeneratedCloudConfigs( - resources=resources, - job=job - ) - all_users = list(booking.collaborators.all()) - all_users.append(booking.owner) - cls.makeAccessConfig( - users=all_users, - access_type="vpn", - revoke=False, - job=job - ) - for user in all_users: - try: - cls.makeAccessConfig( - users=[user], - access_type="ssh", - revoke=False, - job=job, - context={ - "key": user.userprofile.ssh_public_key.open().read().decode(encoding="UTF-8"), - "hosts": [r.labid for r in resources] - } - ) - except Exception: - continue - - @classmethod - def makeGeneratedCloudConfigs(cls, resources=[], job=Job()): - for res in resources: - cif = GeneratedCloudConfig.objects.create(resource_id=res.labid, booking=job.booking, rconfig=res.config) - cif.save() - - cif = CloudInitFile.create(priority=0, text=cif.serialize()) - cif.save() - - res.config.cloud_init_files.add(cif) - res.config.save() - - @classmethod - def makeHardwareConfigs(cls, resources=[], job=Job()): - """ - Create and save HardwareConfig. - - Helper function to create the tasks related to - configuring the hardware - """ - for res in resources: - hardware_config = None - try: - hardware_config = HardwareConfig.objects.get(relation__resource_id=res.labid) - except Exception: - hardware_config = HardwareConfig() - - relation = HostHardwareRelation() - relation.resource_id = res.labid - relation.job = job - relation.config = hardware_config - relation.config.save() - relation.config = relation.config - relation.save() - - hardware_config.set("id", "image", "hostname", "power", "ipmi_create") - hardware_config.save() - - @classmethod - def makeAccessConfig(cls, users, access_type, revoke=False, job=Job(), context=False): - """ - Create and save AccessConfig. - - Helper function to create the tasks related to - configuring the VPN, SSH, etc access for users - """ - for user in users: - relation = AccessRelation() - relation.job = job - config = AccessConfig() - config.access_type = access_type - config.user = user - config.save() - relation.config = config - relation.save() - config.clear_delta() - if context: - config.set_context(context) - config.set_access_type(access_type) - config.set_revoke(revoke) - config.set_user(user) - config.save() - - @classmethod - def makeNetworkConfigs(cls, resources=[], job=Job()): - """ - Create and save NetworkConfig. 
- - Helper function to create the tasks related to - configuring the networking - """ - for res in resources: - network_config = None - try: - network_config = NetworkConfig.objects.get(relation__host=res) - except Exception: - network_config = NetworkConfig.objects.create() - - relation = HostNetworkRelation() - relation.resource_id = res.labid - relation.job = job - network_config.save() - relation.config = network_config - relation.save() - network_config.clear_delta() - - # TODO: use get_interfaces() on resource - for interface in res.interfaces.all(): - network_config.add_interface(interface) - network_config.save() - - @classmethod - def make_bridge_config(cls, booking): - if len(booking.resource.get_resources()) < 2: - return None - try: - jumphost_config = ResourceOPNFVConfig.objects.filter( - role__name__iexact="jumphost" - ) - jumphost = ResourceQuery.filter( - bundle=booking.resource, - config=jumphost_config.resource_config - )[0] - except Exception: - return None - br_config = BridgeConfig.objects.create(opnfv_config=booking.opnfv_config) - for iface in jumphost.interfaces.all(): - br_config.interfaces.add(iface) - return br_config - - @classmethod - def makeSoftware(cls, booking=None, job=Job()): - """ - Create and save SoftwareConfig. - - Helper function to create the tasks related to - configuring the desired software, e.g. an OPNFV deployment - """ - if not booking.opnfv_config: - return None - - opnfv_api_config = OpnfvApiConfig.objects.create( - opnfv_config=booking.opnfv_config, - installer=booking.opnfv_config.installer.name, - scenario=booking.opnfv_config.scenario.name, - bridge_config=cls.make_bridge_config(booking) - ) - - opnfv_api_config.set_xdf(booking, False) - opnfv_api_config.save() - - for host in booking.resource.get_resources(): - opnfv_api_config.roles.add(host) - software_config = SoftwareConfig.objects.create(opnfv=opnfv_api_config) - software_relation = SoftwareRelation.objects.create(job=job, config=software_config) - return software_relation - - -JOB_TASK_CLASSLIST = [ - HostHardwareRelation, - AccessRelation, - HostNetworkRelation, - SoftwareRelation, - SnapshotRelation, - ActiveUsersRelation -] - - -class JobTaskQuery(AbstractModelQuery): - model_list = JOB_TASK_CLASSLIST + pass
\ No newline at end of file diff --git a/src/api/serializers/__init__.py b/src/api/serializers/__init__.py deleted file mode 100644 index e0408fa..0000000 --- a/src/api/serializers/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -############################################################################## -# Copyright (c) 2018 Parker Berberian, Sawyer Bergeron, and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## diff --git a/src/api/serializers/booking_serializer.py b/src/api/serializers/booking_serializer.py deleted file mode 100644 index 993eb22..0000000 --- a/src/api/serializers/booking_serializer.py +++ /dev/null @@ -1,173 +0,0 @@ -############################################################################## -# Copyright (c) 2018 Parker Berberian, Sawyer Bergeron, and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - - -from rest_framework import serializers - -from resource_inventory.models import ( - ResourceConfiguration, - CpuProfile, - DiskProfile, - InterfaceProfile, - RamProfile, - Image, - Interface -) - - -class BookingField(serializers.Field): - - def to_representation(self, booking): - """ - Take in a booking object. - - Returns a dictionary of primitives representing that booking - """ - ser = {} - ser['id'] = booking.id - # main loop to grab relevant info out of booking - host_configs = {} # mapping hostname -> config - networks = {} # mapping vlan id -> network_hosts - for host in booking.resource.hosts.all(): - host_configs[host.name] = ResourceConfiguration.objects.get(host=host.template) - if "jumphost" not in ser and host_configs[host.name].opnfvRole.name.lower() == "jumphost": - ser['jumphost'] = host.name - # host is a Host model - for i in range(len(host.interfaces.all())): - interface = host.interfaces.all()[i] - # interface is an Interface model - for vlan in interface.config.all(): - # vlan is Vlan model - if vlan.id not in networks: - networks[vlan.id] = [] - net_host = {"hostname": host.name, "tagged": vlan.tagged, "interface": i} - networks[vlan.id].append(net_host) - # creates networking object of proper form - networking = [] - for vlanid in networks: - network = {} - network['vlan_id'] = vlanid - network['hosts'] = networks[vlanid] - - ser['networking'] = networking - - # creates hosts object of correct form - hosts = [] - for hostname in host_configs: - host = {"hostname": hostname} - host['deploy_image'] = True # TODO? - image = host_configs[hostname].image - host['image'] = { - "name": image.name, - "lab_id": image.lab_id, - "dashboard_id": image.id - } - hosts.append(host) - - ser['hosts'] = hosts - - return ser - - def to_internal_value(self, data): - """ - Take in a dictionary of primitives, and return a booking object. - - This is not going to be implemented or allowed. 
- If someone needs to create a booking through the api, - they will send a different booking object - """ - return None - - -class BookingSerializer(serializers.Serializer): - - booking = BookingField() - - -# Host Type stuff, for inventory -class CPUSerializer(serializers.ModelSerializer): - class Meta: - model = CpuProfile - fields = ('cores', 'architecture', 'cpus') - - -class DiskSerializer(serializers.ModelSerializer): - class Meta: - model = DiskProfile - fields = ('size', 'media_type', 'name') - - -class InterfaceProfileSerializer(serializers.ModelSerializer): - class Meta: - model = InterfaceProfile - fields = ('speed', 'name') - - -class RamSerializer(serializers.ModelSerializer): - class Meta: - model = RamProfile - fields = ('amount', 'channels') - - -class HostTypeSerializer(serializers.Serializer): - name = serializers.CharField(max_length=200) - ram = RamSerializer() - interface = InterfaceProfileSerializer() - description = serializers.CharField(max_length=1000) - disks = DiskSerializer() - cpu = CPUSerializer() - - -# the rest of the inventory stuff -class NetworkSerializer(serializers.Serializer): - cidr = serializers.CharField(max_length=200) - gateway = serializers.IPAddressField(max_length=200) - vlan = serializers.IntegerField() - - -class ImageSerializer(serializers.ModelSerializer): - lab_id = serializers.IntegerField() - id = serializers.IntegerField(source="dashboard_id") - name = serializers.CharField(max_length=50) - description = serializers.CharField(max_length=200) - - class Meta: - model = Image - - -class InterfaceField(serializers.Field): - def to_representation(self, interface): - pass - - def to_internal_value(self, data): - """Take in a serialized interface and creates an Interface model.""" - mac = data['mac'] - bus_address = data['busaddr'] - switch_name = data['switchport']['switch_name'] - port_name = data['switchport']['port_name'] - # TODO config?? - return Interface.objects.create( - mac_address=mac, - bus_address=bus_address, - switch_name=switch_name, - port_name=port_name - ) - - -class InventoryHostSerializer(serializers.Serializer): - hostname = serializers.CharField(max_length=100) - host_type = serializers.CharField(max_length=100) - interfaces = InterfaceField() - - -class InventorySerializer(serializers.Serializer): - hosts = InventoryHostSerializer() - networks = NetworkSerializer() - images = ImageSerializer() - host_types = HostTypeSerializer() diff --git a/src/api/serializers/old_serializers.py b/src/api/serializers/old_serializers.py deleted file mode 100644 index 0944881..0000000 --- a/src/api/serializers/old_serializers.py +++ /dev/null @@ -1,21 +0,0 @@ -############################################################################## -# Copyright (c) 2016 Max Breitenfeldt and others. -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - - -from rest_framework import serializers - -from account.models import UserProfile - - -class UserSerializer(serializers.ModelSerializer): - username = serializers.CharField(source='user.username') - - class Meta: - model = UserProfile - fields = ('user', 'username', 'ssh_public_key', 'pgp_public_key', 'email_addr') diff --git a/src/api/tests/__init__.py b/src/api/tests/__init__.py deleted file mode 100644 index 2435a9f..0000000 --- a/src/api/tests/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -############################################################################## -# Copyright (c) 2016 Parker Berberian and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## diff --git a/src/api/tests/test_models_unittest.py b/src/api/tests/test_models_unittest.py deleted file mode 100644 index 2dee29b..0000000 --- a/src/api/tests/test_models_unittest.py +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright (c) 2019 Sawyer Bergeron, Parker Berberian, and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -from api.models import ( - Job, - JobStatus, - JobFactory, - HostNetworkRelation, - HostHardwareRelation, - SoftwareRelation, - AccessConfig, - SnapshotRelation -) - -from resource_inventory.models import ( - OPNFVRole, - HostProfile, - ConfigState, -) - -from django.test import TestCase, Client - -from dashboard.testing_utils import ( - make_host, - make_user, - make_user_profile, - make_lab, - make_installer, - make_image, - make_scenario, - make_os, - make_complete_host_profile, - make_booking, -) - - -class ValidBookingCreatesValidJob(TestCase): - @classmethod - def setUpTestData(cls): - cls.user = make_user(False, username="newtestuser", password="testpassword") - cls.userprofile = make_user_profile(cls.user) - cls.lab = make_lab() - - cls.host_profile = make_complete_host_profile(cls.lab) - cls.scenario = make_scenario() - cls.installer = make_installer([cls.scenario]) - os = make_os([cls.installer]) - cls.image = make_image(cls.lab, 1, cls.user, os, cls.host_profile) - for i in range(30): - make_host(cls.host_profile, cls.lab, name="host" + str(i), labid="host" + str(i)) - cls.client = Client() - - def setUp(self): - self.booking, self.compute_hostnames, self.jump_hostname = self.create_multinode_generic_booking() - - def create_multinode_generic_booking(self): - topology = {} - - compute_hostnames = ["cmp01", "cmp02", "cmp03"] - - host_type = HostProfile.objects.first() - - universal_networks = [ - {"name": "public", "tagged": False, "public": True}, - {"name": "admin", "tagged": True, "public": False}] - compute_networks = [{"name": "private", "tagged": True, "public": False}] - jumphost_networks = [{"name": "external", "tagged": True, 
"public": True}] - - # generate a bunch of extra networks - for i in range(10): - net = {"tagged": False, "public": False} - net["name"] = "net" + str(i) - universal_networks.append(net) - - jumphost_info = { - "type": host_type, - "role": OPNFVRole.objects.get_or_create(name="Jumphost")[0], - "nets": self.make_networks(host_type, jumphost_networks + universal_networks), - "image": self.image - } - topology["jump"] = jumphost_info - - for hostname in compute_hostnames: - host_info = { - "type": host_type, - "role": OPNFVRole.objects.get_or_create(name="Compute")[0], - "nets": self.make_networks(host_type, compute_networks + universal_networks), - "image": self.image - } - topology[hostname] = host_info - - booking = make_booking( - owner=self.user, - lab=self.lab, - topology=topology, - installer=self.installer, - scenario=self.scenario - ) - - if not booking.resource: - raise Exception("Booking does not have a resource when trying to pass to makeCompleteJob") - return booking, compute_hostnames, "jump" - - def make_networks(self, hostprofile, nets): - """ - Distribute nets accross hostprofile's interfaces. - - returns a 2D array - """ - network_struct = [] - count = hostprofile.interfaceprofile.all().count() - for i in range(count): - network_struct.append([]) - while (nets): - index = len(nets) % count - network_struct[index].append(nets.pop()) - - return network_struct - - ################################################################# - # Complete Job Tests - ################################################################# - - def test_complete_job_makes_access_configs(self): - JobFactory.makeCompleteJob(self.booking) - job = Job.objects.get(booking=self.booking) - self.assertIsNotNone(job) - - access_configs = AccessConfig.objects.filter(accessrelation__job=job) - - vpn_configs = access_configs.filter(access_type="vpn") - ssh_configs = access_configs.filter(access_type="ssh") - - self.assertFalse(AccessConfig.objects.exclude(access_type__in=["vpn", "ssh"]).exists()) - - all_users = list(self.booking.collaborators.all()) - all_users.append(self.booking.owner) - - for user in all_users: - self.assertTrue(vpn_configs.filter(user=user).exists()) - self.assertTrue(ssh_configs.filter(user=user).exists()) - - def test_complete_job_makes_network_configs(self): - JobFactory.makeCompleteJob(self.booking) - job = Job.objects.get(booking=self.booking) - self.assertIsNotNone(job) - - booking_hosts = self.booking.resource.hosts.all() - - netrelations = HostNetworkRelation.objects.filter(job=job) - netconfigs = [r.config for r in netrelations] - - netrelation_hosts = [r.host for r in netrelations] - - for config in netconfigs: - for interface in config.interfaces.all(): - self.assertTrue(interface.host in booking_hosts) - - # if no interfaces are referenced that shouldn't have vlans, - # and no vlans exist outside those accounted for in netconfigs, - # then the api is faithfully representing networks - # as netconfigs reference resource_inventory models directly - - # this test relies on the assumption that - # every interface is configured, whether it does or does not have vlans - # if this is not true, the test fails - - for host in booking_hosts: - self.assertTrue(host in netrelation_hosts) - relation = HostNetworkRelation.objects.filter(job=job).get(host=host) - - # do 2 direction matching that interfaces are one to one - config = relation.config - for interface in config.interfaces.all(): - self.assertTrue(interface in host.interfaces) - for interface in host.interfaces.all(): - 
self.assertTrue(interface in config.interfaces) - - for host in netrelation_hosts: - self.assertTrue(host in booking_hosts) - - def test_complete_job_makes_hardware_configs(self): - JobFactory.makeCompleteJob(self.booking) - job = Job.objects.get(booking=self.booking) - self.assertIsNotNone(job) - - hardware_relations = HostHardwareRelation.objects.filter(job=job) - - job_hosts = [r.host for r in hardware_relations] - - booking_hosts = self.booking.resource.hosts.all() - - self.assertEqual(len(booking_hosts), len(job_hosts)) - - for relation in hardware_relations: - self.assertTrue(relation.host in booking_hosts) - self.assertEqual(relation.status, JobStatus.NEW) - config = relation.config - host = relation.host - self.assertEqual(config.get_delta()["hostname"], host.template.resource.name) - - def test_complete_job_makes_software_configs(self): - JobFactory.makeCompleteJob(self.booking) - job = Job.objects.get(booking=self.booking) - self.assertIsNotNone(job) - - srelation = SoftwareRelation.objects.filter(job=job).first() - self.assertIsNotNone(srelation) - - sconfig = srelation.config - self.assertIsNotNone(sconfig) - - oconfig = sconfig.opnfv - self.assertIsNotNone(oconfig) - - # not onetoone in models, but first() is safe here based on how ConfigBundle and a matching OPNFVConfig are created - # this should, however, be made explicit - self.assertEqual(oconfig.installer, self.booking.config_bundle.opnfv_config.first().installer.name) - self.assertEqual(oconfig.scenario, self.booking.config_bundle.opnfv_config.first().scenario.name) - - for host in oconfig.roles.all(): - role_name = host.config.host_opnfv_config.first().role.name - if str(role_name).lower() == "jumphost": - self.assertEqual(host.template.resource.name, self.jump_hostname) - elif str(role_name).lower() == "compute": - self.assertTrue(host.template.resource.name in self.compute_hostnames) - else: - self.fail(msg="Host with non-configured role name related to job: " + str(role_name)) - - def test_make_snapshot_task(self): - host = self.booking.resource.hosts.first() - image = make_image(self.lab, -1, None, None, host.profile) - - Job.objects.create(booking=self.booking) - - JobFactory.makeSnapshotTask(image, self.booking, host) - - snap_relation = SnapshotRelation.objects.get(job=self.booking.job) - config = snap_relation.config - self.assertEqual(host.id, config.host.id) - self.assertEqual(config.dashboard_id, image.id) - self.assertEqual(snap_relation.snapshot.id, image.id) - - def test_make_hardware_configs(self): - hosts = self.booking.resource.hosts.all() - job = Job.objects.create(booking=self.booking) - JobFactory.makeHardwareConfigs(hosts=hosts, job=job) - - hardware_relations = HostHardwareRelation.objects.filter(job=job) - - self.assertEqual(hardware_relations.count(), hosts.count()) - - host_set = set([h.id for h in hosts]) - - for relation in hardware_relations: - try: - host_set.remove(relation.host.id) - except KeyError: - self.fail("Hardware Relation/Config not created for host " + str(relation.host)) - # TODO: ConfigState needs to be fixed in factory methods - relation.config.state = ConfigState.NEW - self.assertEqual(relation.config.get_delta()["power"], "on") - self.assertTrue(relation.config.get_delta()["ipmi_create"]) - # TODO: the rest of hwconf attrs - - self.assertEqual(len(host_set), 0) diff --git a/src/api/urls.py b/src/api/urls.py index cbb453c..b009aeb 100644 --- a/src/api/urls.py +++ b/src/api/urls.py @@ -31,62 +31,23 @@ from django.urls import path from api.views import ( lab_profile, 
lab_status, - lab_inventory, lab_downtime, - specific_job, - specific_task, - new_jobs, - current_jobs, - done_jobs, - update_host_bmc, - lab_host, - get_pdf, - get_idf, lab_users, lab_user, GenerateTokenView, - analytics_job, user_bookings, specific_booking, extend_booking, make_booking, list_labs, all_users, - images_for_template, - available_templates, - resource_ci_metadata, - resource_ci_userdata, - resource_ci_userdata_directory, - all_images, - all_opsyss, - single_image, - single_opsys, - create_ci_file, booking_details, ) urlpatterns = [ - path('labs/<slug:lab_name>/opsys/<slug:opsys_id>', single_opsys), - path('labs/<slug:lab_name>/image/<slug:image_id>', single_image), - path('labs/<slug:lab_name>/opsys', all_opsyss), - path('labs/<slug:lab_name>/image', all_images), path('labs/<slug:lab_name>/profile', lab_profile), path('labs/<slug:lab_name>/status', lab_status), - path('labs/<slug:lab_name>/inventory', lab_inventory), path('labs/<slug:lab_name>/downtime', lab_downtime), - path('labs/<slug:lab_name>/hosts/<slug:host_id>', lab_host), - path('labs/<slug:lab_name>/hosts/<slug:host_id>/bmc', update_host_bmc), - path('labs/<slug:lab_name>/booking/<int:booking_id>/pdf', get_pdf, name="get-pdf"), - path('labs/<slug:lab_name>/booking/<int:booking_id>/idf', get_idf, name="get-idf"), - path('labs/<slug:lab_name>/jobs/<int:job_id>', specific_job), - path('labs/<slug:lab_name>/jobs/<int:job_id>/<slug:task_id>', specific_task), - path('labs/<slug:lab_name>/jobs/<int:job_id>/cidata/<slug:resource_id>/user-data', resource_ci_userdata_directory, name="specific-user-data"), - path('labs/<slug:lab_name>/jobs/<int:job_id>/cidata/<slug:resource_id>/meta-data', resource_ci_metadata, name="specific-meta-data"), - path('labs/<slug:lab_name>/jobs/<int:job_id>/cidata/<slug:resource_id>/<int:file_id>/user-data', resource_ci_userdata, name="user-data-dir"), - path('labs/<slug:lab_name>/jobs/new', new_jobs), - path('labs/<slug:lab_name>/jobs/current', current_jobs), - path('labs/<slug:lab_name>/jobs/done', done_jobs), - path('labs/<slug:lab_name>/jobs/getByType/DATA', analytics_job), path('labs/<slug:lab_name>/users', lab_users), path('labs/<slug:lab_name>/users/<int:user_id>', lab_user), @@ -96,11 +57,6 @@ urlpatterns = [ path('booking/makeBooking', make_booking), path('booking/<int:booking_id>/details', booking_details), - path('resource_inventory/availableTemplates', available_templates), - path('resource_inventory/<int:template_id>/images', images_for_template), - - path('resource_inventory/cloud/create', create_ci_file), - path('users', all_users), path('labs', list_labs), diff --git a/src/api/views.py b/src/api/views.py index d5966ed..ea36a6d 100644 --- a/src/api/views.py +++ b/src/api/views.py @@ -10,6 +10,7 @@ import json import math +import os import traceback import sys from datetime import timedelta @@ -21,28 +22,18 @@ from django.utils import timezone from django.views import View from django.http import HttpResponseNotFound from django.http.response import JsonResponse, HttpResponse +import requests from rest_framework import viewsets from rest_framework.authtoken.models import Token from django.views.decorators.csrf import csrf_exempt from django.core.exceptions import ObjectDoesNotExist from django.db.models import Q +from django.contrib.auth.models import User -from api.serializers.booking_serializer import BookingSerializer -from api.serializers.old_serializers import UserSerializer from api.forms import DowntimeForm from account.models import UserProfile, Lab from booking.models import 
Booking -from booking.quick_deployer import create_from_API -from api.models import LabManagerTracker, get_task, Job, AutomationAPIManager, APILog, GeneratedCloudConfig -from notifier.manager import NotificationHandler -from analytics.models import ActiveVPNUser -from resource_inventory.models import ( - Image, - Opsys, - CloudInitFile, - ResourceQuery, - ResourceTemplate, -) +from api.models import LabManagerTracker,AutomationAPIManager, APILog import yaml import uuid @@ -61,17 +52,6 @@ the correct thing will happen """ -class BookingViewSet(viewsets.ModelViewSet): - queryset = Booking.objects.all() - serializer_class = BookingSerializer - filter_fields = ('resource', 'id') - - -class UserViewSet(viewsets.ModelViewSet): - queryset = UserProfile.objects.all() - serializer_class = UserSerializer - - @method_decorator(login_required, name='dispatch') class GenerateTokenView(View): def get(self, request, *args, **kwargs): @@ -83,111 +63,6 @@ class GenerateTokenView(View): return redirect('account:settings') -def lab_inventory(request, lab_name=""): - lab_token = request.META.get('HTTP_AUTH_TOKEN') - lab_manager = LabManagerTracker.get(lab_name, lab_token) - return JsonResponse(lab_manager.get_inventory(), safe=False) - - -@csrf_exempt -def lab_host(request, lab_name="", host_id=""): - lab_token = request.META.get('HTTP_AUTH_TOKEN') - lab_manager = LabManagerTracker.get(lab_name, lab_token) - if request.method == "GET": - return JsonResponse(lab_manager.get_host(host_id), safe=False) - if request.method == "POST": - return JsonResponse(lab_manager.update_host(host_id, request.POST), safe=False) - -# API extension for Cobbler integration - - -def all_images(request, lab_name=""): - a = [] - for i in Image.objects.all(): - a.append(i.serialize()) - return JsonResponse(a, safe=False) - - -def all_opsyss(request, lab_name=""): - a = [] - for opsys in Opsys.objects.all(): - a.append(opsys.serialize()) - - return JsonResponse(a, safe=False) - - -@csrf_exempt -def single_image(request, lab_name="", image_id=""): - lab_token = request.META.get('HTTP_AUTH_TOKEN') - lab_manager = LabManagerTracker.get(lab_name, lab_token) - img = lab_manager.get_image(image_id).first() - - if request.method == "GET": - if not img: - return HttpResponse(status=404) - return JsonResponse(img.serialize(), safe=False) - - if request.method == "POST": - # get POST data - data = json.loads(request.body.decode('utf-8')) - if img: - img.update(data) - else: - # append lab name and the ID from the URL - data['from_lab_id'] = lab_name - data['lab_id'] = image_id - - # create and save a new Image object - img = Image.new_from_data(data) - - img.save() - - # indicate success in response - return HttpResponse(status=200) - return HttpResponse(status=405) - - -@csrf_exempt -def single_opsys(request, lab_name="", opsys_id=""): - lab_token = request.META.get('HTTP_AUTH_TOKEN') - lab_manager = LabManagerTracker.get(lab_name, lab_token) - opsys = lab_manager.get_opsys(opsys_id).first() - - if request.method == "GET": - if not opsys: - return HttpResponse(status=404) - return JsonResponse(opsys.serialize(), safe=False) - - if request.method == "POST": - data = json.loads(request.body.decode('utf-8')) - if opsys: - opsys.update(data) - else: - # only name, available, and obsolete are needed to create an Opsys - # other fields are derived from the URL parameters - data['from_lab_id'] = lab_name - data['lab_id'] = opsys_id - opsys = Opsys.new_from_data(data) - - opsys.save() - return HttpResponse(status=200) - return HttpResponse(status=405) 
- -# end API extension - - -def get_pdf(request, lab_name="", booking_id=""): - lab_token = request.META.get('HTTP_AUTH_TOKEN') - lab_manager = LabManagerTracker.get(lab_name, lab_token) - return HttpResponse(lab_manager.get_pdf(booking_id), content_type="text/plain") - - -def get_idf(request, lab_name="", booking_id=""): - lab_token = request.META.get('HTTP_AUTH_TOKEN') - lab_manager = LabManagerTracker.get(lab_name, lab_token) - return HttpResponse(lab_manager.get_idf(booking_id), content_type="text/plain") - - def lab_status(request, lab_name=""): lab_token = request.META.get('HTTP_AUTH_TOKEN') lab_manager = LabManagerTracker.get(lab_name, lab_token) @@ -208,171 +83,12 @@ def lab_user(request, lab_name="", user_id=-1): return HttpResponse(lab_manager.get_user(user_id), content_type="text/plain") -@csrf_exempt -def update_host_bmc(request, lab_name="", host_id=""): - lab_token = request.META.get('HTTP_AUTH_TOKEN') - lab_manager = LabManagerTracker.get(lab_name, lab_token) - if request.method == "POST": - # update / create RemoteInfo for host - return JsonResponse( - lab_manager.update_host_remote_info(request.POST, host_id), - safe=False - ) - - def lab_profile(request, lab_name=""): lab_token = request.META.get('HTTP_AUTH_TOKEN') lab_manager = LabManagerTracker.get(lab_name, lab_token) return JsonResponse(lab_manager.get_profile(), safe=False) -@csrf_exempt -def specific_task(request, lab_name="", job_id="", task_id=""): - lab_token = request.META.get('HTTP_AUTH_TOKEN') - LabManagerTracker.get(lab_name, lab_token) # Authorize caller, but we dont need the result - - if request.method == "POST": - task = get_task(task_id) - if 'status' in request.POST: - task.status = request.POST.get('status') - if 'message' in request.POST: - task.message = request.POST.get('message') - if 'lab_token' in request.POST: - task.lab_token = request.POST.get('lab_token') - task.save() - NotificationHandler.task_updated(task) - d = {} - d['task'] = task.config.get_delta() - m = {} - m['status'] = task.status - m['job'] = str(task.job) - m['message'] = task.message - d['meta'] = m - return JsonResponse(d, safe=False) - elif request.method == "GET": - return JsonResponse(get_task(task_id).config.get_delta()) - - -@csrf_exempt -def specific_job(request, lab_name="", job_id=""): - lab_token = request.META.get('HTTP_AUTH_TOKEN') - lab_manager = LabManagerTracker.get(lab_name, lab_token) - if request.method == "POST": - return JsonResponse(lab_manager.update_job(job_id, request.POST), safe=False) - return JsonResponse(lab_manager.get_job(job_id), safe=False) - - -@csrf_exempt -def resource_ci_userdata(request, lab_name="", job_id="", resource_id="", file_id=0): - # lab_token = request.META.get('HTTP_AUTH_TOKEN') - # lab_manager = LabManagerTracker.get(lab_name, lab_token) - - # job = lab_manager.get_job(job_id) - Job.objects.get(id=job_id) # verify a valid job was given, even if we don't use it - - cifile = None - try: - cifile = CloudInitFile.objects.get(id=file_id) - except ObjectDoesNotExist: - return HttpResponseNotFound("Could not find a matching resource by id " + str(resource_id)) - - text = cifile.text - - prepended_text = "#cloud-config\n" - # mstrat = CloudInitFile.merge_strategy() - # prepended_text = prepended_text + yaml.dump({"merge_strategy": mstrat}) + "\n" - # print("in cloudinitfile create") - text = prepended_text + text - cloud_dict = { - "datasource": { - "None": { - "metadata": { - "instance-id": str(uuid.uuid4()) - }, - "userdata_raw": text, - }, - }, - "datasource_list": ["None"], - } - - 
return HttpResponse(yaml.dump(cloud_dict, width=float("inf")), status=200) - - -@csrf_exempt -def resource_ci_metadata(request, lab_name="", job_id="", resource_id="", file_id=0): - return HttpResponse("#cloud-config", status=200) - - -@csrf_exempt -def resource_ci_userdata_directory(request, lab_name="", job_id="", resource_id=""): - # files = [{"id": file.file_id, "priority": file.priority} for file in CloudInitFile.objects.filter(job__id=job_id, resource_id=resource_id).order_by("priority").all()] - resource = ResourceQuery.get(labid=resource_id, lab=Lab.objects.get(name=lab_name)) - files = resource.config.cloud_init_files - files = [{"id": file.id, "priority": file.priority} for file in files.order_by("priority").all()] - - d = {} - - merge_failures = [] - - merger = Merger( - [ - (list, ["append"]), - (dict, ["merge"]), - ], - ["override"], # fallback - ["override"], # if types conflict (shouldn't happen in CI, but handle case) - ) - - for f in resource.config.cloud_init_files.order_by("priority").all(): - try: - other_dict = yaml.safe_load(f.text) - if not (type(d) is dict): - raise Exception("CI file was valid yaml but was not a dict") - - merger.merge(d, other_dict) - except Exception as e: - # if fail to merge, then just skip - print("Failed to merge file in, as it had invalid content:", f.id) - print("File text was:") - print(f.text) - merge_failures.append({f.id: str(e)}) - - if len(merge_failures) > 0: - d['merge_failures'] = merge_failures - - file = CloudInitFile.create(text=yaml.dump(d, width=float("inf")), priority=0) - - return HttpResponse(json.dumps([{"id": file.id, "priority": file.priority}]), status=200) - - -def new_jobs(request, lab_name=""): - lab_token = request.META.get('HTTP_AUTH_TOKEN') - lab_manager = LabManagerTracker.get(lab_name, lab_token) - return JsonResponse(lab_manager.get_new_jobs(), safe=False) - - -def current_jobs(request, lab_name=""): - lab_token = request.META.get('HTTP_AUTH_TOKEN') - lab_manager = LabManagerTracker.get(lab_name, lab_token) - return JsonResponse(lab_manager.get_current_jobs(), safe=False) - - -@csrf_exempt -def analytics_job(request, lab_name=""): - """ returns all jobs with type booking""" - lab_token = request.META.get('HTTP_AUTH_TOKEN') - lab_manager = LabManagerTracker.get(lab_name, lab_token) - if request.method == "GET": - return JsonResponse(lab_manager.get_analytics_job(), safe=False) - if request.method == "POST": - users = json.loads(request.body.decode('utf-8'))['active_users'] - try: - ActiveVPNUser.create(lab_name, users) - except ObjectDoesNotExist: - return JsonResponse('Lab does not exist!', safe=False) - return HttpResponse(status=200) - return HttpResponse(status=405) - def lab_downtime(request, lab_name=""): lab_token = request.META.get('HTTP_AUTH_TOKEN') @@ -408,12 +124,6 @@ def delete_lab_downtime(lab_manager): return JsonResponse({"error": "Lab is not in downtime"}, status=422) -def done_jobs(request, lab_name=""): - lab_token = request.META.get('HTTP_AUTH_TOKEN') - lab_manager = LabManagerTracker.get(lab_name, lab_token) - return JsonResponse(lab_manager.get_done_jobs(), safe=False) - - def auth_and_log(request, endpoint): """ Function to authenticate an API user and log info @@ -471,37 +181,41 @@ Booking API Views def user_bookings(request): - token = auth_and_log(request, 'booking') + # token = auth_and_log(request, 'booking') - if isinstance(token, HttpResponse): - return token + # if isinstance(token, HttpResponse): + # return token - bookings = Booking.objects.filter(owner=token.user, 
end__gte=timezone.now()) - output = [AutomationAPIManager.serialize_booking(booking) - for booking in bookings] - return JsonResponse(output, safe=False) + # bookings = Booking.objects.filter(owner=token.user, end__gte=timezone.now()) + # output = [AutomationAPIManager.serialize_booking(booking) + # for booking in bookings] + # return JsonResponse(output, safe=False) + # todo - LL Integration + return HttpResponse(status=404) @csrf_exempt def specific_booking(request, booking_id=""): - token = auth_and_log(request, 'booking/{}'.format(booking_id)) + # token = auth_and_log(request, 'booking/{}'.format(booking_id)) - if isinstance(token, HttpResponse): - return token + # if isinstance(token, HttpResponse): + # return token - booking = get_object_or_404(Booking, pk=booking_id, owner=token.user) - if request.method == "GET": - sbooking = AutomationAPIManager.serialize_booking(booking) - return JsonResponse(sbooking, safe=False) + # booking = get_object_or_404(Booking, pk=booking_id, owner=token.user) + # if request.method == "GET": + # sbooking = AutomationAPIManager.serialize_booking(booking) + # return JsonResponse(sbooking, safe=False) - if request.method == "DELETE": + # if request.method == "DELETE": - if booking.end < timezone.now(): - return HttpResponse("Booking already over", status=400) + # if booking.end < timezone.now(): + # return HttpResponse("Booking already over", status=400) - booking.end = timezone.now() - booking.save() - return HttpResponse("Booking successfully cancelled") + # booking.end = timezone.now() + # booking.save() + # return HttpResponse("Booking successfully cancelled") + # todo - LL Integration + return HttpResponse(status=404) @csrf_exempt @@ -531,70 +245,82 @@ def extend_booking(request, booking_id="", days=""): @csrf_exempt def make_booking(request): - token = auth_and_log(request, 'booking/makeBooking') - - if isinstance(token, HttpResponse): - return token - + print("received call to make_booking") + data = json.loads(request.body) + print("incoming data is ", data) + + allowed_users = list(data["allowed_users"]) + allowed_users.append(str(request.user)) + + bookingBlob = { + "template_id": data["template_id"], + "allowed_users": allowed_users, + "global_cifile": data["global_cifile"], + "metadata": { + "booking_id": None, # fill in after creating django object + "owner": str(request.user), + "lab": "UNH_IOL", + "purpose": data["metadata"]["purpose"], + "project": data["metadata"]["project"], + "length": data["metadata"]["length"] + } + } + + print("allowed users are ", bookingBlob["allowed_users"]) try: - booking = create_from_API(request.body, token.user) + booking = Booking.objects.create( + purpose=bookingBlob["metadata"]["purpose"], + project=bookingBlob["metadata"]['project'], + lab=Lab.objects.get(name='UNH_IOL'), + owner=request.user, + start=timezone.now(), + end=timezone.now() + timedelta(days=int(bookingBlob["metadata"]['length'])), + ) + print("successfully created booking object with id ", booking.id) + + # Now add collabs + for c in bookingBlob["allowed_users"]: + if c != bookingBlob["metadata"]["owner"]: # Don't add self as a collab + booking.collaborators.add(User.objects.get(username=c)) + print("successfully added collabs") + + # Now create it in liblaas + bookingBlob["metadata"]["booking_id"] = str(booking.id) + liblaas_endpoint = os.environ.get("LIBLAAS_BASE_URL") + 'booking/create' + liblaas_response = requests.post(liblaas_endpoint, data=json.dumps(bookingBlob), headers={'Content-Type': 'application/json'}) + if 
liblaas_response.status_code != 200: + print("received non success from liblaas") + return JsonResponse( + data={}, + status=500, + safe=False + ) + aggregateId = json.loads(liblaas_response.content) + print("successfully created aggregate in liblaas") - except Exception: - finalTrace = '' - exc_type, exc_value, exc_traceback = sys.exc_info() - for i in traceback.format_exception(exc_type, exc_value, exc_traceback): - finalTrace += '<br>' + i.strip() - return HttpResponse(finalTrace, status=400) + # Now update the agg_id + booking.aggregateId = aggregateId + booking.save() + print("sucessfully updated aggreagateId in booking object") - sbooking = AutomationAPIManager.serialize_booking(booking) - return JsonResponse(sbooking, safe=False) + return JsonResponse( + data = {"bookingId": booking.id}, + status=200, + safe=False + ) + except Exception as error: + print(error) + return JsonResponse( + data={}, + status=500, + safe=False + ) """ Resource Inventory API Views """ - - -def available_templates(request): - token = auth_and_log(request, 'resource_inventory/availableTemplates') - - if isinstance(token, HttpResponse): - return token - - # get available templates - # mirrors MultipleSelectFilter Widget - avt = [] - for lab in Lab.objects.all(): - for template in ResourceTemplate.objects.filter(Q(owner=token.user) | Q(public=True), lab=lab, temporary=False): - available_resources = lab.get_available_resources() - required_resources = template.get_required_resources() - least_available = 100 - - for resource, count_required in required_resources.items(): - try: - curr_count = math.floor(available_resources[str(resource)] / count_required) - if curr_count < least_available: - least_available = curr_count - except KeyError: - least_available = 0 - - if least_available > 0: - avt.append((template, least_available)) - - savt = [AutomationAPIManager.serialize_template(temp) - for temp in avt] - - return JsonResponse(savt, safe=False) - - -def images_for_template(request, template_id=""): - _ = auth_and_log(request, 'resource_inventory/{}/images'.format(template_id)) - - template = get_object_or_404(ResourceTemplate, pk=template_id) - images = [AutomationAPIManager.serialize_image(config.image) - for config in template.getConfigs()] - return JsonResponse(images, safe=False) - +# todo - LL Integration """ User API Views @@ -613,25 +339,6 @@ def all_users(request): return JsonResponse(users, safe=False) -def create_ci_file(request): - token = auth_and_log(request, 'booking/makeCloudConfig') - - if isinstance(token, HttpResponse): - return token - - try: - cconf = request.body - d = yaml.load(cconf) - if not (type(d) is dict): - raise Exception() - - cconf = CloudInitFile.create(text=cconf, priority=CloudInitFile.objects.count()) - - return JsonResponse({"id": cconf.id}) - except Exception: - return JsonResponse({"error": "Provided config file was not valid yaml or was not a dict at the top level"}) - - """ Lab API Views """ @@ -662,95 +369,188 @@ Booking Details API Views def booking_details(request, booking_id=""): - token = auth_and_log(request, 'booking/{}/details'.format(booking_id)) - - if isinstance(token, HttpResponse): - return token - - booking = get_object_or_404(Booking, pk=booking_id, owner=token.user) + # token = auth_and_log(request, 'booking/{}/details'.format(booking_id)) + + # if isinstance(token, HttpResponse): + # return token + + # booking = get_object_or_404(Booking, pk=booking_id, owner=token.user) + + # # overview + # overview = { + # 'username': 
GeneratedCloudConfig._normalize_username(None, str(token.user)), + # 'purpose': booking.purpose, + # 'project': booking.project, + # 'start_time': booking.start, + # 'end_time': booking.end, + # 'pod_definitions': booking.resource.template, + # 'lab': booking.lab + # } + + # # deployment progress + # task_list = [] + # for task in booking.job.get_tasklist(): + # task_info = { + # 'name': str(task), + # 'status': 'DONE', + # 'lab_response': 'No response provided (yet)' + # } + # if task.status < 100: + # task_info['status'] = 'PENDING' + # elif task.status < 200: + # task_info['status'] = 'IN PROGRESS' + + # if task.message: + # if task.type_str == "Access Task" and request.user.id != task.config.user.id: + # task_info['lab_response'] = '--secret--' + # else: + # task_info['lab_response'] = str(task.message) + # task_list.append(task_info) + + # # pods + # pod_list = [] + # for host in booking.resource.get_resources(): + # pod_info = { + # 'hostname': host.config.name, + # 'machine': host.name, + # 'role': '', + # 'is_headnode': host.config.is_head_node, + # 'image': host.config.image, + # 'ram': {'amount': str(host.profile.ramprofile.first().amount) + 'G', 'channels': host.profile.ramprofile.first().channels}, + # 'cpu': {'arch': host.profile.cpuprofile.first().architecture, 'cores': host.profile.cpuprofile.first().cores, 'sockets': host.profile.cpuprofile.first().cpus}, + # 'disk': {'size': str(host.profile.storageprofile.first().size) + 'GiB', 'type': host.profile.storageprofile.first().media_type, 'mount_point': host.profile.storageprofile.first().name}, + # 'interfaces': [], + # } + # try: + # pod_info['role'] = host.template.opnfvRole + # except Exception: + # pass + # for intprof in host.profile.interfaceprofile.all(): + # int_info = { + # 'name': intprof.name, + # 'speed': intprof.speed + # } + # pod_info['interfaces'].append(int_info) + # pod_list.append(pod_info) + + # # diagnostic info + # diagnostic_info = { + # 'job_id': booking.job.id, + # 'ci_files': '', + # 'pods': [] + # } + # for host in booking.resource.get_resources(): + # pod = { + # 'host': host.name, + # 'configs': [], + + # } + # for ci_file in host.config.cloud_init_files.all(): + # ci_info = { + # 'id': ci_file.id, + # 'text': ci_file.text + # } + # pod['configs'].append(ci_info) + # diagnostic_info['pods'].append(pod) + + # details = { + # 'overview': overview, + # 'deployment_progress': task_list, + # 'pods': pod_list, + # 'diagnostic_info': diagnostic_info, + # 'pdf': booking.pdf + # } + # return JsonResponse(str(details), safe=False) + # todo - LL Integration + return HttpResponse(status=404) + + +""" Forwards a request to the LibLaaS API from a workflow """ +def liblaas_request(request) -> JsonResponse: + print("handing liblaas request... 
", request.method) + print(request.body) + if request.method != 'POST': + return JsonResponse({"error" : "405 Method not allowed"}) + + liblaas_base_url = os.environ.get("LIBLAAS_BASE_URL") + post_data = json.loads(request.body) + print("post data is " + str(post_data)) + http_method = post_data["method"] + liblaas_endpoint = post_data["endpoint"] + payload = post_data["workflow_data"] + # Fill in actual username + liblaas_endpoint = liblaas_endpoint.replace("[username]", str(request.user)) + liblaas_endpoint = liblaas_base_url + liblaas_endpoint + print("processed endpoint is ", liblaas_endpoint) + + if (http_method == "GET"): + response = requests.get(liblaas_endpoint, data=json.dumps(payload)) + elif (http_method == "POST"): + response = requests.post(liblaas_endpoint, data=json.dumps(payload), headers={'Content-Type': 'application/json'}) + elif (http_method == "DELETE"): + response = requests.delete(liblaas_endpoint, data=json.dumps(payload)) + elif (http_method == "PUT"): + response = requests.put(liblaas_endpoint, data=json.dumps(payload)) + else: + return JsonResponse( + data={}, + status=405, + safe=False + ) + try: + return JsonResponse( + data=json.loads(response.content.decode('utf8')), + status=200, + safe=False + ) + except Exception as e: + print("fail") + print(e) + return JsonResponse( + data = {}, + status=500, + safe=False + ) - # overview - overview = { - 'username': GeneratedCloudConfig._normalize_username(None, str(token.user)), - 'purpose': booking.purpose, - 'project': booking.project, - 'start_time': booking.start, - 'end_time': booking.end, - 'pod_definitions': booking.resource.template, - 'lab': booking.lab - } +def liblaas_templates(request): + liblaas_url = os.environ.get("LIBLAAS_BASE_URL") + "template/list/" + str(request.user) + print("api call to " + liblaas_url) + return requests.get(liblaas_url) - # deployment progress - task_list = [] - for task in booking.job.get_tasklist(): - task_info = { - 'name': str(task), - 'status': 'DONE', - 'lab_response': 'No response provided (yet)' - } - if task.status < 100: - task_info['status'] = 'PENDING' - elif task.status < 200: - task_info['status'] = 'IN PROGRESS' - - if task.message: - if task.type_str == "Access Task" and request.user.id != task.config.user.id: - task_info['lab_response'] = '--secret--' - else: - task_info['lab_response'] = str(task.message) - task_list.append(task_info) - - # pods - pod_list = [] - for host in booking.resource.get_resources(): - pod_info = { - 'hostname': host.config.name, - 'machine': host.name, - 'role': '', - 'is_headnode': host.config.is_head_node, - 'image': host.config.image, - 'ram': {'amount': str(host.profile.ramprofile.first().amount) + 'G', 'channels': host.profile.ramprofile.first().channels}, - 'cpu': {'arch': host.profile.cpuprofile.first().architecture, 'cores': host.profile.cpuprofile.first().cores, 'sockets': host.profile.cpuprofile.first().cpus}, - 'disk': {'size': str(host.profile.storageprofile.first().size) + 'GiB', 'type': host.profile.storageprofile.first().media_type, 'mount_point': host.profile.storageprofile.first().name}, - 'interfaces': [], - } - try: - pod_info['role'] = host.template.opnfvRole - except Exception: - pass - for intprof in host.profile.interfaceprofile.all(): - int_info = { - 'name': intprof.name, - 'speed': intprof.speed - } - pod_info['interfaces'].append(int_info) - pod_list.append(pod_info) - - # diagnostic info - diagnostic_info = { - 'job_id': booking.job.id, - 'ci_files': '', - 'pods': [] - } - for host in 
booking.resource.get_resources(): - pod = { - 'host': host.name, - 'configs': [], +def delete_template(request): + endpoint = json.loads(request.body)["endpoint"] + liblaas_url = os.environ.get("LIBLAAS_BASE_URL") + endpoint + print("api call to ", liblaas_url) + try: + response = requests.delete(liblaas_url) + return JsonResponse( + data={}, + status=response.status_code, + safe=False + ) + except: + return JsonResponse( + data={}, + status=500, + safe=False + ) - } - for ci_file in host.config.cloud_init_files.all(): - ci_info = { - 'id': ci_file.id, - 'text': ci_file.text - } - pod['configs'].append(ci_info) - diagnostic_info['pods'].append(pod) - - details = { - 'overview': overview, - 'deployment_progress': task_list, - 'pods': pod_list, - 'diagnostic_info': diagnostic_info, - 'pdf': booking.pdf - } - return JsonResponse(str(details), safe=False) +def get_booking_status(bookingObject): + liblaas_url = os.environ.get("LIBLAAS_BASE_URL") + "booking/" + bookingObject.aggregateId + "/status" + print("Getting booking status at: ", liblaas_url) + response = requests.get(liblaas_url) + try: + return json.loads(response.content) + except: + print("failed to get status") + return [] + +def liblaas_end_booking(aggregateId): + liblaas_url = os.environ.get('LIBLAAS_BASE_URL') + "booking/" + str(aggregateId) + "/end" + print("Ending booking at ", liblaas_url) + response = requests.delete(liblaas_url) + try: + return response + except: + print("failed to end booking") + return HttpResponse(status=500)
\ No newline at end of file
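
Note on the removed lab-sync API: the commit drops the Cobbler-era endpoints (all_images, all_opsyss, single_image, single_opsys and their labs/<lab_name>/image and /opsys routes). For reference, a lab agent would previously have pushed an image record roughly as sketched below; the host, the /api/ mount prefix, the token value, and the image fields are illustrative assumptions, while the route shape and the Auth-Token header (read server-side as HTTP_AUTH_TOKEN) come from the removed views.

    import json
    import requests

    payload = {"name": "Ubuntu 20.04", "public": True}   # hypothetical Image fields
    resp = requests.post(
        "https://dashboard.example.org/api/labs/UNH_IOL/image/42",  # hypothetical host/prefix; route from the removed urls.py entry
        data=json.dumps(payload),
        headers={"Auth-Token": "<lab api token>"},        # surfaces as request.META['HTTP_AUTH_TOKEN']
    )
    resp.raise_for_status()                               # removed view answered 200 on create/update, 405 for other methods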
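Note on make_booking: the rewritten view reads a JSON body, creates the Django Booking, adds collaborators, and only then posts the assembled bookingBlob to LIBLAAS_BASE_URL + 'booking/create', saving the returned aggregate id on the booking. A minimal client-side sketch follows; the dashboard host, the /api/ mount prefix, the session cookie, and the concrete field values are assumptions, while the field names mirror exactly what the view reads from request.body.

    import json
    import requests

    booking_request = {
        "template_id": "11111111-2222-3333-4444-555555555555",  # hypothetical LibLaaS template id
        "allowed_users": ["collab_user"],                        # owner is appended server-side
        "global_cifile": "#cloud-config\n",
        "metadata": {
            "purpose": "interop testing",
            "project": "LaaS",
            "length": 7,                                         # days; end = start + length
        },
    }

    resp = requests.post(
        "https://dashboard.example.org/api/booking/makeBooking", # hypothetical host/prefix; path from urls.py
        data=json.dumps(booking_request),
        headers={"Content-Type": "application/json"},
        cookies={"sessionid": "<django session>"},               # view is csrf_exempt but relies on request.user
    )
    print(resp.status_code, resp.json())                         # 200 with {"bookingId": <id>} on success, 500 with {} otherwise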
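Note on liblaas_request: the view forwards front-end workflow calls to LibLaaS; the POST body carries method, endpoint, and workflow_data, and any [username] placeholder in the endpoint is replaced with the requesting user before the call is proxied. The sketch below restates that flow with two defensive additions that are not in the commit, a method table and a request timeout; it is a possible refinement, not the committed implementation.

    import json
    import os

    import requests
    from django.http import JsonResponse
    from django.views.decorators.csrf import csrf_exempt

    _FORWARDERS = {
        "GET": requests.get,
        "POST": requests.post,
        "PUT": requests.put,
        "DELETE": requests.delete,
    }

    @csrf_exempt
    def liblaas_request_sketch(request):
        """Sketch of the forwarding view; mirrors liblaas_request plus a timeout."""
        if request.method != "POST":
            return JsonResponse({"error": "405 Method not allowed"}, status=405)

        body = json.loads(request.body)
        endpoint = body["endpoint"].replace("[username]", str(request.user))
        url = os.environ.get("LIBLAAS_BASE_URL", "") + endpoint

        forward = _FORWARDERS.get(body["method"])
        if forward is None:
            return JsonResponse(data={}, status=405, safe=False)

        try:
            upstream = forward(url, json=body["workflow_data"], timeout=30)
            return JsonResponse(json.loads(upstream.content.decode("utf8")), status=200, safe=False)
        except Exception:
            return JsonResponse(data={}, status=500, safe=False)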
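Note on the booking helpers: get_booking_status and liblaas_end_booking are internal helpers rather than routed views; each builds its URL by appending a relative path directly to LIBLAAS_BASE_URL, so the environment variable is evidently expected to end with a trailing slash. A hypothetical call site (not part of this commit) might use them as follows.

    from django.utils import timezone

    def refresh_booking(booking):
        """Hypothetical helper: poll LibLaaS and close the aggregate once the booking expires."""
        statuses = get_booking_status(booking)        # parsed JSON from GET .../booking/<agg>/status, [] on failure
        if booking.end < timezone.now() and booking.aggregateId:
            liblaas_end_booking(booking.aggregateId)  # DELETE .../booking/<agg>/end
        return statuses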