author     Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 12:17:53 -0700
committer  Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 15:44:42 -0700
commit     9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree       1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/arch/powerpc/platforms/embedded6xx/gamecube.c
parent     98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base. It's from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
linux-4.1.y-rt and the base is:

commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date:   Sat Jul 25 12:13:34 2015 +0200

    Prepare v4.1.3-rt3

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

We lose all the git history this way and it's not good. We should apply
another opnfv project repo in future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/arch/powerpc/platforms/embedded6xx/gamecube.c')
-rw-r--r--    kernel/arch/powerpc/platforms/embedded6xx/gamecube.c    108
1 files changed, 108 insertions, 0 deletions
diff --git a/kernel/arch/powerpc/platforms/embedded6xx/gamecube.c b/kernel/arch/powerpc/platforms/embedded6xx/gamecube.c
new file mode 100644
index 000000000..fe0ed6ee2
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/embedded6xx/gamecube.c
@@ -0,0 +1,108 @@
+/*
+ * arch/powerpc/platforms/embedded6xx/gamecube.c
+ *
+ * Nintendo GameCube board-specific support
+ * Copyright (C) 2004-2009 The GameCube Linux Team
+ * Copyright (C) 2007,2008,2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/kexec.h>
+#include <linux/seq_file.h>
+#include <linux/of_platform.h>
+
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+#include <asm/udbg.h>
+
+#include "flipper-pic.h"
+#include "usbgecko_udbg.h"
+
+
+static void gamecube_spin(void)
+{
+ /* spin until power button pressed */
+ for (;;)
+ cpu_relax();
+}
+
+static void gamecube_restart(char *cmd)
+{
+ local_irq_disable();
+ flipper_platform_reset();
+ gamecube_spin();
+}
+
+static void gamecube_power_off(void)
+{
+ local_irq_disable();
+ gamecube_spin();
+}
+
+static void gamecube_halt(void)
+{
+ gamecube_restart(NULL);
+}
+
+static void __init gamecube_init_early(void)
+{
+ ug_udbg_init();
+}
+
+static int __init gamecube_probe(void)
+{
+ unsigned long dt_root;
+
+ dt_root = of_get_flat_dt_root();
+ if (!of_flat_dt_is_compatible(dt_root, "nintendo,gamecube"))
+ return 0;
+
+ pm_power_off = gamecube_power_off;
+
+ return 1;
+}
+
+static void gamecube_shutdown(void)
+{
+ flipper_quiesce();
+}
+
+define_machine(gamecube) {
+ .name = "gamecube",
+ .probe = gamecube_probe,
+ .init_early = gamecube_init_early,
+ .restart = gamecube_restart,
+ .halt = gamecube_halt,
+ .init_IRQ = flipper_pic_probe,
+ .get_irq = flipper_pic_get_irq,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+ .machine_shutdown = gamecube_shutdown,
+};
+
+
+static const struct of_device_id gamecube_of_bus[] = {
+ { .compatible = "nintendo,flipper", },
+ { },
+};
+
+static int __init gamecube_device_probe(void)
+{
+ if (!machine_is(gamecube))
+ return 0;
+
+ of_platform_bus_probe(NULL, gamecube_of_bus, NULL);
+ return 0;
+}
+device_initcall(gamecube_device_probe);
+
id='n366' href='#n366'>366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755
{%- set primary_role = [roles[0]] -%}
{%- for role in roles -%}
  {%- if 'primary' in role.tags and 'controller' in role.tags -%}
    {%- set _ = primary_role.pop() -%}
    {%- set _ = primary_role.append(role) -%}
  {%- endif -%}
{%- endfor -%}
{%- set primary_role_name = primary_role[0].name -%}
# primary role is: {{primary_role_name}}
heat_template_version: pike

description: >
  Deploy an OpenStack environment, consisting of several node types (roles),
  Controller, Compute, BlockStorage, SwiftStorage and CephStorage. The Storage
  roles enable independent scaling of the storage components, but the minimal
  deployment is one Controller and one Compute node.


# TODO(shadower): we should probably use the parameter groups to put
# some order in here.
parameters:

  # Common parameters (not specific to a role)
  CloudName:
    default: overcloud.localdomain
    description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
    type: string
  CloudNameInternal:
    default: overcloud.internalapi.localdomain
    description: >
      The DNS name of this cloud's internal API endpoint. E.g.
      'ci-overcloud.internalapi.tripleo.org'.
    type: string
  CloudNameStorage:
    default: overcloud.storage.localdomain
    description: >
      The DNS name of this cloud's storage endpoint. E.g.
      'ci-overcloud.storage.tripleo.org'.
    type: string
  CloudNameStorageManagement:
    default: overcloud.storagemgmt.localdomain
    description: >
      The DNS name of this cloud's storage management endpoint. E.g.
      'ci-overcloud.storagemgmt.tripleo.org'.
    type: string
  CloudNameCtlplane:
    default: overcloud.ctlplane.localdomain
    description: >
      The DNS name of this cloud's provisioning network (ctlplane) endpoint. E.g.
      'ci-overcloud.ctlplane.tripleo.org'.
    type: string
  ControlFixedIPs:
    default: []
    description: >
        Control the IP allocation for the ControlVirtualIP port. E.g.
        [{'ip_address':'1.2.3.4'}]
    type: json
  InternalApiVirtualFixedIPs:
    default: []
    description: >
        Control the IP allocation for the InternalApiVirtualInterface port. E.g.
        [{'ip_address':'1.2.3.4'}]
    type: json
  NeutronControlPlaneID:
    default: 'ctlplane'
    type: string
    description: Neutron ID or name for ctlplane network.
  NeutronPublicInterface:
    default: nic1
    description: What interface to bridge onto br-ex for network nodes.
    type: string
  PublicVirtualFixedIPs:
    default: []
    description: >
        Control the IP allocation for the PublicVirtualInterface port. E.g.
        [{'ip_address':'1.2.3.4'}]
    type: json
  RabbitCookieSalt:
    type: string
    default: unset
    description: Salt for the rabbit cookie; change this to force the randomly generated rabbit cookie to change.
  StorageVirtualFixedIPs:
    default: []
    description: >
        Control the IP allocation for the StorageVirtualInterface port. E.g.
        [{'ip_address':'1.2.3.4'}]
    type: json
  StorageMgmtVirtualFixedIPs:
    default: []
    description: >
        Control the IP allocation for the StorageMgmtVirtualInterface port. E.g.
        [{'ip_address':'1.2.3.4'}]
    type: json
  RedisVirtualFixedIPs:
    default: []
    description: >
        Control the IP allocation for the virtual IP used by Redis. E.g.
        [{'ip_address':'1.2.3.4'}]
    type: json
  CloudDomain:
    default: 'localdomain'
    type: string
    description: >
      The DNS domain used for the hosts. This should match the dhcp_domain
      configured in the Undercloud neutron. Defaults to localdomain.
  ServerMetadata:
    default: {}
    description: >
      Extra properties or metadata passed to Nova for the created nodes in
      the overcloud. It's accessible via the Nova metadata API.
    type: json

# Compute-specific params
# FIXME(shardy) handle these deprecated names as they don't match compute.yaml
  HypervisorNeutronPhysicalBridge:
    default: 'br-ex'
    description: >
      An OVS bridge to create on each hypervisor. This defaults to br-ex, the
      same as on the control plane nodes, as we have a uniform configuration of
      the openvswitch agent. Typically this should not need to be changed.
    type: string
  HypervisorNeutronPublicInterface:
    default: nic1
    description: What interface to add to the HypervisorNeutronPhysicalBridge.
    type: string

  NodeCreateBatchSize:
    default: 30
    description: Maximum batch size for creating nodes
    type: number

  # Jinja loop for Role in roles_data.yaml
{% for role in roles %}
  # Parameters generated for {{role.name}} Role
  {{role.name}}Services:
    description: A list of service resources (configured in the Heat
                 resource_registry) which represent nested stacks
                 for each service that should get installed on the {{role.name}} role.
    type: comma_delimited_list

  {{role.name}}Count:
    description: Number of {{role.name}} nodes to deploy
    type: number
    default: {{role.CountDefault|default(0)}}

  {{role.name}}HostnameFormat:
    type: string
    description: >
      Format for {{role.name}} node hostnames
      Note %index% is translated into the index of the node, e.g. 0/1/2 etc.,
      and %stackname% is replaced with the stack name, e.g. overcloud
  {% if role.HostnameFormatDefault %}
    default: "{{role.HostnameFormatDefault}}"
  {% else %}
    default: "%stackname%-{{role.name.lower()}}-%index%"
  {% endif %}

  {{role.name}}RemovalPolicies:
    default: []
    type: json
    description: >
      List of resources to be removed from {{role.name}} ResourceGroup when
      doing an update which requires removal of specific resources.
      Example format ComputeRemovalPolicies: [{'resource_list': ['0']}]

{% if role.name != 'Compute' %}
  {{role.name}}SchedulerHints:
{% else %}
  NovaComputeSchedulerHints:
{% endif %}
    type: json
    description: Optional scheduler hints to pass to nova
    default: {}

  {{role.name}}Parameters:
    type: json
    description: Optional role-specific parameters to be provided to the service
    default: {}
{% endfor %}

  # Identifiers to trigger tasks on nodes
  UpdateIdentifier:
    default: ''
    type: string
    description: >
      Setting this to a previously unused value during stack-update will
      trigger a package update on all nodes
  DeployIdentifier:
    default: ''
    type: string
    description: >
      Setting this to a unique value will re-run any deployment tasks which
      perform configuration on a Heat stack-update.
  AddVipsToEtcHosts:
    default: True
    type: boolean
    description: >
      Set to true to append per-network VIPs to /etc/hosts on each node.

conditions:
  add_vips_to_etc_hosts: {equals : [{get_param: AddVipsToEtcHosts}, True]}

resources:

  VipHosts:
    type: OS::Heat::Value
    properties:
      type: string
      value:
        list_join:
        - "\n"
        - - str_replace:
              template: IP  HOST
              params:
                IP: {get_attr: [VipMap, net_ip_map, external]}
                HOST: {get_param: CloudName}
          - str_replace:
              template: IP  HOST
              params:
                IP: {get_attr: [VipMap, net_ip_map, ctlplane]}
                HOST: {get_param: CloudNameCtlplane}
          - str_replace:
              template: IP  HOST
              params:
                IP: {get_attr: [VipMap, net_ip_map, internal_api]}
                HOST: {get_param: CloudNameInternal}
          - str_replace:
              template: IP  HOST
              params:
                IP: {get_attr: [VipMap, net_ip_map, storage]}
                HOST: {get_param: CloudNameStorage}
          - str_replace:
              template: IP  HOST
              params:
                IP: {get_attr: [VipMap, net_ip_map, storage_mgmt]}
                HOST: {get_param: CloudNameStorageManagement}

  HeatAuthEncryptionKey:
    type: OS::Heat::RandomString

  PcsdPassword:
    type: OS::Heat::RandomString
    properties:
      length: 16

  HorizonSecret:
    type: OS::Heat::RandomString
    properties:
      length: 10

  ServiceNetMap:
    type: OS::TripleO::ServiceNetMap

  EndpointMap:
    type: OS::TripleO::EndpointMap
    properties:
      CloudEndpoints:
        external: {get_param: CloudName}
        internal_api: {get_param: CloudNameInternal}
        storage: {get_param: CloudNameStorage}
        storage_mgmt: {get_param: CloudNameStorageManagement}
        ctlplane: {get_param: CloudNameCtlplane}
      NetIpMap: {get_attr: [VipMap, net_ip_map]}
      ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}

  EndpointMapData:
    type: OS::Heat::Value
    properties:
      type: json
      value: {get_attr: [EndpointMap, endpoint_map]}

  SshKnownHostsConfig:
    type: OS::TripleO::Ssh::KnownHostsConfig
    properties:
      known_hosts:
        list_join:
          - ''
          {% for role in roles %}
          - {get_attr: [{{role.name}}, known_hosts_entry]}
          {% endfor %}

  # Jinja loop for Role in roles_data.yaml
{% for role in roles %}
  # Resources generated for {{role.name}} Role
  {{role.name}}ServiceChain:
    type: OS::TripleO::Services
    properties:
      Services:
        get_param: {{role.name}}Services
      ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
      EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
      DefaultPasswords: {get_attr: [DefaultPasswords, passwords]}
      RoleName: {{role.name}}
      RoleParameters: {get_param: {{role.name}}Parameters}

  # Lookup of role_data via heat outputs is slow, so workaround this by caching
  # the value in an OS::Heat::Value resource
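  # Later references in this template read the cached value instead, e.g.
  # {get_attr: [{{role.name}}ServiceChainRoleData, value, config_settings]}.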
  {{role.name}}ServiceChainRoleData:
    type: OS::Heat::Value
    properties:
      type: json
      value: {get_attr: [{{role.name}}ServiceChain, role_data]}

  # Filter any null/None service_names which may be present due to mapping
  # of services to OS::Heat::None
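  # Illustrative example (hypothetical data): a list such as
  # [null, 'nova-compute', null] is filtered down to ['nova-compute'].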
  {{role.name}}ServiceNames:
    type: OS::Heat::Value
    depends_on: {{role.name}}ServiceChain
    properties:
      type: comma_delimited_list
      value:
        yaql:
          expression: coalesce($.data, []).where($ != null)
          data: {get_attr: [{{role.name}}ServiceChainRoleData, value, service_names]}

  {{role.name}}HostsDeployment:
    type: OS::Heat::StructuredDeployments
    properties:
      name: {{role.name}}HostsDeployment
      config: {get_attr: [hostsConfig, config_id]}
      servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}

  {{role.name}}SshKnownHostsDeployment:
    type: OS::Heat::StructuredDeployments
    properties:
      name: {{role.name}}SshKnownHostsDeployment
      config: {get_resource: SshKnownHostsConfig}
      servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}

  {{role.name}}AllNodesDeployment:
    type: OS::Heat::StructuredDeployments
    depends_on:
{% for role_inner in roles %}
      - {{role_inner.name}}HostsDeployment
{% endfor %}
    properties:
      name: {{role.name}}AllNodesDeployment
      config: {get_attr: [allNodesConfig, config_id]}
      servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
      input_values:
        # Note we have to use yaql to look up the first hostname/ip in the
        # list because heat path based attributes operate on the attribute
        # inside the ResourceGroup, not the exposed list; see the discussion in
        # https://bugs.launchpad.net/heat/+bug/1640488
        # The coalesce is needed because $.data is None during heat validation
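        # Illustrative example (hypothetical data): a hostname list of
        # ['overcloud-controller-0', 'overcloud-controller-1'] yields
        # 'overcloud-controller-0', while an empty list yields null.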
        bootstrap_nodeid:
          yaql:
            expression: coalesce($.data, []).first(null)
            data: {get_attr: [{{role.name}}, hostname]}
        bootstrap_nodeid_ip:
          yaql:
            expression: coalesce($.data, []).first(null)
            data: {get_attr: [{{role.name}}, ip_address]}

  {{role.name}}AllNodesValidationDeployment:
    type: OS::Heat::StructuredDeployments
    depends_on: {{role.name}}AllNodesDeployment
    properties:
      name: {{role.name}}AllNodesValidationDeployment
      config: {get_resource: AllNodesValidationConfig}
      servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}

  {{role.name}}IpListMap:
    type: OS::TripleO::Network::Ports::NetIpListMap
    properties:
      ControlPlaneIpList: {get_attr: [{{role.name}}, ip_address]}
      ExternalIpList: {get_attr: [{{role.name}}, external_ip_address]}
      InternalApiIpList: {get_attr: [{{role.name}}, internal_api_ip_address]}
      StorageIpList: {get_attr: [{{role.name}}, storage_ip_address]}
      StorageMgmtIpList: {get_attr: [{{role.name}}, storage_mgmt_ip_address]}
      TenantIpList: {get_attr: [{{role.name}}, tenant_ip_address]}
      ManagementIpList: {get_attr: [{{role.name}}, management_ip_address]}
      EnabledServices: {get_attr: [{{role.name}}ServiceNames, value]}
      ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
      ServiceHostnameList: {get_attr: [{{role.name}}, hostname]}
      NetworkHostnameMap: {get_attr: [{{role.name}}NetworkHostnameMap, value]}

  {{role.name}}NetworkHostnameMap:
    type: OS::Heat::Value
    properties:
      type: json
      value:
        # Note (shardy) this somewhat complex yaql may be replaced
        # with a map_deep_merge function in ocata.  It merges the
        # list of maps, but appends to colliding lists so we can
        # create a map of lists for all nodes for each network
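        # Illustrative example (hypothetical data): merging
        #   [{'internal_api': 'overcloud-controller-0.internalapi'},
        #    {'internal_api': 'overcloud-controller-1.internalapi'}]
        # yields
        #   {'internal_api': ['overcloud-controller-0.internalapi',
        #                     'overcloud-controller-1.internalapi']}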
        yaql:
          expression: dict($.data.where($ != null).flatten().selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
          data:
            - {get_attr: [{{role.name}}, hostname_map]}

  {{role.name}}:
    type: OS::Heat::ResourceGroup
    depends_on: Networks
    update_policy:
      batch_create:
        max_batch_size: {get_param: NodeCreateBatchSize}
    properties:
      count: {get_param: {{role.name}}Count}
      removal_policies: {get_param: {{role.name}}RemovalPolicies}
      resource_def:
        type: OS::TripleO::{{role.name}}
        properties:
          CloudDomain: {get_param: CloudDomain}
          ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
          EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
          Hostname:
            str_replace:
              template: {get_param: {{role.name}}HostnameFormat}
              params:
                '%stackname%': {get_param: 'OS::stack_name'}
          NodeIndex: '%index%'
  {% if role.name != 'Compute' %}
          {{role.name}}SchedulerHints: {get_param: {{role.name}}SchedulerHints}
  {% else %}
          NovaComputeSchedulerHints: {get_param: NovaComputeSchedulerHints}
  {% endif %}
          ServiceConfigSettings:
            map_merge:
              -  get_attr: [{{role.name}}ServiceChainRoleData, value, config_settings]
          {% for r in roles %}
              - get_attr: [{{r.name}}ServiceChain, role_data, global_config_settings]
          {% endfor %}
              # This next step combines two yaql passes:
              # - The inner one does a deep merge on the service_config_settings for all roles
              # - The outer one filters the map based on the services enabled for the role
              #   then merges the result into one map.
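              # Illustrative example (hypothetical data): if the inner merge
              # produced {'nova-api': {'foo': 1}, 'glance-api': {'bar': 2}} and
              # this role only enables 'nova-api', the outer pass returns
              # {'foo': 1}.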
              - yaql:
                  expression: let(root => $) -> $.data.map.items().where($[0] in coalesce($root.data.services, [])).select($[1]).reduce($1.mergeWith($2), {})
                  data:
                    map:
                      yaql:
                        expression: $.data.where($ != null).reduce($1.mergeWith($2), {})
                        data:
                        {% for r in roles %}
                          - get_attr: [{{r.name}}ServiceChain, role_data, service_config_settings]
                        {% endfor %}
                    services: {get_attr: [{{role.name}}ServiceNames, value]}
          ServiceNames: {get_attr: [{{role.name}}ServiceNames, value]}
          MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChainRoleData, value, monitoring_subscriptions]}
          ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChainRoleData, value, service_metadata_settings]}
{% endfor %}

  hostsConfig:
    type: OS::TripleO::Hosts::SoftwareConfig
    properties:
      hosts:
        list_join:
        - "\n"
        - - if:
            - add_vips_to_etc_hosts
            - {get_attr: [VipHosts, value]}
            - ''
        -
{% for role in roles %}
          - list_join:
            - ""
            - {get_attr: [{{role.name}}, hosts_entry]}
{% endfor %}

  allNodesConfig:
    type: OS::TripleO::AllNodes::SoftwareConfig
    properties:
      cloud_name_external: {get_param: CloudName}
      cloud_name_internal_api: {get_param: CloudNameInternal}
      cloud_name_storage: {get_param: CloudNameStorage}
      cloud_name_storage_mgmt: {get_param: CloudNameStorageManagement}
      cloud_name_ctlplane: {get_param: CloudNameCtlplane}
      enabled_services:
        list_join:
          - ','
{% for role in roles %}
          - {get_attr: [{{role.name}}ServiceNames, value]}
{% endfor %}
      logging_groups:
        yaql:
          expression: >
            $.data.groups.flatten()
          data:
            groups:
{% for role in roles %}
              - {get_attr: [{{role.name}}ServiceChainRoleData, value, logging_groups]}
{% endfor %}
      logging_sources:
        yaql:
          expression: >
            $.data.sources.flatten()
          data:
            sources:
{% for role in roles %}
              - {get_attr: [{{role.name}}ServiceChainRoleData, value, logging_sources]}
{% endfor %}
      controller_ips: {get_attr: [{{primary_role_name}}, ip_address]}
      controller_names: {get_attr: [{{primary_role_name}}, hostname]}
      service_ips:
        # Note (shardy) this somewhat complex yaql may be replaced
        # with a map_deep_merge function in ocata.  It merges the
        # list of maps, but appends to colliding lists when a service
        # is deployed on more than one role
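        # Illustrative example (hypothetical data):
        #   [{'redis_node_ips': ['192.0.2.10']}, {'redis_node_ips': ['192.0.2.11']}]
        # merges into {'redis_node_ips': ['192.0.2.10', '192.0.2.11']}.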
        yaql:
          expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
          data:
            l:
{% for role in roles %}
              - {get_attr: [{{role.name}}IpListMap, service_ips]}
{% endfor %}
      service_node_names:
        yaql:
          expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
          data:
            l:
{% for role in roles %}
              - {get_attr: [{{role.name}}IpListMap, service_hostnames]}
{% endfor %}
      short_service_node_names:
        yaql:
          expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
          data:
            l:
{% for role in roles %}
              - {get_attr: [{{role.name}}IpListMap, short_service_hostnames]}
{% endfor %}
      short_service_bootstrap_node:
        yaql:
          expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten().first()]))
          data:
            l:
{% for role in roles %}
              - {get_attr: [{{role.name}}IpListMap, short_service_bootstrap_hostnames]}
{% endfor %}
      # FIXME(shardy): These require further work to move into service_ips
      memcache_node_ips: {get_attr: [{{primary_role_name}}IpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MemcachedNetwork]}]}
      NetVipMap: {get_attr: [VipMap, net_ip_map]}
      RedisVirtualIP: {get_attr: [RedisVirtualIP, ip_address]}
      ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
      DeployIdentifier: {get_param: DeployIdentifier}
      UpdateIdentifier: {get_param: UpdateIdentifier}

  MysqlRootPassword:
    type: OS::Heat::RandomString
    properties:
      length: 10

  RabbitCookie:
    type: OS::Heat::RandomString
    properties:
      length: 20
      salt: {get_param: RabbitCookieSalt}

  DefaultPasswords:
    type: OS::TripleO::DefaultPasswords
    properties:
      DefaultMysqlRootPassword: {get_attr: [MysqlRootPassword, value]}
      DefaultRabbitCookie: {get_attr: [RabbitCookie, value]}
      DefaultHeatAuthEncryptionKey: {get_attr: [HeatAuthEncryptionKey, value]}
      DefaultPcsdPassword: {get_attr: [PcsdPassword, value]}
      DefaultHorizonSecret: {get_attr: [HorizonSecret, value]}

  # creates the network architecture
  Networks:
    type: OS::TripleO::Network

  ControlVirtualIP:
    type: OS::TripleO::Network::Ports::ControlPlaneVipPort
    depends_on: Networks
    properties:
      name: control_virtual_ip
      network: {get_param: NeutronControlPlaneID}
      fixed_ips: {get_param: ControlFixedIPs}
      replacement_policy: AUTO

  RedisVirtualIP:
    depends_on: Networks
    type: OS::TripleO::Network::Ports::RedisVipPort
    properties:
      ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
      ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
      PortName: redis_virtual_ip
      NetworkName: {get_attr: [ServiceNetMap, service_net_map, RedisNetwork]}
      ServiceName: redis
      FixedIPs: {get_param: RedisVirtualFixedIPs}

  # The public VIP is on the External net; it falls back to the ctlplane network
  PublicVirtualIP:
    depends_on: Networks
    type: OS::TripleO::Network::Ports::ExternalVipPort
    properties:
      ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
      ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
      PortName: public_virtual_ip
      FixedIPs: {get_param: PublicVirtualFixedIPs}

  InternalApiVirtualIP:
    depends_on: Networks
    type: OS::TripleO::Network::Ports::InternalApiVipPort
    properties:
      ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
      PortName: internal_api_virtual_ip
      FixedIPs: {get_param: InternalApiVirtualFixedIPs}

  StorageVirtualIP:
    depends_on: Networks
    type: OS::TripleO::Network::Ports::StorageVipPort
    properties:
      ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
      PortName: storage_virtual_ip
      FixedIPs: {get_param: StorageVirtualFixedIPs}

  StorageMgmtVirtualIP:
    depends_on: Networks
    type: OS::TripleO::Network::Ports::StorageMgmtVipPort
    properties:
      ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
      PortName: storage_management_virtual_ip
      FixedIPs: {get_param: StorageMgmtVirtualFixedIPs}

  VipMap:
    type: OS::TripleO::Network::Ports::NetVipMap
    properties:
      ControlPlaneIp: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
      ExternalIp: {get_attr: [PublicVirtualIP, ip_address]}
      ExternalIpUri: {get_attr: [PublicVirtualIP, ip_address_uri]}
      InternalApiIp: {get_attr: [InternalApiVirtualIP, ip_address]}
      InternalApiIpUri: {get_attr: [InternalApiVirtualIP, ip_address_uri]}
      StorageIp: {get_attr: [StorageVirtualIP, ip_address]}
      StorageIpUri: {get_attr: [StorageVirtualIP, ip_address_uri]}
      StorageMgmtIp: {get_attr: [StorageMgmtVirtualIP, ip_address]}
      StorageMgmtIpUri: {get_attr: [StorageMgmtVirtualIP, ip_address_uri]}
      # No tenant or management VIP required

  # All Nodes Validations
  AllNodesValidationConfig:
    type: OS::TripleO::AllNodes::Validation
    properties:
      PingTestIps:
        list_join:
        - ' '
        - - yaql:
              expression: coalesce($.data, []).first(null)
              data: {get_attr: [{{primary_role_name}}, external_ip_address]}
          - yaql:
              expression: coalesce($.data, []).first(null)
              data: {get_attr: [{{primary_role_name}}, internal_api_ip_address]}
          - yaql:
              expression: coalesce($.data, []).first(null)
              data: {get_attr: [{{primary_role_name}}, storage_ip_address]}
          - yaql:
              expression: coalesce($.data, []).first(null)
              data: {get_attr: [{{primary_role_name}}, storage_mgmt_ip_address]}
          - yaql:
              expression: coalesce($.data, []).first(null)
              data: {get_attr: [{{primary_role_name}}, tenant_ip_address]}
          - yaql:
              expression: coalesce($.data, []).first(null)
              data: {get_attr: [{{primary_role_name}}, management_ip_address]}

  UpdateWorkflow:
    type: OS::TripleO::Tasks::UpdateWorkflow
    depends_on:
{% for role in roles %}
      - {{role.name}}AllNodesDeployment
{% endfor %}
    properties:
      servers:
{% for role in roles %}
        {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
{% endfor %}
      input_values:
        deploy_identifier: {get_param: DeployIdentifier}
        update_identifier: {get_param: UpdateIdentifier}

  # Optional ExtraConfig for all nodes - all roles are passed in here, but
  # the nested template may configure each role differently (or not at all)
  AllNodesExtraConfig:
    type: OS::TripleO::AllNodesExtraConfig
    depends_on:
      - UpdateWorkflow
{% for role in roles %}
      - {{role.name}}AllNodesValidationDeployment
{% endfor %}
    properties:
      servers:
{% for role in roles %}
        {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
{% endfor %}

  # Post deployment steps for all roles
  AllNodesDeploySteps:
    type: OS::TripleO::PostDeploySteps
    depends_on:
      - AllNodesExtraConfig
{% for role in roles %}
      - {{role.name}}AllNodesDeployment
{% endfor %}
    properties:
      servers:
{% for role in roles %}
        {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
{% endfor %}
      EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
      role_data:
{% for role in roles %}
        {{role.name}}: {get_attr: [{{role.name}}ServiceChainRoleData, value]}
{% endfor %}

outputs:
  ManagedEndpoints:
    description: Asserts that the keystone endpoints have been provisioned.
    value: true
  KeystoneURL:
    description: URL for the Overcloud Keystone service
    value: {get_attr: [EndpointMapData, value, KeystonePublic, uri]}
  KeystoneAdminVip:
    description: Keystone Admin VIP endpoint
    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]}
  EndpointMap:
    description: |
      Mapping of the resources with the needed info for their endpoints.
      This includes the protocol used, the IP, the port, and a full
      representation of the URI.
    value: {get_attr: [EndpointMapData, value]}
  HostsEntry:
    description: |
      The content that should be appended to your /etc/hosts if you want to get
      hostname-based access to the deployed nodes (useful for testing without
      setting up a DNS).
    value:
      list_join:
      - "\n"
      - - {get_attr: [hostsConfig, hosts_entries]}
      - - {get_attr: [VipHosts, value]}
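  # One way to retrieve this output after deployment (assuming the stack is
  # named 'overcloud'; the exact client invocation may vary by version):
  #   openstack stack output show overcloud HostsEntry -c output_value -f value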
  EnabledServices:
    description: The services enabled on each role
    value:
{% for role in roles %}
      {{role.name}}: {get_attr: [{{role.name}}ServiceNames, value]}
{% endfor %}
  RoleData:
    description: The configuration data associated with each role
    value:
{% for role in roles %}
      {{role.name}}: {get_attr: [{{role.name}}ServiceChainRoleData, value]}
{% endfor %}
  RoleNetIpMap:
    description: Mapping of each network to a list of IPs for each role
    value:
{% for role in roles %}
      {{role.name}}: {get_attr: [{{role.name}}IpListMap, net_ip_map]}
{% endfor %}
  RoleNetHostnameMap:
    description: Mapping of each network to a list of hostnames for each role
    value:
{% for role in roles %}
      {{role.name}}: {get_attr: [{{role.name}}NetworkHostnameMap, value]}
{% endfor %}