aboutsummaryrefslogtreecommitdiffstats
path: root/framework/src/onos/core/store/dist/src/main/java/org
diff options
context:
space:
mode:
Diffstat (limited to 'framework/src/onos/core/store/dist/src/main/java/org')
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/app/GossipApplicationStore.java429
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/app/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cfg/GossipComponentConfigStore.java120
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cfg/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinition.java58
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinitionManager.java179
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinitionStore.java63
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterManagementMessageSubjects.java26
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterMembershipEvent.java41
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterMembershipEventType.java24
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterNodesDelegate.java54
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/DistributedClusterStore.java280
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/NodeInfo.java118
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/PhiAccrualFailureDetector.java119
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/messaging/impl/ClusterCommunicationManager.java261
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/messaging/impl/IOLoopMessagingManager.java40
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/messaging/impl/NettyMessagingManager.java72
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/messaging/impl/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/config/impl/DistributedNetworkConfigStore.java289
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/config/impl/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/AsyncCachingConsistentMap.java71
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/CommitResponse.java61
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/ConsistentMapBackedJavaMap.java145
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/CopycatCommunicationProtocol.java134
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Database.java106
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseConfig.java157
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseDefinition.java108
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseDefinitionStore.java74
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseManager.java455
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabasePartitioner.java45
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseProxy.java224
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseSerializer.java103
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseState.java114
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAsyncAtomicCounter.java84
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAsyncConsistentMap.java465
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAtomicCounter.java82
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAtomicCounterBuilder.java77
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAtomicValue.java138
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAtomicValueBuilder.java71
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultConsistentMap.java204
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultConsistentMapBuilder.java141
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDatabase.java243
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDatabaseState.java368
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDistributedQueue.java129
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDistributedQueueBuilder.java81
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDistributedSet.java234
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDistributedSetBuilder.java93
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultTransaction.java70
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultTransactionContext.java116
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultTransactionContextBuilder.java50
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultTransactionalMap.java204
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DistributedLeadershipManager.java605
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Match.java129
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/MeteringAgent.java134
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/PartitionedDatabase.java386
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Partitioner.java33
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Result.java121
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/SimpleKeyHashPartitioner.java38
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/SimpleTableHashPartitioner.java39
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/StateMachineUpdate.java91
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/TransactionManager.java126
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/UpdateResult.java85
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/package-info.java21
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/AppIdEvent.java34
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/AppIdStoreDelegate.java24
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/ConsistentApplicationIdStore.java154
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/ConsistentIdBlockStore.java64
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/LogicalClockManager.java51
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceAntiEntropyAdvertisement.java72
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceAntiEntropyRequest.java61
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceClockManager.java82
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceDescriptions.java134
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceFragmentId.java69
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceInjectedEvent.java49
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceKey.java70
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/ECDeviceStore.java784
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/GossipDeviceStore.java1670
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/GossipDeviceStoreMessageSubjects.java41
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceEvent.java71
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceEventSerializer.java60
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceOfflineEvent.java64
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceOfflineEventSerializer.java53
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceRemovedEvent.java64
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalPortEvent.java73
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalPortEventSerializer.java62
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalPortStatusEvent.java71
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalPortStatusEventSerializer.java58
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/PortFragmentId.java76
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/PortInjectedEvent.java50
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/PortKey.java79
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/AntiEntropyAdvertisement.java71
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/EventuallyConsistentMapBuilderImpl.java161
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/EventuallyConsistentMapImpl.java678
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/MapDbPersistentStore.java103
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/MapValue.java158
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/PersistentStore.java47
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/UpdateEntry.java80
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/package-info.java21
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/ReplicaInfo.java85
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/ReplicaInfoEvent.java64
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/ReplicaInfoEventListener.java26
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/ReplicaInfoService.java48
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/FlowStoreMessageSubjects.java43
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/NewDistributedFlowRuleStore.java789
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/ReplicaInfoManager.java123
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/package-info.java21
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flowobjective/impl/DistributedFlowObjectiveStore.java102
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flowobjective/impl/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/group/impl/DistributedGroupStore.java1304
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/group/impl/GroupStoreMessage.java184
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/group/impl/GroupStoreMessageSubjects.java28
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/group/impl/package-info.java19
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/host/impl/ECHostStore.java267
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/host/impl/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/impl/LogicalTimestamp.java68
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/impl/MastershipBasedTimestamp.java117
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/impl/Timestamped.java119
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/impl/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/intent/impl/GossipIntentStore.java334
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/intent/impl/PartitionId.java68
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/intent/impl/PartitionManager.java243
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/intent/impl/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/ECLinkStore.java390
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/GossipLinkStore.java903
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/GossipLinkStoreMessageSubjects.java35
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/InternalLinkEvent.java61
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/InternalLinkRemovedEvent.java64
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/LinkAntiEntropyAdvertisement.java63
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/LinkFragmentId.java77
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/LinkInjectedEvent.java38
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/Provided.java68
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/mastership/impl/ConsistentDeviceMastershipStore.java419
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/mastership/impl/RoleValue.java179
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/mastership/impl/RoleValueSerializer.java67
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/mastership/impl/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/newresource/impl/ConsistentResourceStore.java349
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/newresource/impl/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/packet/impl/DistributedPacketStore.java207
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/packet/impl/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/proxyarp/impl/DistributedProxyArpStore.java174
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/resource/impl/ConsistentDeviceResourceStore.java225
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/resource/impl/ConsistentLinkResourceStore.java503
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/resource/impl/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/ClusterMessageSerializer.java53
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/DistributedStoreSerializers.java42
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/MastershipBasedTimestampSerializer.java51
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/MessageSubjectSerializer.java46
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/package-info.java22
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/statistic/impl/DistributedStatisticStore.java317
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/statistic/impl/StatisticStoreMessageSubjects.java30
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/statistic/impl/package-info.java20
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/topology/impl/DistributedTopologyStore.java254
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/topology/impl/PathKey.java55
-rw-r--r--framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/topology/impl/package-info.java20
159 files changed, 23211 insertions, 0 deletions
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/app/GossipApplicationStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/app/GossipApplicationStore.java
new file mode 100644
index 00000000..6764c222
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/app/GossipApplicationStore.java
@@ -0,0 +1,429 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.app;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.ImmutableSet;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.KryoNamespace;
+import org.onosproject.app.ApplicationDescription;
+import org.onosproject.app.ApplicationEvent;
+import org.onosproject.app.ApplicationException;
+import org.onosproject.app.ApplicationState;
+import org.onosproject.app.ApplicationStore;
+import org.onosproject.app.ApplicationStoreDelegate;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.ControllerNode;
+import org.onosproject.common.app.ApplicationArchive;
+import org.onosproject.core.Application;
+import org.onosproject.core.ApplicationId;
+import org.onosproject.core.ApplicationIdStore;
+import org.onosproject.core.DefaultApplication;
+import org.onosproject.security.Permission;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.cluster.messaging.MessageSubject;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.service.EventuallyConsistentMap;
+import org.onosproject.store.service.EventuallyConsistentMapEvent;
+import org.onosproject.store.service.EventuallyConsistentMapListener;
+import org.onosproject.store.service.LogicalClockService;
+import org.onosproject.store.service.MultiValuedTimestamp;
+import org.onosproject.store.service.StorageException;
+import org.onosproject.store.service.StorageService;
+import org.slf4j.Logger;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.function.Function;
+
+import static com.google.common.io.ByteStreams.toByteArray;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.onlab.util.Tools.groupedThreads;
+import static org.onlab.util.Tools.randomDelay;
+import static org.onosproject.app.ApplicationEvent.Type.*;
+import static org.onosproject.store.app.GossipApplicationStore.InternalState.*;
+import static org.onosproject.store.service.EventuallyConsistentMapEvent.Type.PUT;
+import static org.onosproject.store.service.EventuallyConsistentMapEvent.Type.REMOVE;
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * Manages inventory of applications in a distributed data store that uses
+ * optimistic replication and gossip based anti-entropy techniques.
+ */
+@Component(immediate = true)
+@Service
+public class GossipApplicationStore extends ApplicationArchive
+ implements ApplicationStore {
+
+ private final Logger log = getLogger(getClass());
+
+ private static final MessageSubject APP_BITS_REQUEST = new MessageSubject("app-bits-request");
+
+ private static final int MAX_LOAD_RETRIES = 5;
+ private static final int RETRY_DELAY_MS = 2_000;
+
+ private static final int FETCH_TIMEOUT_MS = 10_000;
+
+ public enum InternalState {
+ INSTALLED, ACTIVATED, DEACTIVATED
+ }
+
+ private ScheduledExecutorService executor;
+ private ExecutorService messageHandlingExecutor;
+
+ private EventuallyConsistentMap<ApplicationId, Application> apps;
+ private EventuallyConsistentMap<Application, InternalState> states;
+ private EventuallyConsistentMap<Application, Set<Permission>> permissions;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterCommunicationService clusterCommunicator;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterService clusterService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected StorageService storageService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected LogicalClockService clockService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ApplicationIdStore idStore;
+
+ @Activate
+ public void activate() {
+ KryoNamespace.Builder serializer = KryoNamespace.newBuilder()
+ .register(KryoNamespaces.API)
+ .register(MultiValuedTimestamp.class)
+ .register(InternalState.class);
+
+ executor = Executors.newSingleThreadScheduledExecutor(groupedThreads("onos/app", "store"));
+
+ messageHandlingExecutor = Executors.newSingleThreadExecutor(
+ groupedThreads("onos/store/app", "message-handler"));
+
+ clusterCommunicator.<String, byte[]>addSubscriber(APP_BITS_REQUEST,
+ bytes -> new String(bytes, Charsets.UTF_8),
+ name -> {
+ try {
+ return toByteArray(getApplicationInputStream(name));
+ } catch (IOException e) {
+ throw new StorageException(e);
+ }
+ },
+ Function.identity(),
+ messageHandlingExecutor);
+
+ // FIXME: Consider consolidating into a single map.
+
+ apps = storageService.<ApplicationId, Application>eventuallyConsistentMapBuilder()
+ .withName("apps")
+ .withSerializer(serializer)
+ .withTimestampProvider((k, v) -> clockService.getTimestamp())
+ .build();
+
+ states = storageService.<Application, InternalState>eventuallyConsistentMapBuilder()
+ .withName("app-states")
+ .withSerializer(serializer)
+ .withTimestampProvider((k, v) -> clockService.getTimestamp())
+ .build();
+
+ states.addListener(new InternalAppStatesListener());
+
+ permissions = storageService.<Application, Set<Permission>>eventuallyConsistentMapBuilder()
+ .withName("app-permissions")
+ .withSerializer(serializer)
+ .withTimestampProvider((k, v) -> clockService.getTimestamp())
+ .build();
+
+ log.info("Started");
+ }
+
+ /**
+ * Loads the application inventory from the disk and activates apps if
+ * they are marked to be active.
+ */
+ private void loadFromDisk() {
+ for (String name : getApplicationNames()) {
+ for (int i = 0; i < MAX_LOAD_RETRIES; i++) {
+ try {
+ Application app = create(getApplicationDescription(name), false);
+ if (app != null && isActive(app.id().name())) {
+ activate(app.id(), false);
+ // load app permissions
+ }
+ } catch (Exception e) {
+ log.warn("Unable to load application {} from disk; retrying", name);
+ randomDelay(RETRY_DELAY_MS); // FIXME: This is a deliberate hack; fix in Drake
+ }
+ }
+ }
+ }
+
+ @Deactivate
+ public void deactivate() {
+ clusterCommunicator.removeSubscriber(APP_BITS_REQUEST);
+ messageHandlingExecutor.shutdown();
+ executor.shutdown();
+ apps.destroy();
+ states.destroy();
+ permissions.destroy();
+ log.info("Stopped");
+ }
+
+ @Override
+ public void setDelegate(ApplicationStoreDelegate delegate) {
+ super.setDelegate(delegate);
+ loadFromDisk();
+// executor.schedule(this::pruneUninstalledApps, LOAD_TIMEOUT_MS, MILLISECONDS);
+ }
+
+ @Override
+ public Set<Application> getApplications() {
+ return ImmutableSet.copyOf(apps.values());
+ }
+
+ @Override
+ public ApplicationId getId(String name) {
+ return idStore.getAppId(name);
+ }
+
+ @Override
+ public Application getApplication(ApplicationId appId) {
+ return apps.get(appId);
+ }
+
+ @Override
+ public ApplicationState getState(ApplicationId appId) {
+ Application app = apps.get(appId);
+ InternalState s = app == null ? null : states.get(app);
+ return s == null ? null : s == ACTIVATED ?
+ ApplicationState.ACTIVE : ApplicationState.INSTALLED;
+ }
+
+ @Override
+ public Application create(InputStream appDescStream) {
+ ApplicationDescription appDesc = saveApplication(appDescStream);
+ return create(appDesc, true);
+ }
+
+ private Application create(ApplicationDescription appDesc, boolean updateTime) {
+ Application app = registerApp(appDesc);
+ if (updateTime) {
+ updateTime(app.id().name());
+ }
+ apps.put(app.id(), app);
+ states.put(app, INSTALLED);
+ return app;
+ }
+
+ @Override
+ public void remove(ApplicationId appId) {
+ Application app = apps.get(appId);
+ if (app != null) {
+ apps.remove(appId);
+ states.remove(app);
+ permissions.remove(app);
+ }
+ }
+
+ @Override
+ public void activate(ApplicationId appId) {
+ activate(appId, true);
+ }
+
+ private void activate(ApplicationId appId, boolean updateTime) {
+ Application app = apps.get(appId);
+ if (app != null) {
+ if (updateTime) {
+ updateTime(appId.name());
+ }
+ states.put(app, ACTIVATED);
+ }
+ }
+
+ @Override
+ public void deactivate(ApplicationId appId) {
+ Application app = apps.get(appId);
+ if (app != null) {
+ updateTime(appId.name());
+ states.put(app, DEACTIVATED);
+ }
+ }
+
+ @Override
+ public Set<Permission> getPermissions(ApplicationId appId) {
+ Application app = apps.get(appId);
+ return app != null ? permissions.get(app) : null;
+ }
+
+ @Override
+ public void setPermissions(ApplicationId appId, Set<Permission> permissions) {
+ Application app = getApplication(appId);
+ if (app != null) {
+ this.permissions.put(app, permissions);
+ delegate.notify(new ApplicationEvent(APP_PERMISSIONS_CHANGED, app));
+ }
+ }
+
+ /**
+ * Listener to application state distributed map changes.
+ */
+ private final class InternalAppStatesListener
+ implements EventuallyConsistentMapListener<Application, InternalState> {
+ @Override
+ public void event(EventuallyConsistentMapEvent<Application, InternalState> event) {
+ // If we do not have a delegate, refuse to process any events entirely.
+ // This is to allow the anti-entropy to kick in and process the events
+ // perhaps a bit later, but with opportunity to notify delegate.
+ if (delegate == null) {
+ return;
+ }
+
+ Application app = event.key();
+ InternalState state = event.value();
+
+ if (event.type() == PUT) {
+ if (state == INSTALLED) {
+ fetchBitsIfNeeded(app);
+ delegate.notify(new ApplicationEvent(APP_INSTALLED, app));
+
+ } else if (state == ACTIVATED) {
+ installAppIfNeeded(app);
+ setActive(app.id().name());
+ delegate.notify(new ApplicationEvent(APP_ACTIVATED, app));
+
+ } else if (state == DEACTIVATED) {
+ clearActive(app.id().name());
+ delegate.notify(new ApplicationEvent(APP_DEACTIVATED, app));
+ }
+ } else if (event.type() == REMOVE) {
+ delegate.notify(new ApplicationEvent(APP_UNINSTALLED, app));
+ purgeApplication(app.id().name());
+ }
+ }
+ }
+
+ /**
+ * Determines if the application bits are available locally.
+ */
+ private boolean appBitsAvailable(Application app) {
+ try {
+ ApplicationDescription appDesc = getApplicationDescription(app.id().name());
+ return appDesc.version().equals(app.version());
+ } catch (ApplicationException e) {
+ return false;
+ }
+ }
+
+ /**
+ * Fetches the bits from the cluster peers if necessary.
+ */
+ private void fetchBitsIfNeeded(Application app) {
+ if (!appBitsAvailable(app)) {
+ fetchBits(app);
+ }
+ }
+
+ /**
+ * Installs the application if necessary from the application peers.
+ */
+ private void installAppIfNeeded(Application app) {
+ if (!appBitsAvailable(app)) {
+ fetchBits(app);
+ delegate.notify(new ApplicationEvent(APP_INSTALLED, app));
+ }
+ }
+
+ /**
+ * Fetches the bits from the cluster peers.
+ */
+ private void fetchBits(Application app) {
+ ControllerNode localNode = clusterService.getLocalNode();
+ CountDownLatch latch = new CountDownLatch(1);
+
+ // FIXME: send message with name & version to make sure we don't get served old bits
+
+ log.info("Downloading bits for application {}", app.id().name());
+ for (ControllerNode node : clusterService.getNodes()) {
+ if (latch.getCount() == 0) {
+ break;
+ }
+ if (node.equals(localNode)) {
+ continue;
+ }
+ clusterCommunicator.sendAndReceive(app.id().name(),
+ APP_BITS_REQUEST,
+ s -> s.getBytes(Charsets.UTF_8),
+ Function.identity(),
+ node.id())
+ .whenCompleteAsync((bits, error) -> {
+ if (error == null && latch.getCount() > 0) {
+ saveApplication(new ByteArrayInputStream(bits));
+ log.info("Downloaded bits for application {} from node {}",
+ app.id().name(), node.id());
+ latch.countDown();
+ } else if (error != null) {
+ log.warn("Unable to fetch bits for application {} from node {}",
+ app.id().name(), node.id());
+ }
+ }, executor);
+ }
+
+ try {
+ if (!latch.await(FETCH_TIMEOUT_MS, MILLISECONDS)) {
+ log.warn("Unable to fetch bits for application {}", app.id().name());
+ }
+ } catch (InterruptedException e) {
+ log.warn("Interrupted while fetching bits for application {}", app.id().name());
+ }
+ }
+
+ /**
+ * Prunes applications which are not in the map, but are on disk.
+ */
+ private void pruneUninstalledApps() {
+ for (String name : getApplicationNames()) {
+ if (getApplication(getId(name)) == null) {
+ Application app = registerApp(getApplicationDescription(name));
+ delegate.notify(new ApplicationEvent(APP_UNINSTALLED, app));
+ purgeApplication(app.id().name());
+ }
+ }
+ }
+
+ /**
+ * Produces a registered application from the supplied description.
+ */
+ private Application registerApp(ApplicationDescription appDesc) {
+ ApplicationId appId = idStore.registerApplication(appDesc.name());
+ return new DefaultApplication(appId, appDesc.version(), appDesc.description(),
+ appDesc.origin(), appDesc.role(), appDesc.permissions(),
+ appDesc.featuresRepo(), appDesc.features());
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/app/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/app/package-info.java
new file mode 100644
index 00000000..b2a909ee
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/app/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of distributed applications store.
+ */
+package org.onosproject.store.app;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cfg/GossipComponentConfigStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cfg/GossipComponentConfigStore.java
new file mode 100644
index 00000000..bf992643
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cfg/GossipComponentConfigStore.java
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.cfg;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.KryoNamespace;
+import org.onosproject.cfg.ComponentConfigEvent;
+import org.onosproject.cfg.ComponentConfigStore;
+import org.onosproject.cfg.ComponentConfigStoreDelegate;
+import org.onosproject.store.AbstractStore;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.service.EventuallyConsistentMap;
+import org.onosproject.store.service.EventuallyConsistentMapEvent;
+import org.onosproject.store.service.EventuallyConsistentMapListener;
+import org.onosproject.store.service.LogicalClockService;
+import org.onosproject.store.service.StorageService;
+import org.slf4j.Logger;
+
+import static org.onosproject.cfg.ComponentConfigEvent.Type.PROPERTY_SET;
+import static org.onosproject.cfg.ComponentConfigEvent.Type.PROPERTY_UNSET;
+import static org.onosproject.store.service.EventuallyConsistentMapEvent.Type.PUT;
+import static org.onosproject.store.service.EventuallyConsistentMapEvent.Type.REMOVE;
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * Manages inventory of component configurations in a distributed data store
+ * that uses optimistic replication and gossip-based anti-entropy techniques.
+ */
+@Component(immediate = true)
+@Service
+public class GossipComponentConfigStore
+ extends AbstractStore<ComponentConfigEvent, ComponentConfigStoreDelegate>
+ implements ComponentConfigStore {
+
+ private static final String SEP = "#";
+
+ private final Logger log = getLogger(getClass());
+
+ private EventuallyConsistentMap<String, String> properties;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected StorageService storageService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected LogicalClockService clockService;
+
+ @Activate
+ public void activate() {
+ KryoNamespace.Builder serializer = KryoNamespace.newBuilder()
+ .register(KryoNamespaces.API);
+
+ properties = storageService.<String, String>eventuallyConsistentMapBuilder()
+ .withName("cfg")
+ .withSerializer(serializer)
+ .withTimestampProvider((k, v) -> clockService.getTimestamp())
+ .build();
+
+ properties.addListener(new InternalPropertiesListener());
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ properties.destroy();
+ log.info("Stopped");
+ }
+
+ @Override
+ public void setProperty(String componentName, String name, String value) {
+ properties.put(key(componentName, name), value);
+
+ }
+
+ @Override
+ public void unsetProperty(String componentName, String name) {
+ properties.remove(key(componentName, name));
+ }
+
+ /**
+ * Listener to component configuration properties distributed map changes.
+ */
+ private final class InternalPropertiesListener
+ implements EventuallyConsistentMapListener<String, String> {
+
+ @Override
+ public void event(EventuallyConsistentMapEvent<String, String> event) {
+ String[] keys = event.key().split(SEP);
+ String value = event.value();
+ if (event.type() == PUT) {
+ delegate.notify(new ComponentConfigEvent(PROPERTY_SET, keys[0], keys[1], value));
+ } else if (event.type() == REMOVE) {
+ delegate.notify(new ComponentConfigEvent(PROPERTY_UNSET, keys[0], keys[1], null));
+ }
+ }
+ }
+
+ // Generates a key from component name and property name.
+ private String key(String componentName, String name) {
+ return componentName + SEP + name;
+ }
+
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cfg/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cfg/package-info.java
new file mode 100644
index 00000000..f8f8509a
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cfg/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of distributed component configuration store.
+ */
+package org.onosproject.store.cfg;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinition.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinition.java
new file mode 100644
index 00000000..75f05a31
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinition.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.cluster.impl;
+
+import java.util.Set;
+
+import com.google.common.collect.ImmutableSet;
+
+/**
+ * Cluster definition.
+ */
+public class ClusterDefinition {
+
+ private Set<NodeInfo> nodes;
+ private String ipPrefix;
+
+ /**
+ * Creates a new cluster definition.
+ * @param nodes cluster nodes information
+ * @param ipPrefix ip prefix common to all cluster nodes
+ * @return cluster definition
+ */
+ public static ClusterDefinition from(Set<NodeInfo> nodes, String ipPrefix) {
+ ClusterDefinition definition = new ClusterDefinition();
+ definition.ipPrefix = ipPrefix;
+ definition.nodes = ImmutableSet.copyOf(nodes);
+ return definition;
+ }
+
+ /**
+ * Returns set of cluster nodes info.
+ * @return cluster nodes info
+ */
+ public Set<NodeInfo> getNodes() {
+ return ImmutableSet.copyOf(nodes);
+ }
+
+ /**
+ * Returns ipPrefix in dotted decimal notation.
+ * @return ip prefix
+ */
+ public String getIpPrefix() {
+ return ipPrefix;
+ }
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinitionManager.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinitionManager.java
new file mode 100644
index 00000000..4e28e3c2
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinitionManager.java
@@ -0,0 +1,179 @@
+package org.onosproject.store.cluster.impl;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.packet.IpAddress;
+import org.onosproject.cluster.ClusterDefinitionService;
+import org.onosproject.cluster.ControllerNode;
+import org.onosproject.cluster.DefaultControllerNode;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.store.consistent.impl.DatabaseDefinition;
+import org.onosproject.store.consistent.impl.DatabaseDefinitionStore;
+import org.slf4j.Logger;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.net.SocketException;
+import java.util.Enumeration;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static java.net.NetworkInterface.getNetworkInterfaces;
+import static java.util.Collections.list;
+import static org.onosproject.cluster.DefaultControllerNode.DEFAULT_PORT;
+import static org.onosproject.store.consistent.impl.DatabaseManager.PARTITION_DEFINITION_FILE;
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * Implementation of ClusterDefinitionService.
+ */
+@Component(immediate = true)
+@Service
+public class ClusterDefinitionManager implements ClusterDefinitionService {
+
+ public static final String CLUSTER_DEFINITION_FILE = "../config/cluster.json";
+ private static final String ONOS_NIC = "ONOS_NIC";
+ private static final Logger log = getLogger(ClusterDefinitionManager.class);
+ private ControllerNode localNode;
+ private Set<ControllerNode> seedNodes;
+
+ @Activate
+ public void activate() {
+ File clusterDefinitionFile = new File(CLUSTER_DEFINITION_FILE);
+ ClusterDefinitionStore clusterDefinitionStore =
+ new ClusterDefinitionStore(clusterDefinitionFile.getPath());
+
+ if (!clusterDefinitionFile.exists()) {
+ createDefaultClusterDefinition(clusterDefinitionStore);
+ }
+
+ try {
+ ClusterDefinition clusterDefinition = clusterDefinitionStore.read();
+ establishSelfIdentity(clusterDefinition);
+ seedNodes = ImmutableSet
+ .copyOf(clusterDefinition.getNodes())
+ .stream()
+ .filter(n -> !localNode.id().equals(new NodeId(n.getId())))
+ .map(n -> new DefaultControllerNode(new NodeId(n.getId()),
+ IpAddress.valueOf(n.getIp()),
+ n.getTcpPort()))
+ .collect(Collectors.toSet());
+ } catch (IOException e) {
+ throw new IllegalStateException("Failed to read cluster definition.", e);
+ }
+
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ log.info("Stopped");
+ }
+
+ @Override
+ public ControllerNode localNode() {
+ return localNode;
+ }
+
+ @Override
+ public Set<ControllerNode> seedNodes() {
+ return seedNodes;
+ }
+
+ @Override
+ public void formCluster(Set<ControllerNode> nodes, String ipPrefix) {
+ try {
+ Set<NodeInfo> infos = Sets.newHashSet();
+ nodes.forEach(n -> infos.add(NodeInfo.from(n.id().toString(),
+ n.ip().toString(),
+ n.tcpPort())));
+
+ ClusterDefinition cdef = ClusterDefinition.from(infos, ipPrefix);
+ new ClusterDefinitionStore(CLUSTER_DEFINITION_FILE).write(cdef);
+
+ DatabaseDefinition ddef = DatabaseDefinition.from(infos);
+ new DatabaseDefinitionStore(PARTITION_DEFINITION_FILE).write(ddef);
+ } catch (IOException e) {
+ log.error("Unable to form cluster", e);
+ }
+ }
+
+ private IpAddress findLocalIp(ClusterDefinition clusterDefinition) throws SocketException {
+ Enumeration<NetworkInterface> interfaces =
+ NetworkInterface.getNetworkInterfaces();
+ while (interfaces.hasMoreElements()) {
+ NetworkInterface iface = interfaces.nextElement();
+ Enumeration<InetAddress> inetAddresses = iface.getInetAddresses();
+ while (inetAddresses.hasMoreElements()) {
+ IpAddress ip = IpAddress.valueOf(inetAddresses.nextElement());
+ if (clusterDefinition.getNodes().stream()
+ .map(NodeInfo::getIp)
+ .map(IpAddress::valueOf)
+ .anyMatch(nodeIp -> ip.equals(nodeIp))) {
+ return ip;
+ }
+ }
+ }
+ throw new IllegalStateException("Unable to determine local ip");
+ }
+
+ private void establishSelfIdentity(ClusterDefinition clusterDefinition) {
+ try {
+ IpAddress ip = findLocalIp(clusterDefinition);
+ localNode = new DefaultControllerNode(new NodeId(ip.toString()), ip);
+ } catch (SocketException e) {
+ throw new IllegalStateException("Cannot determine local IP", e);
+ }
+ }
+
+ private void createDefaultClusterDefinition(ClusterDefinitionStore store) {
+ // Assumes IPv4 is returned.
+ String ip = getSiteLocalAddress();
+ String ipPrefix = ip.replaceFirst("\\.[0-9]*$", ".*");
+ NodeInfo node = NodeInfo.from(ip, ip, DEFAULT_PORT);
+ try {
+ store.write(ClusterDefinition.from(ImmutableSet.of(node), ipPrefix));
+ } catch (IOException e) {
+ log.warn("Unable to write default cluster definition", e);
+ }
+ }
+
+ /**
+ * Returns the address that matches the IP prefix given in ONOS_NIC
+ * environment variable if one was specified, or the first site local
+ * address if one can be found or the loopback address otherwise.
+ *
+ * @return site-local address in string form
+ */
+ public static String getSiteLocalAddress() {
+ try {
+ String ipPrefix = System.getenv(ONOS_NIC);
+ for (NetworkInterface nif : list(getNetworkInterfaces())) {
+ for (InetAddress address : list(nif.getInetAddresses())) {
+ IpAddress ip = IpAddress.valueOf(address);
+ if (ipPrefix == null && address.isSiteLocalAddress() ||
+ ipPrefix != null && matchInterface(ip.toString(), ipPrefix)) {
+ return ip.toString();
+ }
+ }
+ }
+ } catch (SocketException e) {
+ log.error("Unable to get network interfaces", e);
+ }
+
+ return IpAddress.valueOf(InetAddress.getLoopbackAddress()).toString();
+ }
+
+ // Indicates whether the specified interface address matches the given prefix.
+ // FIXME: Add a facility to IpPrefix to make this more robust
+ private static boolean matchInterface(String ip, String ipPrefix) {
+ String s = ipPrefix.replaceAll("\\.\\*", "");
+ return ip.startsWith(s);
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinitionStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinitionStore.java
new file mode 100644
index 00000000..2a2f4dc4
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinitionStore.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.cluster.impl;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.io.Files;
+
+import java.io.File;
+import java.io.IOException;
+
+/**
+ * Allows for reading and writing cluster definition as a JSON file.
+ */
+public class ClusterDefinitionStore {
+
+ private final File file;
+
+ /**
+ * Creates a reader/writer of the cluster definition file.
+ * @param filePath location of the definition file
+ */
+ public ClusterDefinitionStore(String filePath) {
+ file = new File(filePath);
+ }
+
+ /**
+ * Returns the cluster definition.
+ * @return cluster definition
+ * @throws IOException when I/O exception of some sort has occurred
+ */
+ public ClusterDefinition read() throws IOException {
+ ObjectMapper mapper = new ObjectMapper();
+ return mapper.readValue(file, ClusterDefinition.class);
+ }
+
+ /**
+ * Writes the specified cluster definition to file.
+ * @param definition cluster definition
+ * @throws IOException when I/O exception of some sort has occurred
+ */
+ public void write(ClusterDefinition definition) throws IOException {
+ checkNotNull(definition);
+ // write back to file
+ Files.createParentDirs(file);
+ ObjectMapper mapper = new ObjectMapper();
+ mapper.writeValue(file, definition);
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterManagementMessageSubjects.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterManagementMessageSubjects.java
new file mode 100644
index 00000000..918f7921
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterManagementMessageSubjects.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.cluster.impl;
+
+import org.onosproject.store.cluster.messaging.MessageSubject;
+
+//Not used right now
+public final class ClusterManagementMessageSubjects {
+ // avoid instantiation
+ private ClusterManagementMessageSubjects() {}
+
+ public static final MessageSubject CLUSTER_MEMBERSHIP_EVENT = new MessageSubject("CLUSTER_MEMBERSHIP_EVENT");
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterMembershipEvent.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterMembershipEvent.java
new file mode 100644
index 00000000..c6428739
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterMembershipEvent.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.cluster.impl;
+
+import org.onosproject.cluster.ControllerNode;
+
+//Not used right now
+/**
+ * Contains information that will be published when a cluster membership event occurs.
+ */
+public class ClusterMembershipEvent {
+
+ private final ClusterMembershipEventType type;
+ private final ControllerNode node;
+
+ public ClusterMembershipEvent(ClusterMembershipEventType type, ControllerNode node) {
+ this.type = type;
+ this.node = node;
+ }
+
+ public ClusterMembershipEventType type() {
+ return type;
+ }
+
+ public ControllerNode node() {
+ return node;
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterMembershipEventType.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterMembershipEventType.java
new file mode 100644
index 00000000..a7f09c71
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterMembershipEventType.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.cluster.impl;
+
+//Not used right now
+public enum ClusterMembershipEventType {
+ NEW_MEMBER,
+ LEAVING_MEMBER,
+ UNREACHABLE_MEMBER,
+ HEART_BEAT,
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterNodesDelegate.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterNodesDelegate.java
new file mode 100644
index 00000000..7aeca72f
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterNodesDelegate.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.cluster.impl;
+
+import org.onosproject.cluster.DefaultControllerNode;
+import org.onosproject.cluster.NodeId;
+import org.onlab.packet.IpAddress;
+
+// Not used right now
+/**
+ * Simple back interface through which connection manager can interact with
+ * the cluster store.
+ */
+public interface ClusterNodesDelegate {
+
+ /**
+ * Notifies about cluster node coming online.
+ *
+ * @param nodeId newly detected cluster node id
+ * @param ip node IP listen address
+ * @param tcpPort node TCP listen port
+ * @return the controller node
+ */
+ DefaultControllerNode nodeDetected(NodeId nodeId, IpAddress ip,
+ int tcpPort);
+
+ /**
+ * Notifies about cluster node going offline.
+ *
+ * @param nodeId identifier of the cluster node that vanished
+ */
+ void nodeVanished(NodeId nodeId);
+
+ /**
+ * Notifies about remote request to remove node from cluster.
+ *
+ * @param nodeId identifier of the cluster node that was removed
+ */
+ void nodeRemoved(NodeId nodeId);
+
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/DistributedClusterStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/DistributedClusterStore.java
new file mode 100644
index 00000000..859efebf
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/DistributedClusterStore.java
@@ -0,0 +1,280 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.cluster.impl;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.joda.time.DateTime;
+import org.onlab.packet.IpAddress;
+import org.onlab.util.KryoNamespace;
+import org.onosproject.cluster.ClusterDefinitionService;
+import org.onosproject.cluster.ClusterEvent;
+import org.onosproject.cluster.ClusterStore;
+import org.onosproject.cluster.ClusterStoreDelegate;
+import org.onosproject.cluster.ControllerNode;
+import org.onosproject.cluster.ControllerNode.State;
+import org.onosproject.cluster.DefaultControllerNode;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.store.AbstractStore;
+import org.onosproject.store.cluster.messaging.Endpoint;
+import org.onosproject.store.cluster.messaging.MessagingService;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.serializers.KryoSerializer;
+import org.slf4j.Logger;
+
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.onlab.util.Tools.groupedThreads;
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * Distributed cluster nodes store that employs an accrual failure
+ * detector to identify cluster member up/down status.
+ */
+@Component(immediate = true)
+@Service
+public class DistributedClusterStore
+ extends AbstractStore<ClusterEvent, ClusterStoreDelegate>
+ implements ClusterStore {
+
+ private static final Logger log = getLogger(DistributedClusterStore.class);
+
+ public static final String HEARTBEAT_MESSAGE = "onos-cluster-heartbeat";
+
+ // TODO: make these configurable.
+ private static final int HEARTBEAT_INTERVAL_MS = 100;
+ private static final int PHI_FAILURE_THRESHOLD = 10;
+
+ private static final KryoSerializer SERIALIZER = new KryoSerializer() {
+ @Override
+ protected void setupKryoPool() {
+ serializerPool = KryoNamespace.newBuilder()
+ .register(KryoNamespaces.API)
+ .register(HeartbeatMessage.class)
+ .build()
+ .populate(1);
+ }
+ };
+
+ private static final String INSTANCE_ID_NULL = "Instance ID cannot be null";
+
+ private final Map<NodeId, ControllerNode> allNodes = Maps.newConcurrentMap();
+ private final Map<NodeId, State> nodeStates = Maps.newConcurrentMap();
+ private final Map<NodeId, DateTime> nodeStateLastUpdatedTimes = Maps.newConcurrentMap();
+ private ScheduledExecutorService heartBeatSender = Executors.newSingleThreadScheduledExecutor(
+ groupedThreads("onos/cluster/membership", "heartbeat-sender"));
+ private ExecutorService heartBeatMessageHandler = Executors.newSingleThreadExecutor(
+ groupedThreads("onos/cluster/membership", "heartbeat-receiver"));
+
+ private PhiAccrualFailureDetector failureDetector;
+
+ private ControllerNode localNode;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterDefinitionService clusterDefinitionService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected MessagingService messagingService;
+
+ @Activate
+ public void activate() {
+ localNode = clusterDefinitionService.localNode();
+
+ messagingService.registerHandler(HEARTBEAT_MESSAGE,
+ new HeartbeatMessageHandler(), heartBeatMessageHandler);
+
+ failureDetector = new PhiAccrualFailureDetector();
+
+ heartBeatSender.scheduleWithFixedDelay(this::heartbeat, 0,
+ HEARTBEAT_INTERVAL_MS, TimeUnit.MILLISECONDS);
+
+ addNode(localNode);
+ updateState(localNode.id(), State.ACTIVE);
+
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ messagingService.unregisterHandler(HEARTBEAT_MESSAGE);
+ heartBeatSender.shutdownNow();
+ heartBeatMessageHandler.shutdownNow();
+
+ log.info("Stopped");
+ }
+
+ @Override
+ public void setDelegate(ClusterStoreDelegate delegate) {
+ checkNotNull(delegate, "Delegate cannot be null");
+ this.delegate = delegate;
+ }
+
+ @Override
+ public void unsetDelegate(ClusterStoreDelegate delegate) {
+ this.delegate = null;
+ }
+
+ @Override
+ public boolean hasDelegate() {
+ return this.delegate != null;
+ }
+
+ @Override
+ public ControllerNode getLocalNode() {
+ return localNode;
+ }
+
+ @Override
+ public Set<ControllerNode> getNodes() {
+ return ImmutableSet.copyOf(allNodes.values());
+ }
+
+ @Override
+ public ControllerNode getNode(NodeId nodeId) {
+ checkNotNull(nodeId, INSTANCE_ID_NULL);
+ return allNodes.get(nodeId);
+ }
+
+ @Override
+ public State getState(NodeId nodeId) {
+ checkNotNull(nodeId, INSTANCE_ID_NULL);
+ return nodeStates.get(nodeId);
+ }
+
+ @Override
+ public ControllerNode addNode(NodeId nodeId, IpAddress ip, int tcpPort) {
+ ControllerNode node = new DefaultControllerNode(nodeId, ip, tcpPort);
+ addNode(node);
+ return node;
+ }
+
+ @Override
+ public void removeNode(NodeId nodeId) {
+ checkNotNull(nodeId, INSTANCE_ID_NULL);
+ ControllerNode node = allNodes.remove(nodeId);
+ if (node != null) {
+ nodeStates.remove(nodeId);
+ notifyDelegate(new ClusterEvent(ClusterEvent.Type.INSTANCE_REMOVED, node));
+ }
+ }
+
+ private void addNode(ControllerNode node) {
+ allNodes.put(node.id(), node);
+ updateState(node.id(), State.INACTIVE);
+ notifyDelegate(new ClusterEvent(ClusterEvent.Type.INSTANCE_ADDED, node));
+ }
+
+ private void updateState(NodeId nodeId, State newState) {
+ nodeStates.put(nodeId, newState);
+ nodeStateLastUpdatedTimes.put(nodeId, DateTime.now());
+ }
+
+ private void heartbeat() {
+ try {
+ Set<ControllerNode> peers = allNodes.values()
+ .stream()
+ .filter(node -> !(node.id().equals(localNode.id())))
+ .collect(Collectors.toSet());
+ byte[] hbMessagePayload = SERIALIZER.encode(new HeartbeatMessage(localNode, peers));
+ peers.forEach((node) -> {
+ heartbeatToPeer(hbMessagePayload, node);
+ State currentState = nodeStates.get(node.id());
+ double phi = failureDetector.phi(node.id());
+ if (phi >= PHI_FAILURE_THRESHOLD) {
+ if (currentState == State.ACTIVE) {
+ updateState(node.id(), State.INACTIVE);
+ notifyStateChange(node.id(), State.ACTIVE, State.INACTIVE);
+ }
+ } else {
+ if (currentState == State.INACTIVE) {
+ updateState(node.id(), State.ACTIVE);
+ notifyStateChange(node.id(), State.INACTIVE, State.ACTIVE);
+ }
+ }
+ });
+ } catch (Exception e) {
+ log.debug("Failed to send heartbeat", e);
+ }
+ }
+
+ private void notifyStateChange(NodeId nodeId, State oldState, State newState) {
+ ControllerNode node = allNodes.get(nodeId);
+ if (newState == State.ACTIVE) {
+ notifyDelegate(new ClusterEvent(ClusterEvent.Type.INSTANCE_ACTIVATED, node));
+ } else {
+ notifyDelegate(new ClusterEvent(ClusterEvent.Type.INSTANCE_DEACTIVATED, node));
+ }
+ }
+
+ private void heartbeatToPeer(byte[] messagePayload, ControllerNode peer) {
+ Endpoint remoteEp = new Endpoint(peer.ip(), peer.tcpPort());
+ messagingService.sendAsync(remoteEp, HEARTBEAT_MESSAGE, messagePayload).whenComplete((result, error) -> {
+ if (error != null) {
+ log.trace("Sending heartbeat to {} failed", remoteEp, error);
+ }
+ });
+ }
+
+ private class HeartbeatMessageHandler implements Consumer<byte[]> {
+ @Override
+ public void accept(byte[] message) {
+ HeartbeatMessage hb = SERIALIZER.decode(message);
+ failureDetector.report(hb.source().id());
+ hb.knownPeers().forEach(node -> {
+ allNodes.put(node.id(), node);
+ });
+ }
+ }
+
+ private static class HeartbeatMessage {
+ private ControllerNode source;
+ private Set<ControllerNode> knownPeers;
+
+ public HeartbeatMessage(ControllerNode source, Set<ControllerNode> members) {
+ this.source = source;
+ this.knownPeers = ImmutableSet.copyOf(members);
+ }
+
+ public ControllerNode source() {
+ return source;
+ }
+
+ public Set<ControllerNode> knownPeers() {
+ return knownPeers;
+ }
+ }
+
+ @Override
+ public DateTime getLastUpdated(NodeId nodeId) {
+ return nodeStateLastUpdatedTimes.get(nodeId);
+ }
+
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/NodeInfo.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/NodeInfo.java
new file mode 100644
index 00000000..d436ca76
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/NodeInfo.java
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.cluster.impl;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+import java.util.Objects;
+
+import org.onosproject.cluster.ControllerNode;
+
+/**
+ * Node info read from configuration files during bootstrap.
+ * Immutable value object identified by (id, ip, tcpPort).
+ */
+public final class NodeInfo {
+    private final String id;
+    private final String ip;
+    private final int tcpPort;
+
+    private NodeInfo(String id, String ip, int port) {
+        this.id = id;
+        this.ip = ip;
+        this.tcpPort = port;
+    }
+
+    /*
+     * Needed for serialization.
+     */
+    private NodeInfo() {
+        id = null;
+        ip = null;
+        tcpPort = 0;
+    }
+
+    /**
+     * Creates a new instance.
+     * @param id node id
+     * @param ip node ip address
+     * @param port tcp port
+     * @return NodeInfo
+     */
+    public static NodeInfo from(String id, String ip, int port) {
+        return new NodeInfo(id, ip, port);
+    }
+
+    /**
+     * Returns the NodeInfo for a controller node.
+     * @param node controller node
+     * @return NodeInfo
+     */
+    public static NodeInfo of(ControllerNode node) {
+        return NodeInfo.from(node.id().toString(), node.ip().toString(), node.tcpPort());
+    }
+
+    /**
+     * Returns node id.
+     * @return node id
+     */
+    public String getId() {
+        return id;
+    }
+
+    /**
+     * Returns node ip.
+     * @return node ip
+     */
+    public String getIp() {
+        return ip;
+    }
+
+    /**
+     * Returns node port.
+     * @return port
+     */
+    public int getTcpPort() {
+        return tcpPort;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(id, ip, tcpPort);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o instanceof NodeInfo) {
+            NodeInfo that = (NodeInfo) o;
+            // tcpPort is a primitive int; compare directly rather than boxing
+            // two Integers through Objects.equals on every call.
+            return Objects.equals(this.id, that.id) &&
+                    Objects.equals(this.ip, that.ip) &&
+                    this.tcpPort == that.tcpPort;
+        }
+        return false;
+    }
+
+    @Override
+    public String toString() {
+        return toStringHelper(this)
+                .add("id", id)
+                .add("ip", ip)
+                .add("tcpPort", tcpPort).toString();
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/PhiAccrualFailureDetector.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/PhiAccrualFailureDetector.java
new file mode 100644
index 00000000..cdb138b4
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/PhiAccrualFailureDetector.java
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.cluster.impl;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.util.Map;
+
+import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
+import org.onosproject.cluster.NodeId;
+
+import com.google.common.collect.Maps;
+
+/**
+ * Phi Accrual failure detector.
+ * <p>
+ * Based on a paper titled: "The φ Accrual Failure Detector" by Hayashibara, et al.
+ * Tracks heartbeat inter-arrival times per node and converts the time elapsed
+ * since the last heartbeat into a suspicion level (phi).
+ */
+public class PhiAccrualFailureDetector {
+    private final Map<NodeId, History> states = Maps.newConcurrentMap();
+
+    // TODO: make these configurable.
+    private static final int WINDOW_SIZE = 250;
+    private static final int MIN_SAMPLES = 25;
+    private static final double PHI_FACTOR = 1.0 / Math.log(10.0);
+
+    // If a node does not have any heartbeats, this is the phi
+    // value to report. Indicates the node is inactive (from the
+    // detector's perspective).
+    private static final double BOOTSTRAP_PHI_VALUE = 100.0;
+
+    /**
+     * Report a new heart beat for the specified node id.
+     * @param nodeId node id
+     */
+    public void report(NodeId nodeId) {
+        report(nodeId, System.currentTimeMillis());
+    }
+
+    /**
+     * Report a new heart beat for the specified node id.
+     * @param nodeId node id
+     * @param arrivalTime arrival time
+     */
+    public void report(NodeId nodeId, long arrivalTime) {
+        checkNotNull(nodeId, "NodeId must not be null");
+        checkArgument(arrivalTime >= 0, "arrivalTime must not be negative");
+        History nodeState =
+                states.computeIfAbsent(nodeId, key -> new History());
+        // History instances are shared between reporter and reader threads;
+        // serialize per-node access.
+        synchronized (nodeState) {
+            long latestHeartbeat = nodeState.latestHeartbeatTime();
+            if (latestHeartbeat != -1) {
+                nodeState.samples().addValue(arrivalTime - latestHeartbeat);
+            }
+            nodeState.setLatestHeartbeatTime(arrivalTime);
+        }
+    }
+
+    /**
+     * Compute phi for the specified node id.
+     * @param nodeId node id
+     * @return phi value
+     */
+    public double phi(NodeId nodeId) {
+        checkNotNull(nodeId, "NodeId must not be null");
+        // Single get() instead of containsKey()+get(): avoids the redundant
+        // second hash lookup on the concurrent map.
+        History nodeState = states.get(nodeId);
+        if (nodeState == null) {
+            return BOOTSTRAP_PHI_VALUE;
+        }
+        synchronized (nodeState) {
+            long latestHeartbeat = nodeState.latestHeartbeatTime();
+            DescriptiveStatistics samples = nodeState.samples();
+            // Too few samples to judge yet; report "alive".
+            if (latestHeartbeat == -1 || samples.getN() < MIN_SAMPLES) {
+                return 0.0;
+            }
+            return computePhi(samples, latestHeartbeat, System.currentTimeMillis());
+        }
+    }
+
+    // phi = (time since last heartbeat) / (mean inter-arrival time),
+    // scaled by 1/ln(10).
+    private double computePhi(DescriptiveStatistics samples, long tLast, long tNow) {
+        long size = samples.getN();
+        long t = tNow - tLast;
+        return (size > 0)
+                ? PHI_FACTOR * t / samples.getMean()
+                : BOOTSTRAP_PHI_VALUE;
+    }
+
+    // Per-node heartbeat history: a sliding window of inter-arrival samples
+    // plus the last arrival timestamp (-1 until the first heartbeat).
+    private static class History {
+        DescriptiveStatistics samples =
+                new DescriptiveStatistics(WINDOW_SIZE);
+        long lastHeartbeatTime = -1;
+
+        public DescriptiveStatistics samples() {
+            return samples;
+        }
+
+        public long latestHeartbeatTime() {
+            return lastHeartbeatTime;
+        }
+
+        public void setLatestHeartbeatTime(long value) {
+            lastHeartbeatTime = value;
+        }
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/package-info.java
new file mode 100644
index 00000000..9e2db676
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of a distributed cluster node store.
+ */
+package org.onosproject.store.cluster.impl;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/messaging/impl/ClusterCommunicationManager.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/messaging/impl/ClusterCommunicationManager.java
new file mode 100644
index 00000000..8a237ef0
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/messaging/impl/ClusterCommunicationManager.java
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.cluster.messaging.impl;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.Tools;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.ControllerNode;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.cluster.messaging.ClusterMessage;
+import org.onosproject.store.cluster.messaging.ClusterMessageHandler;
+import org.onosproject.store.cluster.messaging.Endpoint;
+import org.onosproject.store.cluster.messaging.MessageSubject;
+import org.onosproject.store.cluster.messaging.MessagingService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Objects;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+/**
+ * Cluster-wide communication service built on top of the point-to-point
+ * {@link MessagingService}: wraps payloads in {@link ClusterMessage}
+ * envelopes and maps node ids to transport endpoints.
+ */
+@Component(immediate = true)
+@Service
+public class ClusterCommunicationManager
+        implements ClusterCommunicationService {
+
+    private final Logger log = LoggerFactory.getLogger(getClass());
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    private ClusterService clusterService;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected MessagingService messagingService;
+
+    // Cached at activation; the local node id does not change afterwards.
+    private NodeId localNodeId;
+
+    @Activate
+    public void activate() {
+        localNodeId = clusterService.getLocalNode().id();
+        log.info("Started");
+    }
+
+    @Deactivate
+    public void deactivate() {
+        log.info("Stopped");
+    }
+
+    @Override
+    public <M> void broadcast(M message,
+                              MessageSubject subject,
+                              Function<M, byte[]> encoder) {
+        // Broadcast excludes the local node.
+        multicast(message,
+                  subject,
+                  encoder,
+                  clusterService.getNodes()
+                      .stream()
+                      .filter(node -> !Objects.equal(node, clusterService.getLocalNode()))
+                      .map(ControllerNode::id)
+                      .collect(Collectors.toSet()));
+    }
+
+    @Override
+    public <M> void broadcastIncludeSelf(M message,
+                                         MessageSubject subject,
+                                         Function<M, byte[]> encoder) {
+        multicast(message,
+                  subject,
+                  encoder,
+                  clusterService.getNodes()
+                      .stream()
+                      .map(ControllerNode::id)
+                      .collect(Collectors.toSet()));
+    }
+
+    @Override
+    public <M> CompletableFuture<Void> unicast(M message,
+                                               MessageSubject subject,
+                                               Function<M, byte[]> encoder,
+                                               NodeId toNodeId) {
+        try {
+            byte[] payload = new ClusterMessage(
+                    localNodeId,
+                    subject,
+                    encoder.apply(message)).getBytes();
+            return doUnicast(subject, payload, toNodeId);
+        } catch (Exception e) {
+            // Encoding failures surface as a failed future rather than a throw.
+            return Tools.exceptionalFuture(e);
+        }
+    }
+
+    @Override
+    public <M> void multicast(M message,
+                              MessageSubject subject,
+                              Function<M, byte[]> encoder,
+                              Set<NodeId> nodes) {
+        // Encode once, send the same payload to every destination.
+        byte[] payload = new ClusterMessage(
+                localNodeId,
+                subject,
+                encoder.apply(message)).getBytes();
+        nodes.forEach(nodeId -> doUnicast(subject, payload, nodeId));
+    }
+
+    @Override
+    public <M, R> CompletableFuture<R> sendAndReceive(M message,
+                                                      MessageSubject subject,
+                                                      Function<M, byte[]> encoder,
+                                                      Function<byte[], R> decoder,
+                                                      NodeId toNodeId) {
+        try {
+            // Use the cached local node id, consistent with unicast/multicast.
+            ClusterMessage envelope = new ClusterMessage(
+                    localNodeId,
+                    subject,
+                    encoder.apply(message));
+            return sendAndReceive(subject, envelope.getBytes(), toNodeId).thenApply(decoder);
+        } catch (Exception e) {
+            return Tools.exceptionalFuture(e);
+        }
+    }
+
+    // Resolves the destination node to an endpoint and fires the payload.
+    private CompletableFuture<Void> doUnicast(MessageSubject subject, byte[] payload, NodeId toNodeId) {
+        ControllerNode node = clusterService.getNode(toNodeId);
+        checkArgument(node != null, "Unknown nodeId: %s", toNodeId);
+        Endpoint nodeEp = new Endpoint(node.ip(), node.tcpPort());
+        return messagingService.sendAsync(nodeEp, subject.value(), payload);
+    }
+
+    // Resolves the destination node and performs a request/response exchange.
+    private CompletableFuture<byte[]> sendAndReceive(MessageSubject subject, byte[] payload, NodeId toNodeId) {
+        ControllerNode node = clusterService.getNode(toNodeId);
+        checkArgument(node != null, "Unknown nodeId: %s", toNodeId);
+        Endpoint nodeEp = new Endpoint(node.ip(), node.tcpPort());
+        return messagingService.sendAndReceive(nodeEp, subject.value(), payload);
+    }
+
+    @Override
+    public void addSubscriber(MessageSubject subject,
+                              ClusterMessageHandler subscriber,
+                              ExecutorService executor) {
+        messagingService.registerHandler(subject.value(),
+                new InternalClusterMessageHandler(subscriber),
+                executor);
+    }
+
+    @Override
+    public void removeSubscriber(MessageSubject subject) {
+        messagingService.unregisterHandler(subject.value());
+    }
+
+    @Override
+    public <M, R> void addSubscriber(MessageSubject subject,
+                                     Function<byte[], M> decoder,
+                                     Function<M, R> handler,
+                                     Function<R, byte[]> encoder,
+                                     Executor executor) {
+        // Adapt the synchronous handler to the async responder by running it
+        // on the supplied executor and completing a future with the result.
+        messagingService.registerHandler(subject.value(),
+                new InternalMessageResponder<M, R>(decoder, encoder, m -> {
+                    CompletableFuture<R> responseFuture = new CompletableFuture<>();
+                    executor.execute(() -> {
+                        try {
+                            responseFuture.complete(handler.apply(m));
+                        } catch (Exception e) {
+                            responseFuture.completeExceptionally(e);
+                        }
+                    });
+                    return responseFuture;
+                }));
+    }
+
+    @Override
+    public <M, R> void addSubscriber(MessageSubject subject,
+                                     Function<byte[], M> decoder,
+                                     Function<M, CompletableFuture<R>> handler,
+                                     Function<R, byte[]> encoder) {
+        messagingService.registerHandler(subject.value(),
+                new InternalMessageResponder<>(decoder, encoder, handler));
+    }
+
+    @Override
+    public <M> void addSubscriber(MessageSubject subject,
+                                  Function<byte[], M> decoder,
+                                  Consumer<M> handler,
+                                  Executor executor) {
+        messagingService.registerHandler(subject.value(),
+                new InternalMessageConsumer<>(decoder, handler),
+                executor);
+    }
+
+    // Adapts a ClusterMessageHandler to the raw byte-level handler API.
+    // Static: holds no reference to the enclosing manager instance.
+    private static class InternalClusterMessageHandler implements Function<byte[], byte[]> {
+        private final ClusterMessageHandler handler;
+
+        public InternalClusterMessageHandler(ClusterMessageHandler handler) {
+            this.handler = handler;
+        }
+
+        @Override
+        public byte[] apply(byte[] bytes) {
+            ClusterMessage message = ClusterMessage.fromBytes(bytes);
+            handler.handle(message);
+            return message.response();
+        }
+    }
+
+    // Decodes a request, invokes the async handler, encodes the reply.
+    private static class InternalMessageResponder<M, R> implements Function<byte[], CompletableFuture<byte[]>> {
+        private final Function<byte[], M> decoder;
+        private final Function<R, byte[]> encoder;
+        private final Function<M, CompletableFuture<R>> handler;
+
+        public InternalMessageResponder(Function<byte[], M> decoder,
+                                        Function<R, byte[]> encoder,
+                                        Function<M, CompletableFuture<R>> handler) {
+            this.decoder = decoder;
+            this.encoder = encoder;
+            this.handler = handler;
+        }
+
+        @Override
+        public CompletableFuture<byte[]> apply(byte[] bytes) {
+            return handler.apply(decoder.apply(ClusterMessage.fromBytes(bytes).payload())).thenApply(encoder);
+        }
+    }
+
+    // Decodes a one-way message and hands it to the consumer.
+    private static class InternalMessageConsumer<M> implements Consumer<byte[]> {
+        private final Function<byte[], M> decoder;
+        private final Consumer<M> consumer;
+
+        public InternalMessageConsumer(Function<byte[], M> decoder, Consumer<M> consumer) {
+            this.decoder = decoder;
+            this.consumer = consumer;
+        }
+
+        @Override
+        public void accept(byte[] bytes) {
+            consumer.accept(decoder.apply(ClusterMessage.fromBytes(bytes).payload()));
+        }
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/messaging/impl/IOLoopMessagingManager.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/messaging/impl/IOLoopMessagingManager.java
new file mode 100644
index 00000000..9e52c3e3
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/messaging/impl/IOLoopMessagingManager.java
@@ -0,0 +1,40 @@
+package org.onosproject.store.cluster.messaging.impl;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.nio.service.IOLoopMessaging;
+import org.onosproject.cluster.ClusterDefinitionService;
+import org.onosproject.cluster.ControllerNode;
+import org.onosproject.store.cluster.messaging.Endpoint;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * IOLoop based MessagingService.
+ * <p>
+ * Disabled by default (enabled = false); an alternative transport to the
+ * Netty-based messaging manager.
+ */
+@Component(immediate = true, enabled = false)
+@Service
+public class IOLoopMessagingManager extends IOLoopMessaging {
+
+    private final Logger log = LoggerFactory.getLogger(getClass());
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected ClusterDefinitionService clusterDefinitionService;
+
+    // Binds the IO loop to this controller's advertised ip:port.
+    @Activate
+    public void activate() throws Exception {
+        ControllerNode localNode = clusterDefinitionService.localNode();
+        super.start(new Endpoint(localNode.ip(), localNode.tcpPort()));
+        log.info("Started");
+    }
+
+    @Deactivate
+    public void deactivate() throws Exception {
+        super.stop();
+        log.info("Stopped");
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/messaging/impl/NettyMessagingManager.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/messaging/impl/NettyMessagingManager.java
new file mode 100644
index 00000000..8b2cc8e2
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/messaging/impl/NettyMessagingManager.java
@@ -0,0 +1,72 @@
+package org.onosproject.store.cluster.messaging.impl;
+
+import com.google.common.base.Strings;
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.netty.NettyMessaging;
+import org.onosproject.cluster.ClusterDefinitionService;
+import org.onosproject.cluster.ControllerNode;
+import org.onosproject.store.cluster.messaging.Endpoint;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Netty based MessagingService.
+ * <p>
+ * Optionally enables TLS, configured entirely via system properties.
+ */
+@Component(immediate = true, enabled = true)
+@Service
+public class NettyMessagingManager extends NettyMessaging {
+
+    private final Logger log = LoggerFactory.getLogger(getClass());
+
+    // Minimum acceptable key/trust store password length.
+    private static final short MIN_KS_LENGTH = 6;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected ClusterDefinitionService clusterDefinitionService;
+
+    @Activate
+    public void activate() throws Exception {
+        ControllerNode localNode = clusterDefinitionService.localNode();
+        getTLSParameters();
+        super.start(new Endpoint(localNode.ip(), localNode.tcpPort()));
+        log.info("Started");
+    }
+
+    @Deactivate
+    public void deactivate() throws Exception {
+        super.stop();
+        log.info("Stopped");
+    }
+
+    /**
+     * Reads TLS settings from system properties. TLS stays disabled unless
+     * enableNettyTLS is true and the key store, trust store and both
+     * passwords are all present and long enough.
+     */
+    private void getTLSParameters() {
+        String tempString = System.getProperty("enableNettyTLS");
+        enableNettyTLS = Strings.isNullOrEmpty(tempString) ? TLS_DISABLED : Boolean.parseBoolean(tempString);
+        log.info("enableNettyTLS = {}", enableNettyTLS);
+        if (enableNettyTLS) {
+            ksLocation = System.getProperty("javax.net.ssl.keyStore");
+            if (Strings.isNullOrEmpty(ksLocation)) {
+                enableNettyTLS = TLS_DISABLED;
+                return;
+            }
+            tsLocation = System.getProperty("javax.net.ssl.trustStore");
+            if (Strings.isNullOrEmpty(tsLocation)) {
+                enableNettyTLS = TLS_DISABLED;
+                return;
+            }
+            // nullToEmpty guards against an NPE when the password property is
+            // unset; an empty password then fails the length check below and
+            // TLS is disabled, matching the treatment of the other properties.
+            ksPwd = Strings.nullToEmpty(System.getProperty("javax.net.ssl.keyStorePassword")).toCharArray();
+            if (MIN_KS_LENGTH > ksPwd.length) {
+                enableNettyTLS = TLS_DISABLED;
+                return;
+            }
+            tsPwd = Strings.nullToEmpty(System.getProperty("javax.net.ssl.trustStorePassword")).toCharArray();
+            if (MIN_KS_LENGTH > tsPwd.length) {
+                enableNettyTLS = TLS_DISABLED;
+                return;
+            }
+        }
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/messaging/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/messaging/impl/package-info.java
new file mode 100644
index 00000000..7157277e
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/cluster/messaging/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of the cluster messaging mechanism.
+ */
+package org.onosproject.store.cluster.messaging.impl;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/config/impl/DistributedNetworkConfigStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/config/impl/DistributedNetworkConfigStore.java
new file mode 100644
index 00000000..3e73d8f4
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/config/impl/DistributedNetworkConfigStore.java
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.config.impl;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.BooleanNode;
+import com.fasterxml.jackson.databind.node.DoubleNode;
+import com.fasterxml.jackson.databind.node.IntNode;
+import com.fasterxml.jackson.databind.node.JsonNodeFactory;
+import com.fasterxml.jackson.databind.node.LongNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.fasterxml.jackson.databind.node.ShortNode;
+import com.fasterxml.jackson.databind.node.TextNode;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.KryoNamespace;
+import org.onlab.util.Tools;
+import org.onosproject.net.config.Config;
+import org.onosproject.net.config.ConfigApplyDelegate;
+import org.onosproject.net.config.ConfigFactory;
+import org.onosproject.net.config.NetworkConfigEvent;
+import org.onosproject.net.config.NetworkConfigStore;
+import org.onosproject.net.config.NetworkConfigStoreDelegate;
+import org.onosproject.store.AbstractStore;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.service.ConsistentMap;
+import org.onosproject.store.service.ConsistentMapException;
+import org.onosproject.store.service.MapEvent;
+import org.onosproject.store.service.MapEventListener;
+import org.onosproject.store.service.Serializer;
+import org.onosproject.store.service.StorageService;
+import org.onosproject.store.service.Versioned;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+
+import static org.onosproject.net.config.NetworkConfigEvent.Type.*;
+
+/**
+ * Implementation of a distributed network configuration store.
+ * Backed by a ConsistentMap of raw JSON keyed by (subject, config class).
+ */
+@Component(immediate = true)
+@Service
+public class DistributedNetworkConfigStore
+        extends AbstractStore<NetworkConfigEvent, NetworkConfigStoreDelegate>
+        implements NetworkConfigStore {
+
+    // Maximum backoff for reads retried on transient ConsistentMap timeouts.
+    private static final int MAX_BACKOFF = 10;
+
+    private final Logger log = LoggerFactory.getLogger(getClass());
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected StorageService storageService;
+
+    // Raw JSON for each registered (subject, config class) pair.
+    private ConsistentMap<ConfigKey, JsonNode> configs;
+
+    // Factories indexed by fully-qualified config class name.
+    private final Map<String, ConfigFactory> factoriesByConfig = Maps.newConcurrentMap();
+    private final ObjectMapper mapper = new ObjectMapper();
+    private final ConfigApplyDelegate applyDelegate = new InternalApplyDelegate();
+    private final MapEventListener<ConfigKey, JsonNode> listener = new InternalMapListener();
+
+    @Activate
+    public void activate() {
+        KryoNamespace.Builder kryoBuilder = new KryoNamespace.Builder()
+                .register(KryoNamespaces.API)
+                .register(ConfigKey.class, ObjectNode.class, ArrayNode.class,
+                          JsonNodeFactory.class, LinkedHashMap.class,
+                          TextNode.class, BooleanNode.class,
+                          LongNode.class, DoubleNode.class, ShortNode.class, IntNode.class);
+
+        configs = storageService.<ConfigKey, JsonNode>consistentMapBuilder()
+                .withSerializer(Serializer.using(kryoBuilder.build()))
+                .withName("onos-network-configs")
+                .withRelaxedReadConsistency()
+                .build();
+        configs.addListener(listener);
+        log.info("Started");
+    }
+
+    @Deactivate
+    public void deactivate() {
+        configs.removeListener(listener);
+        log.info("Stopped");
+    }
+
+    @Override
+    public void addConfigFactory(ConfigFactory configFactory) {
+        factoriesByConfig.put(configFactory.configClass().getName(), configFactory);
+        notifyDelegate(new NetworkConfigEvent(CONFIG_REGISTERED, configFactory.configKey(),
+                                              configFactory.configClass()));
+    }
+
+    @Override
+    public void removeConfigFactory(ConfigFactory configFactory) {
+        factoriesByConfig.remove(configFactory.configClass().getName());
+        notifyDelegate(new NetworkConfigEvent(CONFIG_UNREGISTERED, configFactory.configKey(),
+                                              configFactory.configClass()));
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public <S, C extends Config<S>> ConfigFactory<S, C> getConfigFactory(Class<C> configClass) {
+        return (ConfigFactory<S, C>) factoriesByConfig.get(configClass.getName());
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public <S> Set<S> getSubjects(Class<S> subjectClass) {
+        ImmutableSet.Builder<S> builder = ImmutableSet.builder();
+        configs.keySet().forEach(k -> {
+            if (subjectClass.isInstance(k.subject)) {
+                builder.add((S) k.subject);
+            }
+        });
+        return builder.build();
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public <S, C extends Config<S>> Set<S> getSubjects(Class<S> subjectClass, Class<C> configClass) {
+        ImmutableSet.Builder<S> builder = ImmutableSet.builder();
+        String cName = configClass.getName();
+        configs.keySet().forEach(k -> {
+            if (subjectClass.isInstance(k.subject) && cName.equals(k.configClass)) {
+                builder.add((S) k.subject);
+            }
+        });
+        return builder.build();
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public <S> Set<Class<? extends Config<S>>> getConfigClasses(S subject) {
+        ImmutableSet.Builder<Class<? extends Config<S>>> builder = ImmutableSet.builder();
+        configs.keySet().forEach(k -> {
+            // Guard on factory presence: a stored config whose factory has
+            // been unregistered (or not yet registered) must be skipped
+            // rather than dereferencing a null factory.
+            ConfigFactory factory = factoriesByConfig.get(k.configClass);
+            if (Objects.equals(subject, k.subject) && factory != null) {
+                builder.add(factory.configClass());
+            }
+        });
+        return builder.build();
+    }
+
+    @Override
+    public <S, T extends Config<S>> T getConfig(S subject, Class<T> configClass) {
+        // TODO: need to identify and address the root cause for timeouts.
+        // Reads are retried with backoff on transient map timeouts.
+        Versioned<JsonNode> json = Tools.retryable(configs::get, ConsistentMapException.class, 1, MAX_BACKOFF)
+                .apply(key(subject, configClass));
+        return json != null ? createConfig(subject, configClass, json.value()) : null;
+    }
+
+
+    @Override
+    public <S, C extends Config<S>> C createConfig(S subject, Class<C> configClass) {
+        ConfigFactory<S, C> factory = getConfigFactory(configClass);
+        // Seed an empty array or object node depending on the factory kind.
+        Versioned<JsonNode> json = configs.computeIfAbsent(key(subject, configClass),
+                                                           k -> factory.isList() ?
+                                                                   mapper.createArrayNode() :
+                                                                   mapper.createObjectNode());
+        return createConfig(subject, configClass, json.value());
+    }
+
+    @Override
+    public <S, C extends Config<S>> C applyConfig(S subject, Class<C> configClass, JsonNode json) {
+        return createConfig(subject, configClass,
+                            configs.putAndGet(key(subject, configClass), json).value());
+    }
+
+    @Override
+    public <S, C extends Config<S>> void clearConfig(S subject, Class<C> configClass) {
+        configs.remove(key(subject, configClass));
+    }
+
+    /**
+     * Produces a config from the specified subject, config class and raw JSON.
+     *
+     * @param subject config subject
+     * @param configClass config class
+     * @param json raw JSON data
+     * @return config object or null of no factory found or if the specified
+     * JSON is null
+     */
+    @SuppressWarnings("unchecked")
+    private <S, C extends Config<S>> C createConfig(S subject, Class<C> configClass,
+                                                    JsonNode json) {
+        if (json != null) {
+            ConfigFactory<S, C> factory = factoriesByConfig.get(configClass.getName());
+            if (factory != null) {
+                C config = factory.createConfig();
+                config.init(subject, factory.configKey(), json, mapper, applyDelegate);
+                return config;
+            }
+        }
+        return null;
+    }
+
+
+    // Auxiliary delegate to receive notifications about changes applied to
+    // the network configuration - by the apps.
+    private class InternalApplyDelegate implements ConfigApplyDelegate {
+        @Override
+        public void onApply(Config config) {
+            configs.put(key(config.subject(), config.getClass()), config.node());
+        }
+    }
+
+    // Produces a key for uniquely tracking a subject config.
+    private static ConfigKey key(Object subject, Class<?> configClass) {
+        return new ConfigKey(subject, configClass);
+    }
+
+    // Auxiliary key to track subject configurations.
+    private static final class ConfigKey {
+        final Object subject;
+        final String configClass;
+
+        private ConfigKey(Object subject, Class<?> configClass) {
+            this.subject = subject;
+            this.configClass = configClass.getName();
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(subject, configClass);
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (this == obj) {
+                return true;
+            }
+            if (obj instanceof ConfigKey) {
+                final ConfigKey other = (ConfigKey) obj;
+                return Objects.equals(this.subject, other.subject)
+                        && Objects.equals(this.configClass, other.configClass);
+            }
+            return false;
+        }
+    }
+
+    // Translates map events into network config events for the delegate.
+    private class InternalMapListener implements MapEventListener<ConfigKey, JsonNode> {
+        @Override
+        public void event(MapEvent<ConfigKey, JsonNode> event) {
+            NetworkConfigEvent.Type type;
+            switch (event.type()) {
+                case INSERT:
+                    type = CONFIG_ADDED;
+                    break;
+                case UPDATE:
+                    type = CONFIG_UPDATED;
+                    break;
+                case REMOVE:
+                default:
+                    type = CONFIG_REMOVED;
+                    break;
+            }
+            // Only notify when the config class is still registered.
+            ConfigFactory factory = factoriesByConfig.get(event.key().configClass);
+            if (factory != null) {
+                notifyDelegate(new NetworkConfigEvent(type, event.key().subject,
+                                                      factory.configClass()));
+            }
+        }
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/config/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/config/impl/package-info.java
new file mode 100644
index 00000000..0e1264eb
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/config/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of the network configuration distributed store.
+ */
+package org.onosproject.store.config.impl; \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/AsyncCachingConsistentMap.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/AsyncCachingConsistentMap.java
new file mode 100644
index 00000000..7e575b01
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/AsyncCachingConsistentMap.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import java.util.concurrent.CompletableFuture;
+
+import org.onosproject.core.ApplicationId;
+import org.onosproject.store.service.Serializer;
+import org.onosproject.store.service.Versioned;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+
/**
 * Extension of DefaultAsyncConsistentMap that provides a weaker read consistency
 * guarantee in return for better read performance.
 * <p>
 * Reads are served from a local bounded cache of {@code get()} futures. A cache
 * entry is invalidated whenever a map event for its key is received, so a read
 * may briefly return a stale value between a remote update and the arrival of
 * the corresponding event.
 *
 * @param <K> key type
 * @param <V> value type
 */
public class AsyncCachingConsistentMap<K, V> extends DefaultAsyncConsistentMap<K, V> {

    // Bounded read-through cache of pending/completed read futures.
    private final LoadingCache<K, CompletableFuture<Versioned<V>>> cache =
            CacheBuilder.newBuilder()
                    .maximumSize(10000) // TODO: make configurable
                    .build(new CacheLoader<K, CompletableFuture<Versioned<V>>>() {
                        @Override
                        public CompletableFuture<Versioned<V>> load(K key)
                                throws Exception {
                            // Cache miss: fall through to the consistent read.
                            return AsyncCachingConsistentMap.super.get(key);
                        }
                    });

    public AsyncCachingConsistentMap(String name,
                                     ApplicationId applicationId,
                                     Database database,
                                     Serializer serializer,
                                     boolean readOnly,
                                     boolean purgeOnUninstall,
                                     boolean meteringEnabled) {
        super(name, applicationId, database, serializer, readOnly, purgeOnUninstall, meteringEnabled);
        // Drop the cached read for a key whenever the map reports a change to it.
        addListener(event -> cache.invalidate(event.key()));
    }

    @Override
    public CompletableFuture<Versioned<V>> get(K key) {
        CompletableFuture<Versioned<V>> cachedValue = cache.getIfPresent(key);
        if (cachedValue != null) {
            if (cachedValue.isCompletedExceptionally()) {
                // A failed read must not be served again; force a fresh load.
                cache.invalidate(key);
            } else {
                return cachedValue;
            }
        }
        return cache.getUnchecked(key);
    }
}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/CommitResponse.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/CommitResponse.java
new file mode 100644
index 00000000..bbc8e6e0
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/CommitResponse.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+import java.util.Collections;
+import java.util.List;
+
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Result of a Transaction commit operation.
+ */
+public final class CommitResponse {
+
+ private boolean success;
+ private List<UpdateResult<String, byte[]>> updates;
+
+ public static CommitResponse success(List<UpdateResult<String, byte[]>> updates) {
+ return new CommitResponse(true, updates);
+ }
+
+ public static CommitResponse failure() {
+ return new CommitResponse(false, Collections.emptyList());
+ }
+
+ private CommitResponse(boolean success, List<UpdateResult<String, byte[]>> updates) {
+ this.success = success;
+ this.updates = ImmutableList.copyOf(updates);
+ }
+
+ public boolean success() {
+ return success;
+ }
+
+ public List<UpdateResult<String, byte[]>> updates() {
+ return updates;
+ }
+
+ @Override
+ public String toString() {
+ return toStringHelper(this)
+ .add("success", success)
+ .add("udpates", updates)
+ .toString();
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/ConsistentMapBackedJavaMap.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/ConsistentMapBackedJavaMap.java
new file mode 100644
index 00000000..58aca31a
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/ConsistentMapBackedJavaMap.java
@@ -0,0 +1,145 @@
+package org.onosproject.store.consistent.impl;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import org.onosproject.store.service.ConsistentMap;
+import org.onosproject.store.service.Versioned;
+
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Maps;
+
+/**
+ * Standard java Map backed by a ConsistentMap.
+ *
+ * @param <K> key type
+ * @param <V> value type
+ */
+public final class ConsistentMapBackedJavaMap<K, V> implements Map<K, V> {
+
+ private final ConsistentMap<K, V> backingMap;
+
+ public ConsistentMapBackedJavaMap(ConsistentMap<K, V> backingMap) {
+ this.backingMap = backingMap;
+ }
+
+ @Override
+ public int size() {
+ return backingMap.size();
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return backingMap.isEmpty();
+ }
+
+ @Override
+ public boolean containsKey(Object key) {
+ return backingMap.containsKey((K) key);
+ }
+
+ @Override
+ public boolean containsValue(Object value) {
+ return backingMap.containsValue((V) value);
+ }
+
+ @Override
+ public V get(Object key) {
+ return Versioned.valueOrElse(backingMap.get((K) key), null);
+ }
+
+ @Override
+ public V getOrDefault(Object key, V defaultValue) {
+ return Versioned.valueOrElse(backingMap.get((K) key), defaultValue);
+ }
+
+ @Override
+ public V put(K key, V value) {
+ return Versioned.valueOrElse(backingMap.put(key, value), null);
+ }
+
+ @Override
+ public V putIfAbsent(K key, V value) {
+ return Versioned.valueOrElse(backingMap.putIfAbsent(key, value), null);
+ }
+
+ @Override
+ public V remove(Object key) {
+ return Versioned.valueOrElse(backingMap.remove((K) key), null);
+ }
+
+ @Override
+ public boolean remove(Object key, Object value) {
+ return backingMap.remove((K) key, (V) value);
+ }
+
+ @Override
+ public V replace(K key, V value) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean replace(K key, V oldValue, V newValue) {
+ return backingMap.replace(key, oldValue, newValue);
+ }
+
+ @Override
+ public void putAll(Map<? extends K, ? extends V> m) {
+ m.forEach((k, v) -> {
+ backingMap.put(k, v);
+ });
+ }
+
+ @Override
+ public void clear() {
+ backingMap.clear();
+ }
+
+ @Override
+ public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
+ return Versioned.valueOrElse(backingMap.compute(key, remappingFunction), null);
+ }
+
+ @Override
+ public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
+ return Versioned.valueOrElse(backingMap.computeIfAbsent(key, mappingFunction), null);
+ }
+
+ @Override
+ public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
+ return Versioned.valueOrElse(backingMap.computeIfPresent(key, remappingFunction), null);
+ }
+
+ @Override
+ public Set<K> keySet() {
+ return backingMap.keySet();
+ }
+
+ @Override
+ public Collection<V> values() {
+ return Collections2.transform(backingMap.values(), v -> v.value());
+ }
+
+ @Override
+ public Set<java.util.Map.Entry<K, V>> entrySet() {
+ return backingMap.entrySet()
+ .stream()
+ .map(entry -> Maps.immutableEntry(entry.getKey(), entry.getValue().value()))
+ .collect(Collectors.toSet());
+ }
+
+ @Override
+ public void forEach(BiConsumer<? super K, ? super V> action) {
+ entrySet().forEach(e -> action.accept(e.getKey(), e.getValue()));
+ }
+
+ @Override
+ public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
+ return computeIfPresent(key, (k, v) -> v == null ? value : remappingFunction.apply(v, value));
+ }
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/CopycatCommunicationProtocol.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/CopycatCommunicationProtocol.java
new file mode 100644
index 00000000..88ddae62
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/CopycatCommunicationProtocol.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import java.net.URI;
+import java.nio.ByteBuffer;
+import java.util.concurrent.CompletableFuture;
+
+import org.onlab.util.Tools;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.ControllerNode;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.cluster.messaging.MessageSubject;
+
+import net.kuujo.copycat.protocol.AbstractProtocol;
+import net.kuujo.copycat.protocol.ProtocolClient;
+import net.kuujo.copycat.protocol.ProtocolHandler;
+import net.kuujo.copycat.protocol.ProtocolServer;
+import net.kuujo.copycat.util.Configurable;
+
/**
 * Protocol for Copycat communication that employs
 * {@code ClusterCommunicationService}.
 * <p>
 * All copycat traffic between nodes is multiplexed over a single message
 * subject; request/response payloads are carried as raw byte buffers.
 */
public class CopycatCommunicationProtocol extends AbstractProtocol {

    // Single subject under which all copycat messages travel.
    private static final MessageSubject COPYCAT_MESSAGE_SUBJECT =
            new MessageSubject("onos-copycat-message");

    protected ClusterService clusterService;
    protected ClusterCommunicationService clusterCommunicator;

    public CopycatCommunicationProtocol(ClusterService clusterService,
                                        ClusterCommunicationService clusterCommunicator) {
        this.clusterService = clusterService;
        this.clusterCommunicator = clusterCommunicator;
    }

    @Override
    public Configurable copy() {
        // NOTE(review): returns itself rather than a real copy — presumably
        // safe because this protocol carries no mutable configuration;
        // confirm against Copycat's Configurable contract.
        return this;
    }

    @Override
    public ProtocolClient createClient(URI uri) {
        // Resolve the copycat peer URI to a cluster node by matching host IPs.
        NodeId nodeId = uriToNodeId(uri);
        if (nodeId == null) {
            throw new IllegalStateException("Unknown peer " + uri);
        }
        return new Client(nodeId);
    }

    @Override
    public ProtocolServer createServer(URI uri) {
        return new Server();
    }

    // Server side: subscribes the supplied handler to the copycat subject.
    private class Server implements ProtocolServer {

        @Override
        public void handler(ProtocolHandler handler) {
            // A null handler means "stop serving": drop the subscription.
            if (handler == null) {
                clusterCommunicator.removeSubscriber(COPYCAT_MESSAGE_SUBJECT);
            } else {
                clusterCommunicator.addSubscriber(COPYCAT_MESSAGE_SUBJECT,
                        ByteBuffer::wrap,
                        handler,
                        Tools::byteBuffertoArray);
                // FIXME: Tools::byteBuffertoArray involves an array copy.
            }
        }

        @Override
        public CompletableFuture<Void> listen() {
            // Transport is already listening via the cluster communicator.
            return CompletableFuture.completedFuture(null);
        }

        @Override
        public CompletableFuture<Void> close() {
            clusterCommunicator.removeSubscriber(COPYCAT_MESSAGE_SUBJECT);
            return CompletableFuture.completedFuture(null);
        }
    }

    // Client side: sends request buffers to a fixed peer and awaits replies.
    private class Client implements ProtocolClient {
        private final NodeId peer;

        public Client(NodeId peer) {
            this.peer = peer;
        }

        @Override
        public CompletableFuture<ByteBuffer> write(ByteBuffer request) {
            return clusterCommunicator.sendAndReceive(request,
                    COPYCAT_MESSAGE_SUBJECT,
                    Tools::byteBuffertoArray,
                    ByteBuffer::wrap,
                    peer);
        }

        @Override
        public CompletableFuture<Void> connect() {
            // Connections are managed by the cluster communicator; nothing to do.
            return CompletableFuture.completedFuture(null);
        }

        @Override
        public CompletableFuture<Void> close() {
            return CompletableFuture.completedFuture(null);
        }
    }

    // Maps a copycat peer URI to a controller node id by comparing the URI
    // host with each node's IP address; returns null when no node matches.
    private NodeId uriToNodeId(URI uri) {
        return clusterService.getNodes()
                .stream()
                .filter(node -> uri.getHost().equals(node.ip().toString()))
                .map(ControllerNode::id)
                .findAny()
                .orElse(null);
    }
}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Database.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Database.java
new file mode 100644
index 00000000..ff3e36ac
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Database.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+
+import java.util.function.Consumer;
+
+import net.kuujo.copycat.cluster.ClusterConfig;
+import net.kuujo.copycat.cluster.internal.coordinator.ClusterCoordinator;
+import net.kuujo.copycat.cluster.internal.coordinator.CoordinatorConfig;
+import net.kuujo.copycat.cluster.internal.coordinator.DefaultClusterCoordinator;
+import net.kuujo.copycat.resource.Resource;
+
/**
 * Database.
 * <p>
 * A partitioned, replicated key/value store resource built on Copycat,
 * combining the {@code DatabaseProxy} operations with the Copycat
 * {@code Resource} lifecycle.
 */
public interface Database extends DatabaseProxy<String, byte[]>, Resource<Database> {

    /**
     * Creates a new database with the default cluster configuration.<p>
     *
     * The database will be constructed with the default cluster configuration. The default cluster configuration
     * searches for two resources on the classpath - {@code cluster} and {@code cluster-defaults} - in that order.
     * Configuration options specified in {@code cluster.conf} will override those in
     * {@code cluster-defaults.conf}.<p>
     *
     * Additionally, the database will be constructed with a database configuration that searches the classpath for
     * three configuration files - {@code {name}}, {@code database}, {@code database-defaults}, {@code resource}, and
     * {@code resource-defaults} - in that order. The first resource is a configuration resource with the same name
     * as the map resource. If the resource is namespaced - e.g. `databases.my-database.conf` - then resource
     * configurations will be loaded according to namespaces as well; for example, `databases.conf`.
     *
     * @param name The database name.
     * @return The database.
     */
    static Database create(String name) {
        return create(name, new ClusterConfig(), new DatabaseConfig());
    }

    /**
     * Creates a new database.<p>
     *
     * The database will be constructed with a database configuration that searches the classpath for
     * three configuration files - {@code {name}}, {@code database}, {@code database-defaults}, {@code resource}, and
     * {@code resource-defaults} - in that order. The first resource is a configuration resource with the same name
     * as the database resource. If the resource is namespaced - e.g. `databases.my-database.conf` - then resource
     * configurations will be loaded according to namespaces as well; for example, `databases.conf`.
     *
     * @param name The database name.
     * @param cluster The cluster configuration.
     * @return The database.
     */
    static Database create(String name, ClusterConfig cluster) {
        return create(name, cluster, new DatabaseConfig());
    }

    /**
     * Creates a new database.
     *
     * @param name The database name.
     * @param cluster The cluster configuration.
     * @param config The database configuration.
     * @return The database.
     */
    static Database create(String name, ClusterConfig cluster, DatabaseConfig config) {
        // The coordinator owns the resource lifecycle: it is opened before the
        // database starts and closed when the database shuts down.
        ClusterCoordinator coordinator =
                new DefaultClusterCoordinator(new CoordinatorConfig().withName(name).withClusterConfig(cluster));
        return coordinator.<Database>getResource(name, config.resolve(cluster))
                .addStartupTask(() -> coordinator.open().thenApply(v -> null))
                .addShutdownTask(coordinator::close);
    }

    /**
     * Tells whether the database supports change notifications.
     * @return true if notifications are supported; false otherwise
     */
    default boolean hasChangeNotificationSupport() {
        return true;
    }

    /**
     * Registers a new consumer of StateMachineUpdates.
     * @param consumer consumer to register
     */
    void registerConsumer(Consumer<StateMachineUpdate> consumer);

    /**
     * Unregisters a consumer of StateMachineUpdates.
     * @param consumer consumer to unregister
     */
    void unregisterConsumer(Consumer<StateMachineUpdate> consumer);
}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseConfig.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseConfig.java
new file mode 100644
index 00000000..bd774b99
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseConfig.java
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+import com.typesafe.config.ConfigValueFactory;
+import net.kuujo.copycat.cluster.ClusterConfig;
+import net.kuujo.copycat.cluster.internal.coordinator.CoordinatedResourceConfig;
+import net.kuujo.copycat.protocol.Consistency;
+import net.kuujo.copycat.resource.ResourceConfig;
+import net.kuujo.copycat.state.StateLogConfig;
+import net.kuujo.copycat.util.internal.Assert;
+
+import java.util.Map;
+
/**
 * Database configuration.
 * <p>
 * Thin wrapper over the Copycat {@code ResourceConfig} that adds a read
 * consistency setting (stored in the underlying typesafe config) and a
 * database name (held as a plain field, outside the typesafe config).
 */
public class DatabaseConfig extends ResourceConfig<DatabaseConfig> {
    private static final String DATABASE_CONSISTENCY = "consistency";

    private static final String DEFAULT_CONFIGURATION = "database-defaults";
    private static final String CONFIGURATION = "database";

    // Database name; NOTE: not copied through the typesafe config, so it is
    // carried by the copy constructor's ResourceConfig state only.
    private String name;

    public DatabaseConfig() {
        super(CONFIGURATION, DEFAULT_CONFIGURATION);
    }

    public DatabaseConfig(Map<String, Object> config) {
        super(config, CONFIGURATION, DEFAULT_CONFIGURATION);
    }

    public DatabaseConfig(String resource) {
        super(resource, CONFIGURATION, DEFAULT_CONFIGURATION);
    }

    protected DatabaseConfig(DatabaseConfig config) {
        super(config);
    }

    @Override
    public DatabaseConfig copy() {
        return new DatabaseConfig(this);
    }

    /**
     * Sets the database read consistency.
     *
     * @param consistency The database read consistency.
     * @throws java.lang.NullPointerException If the consistency is {@code null}
     * @throws java.lang.IllegalArgumentException If the string does not parse to a known consistency level
     */
    public void setConsistency(String consistency) {
        // Parse first so an invalid string fails fast rather than corrupting the config.
        this.config = config.withValue(DATABASE_CONSISTENCY,
                ConfigValueFactory.fromAnyRef(
                        Consistency.parse(Assert.isNotNull(consistency, "consistency")).toString()));
    }

    /**
     * Sets the database read consistency.
     *
     * @param consistency The database read consistency.
     * @throws java.lang.NullPointerException If the consistency is {@code null}
     */
    public void setConsistency(Consistency consistency) {
        this.config = config.withValue(DATABASE_CONSISTENCY,
                ConfigValueFactory.fromAnyRef(
                        Assert.isNotNull(consistency, "consistency").toString()));
    }

    /**
     * Returns the database read consistency.
     *
     * @return The database read consistency.
     */
    public Consistency getConsistency() {
        return Consistency.parse(config.getString(DATABASE_CONSISTENCY));
    }

    /**
     * Sets the database read consistency, returning the configuration for method chaining.
     *
     * @param consistency The database read consistency.
     * @return The database configuration.
     * @throws java.lang.NullPointerException If the consistency is {@code null}
     */
    public DatabaseConfig withConsistency(String consistency) {
        setConsistency(consistency);
        return this;
    }

    /**
     * Sets the database read consistency, returning the configuration for method chaining.
     *
     * @param consistency The database read consistency.
     * @return The database configuration.
     * @throws java.lang.NullPointerException If the consistency is {@code null}
     */
    public DatabaseConfig withConsistency(Consistency consistency) {
        setConsistency(consistency);
        return this;
    }

    /**
     * Returns the database name.
     *
     * @return The database name
     */
    public String getName() {
        return name;
    }

    /**
     * Sets the database name, returning the configuration for method chaining.
     *
     * @param name The database name
     * @return The database configuration
     * @throws java.lang.NullPointerException If the name is {@code null}
     */
    public DatabaseConfig withName(String name) {
        setName(Assert.isNotNull(name, "name"));
        return this;
    }

    /**
     * Sets the database name.
     *
     * @param name The database name
     * @throws java.lang.NullPointerException If the name is {@code null}
     */
    public void setName(String name) {
        this.name = Assert.isNotNull(name, "name");
    }

    @Override
    public CoordinatedResourceConfig resolve(ClusterConfig cluster) {
        // Resolve as a state-log resource whose concrete type is DefaultDatabase.
        return new StateLogConfig(toMap())
                .resolve(cluster)
                .withResourceType(DefaultDatabase.class);
    }

}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseDefinition.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseDefinition.java
new file mode 100644
index 00000000..11b56c14
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseDefinition.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
+import org.onosproject.store.cluster.impl.NodeInfo;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Partitioned database configuration.
+ */
+public class DatabaseDefinition {
+ private Map<String, Set<NodeInfo>> partitions;
+ private Set<NodeInfo> nodes;
+
+ /**
+ * Creates a new DatabaseDefinition.
+ *
+ * @param partitions partition map
+ * @param nodes set of nodes
+ * @return database definition
+ */
+ public static DatabaseDefinition from(Map<String, Set<NodeInfo>> partitions,
+ Set<NodeInfo> nodes) {
+ checkNotNull(partitions);
+ checkNotNull(nodes);
+ DatabaseDefinition definition = new DatabaseDefinition();
+ definition.partitions = ImmutableMap.copyOf(partitions);
+ definition.nodes = ImmutableSet.copyOf(nodes);
+ return definition;
+ }
+
+ /**
+ * Creates a new DatabaseDefinition using default partitions.
+ *
+ * @param nodes set of nodes
+ * @return database definition
+ */
+ public static DatabaseDefinition from(Set<NodeInfo> nodes) {
+ return from(generateDefaultPartitions(nodes), nodes);
+ }
+
+ /**
+ * Returns the map of database partitions.
+ *
+ * @return db partition map
+ */
+ public Map<String, Set<NodeInfo>> getPartitions() {
+ return partitions;
+ }
+
+ /**
+ * Returns the set of nodes.
+ *
+ * @return nodes
+ */
+ public Set<NodeInfo> getNodes() {
+ return nodes;
+ }
+
+
+ /**
+ * Generates set of default partitions using permutations of the nodes.
+ *
+ * @param nodes information about cluster nodes
+ * @return default partition map
+ */
+ private static Map<String, Set<NodeInfo>> generateDefaultPartitions(Set<NodeInfo> nodes) {
+ List<NodeInfo> sorted = new ArrayList<>(nodes);
+ Collections.sort(sorted, (o1, o2) -> o1.getId().compareTo(o2.getId()));
+ Map<String, Set<NodeInfo>> partitions = Maps.newHashMap();
+
+ int length = nodes.size();
+ int count = 3;
+ for (int i = 0; i < length; i++) {
+ Set<NodeInfo> set = new HashSet<>(count);
+ for (int j = 0; j < count; j++) {
+ set.add(sorted.get((i + j) % length));
+ }
+ partitions.put("p" + (i + 1), set);
+ }
+ return partitions;
+ }
+
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseDefinitionStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseDefinitionStore.java
new file mode 100644
index 00000000..b77667b2
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseDefinitionStore.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import java.io.File;
+import java.io.IOException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.io.Files;
+
+/**
+ * Allows for reading and writing partitioned database definition as a JSON file.
+ */
+public class DatabaseDefinitionStore {
+
+ private final File file;
+
+ /**
+ * Creates a reader/writer of the database definition file.
+ *
+ * @param filePath location of the definition file
+ */
+ public DatabaseDefinitionStore(String filePath) {
+ file = new File(checkNotNull(filePath));
+ }
+
+ /**
+ * Creates a reader/writer of the database definition file.
+ *
+ * @param filePath location of the definition file
+ */
+ public DatabaseDefinitionStore(File filePath) {
+ file = checkNotNull(filePath);
+ }
+
+ /**
+ * Returns the database definition.
+ *
+ * @return database definition
+ * @throws IOException when I/O exception of some sort has occurred.
+ */
+ public DatabaseDefinition read() throws IOException {
+ ObjectMapper mapper = new ObjectMapper();
+ return mapper.readValue(file, DatabaseDefinition.class);
+ }
+
+ /**
+ * Writes the specified database definition to file.
+ *
+ * @param definition database definition
+ * @throws IOException when I/O exception of some sort has occurred.
+ */
+ public void write(DatabaseDefinition definition) throws IOException {
+ checkNotNull(definition);
+ // write back to file
+ Files.createParentDirs(file);
+ ObjectMapper mapper = new ObjectMapper();
+ mapper.writeValue(file, definition);
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseManager.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseManager.java
new file mode 100644
index 00000000..b7c3794b
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseManager.java
@@ -0,0 +1,455 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Multimaps;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.Futures;
+
+import net.kuujo.copycat.CopycatConfig;
+import net.kuujo.copycat.cluster.ClusterConfig;
+import net.kuujo.copycat.cluster.Member;
+import net.kuujo.copycat.cluster.Member.Type;
+import net.kuujo.copycat.cluster.internal.coordinator.ClusterCoordinator;
+import net.kuujo.copycat.cluster.internal.coordinator.DefaultClusterCoordinator;
+import net.kuujo.copycat.log.BufferedLog;
+import net.kuujo.copycat.log.FileLog;
+import net.kuujo.copycat.log.Log;
+import net.kuujo.copycat.protocol.Consistency;
+import net.kuujo.copycat.protocol.Protocol;
+import net.kuujo.copycat.util.concurrent.NamedThreadFactory;
+
+import org.apache.commons.lang.math.RandomUtils;
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.ReferencePolicy;
+import org.apache.felix.scr.annotations.Service;
+
+import org.onosproject.app.ApplicationEvent;
+import org.onosproject.app.ApplicationListener;
+import org.onosproject.app.ApplicationService;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.core.ApplicationId;
+import org.onosproject.core.IdGenerator;
+import org.onosproject.store.cluster.impl.ClusterDefinitionManager;
+import org.onosproject.store.cluster.impl.NodeInfo;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.ecmap.EventuallyConsistentMapBuilderImpl;
+import org.onosproject.store.service.AtomicCounterBuilder;
+import org.onosproject.store.service.AtomicValueBuilder;
+import org.onosproject.store.service.ConsistentMapBuilder;
+import org.onosproject.store.service.ConsistentMapException;
+import org.onosproject.store.service.DistributedQueueBuilder;
+import org.onosproject.store.service.EventuallyConsistentMapBuilder;
+import org.onosproject.store.service.MapInfo;
+import org.onosproject.store.service.PartitionInfo;
+import org.onosproject.store.service.DistributedSetBuilder;
+import org.onosproject.store.service.StorageAdminService;
+import org.onosproject.store.service.StorageService;
+import org.onosproject.store.service.Transaction;
+import org.onosproject.store.service.TransactionContextBuilder;
+import org.slf4j.Logger;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.stream.Collectors;
+
+import static org.slf4j.LoggerFactory.getLogger;
+import static org.onosproject.app.ApplicationEvent.Type.APP_UNINSTALLED;
+import static org.onosproject.app.ApplicationEvent.Type.APP_DEACTIVATED;
+
+/**
+ * Database manager.
+ */
+@Component(immediate = true, enabled = true)
+@Service
+public class DatabaseManager implements StorageService, StorageAdminService {
+
+    private final Logger log = getLogger(getClass());
+
+    /** TCP port used by Copycat for intra-cluster Raft traffic. */
+    public static final int COPYCAT_TCP_PORT = 9876;
+    /** Partition definition file, resolved relative to the runtime directory. */
+    public static final String PARTITION_DEFINITION_FILE = "../config/tablets.json";
+    /** Name of the single in-memory (non-persistent) base partition. */
+    public static final String BASE_PARTITION_NAME = "p0";
+
+    // Raft election timeout; heartbeats are sent at half this interval.
+    private static final int RAFT_ELECTION_TIMEOUT_MILLIS = 3000;
+    // Upper bound on blocking waits performed by complete().
+    private static final int DATABASE_OPERATION_TIMEOUT_MILLIS = 5000;
+
+    private ClusterCoordinator coordinator;
+    protected PartitionedDatabase partitionedDatabase;
+    protected Database inMemoryDatabase;
+    protected NodeId localNodeId;
+
+    private TransactionManager transactionManager;
+    // NOTE(review): transaction ids are drawn at random, so uniqueness is
+    // probabilistic rather than guaranteed -- confirm collisions are tolerable.
+    private final IdGenerator transactionIdGenerator = () -> RandomUtils.nextLong();
+
+    private ApplicationListener appListener = new InternalApplicationListener();
+
+    // Live map instances indexed by map name and by owning application so
+    // they can be released on shutdown or application uninstall.
+    private final Multimap<String, DefaultAsyncConsistentMap> maps =
+            Multimaps.synchronizedMultimap(ArrayListMultimap.create());
+    private final Multimap<ApplicationId, DefaultAsyncConsistentMap> mapsByApplication =
+            Multimaps.synchronizedMultimap(ArrayListMultimap.create());
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected ClusterService clusterService;
+
+    @Reference(cardinality = ReferenceCardinality.OPTIONAL_UNARY, policy = ReferencePolicy.DYNAMIC)
+    protected ApplicationService applicationService;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected ClusterCommunicationService clusterCommunicator;
+
+    /**
+     * Builds the Copycat member URI ("onos://ip:port") for the given node.
+     *
+     * @param node cluster node information
+     * @return copycat protocol URI for the node
+     */
+    protected String nodeToUri(NodeInfo node) {
+        return String.format("onos://%s:%d", node.getIp(), node.getTcpPort());
+    }
+
+    // Invoked by SCR when the optional ApplicationService becomes available.
+    protected void bindApplicationService(ApplicationService service) {
+        applicationService = service;
+        applicationService.addListener(appListener);
+    }
+
+    // Invoked by SCR when the ApplicationService goes away.
+    protected void unbindApplicationService(ApplicationService service) {
+        applicationService.removeListener(appListener);
+        this.applicationService = null;
+    }
+
+    /**
+     * Activates the component: loads (or seeds) the partition definition,
+     * builds the Copycat cluster configuration, opens the in-memory base
+     * partition and all persistent partitions, and blocks until the
+     * databases are open.
+     */
+    @Activate
+    public void activate() {
+        localNodeId = clusterService.getLocalNode().id();
+        // load database configuration
+        File databaseDefFile = new File(PARTITION_DEFINITION_FILE);
+        log.info("Loading database definition: {}", databaseDefFile.getAbsolutePath());
+
+        Map<String, Set<NodeInfo>> partitionMap;
+        try {
+            DatabaseDefinitionStore databaseDefStore = new DatabaseDefinitionStore(databaseDefFile);
+            if (!databaseDefFile.exists()) {
+                // First boot: seed a single-node definition for the local node.
+                createDefaultDatabaseDefinition(databaseDefStore);
+            }
+            partitionMap = databaseDefStore.read().getPartitions();
+        } catch (IOException e) {
+            throw new IllegalStateException("Failed to load database config", e);
+        }
+
+        // Union of all replica nodes across all partitions.
+        // NOTE(review): reduce(...).get() assumes at least one partition is
+        // defined; an empty definition would throw NoSuchElementException here.
+        String[] activeNodeUris = partitionMap.values()
+                .stream()
+                .reduce((s1, s2) -> Sets.union(s1, s2))
+                .get()
+                .stream()
+                .map(this::nodeToUri)
+                .toArray(String[]::new);
+
+        String localNodeUri = nodeToUri(NodeInfo.of(clusterService.getLocalNode()));
+        Protocol protocol = new CopycatCommunicationProtocol(clusterService, clusterCommunicator);
+
+        ClusterConfig clusterConfig = new ClusterConfig()
+                .withProtocol(protocol)
+                .withElectionTimeout(electionTimeoutMillis(activeNodeUris))
+                .withHeartbeatInterval(heartbeatTimeoutMillis(activeNodeUris))
+                .withMembers(activeNodeUris)
+                .withLocalMember(localNodeUri);
+
+        CopycatConfig copycatConfig = new CopycatConfig()
+                .withName("onos")
+                .withClusterConfig(clusterConfig)
+                .withDefaultSerializer(new DatabaseSerializer())
+                .withDefaultExecutor(Executors.newSingleThreadExecutor(new NamedThreadFactory("copycat-coordinator-%d")));
+
+        coordinator = new DefaultClusterCoordinator(copycatConfig.resolve());
+
+        // The base partition is replicated to every active node and backed
+        // by a volatile in-memory log.
+        DatabaseConfig inMemoryDatabaseConfig =
+                newDatabaseConfig(BASE_PARTITION_NAME, newInMemoryLog(), activeNodeUris);
+        inMemoryDatabase = coordinator
+                .getResource(inMemoryDatabaseConfig.getName(), inMemoryDatabaseConfig.resolve(clusterConfig)
+                .withSerializer(copycatConfig.getDefaultSerializer())
+                .withDefaultExecutor(copycatConfig.getDefaultExecutor()));
+
+        // One persistent, file-backed database per configured partition.
+        List<Database> partitions = partitionMap.entrySet()
+                .stream()
+                .map(entry -> {
+                    String[] replicas = entry.getValue().stream().map(this::nodeToUri).toArray(String[]::new);
+                    return newDatabaseConfig(entry.getKey(), newPersistentLog(), replicas);
+                })
+                .map(config -> {
+                    Database db = coordinator.getResource(config.getName(), config.resolve(clusterConfig)
+                            .withSerializer(copycatConfig.getDefaultSerializer())
+                            .withDefaultExecutor(copycatConfig.getDefaultExecutor()));
+                    return db;
+                })
+                .collect(Collectors.toList());
+
+        partitionedDatabase = new PartitionedDatabase("onos-store", partitions);
+
+        CompletableFuture<Void> status = coordinator.open()
+                .thenCompose(v -> CompletableFuture.allOf(inMemoryDatabase.open(), partitionedDatabase.open())
+                .whenComplete((db, error) -> {
+                    if (error != null) {
+                        log.error("Failed to initialize database.", error);
+                    } else {
+                        log.info("Successfully initialized database.");
+                    }
+                }));
+
+        // Block activation until all databases are open (or fail).
+        Futures.getUnchecked(status);
+
+        transactionManager = new TransactionManager(partitionedDatabase, consistentMapBuilder());
+        partitionedDatabase.setTransactionManager(transactionManager);
+
+        log.info("Started");
+    }
+
+    /**
+     * Seeds a default single-node database definition using the local
+     * site-local address. Failures are logged but not propagated.
+     */
+    private void createDefaultDatabaseDefinition(DatabaseDefinitionStore store) {
+        // Assumes IPv4 is returned.
+        String ip = ClusterDefinitionManager.getSiteLocalAddress();
+        NodeInfo node = NodeInfo.from(ip, ip, COPYCAT_TCP_PORT);
+        try {
+            store.write(DatabaseDefinition.from(ImmutableSet.of(node)));
+        } catch (IOException e) {
+            log.warn("Unable to write default cluster definition", e);
+        }
+    }
+
+    /**
+     * Deactivates the component, closing databases and the coordinator.
+     * NOTE(review): the close future is not awaited, so deactivation may
+     * return before shutdown finishes. Also, unregisterMap removes entries
+     * from {@code maps} while {@code maps.values()} is being iterated --
+     * confirm this cannot raise ConcurrentModificationException.
+     */
+    @Deactivate
+    public void deactivate() {
+        CompletableFuture.allOf(inMemoryDatabase.close(), partitionedDatabase.close())
+                .thenCompose(v -> coordinator.close())
+                .whenComplete((result, error) -> {
+                    if (error != null) {
+                        log.warn("Failed to cleanly close databases.", error);
+                    } else {
+                        log.info("Successfully closed databases.");
+                    }
+                });
+        maps.values().forEach(this::unregisterMap);
+        if (applicationService != null) {
+            applicationService.removeListener(appListener);
+        }
+        log.info("Stopped");
+    }
+
+    @Override
+    public TransactionContextBuilder transactionContextBuilder() {
+        return new DefaultTransactionContextBuilder(this, transactionIdGenerator.getNewId());
+    }
+
+    @Override
+    public List<PartitionInfo> getPartitionInfo() {
+        // Report the in-memory base partition alongside all persistent ones.
+        return Lists.asList(
+                    inMemoryDatabase,
+                    partitionedDatabase.getPartitions().toArray(new Database[]{}))
+                .stream()
+                .map(DatabaseManager::toPartitionInfo)
+                .collect(Collectors.toList());
+    }
+
+    /**
+     * Returns a durable, file-backed Copycat log rooted in the karaf data
+     * directory, flushed on every write.
+     */
+    private Log newPersistentLog() {
+        String logDir = System.getProperty("karaf.data", "./data");
+        return new FileLog()
+                .withDirectory(logDir)
+                .withSegmentSize(1073741824) // 1GB
+                .withFlushOnWrite(true)
+                .withSegmentInterval(Long.MAX_VALUE);
+    }
+
+    /**
+     * Returns a volatile in-memory Copycat log for the base partition.
+     */
+    private Log newInMemoryLog() {
+        return new BufferedLog()
+                .withFlushOnWrite(false)
+                .withFlushInterval(Long.MAX_VALUE)
+                .withSegmentSize(10485760) // 10MB
+                .withSegmentInterval(Long.MAX_VALUE);
+    }
+
+    /**
+     * Builds a database configuration for the named partition backed by
+     * the given log and replica set.
+     */
+    private DatabaseConfig newDatabaseConfig(String name, Log log, String[] replicas) {
+        return new DatabaseConfig()
+                .withName(name)
+                .withElectionTimeout(electionTimeoutMillis(replicas))
+                .withHeartbeatInterval(heartbeatTimeoutMillis(replicas))
+                .withConsistency(Consistency.DEFAULT)
+                .withLog(log)
+                .withDefaultSerializer(new DatabaseSerializer())
+                .withReplicas(replicas);
+    }
+
+    // Single-node clusters can elect near-instantly; use a tiny timeout.
+    private long electionTimeoutMillis(String[] replicas) {
+        return replicas.length == 1 ? 10L : RAFT_ELECTION_TIMEOUT_MILLIS;
+    }
+
+    // Heartbeat at half the election timeout to keep leadership stable.
+    private long heartbeatTimeoutMillis(String[] replicas) {
+        return electionTimeoutMillis(replicas) / 2;
+    }
+
+    /**
+     * Maps a Raft Database object to a PartitionInfo object.
+     *
+     * @param database database containing input data
+     * @return PartitionInfo object
+     */
+    private static PartitionInfo toPartitionInfo(Database database) {
+        return new PartitionInfo(database.name(),
+                          database.cluster().term(),
+                          database.cluster().members()
+                                  .stream()
+                                  .filter(member -> Type.ACTIVE.equals(member.type()))
+                                  .map(Member::uri)
+                                  .sorted()
+                                  .collect(Collectors.toList()),
+                          database.cluster().leader() != null ?
+                                  database.cluster().leader().uri() : null);
+    }
+
+
+    @Override
+    public <K, V> EventuallyConsistentMapBuilder<K, V> eventuallyConsistentMapBuilder() {
+        return new EventuallyConsistentMapBuilderImpl<>(clusterService,
+                                                        clusterCommunicator);
+    }
+
+    @Override
+    public <K, V> ConsistentMapBuilder<K, V> consistentMapBuilder() {
+        return new DefaultConsistentMapBuilder<>(this);
+    }
+
+    @Override
+    public <E> DistributedSetBuilder<E> setBuilder() {
+        return new DefaultDistributedSetBuilder<>(this);
+    }
+
+
+    @Override
+    public <E> DistributedQueueBuilder<E> queueBuilder() {
+        return new DefaultDistributedQueueBuilder<>(this);
+    }
+
+    @Override
+    public AtomicCounterBuilder atomicCounterBuilder() {
+        return new DefaultAtomicCounterBuilder(inMemoryDatabase, partitionedDatabase);
+    }
+
+    @Override
+    public <V> AtomicValueBuilder<V> atomicValueBuilder() {
+        return new DefaultAtomicValueBuilder<>(this);
+    }
+
+    @Override
+    public List<MapInfo> getMapInfo() {
+        List<MapInfo> maps = Lists.newArrayList();
+        maps.addAll(getMapInfo(inMemoryDatabase));
+        maps.addAll(getMapInfo(partitionedDatabase));
+        return maps;
+    }
+
+    // Collects per-map sizes from one database, omitting empty maps.
+    private List<MapInfo> getMapInfo(Database database) {
+        return complete(database.maps())
+            .stream()
+            .map(name -> new MapInfo(name, complete(database.mapSize(name))))
+            .filter(info -> info.size() > 0)
+            .collect(Collectors.toList());
+    }
+
+
+    @Override
+    public Map<String, Long> getCounters() {
+        Map<String, Long> counters = Maps.newHashMap();
+        counters.putAll(complete(inMemoryDatabase.counters()));
+        counters.putAll(complete(partitionedDatabase.counters()));
+        return counters;
+    }
+
+    @Override
+    public Map<String, Long> getPartitionedDatabaseCounters() {
+        Map<String, Long> counters = Maps.newHashMap();
+        counters.putAll(complete(partitionedDatabase.counters()));
+        return counters;
+    }
+
+    @Override
+    public Map<String, Long> getInMemoryDatabaseCounters() {
+        Map<String, Long> counters = Maps.newHashMap();
+        counters.putAll(complete(inMemoryDatabase.counters()));
+        return counters;
+    }
+
+    @Override
+    public Collection<Transaction> getTransactions() {
+        return complete(transactionManager.getTransactions());
+    }
+
+    /**
+     * Blocks on the given future for at most
+     * DATABASE_OPERATION_TIMEOUT_MILLIS, translating failures into
+     * ConsistentMapException variants and restoring the interrupt flag
+     * on interruption.
+     */
+    private static <T> T complete(CompletableFuture<T> future) {
+        try {
+            return future.get(DATABASE_OPERATION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw new ConsistentMapException.Interrupted();
+        } catch (TimeoutException e) {
+            throw new ConsistentMapException.Timeout();
+        } catch (ExecutionException e) {
+            throw new ConsistentMapException(e.getCause());
+        }
+    }
+
+    @Override
+    public void redriveTransactions() {
+        // Re-submit every pending transaction, e.g. after a restart.
+        getTransactions().stream().forEach(transactionManager::execute);
+    }
+
+    /**
+     * Tracks a newly-built map so it can be cleaned up later; maps with an
+     * owning application are additionally indexed by that application.
+     */
+    protected <K, V> DefaultAsyncConsistentMap<K, V> registerMap(DefaultAsyncConsistentMap<K, V> map) {
+        maps.put(map.name(), map);
+        if (map.applicationId() != null) {
+            mapsByApplication.put(map.applicationId(), map);
+        }
+        return map;
+    }
+
+    /**
+     * Removes a map from both tracking indexes.
+     */
+    protected <K, V> void unregisterMap(DefaultAsyncConsistentMap<K, V> map) {
+        maps.remove(map.name(), map);
+        if (map.applicationId() != null) {
+            mapsByApplication.remove(map.applicationId(), map);
+        }
+    }
+
+    /**
+     * Releases (and, for uninstalls, optionally purges) the maps owned by
+     * an application when it is uninstalled or deactivated.
+     */
+    private class InternalApplicationListener implements ApplicationListener {
+        @Override
+        public void event(ApplicationEvent event) {
+            if (event.type() == APP_UNINSTALLED || event.type() == APP_DEACTIVATED) {
+                ApplicationId appId = event.subject().id();
+                List<DefaultAsyncConsistentMap> mapsToRemove = ImmutableList.copyOf(mapsByApplication.get(appId));
+                mapsToRemove.forEach(DatabaseManager.this::unregisterMap);
+                if (event.type() == APP_UNINSTALLED) {
+                    // Only a full uninstall purges data, and only for maps
+                    // that opted in via purgeOnUninstall().
+                    mapsToRemove.stream().filter(map -> map.purgeOnUninstall()).forEach(map -> map.clear());
+                }
+            }
+        }
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabasePartitioner.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabasePartitioner.java
new file mode 100644
index 00000000..740f81ad
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabasePartitioner.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+import static com.google.common.base.Preconditions.checkState;
+
+import java.util.List;
+import com.google.common.base.Charsets;
+import com.google.common.collect.ImmutableList;
+import com.google.common.hash.Hashing;
+
+/**
+ * Partitioner for mapping map entries to individual database partitions.
+ * <p>
+ * By default a md5 hash of the hash key (key or map name) is used to pick a
+ * partition.
+ */
+public abstract class DatabasePartitioner implements Partitioner<String> {
+ // Database partitions sorted by their partition name.
+ protected final List<Database> partitions;
+
+ public DatabasePartitioner(List<Database> partitions) {
+ checkState(partitions != null && !partitions.isEmpty(), "Partitions cannot be null or empty");
+ this.partitions = ImmutableList.copyOf(partitions);
+ }
+
+ protected int hash(String key) {
+ return Math.abs(Hashing.md5().newHasher().putBytes(key.getBytes(Charsets.UTF_8)).hash().asInt());
+ }
+
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseProxy.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseProxy.java
new file mode 100644
index 00000000..95f9e39a
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseProxy.java
@@ -0,0 +1,224 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+
+import org.onosproject.store.service.Transaction;
+import org.onosproject.store.service.Versioned;
+
+/**
+ * Database proxy.
+ */
+public interface DatabaseProxy<K, V> {
+
+    /**
+     * Returns a set of all map names.
+     *
+     * @return A completable future to be completed with the result once complete.
+     */
+    CompletableFuture<Set<String>> maps();
+
+    /**
+     * Returns a mapping from counter name to next value.
+     *
+     * @return A completable future to be completed with the result once complete.
+     */
+    CompletableFuture<Map<String, Long>> counters();
+
+    /**
+     * Returns the number of entries in map.
+     *
+     * @param mapName map name
+     * @return A completable future to be completed with the result once complete.
+     */
+    CompletableFuture<Integer> mapSize(String mapName);
+
+    /**
+     * Checks whether the map is empty.
+     *
+     * @param mapName map name
+     * @return A completable future to be completed with the result once complete.
+     */
+    CompletableFuture<Boolean> mapIsEmpty(String mapName);
+
+    /**
+     * Checks whether the map contains a key.
+     *
+     * @param mapName map name
+     * @param key key to check
+     * @return A completable future to be completed with the result once complete.
+     */
+    CompletableFuture<Boolean> mapContainsKey(String mapName, K key);
+
+    /**
+     * Checks whether the map contains a value.
+     *
+     * @param mapName map name
+     * @param value value to check
+     * @return A completable future to be completed with the result once complete.
+     */
+    CompletableFuture<Boolean> mapContainsValue(String mapName, V value);
+
+    /**
+     * Gets a value from the map.
+     *
+     * @param mapName map name
+     * @param key key to get
+     * @return A completable future to be completed with the result once complete.
+     */
+    CompletableFuture<Versioned<V>> mapGet(String mapName, K key);
+
+    /**
+     * Updates the map entry for a key, guarded by optional value and
+     * version matches on the existing entry.
+     *
+     * @param mapName map name
+     * @param key The key to set
+     * @param valueMatch match for checking existing value
+     * @param versionMatch match for checking existing version
+     * @param value new value
+     * @return A completable future to be completed with the result once complete
+     */
+    CompletableFuture<Result<UpdateResult<K, V>>> mapUpdate(
+            String mapName, K key, Match<V> valueMatch, Match<Long> versionMatch, V value);
+
+    /**
+     * Clears the map.
+     *
+     * @param mapName map name
+     * @return A completable future to be completed with the result once complete.
+     */
+    CompletableFuture<Result<Void>> mapClear(String mapName);
+
+    /**
+     * Gets a set of keys in the map.
+     *
+     * @param mapName map name
+     * @return A completable future to be completed with the result once complete.
+     */
+    CompletableFuture<Set<K>> mapKeySet(String mapName);
+
+    /**
+     * Gets a collection of values in the map.
+     *
+     * @param mapName map name
+     * @return A completable future to be completed with the result once complete.
+     */
+    CompletableFuture<Collection<Versioned<V>>> mapValues(String mapName);
+
+    /**
+     * Gets a set of entries in the map.
+     *
+     * @param mapName map name
+     * @return A completable future to be completed with the result once complete.
+     */
+    CompletableFuture<Set<Map.Entry<K, Versioned<V>>>> mapEntrySet(String mapName);
+
+    /**
+     * Atomically adds the given value to the current value of the specified counter.
+     *
+     * @param counterName counter name
+     * @param delta value to add
+     * @return updated value
+     */
+    CompletableFuture<Long> counterAddAndGet(String counterName, long delta);
+
+    /**
+     * Atomically adds the given value to the current value of the specified counter.
+     *
+     * @param counterName counter name
+     * @param delta value to add
+     * @return previous value
+     */
+    CompletableFuture<Long> counterGetAndAdd(String counterName, long delta);
+
+    /**
+     * Returns the current value of the specified atomic counter.
+     *
+     * @param counterName counter name
+     * @return current value
+     */
+    CompletableFuture<Long> counterGet(String counterName);
+
+    /**
+     * Returns the size of queue.
+     *
+     * @param queueName queue name
+     * @return queue size
+     */
+    CompletableFuture<Long> queueSize(String queueName);
+
+    /**
+     * Inserts an entry into the queue.
+     *
+     * @param queueName queue name
+     * @param entry queue entry
+     * @return void future
+     */
+    CompletableFuture<Void> queuePush(String queueName, byte[] entry);
+
+    /**
+     * Removes an entry from the queue if the queue is non-empty.
+     *
+     * @param queueName queue name
+     * @return entry future. Can be completed with null if queue is empty
+     */
+    CompletableFuture<byte[]> queuePop(String queueName);
+
+    /**
+     * Returns but does not remove an entry from the queue.
+     *
+     * @param queueName queue name
+     * @return entry. Can be null if queue is empty
+     */
+    CompletableFuture<byte[]> queuePeek(String queueName);
+
+    /**
+     * Prepare and commit the specified transaction.
+     *
+     * @param transaction transaction to commit (after preparation)
+     * @return A completable future to be completed with the result once complete
+     */
+    CompletableFuture<CommitResponse> prepareAndCommit(Transaction transaction);
+
+    /**
+     * Prepare the specified transaction for commit. A successful prepare implies
+     * all the affected resources are locked thus ensuring no concurrent updates can interfere.
+     *
+     * @param transaction transaction to prepare (for commit)
+     * @return A completable future to be completed with the result once complete. The future is completed
+     * with true if the transaction is successfully prepared i.e. all pre-conditions are met and
+     * applicable resources locked.
+     */
+    CompletableFuture<Boolean> prepare(Transaction transaction);
+
+    /**
+     * Commit the specified transaction. A successful commit implies
+     * all the updates are applied, are now durable and are now visible externally.
+     *
+     * @param transaction transaction to commit
+     * @return A completable future to be completed with the result once complete
+     */
+    CompletableFuture<CommitResponse> commit(Transaction transaction);
+
+    /**
+     * Rollback the specified transaction. A successful rollback implies
+     * all previously acquired locks for the affected resources are released.
+     *
+     * @param transaction transaction to rollback
+     * @return A completable future to be completed with the result once complete
+     */
+    CompletableFuture<Boolean> rollback(Transaction transaction);
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseSerializer.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseSerializer.java
new file mode 100644
index 00000000..de734144
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseSerializer.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+import java.nio.ByteBuffer;
+
+import org.onlab.util.KryoNamespace;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.serializers.KryoSerializer;
+import org.onosproject.store.service.DatabaseUpdate;
+import org.onosproject.store.service.Transaction;
+import org.onosproject.store.service.Versioned;
+
+import net.kuujo.copycat.cluster.internal.MemberInfo;
+import net.kuujo.copycat.raft.protocol.AppendRequest;
+import net.kuujo.copycat.raft.protocol.AppendResponse;
+import net.kuujo.copycat.raft.protocol.CommitRequest;
+import net.kuujo.copycat.raft.protocol.CommitResponse;
+import net.kuujo.copycat.raft.protocol.PollRequest;
+import net.kuujo.copycat.raft.protocol.PollResponse;
+import net.kuujo.copycat.raft.protocol.QueryRequest;
+import net.kuujo.copycat.raft.protocol.QueryResponse;
+import net.kuujo.copycat.raft.protocol.ReplicaInfo;
+import net.kuujo.copycat.raft.protocol.SyncRequest;
+import net.kuujo.copycat.raft.protocol.SyncResponse;
+import net.kuujo.copycat.raft.protocol.VoteRequest;
+import net.kuujo.copycat.raft.protocol.VoteResponse;
+import net.kuujo.copycat.util.serializer.SerializerConfig;
+
+/**
+ * Serializer for DatabaseManager's interaction with Copycat.
+ */
+public class DatabaseSerializer extends SerializerConfig {
+
+    // Copycat protocol message types.
+    // NOTE(review): Kryo assigns type ids in registration order starting at
+    // FLOATING_ID; keep the order of these register() calls stable, or nodes
+    // running different orderings will not deserialize each other's messages.
+    private static final KryoNamespace COPYCAT = KryoNamespace.newBuilder()
+            .nextId(KryoNamespace.FLOATING_ID)
+            .register(AppendRequest.class)
+            .register(AppendResponse.class)
+            .register(SyncRequest.class)
+            .register(SyncResponse.class)
+            .register(VoteRequest.class)
+            .register(VoteResponse.class)
+            .register(PollRequest.class)
+            .register(PollResponse.class)
+            .register(QueryRequest.class)
+            .register(QueryResponse.class)
+            .register(CommitRequest.class)
+            .register(CommitResponse.class)
+            .register(ReplicaInfo.class)
+            .register(MemberInfo.class)
+            .build();
+
+    // ONOS store types carried inside Copycat commands/queries.
+    // Same ordering caveat as above applies.
+    private static final KryoNamespace ONOS_STORE = KryoNamespace.newBuilder()
+            .nextId(KryoNamespace.FLOATING_ID)
+            .register(Versioned.class)
+            .register(DatabaseUpdate.class)
+            .register(DatabaseUpdate.Type.class)
+            .register(Result.class)
+            .register(UpdateResult.class)
+            .register(Result.Status.class)
+            .register(DefaultTransaction.class)
+            .register(Transaction.State.class)
+            // Fully qualified to disambiguate from Copycat's CommitResponse above.
+            .register(org.onosproject.store.consistent.impl.CommitResponse.class)
+            .register(Match.class)
+            .register(NodeId.class)
+            .build();
+
+    private static final KryoSerializer SERIALIZER = new KryoSerializer() {
+        @Override
+        protected void setupKryoPool() {
+            serializerPool = KryoNamespace.newBuilder()
+                    .register(KryoNamespaces.BASIC)
+                    .register(COPYCAT)
+                    .register(ONOS_STORE)
+                    .build();
+        }
+    };
+
+    // Serializes an object into a freshly allocated buffer.
+    @Override
+    public ByteBuffer writeObject(Object object) {
+        return ByteBuffer.wrap(SERIALIZER.encode(object));
+    }
+
+    // Deserializes an object from the given buffer.
+    @Override
+    public <T> T readObject(ByteBuffer buffer) {
+        return SERIALIZER.decode(buffer);
+    }
+}
\ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseState.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseState.java
new file mode 100644
index 00000000..b3dd1c44
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseState.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.onosproject.store.service.Transaction;
+import org.onosproject.store.service.Versioned;
+
+import net.kuujo.copycat.state.Command;
+import net.kuujo.copycat.state.Initializer;
+import net.kuujo.copycat.state.Query;
+import net.kuujo.copycat.state.StateContext;
+
+/**
+ * Database state.
+ *
+ */
+public interface DatabaseState<K, V> {
+
+    /**
+     * Initializes the database state.
+     *
+     * @param context The map state context.
+     */
+    @Initializer
+    void init(StateContext<DatabaseState<K, V>> context);
+
+    /** Returns the names of all maps. */
+    @Query
+    Set<String> maps();
+
+    /** Returns a mapping from counter name to counter value. */
+    @Query
+    Map<String, Long> counters();
+
+    /** Returns the number of entries in the named map. */
+    @Query
+    int mapSize(String mapName);
+
+    /** Returns true if the named map has no entries. */
+    @Query
+    boolean mapIsEmpty(String mapName);
+
+    /** Returns true if the named map contains the given key. */
+    @Query
+    boolean mapContainsKey(String mapName, K key);
+
+    /** Returns true if the named map contains the given value. */
+    @Query
+    boolean mapContainsValue(String mapName, V value);
+
+    /** Returns the versioned value for the given key, if present. */
+    @Query
+    Versioned<V> mapGet(String mapName, K key);
+
+    /** Updates a map entry, guarded by value and version matches. */
+    @Command
+    Result<UpdateResult<K, V>> mapUpdate(String mapName, K key, Match<V> valueMatch, Match<Long> versionMatch, V value);
+
+    /** Removes all entries from the named map. */
+    @Command
+    Result<Void> mapClear(String mapName);
+
+    /** Returns the set of keys in the named map. */
+    @Query
+    Set<K> mapKeySet(String mapName);
+
+    /** Returns the versioned values in the named map. */
+    @Query
+    Collection<Versioned<V>> mapValues(String mapName);
+
+    /** Returns the entries of the named map. */
+    @Query
+    Set<Entry<K, Versioned<V>>> mapEntrySet(String mapName);
+
+    /** Adds delta to the named counter and returns the updated value. */
+    @Command
+    Long counterAddAndGet(String counterName, long delta);
+
+    /** Adds delta to the named counter and returns the previous value. */
+    @Command
+    Long counterGetAndAdd(String counterName, long delta);
+
+    /** Returns the number of entries in the named queue. */
+    @Query
+    Long queueSize(String queueName);
+
+    /** Returns, without removing, the head of the named queue. */
+    @Query
+    byte[] queuePeek(String queueName);
+
+    /** Removes and returns the head of the named queue. */
+    @Command
+    byte[] queuePop(String queueName);
+
+    /** Appends an entry to the named queue. */
+    @Command
+    void queuePush(String queueName, byte[] entry);
+
+    /** Returns the current value of the named counter. */
+    @Query
+    Long counterGet(String counterName);
+
+    /** Prepares and, if preparation succeeds, commits the transaction. */
+    @Command
+    CommitResponse prepareAndCommit(Transaction transaction);
+
+    /** Prepares the transaction for commit, locking affected resources. */
+    @Command
+    boolean prepare(Transaction transaction);
+
+    /** Commits a previously prepared transaction. */
+    @Command
+    CommitResponse commit(Transaction transaction);
+
+    /** Rolls back a transaction, releasing any acquired locks. */
+    @Command
+    boolean rollback(Transaction transaction);
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAsyncAtomicCounter.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAsyncAtomicCounter.java
new file mode 100644
index 00000000..7a439c34
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAsyncAtomicCounter.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import org.onosproject.store.service.AsyncAtomicCounter;
+
+import java.util.concurrent.CompletableFuture;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Default implementation for a distributed AsyncAtomicCounter backed by
+ * partitioned Raft DB.
+ * <p>
+ * The initial value will be zero.
+ */
+public class DefaultAsyncAtomicCounter implements AsyncAtomicCounter {
+
+ private final String name;
+ private final Database database;
+ private final MeteringAgent monitor;
+
+ private static final String PRIMITIVE_NAME = "atomicCounter";
+ private static final String INCREMENT_AND_GET = "incrementAndGet";
+ private static final String GET_AND_INCREMENT = "getAndIncrement";
+ private static final String GET_AND_ADD = "getAndAdd";
+ private static final String ADD_AND_GET = "addAndGet";
+ private static final String GET = "get";
+
+ public DefaultAsyncAtomicCounter(String name,
+ Database database,
+ boolean meteringEnabled) {
+ this.name = checkNotNull(name);
+ this.database = checkNotNull(database);
+ this.monitor = new MeteringAgent(PRIMITIVE_NAME, name, meteringEnabled);
+ }
+
+ @Override
+ public CompletableFuture<Long> incrementAndGet() {
+ final MeteringAgent.Context timer = monitor.startTimer(INCREMENT_AND_GET);
+ return addAndGet(1L)
+ .whenComplete((r, e) -> timer.stop(e));
+ }
+
+ @Override
+ public CompletableFuture<Long> get() {
+ final MeteringAgent.Context timer = monitor.startTimer(GET);
+ return database.counterGet(name)
+ .whenComplete((r, e) -> timer.stop(e));
+ }
+
+ @Override
+ public CompletableFuture<Long> getAndIncrement() {
+ final MeteringAgent.Context timer = monitor.startTimer(GET_AND_INCREMENT);
+ return getAndAdd(1L)
+ .whenComplete((r, e) -> timer.stop(e));
+ }
+
+ @Override
+ public CompletableFuture<Long> getAndAdd(long delta) {
+ final MeteringAgent.Context timer = monitor.startTimer(GET_AND_ADD);
+ return database.counterGetAndAdd(name, delta)
+ .whenComplete((r, e) -> timer.stop(e));
+ }
+
+ @Override
+ public CompletableFuture<Long> addAndGet(long delta) {
+ final MeteringAgent.Context timer = monitor.startTimer(ADD_AND_GET);
+ return database.counterAddAndGet(name, delta)
+ .whenComplete((r, e) -> timer.stop(e));
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAsyncConsistentMap.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAsyncConsistentMap.java
new file mode 100644
index 00000000..0ea66861
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAsyncConsistentMap.java
@@ -0,0 +1,465 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.collect.Maps;
+import org.onlab.util.HexString;
+import org.onlab.util.SharedExecutors;
+import org.onlab.util.Tools;
+import org.onosproject.core.ApplicationId;
+import org.onosproject.store.service.AsyncConsistentMap;
+import org.onosproject.store.service.ConsistentMapException;
+import org.onosproject.store.service.MapEvent;
+import org.onosproject.store.service.MapEventListener;
+import org.onosproject.store.service.Serializer;
+import org.onosproject.store.service.Versioned;
+import org.slf4j.Logger;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CopyOnWriteArraySet;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.onosproject.store.consistent.impl.StateMachineUpdate.Target.MAP_UPDATE;
+import static org.onosproject.store.consistent.impl.StateMachineUpdate.Target.TX_COMMIT;
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * AsyncConsistentMap implementation that is backed by a Raft consensus
+ * based database.
+ *
+ * @param <K> type of key.
+ * @param <V> type of value.
+ */
public class DefaultAsyncConsistentMap<K, V> implements AsyncConsistentMap<K, V> {

    private final String name;
    private final ApplicationId applicationId;
    private final Database database;
    private final Serializer serializer;
    // When true, every mutating operation throws UnsupportedOperationException.
    private final boolean readOnly;
    private final boolean purgeOnUninstall;

    // Names used by the metering agent to label per-operation timers.
    private static final String PRIMITIVE_NAME = "consistentMap";
    private static final String SIZE = "size";
    private static final String IS_EMPTY = "isEmpty";
    private static final String CONTAINS_KEY = "containsKey";
    private static final String CONTAINS_VALUE = "containsValue";
    private static final String GET = "get";
    private static final String COMPUTE_IF = "computeIf";
    private static final String PUT = "put";
    private static final String PUT_AND_GET = "putAndGet";
    private static final String PUT_IF_ABSENT = "putIfAbsent";
    private static final String REMOVE = "remove";
    private static final String CLEAR = "clear";
    private static final String KEY_SET = "keySet";
    private static final String VALUES = "values";
    private static final String ENTRY_SET = "entrySet";
    private static final String REPLACE = "replace";
    private static final String COMPUTE_IF_ABSENT = "computeIfAbsent";

    // Listeners to notify on map events; safe for concurrent iteration.
    private final Set<MapEventListener<K, V>> listeners = new CopyOnWriteArraySet<>();

    private final Logger log = getLogger(getClass());
    private final MeteringAgent monitor;

    private static final String ERROR_NULL_KEY = "Key cannot be null";
    private static final String ERROR_NULL_VALUE = "Null values are not allowed";

    // Caches the hex-encoded serialized form of keys; values are held softly
    // so the cache can shrink under memory pressure.
    private final LoadingCache<K, String> keyCache = CacheBuilder.newBuilder()
            .softValues()
            .build(new CacheLoader<K, String>() {

                @Override
                public String load(K key) {
                    return HexString.toHexString(serializer.encode(key));
                }
            });

    /**
     * Decodes a hex-encoded key string back to a key object.
     *
     * @param key hex-encoded serialized key
     * @return decoded key
     */
    protected K dK(String key) {
        return serializer.decode(HexString.fromHexString(key));
    }

    /**
     * Creates a new async consistent map view backed by the given database.
     *
     * @param name map name
     * @param applicationId id of the application owning this map (may be null)
     * @param database backing Raft-based database
     * @param serializer serializer for keys and values
     * @param readOnly whether mutating operations are disallowed
     * @param purgeOnUninstall whether entries are purged when the owning app is uninstalled
     * @param meteringEnabled whether per-operation latency metering is on
     */
    public DefaultAsyncConsistentMap(String name,
                                     ApplicationId applicationId,
                                     Database database,
                                     Serializer serializer,
                                     boolean readOnly,
                                     boolean purgeOnUninstall,
                                     boolean meteringEnabled) {
        this.name = checkNotNull(name, "map name cannot be null");
        this.applicationId = applicationId;
        this.database = checkNotNull(database, "database cannot be null");
        this.serializer = checkNotNull(serializer, "serializer cannot be null");
        this.readOnly = readOnly;
        this.purgeOnUninstall = purgeOnUninstall;
        // Subscribe to state machine updates so map events reach local
        // listeners; the work is handed off to a shared single-thread executor
        // to keep the consumer callback itself cheap.
        this.database.registerConsumer(update -> {
            SharedExecutors.getSingleThreadExecutor().execute(() -> {
                if (listeners.isEmpty()) {
                    return;
                }
                try {
                    if (update.target() == MAP_UPDATE) {
                        // Single-entry update: only forward events for this map.
                        Result<UpdateResult<String, byte[]>> result = update.output();
                        if (result.success() && result.value().mapName().equals(name)) {
                            MapEvent<K, V> mapEvent = result.value()
                                    .<K, V>map(this::dK,
                                               v -> serializer.decode(Tools.copyOf(v)))
                                    .toMapEvent();
                            notifyListeners(mapEvent);
                        }
                    } else if (update.target() == TX_COMMIT) {
                        // Transaction commit: forward an event per update that
                        // targets this map.
                        CommitResponse response = update.output();
                        if (response.success()) {
                            response.updates().forEach(u -> {
                                if (u.mapName().equals(name)) {
                                    MapEvent<K, V> mapEvent =
                                            u.<K, V>map(this::dK,
                                                        v -> serializer.decode(Tools.copyOf(v)))
                                                    .toMapEvent();
                                    notifyListeners(mapEvent);
                                }
                            });
                        }
                    }
                } catch (Exception e) {
                    log.warn("Error notifying listeners", e);
                }
            });
        });
        this.monitor = new MeteringAgent(PRIMITIVE_NAME, name, meteringEnabled);
    }

    /**
     * Returns this map name.
     * @return map name
     */
    public String name() {
        return name;
    }

    /**
     * Returns the serializer for map entries.
     * @return map entry serializer
     */
    public Serializer serializer() {
        return serializer;
    }

    /**
     * Returns the applicationId owning this map.
     * @return application Id
     */
    public ApplicationId applicationId() {
        return applicationId;
    }

    /**
     * Returns whether the map entries should be purged when the application
     * owning it is uninstalled.
     * @return true is map needs to cleared on app uninstall; false otherwise
     */
    public boolean purgeOnUninstall() {
        return purgeOnUninstall;
    }

    @Override
    public CompletableFuture<Integer> size() {
        final MeteringAgent.Context timer = monitor.startTimer(SIZE);
        return database.mapSize(name)
                .whenComplete((r, e) -> timer.stop(e));
    }

    @Override
    public CompletableFuture<Boolean> isEmpty() {
        final MeteringAgent.Context timer = monitor.startTimer(IS_EMPTY);
        return database.mapIsEmpty(name)
                .whenComplete((r, e) -> timer.stop(e));
    }

    @Override
    public CompletableFuture<Boolean> containsKey(K key) {
        checkNotNull(key, ERROR_NULL_KEY);
        final MeteringAgent.Context timer = monitor.startTimer(CONTAINS_KEY);
        return database.mapContainsKey(name, keyCache.getUnchecked(key))
                .whenComplete((r, e) -> timer.stop(e));
    }

    @Override
    public CompletableFuture<Boolean> containsValue(V value) {
        checkNotNull(value, ERROR_NULL_VALUE);
        final MeteringAgent.Context timer = monitor.startTimer(CONTAINS_VALUE);
        return database.mapContainsValue(name, serializer.encode(value))
                .whenComplete((r, e) -> timer.stop(e));
    }

    @Override
    public CompletableFuture<Versioned<V>> get(K key) {
        checkNotNull(key, ERROR_NULL_KEY);
        final MeteringAgent.Context timer = monitor.startTimer(GET);
        return database.mapGet(name, keyCache.getUnchecked(key))
                .whenComplete((r, e) -> timer.stop(e))
                .thenApply(v -> v != null ? v.map(serializer::decode) : null);
    }

    @Override
    public CompletableFuture<Versioned<V>> computeIfAbsent(K key,
                                                           Function<? super K, ? extends V> mappingFunction) {
        checkNotNull(key, ERROR_NULL_KEY);
        checkNotNull(mappingFunction, "Mapping function cannot be null");
        final MeteringAgent.Context timer = monitor.startTimer(COMPUTE_IF_ABSENT);
        // NOTE(review): the mapping function is applied eagerly, before the
        // conditional update, so it runs even when the key is already present
        // (the Match.ifNull() guard then simply rejects the update) -- confirm
        // callers tolerate side effects in the mapping function.
        return updateAndGet(key, Match.ifNull(), Match.any(), mappingFunction.apply(key))
                .whenComplete((r, e) -> timer.stop(e))
                .thenApply(v -> v.newValue());
    }

    @Override
    public CompletableFuture<Versioned<V>> computeIfPresent(K key,
                                                            BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        return computeIf(key, Objects::nonNull, remappingFunction);
    }

    @Override
    public CompletableFuture<Versioned<V>> compute(K key,
                                                   BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        return computeIf(key, v -> true, remappingFunction);
    }

    @Override
    public CompletableFuture<Versioned<V>> computeIf(K key,
                                                     Predicate<? super V> condition,
                                                     BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        checkNotNull(key, ERROR_NULL_KEY);
        checkNotNull(condition, "predicate function cannot be null");
        checkNotNull(remappingFunction, "Remapping function cannot be null");
        final MeteringAgent.Context timer = monitor.startTimer(COMPUTE_IF);
        // Optimistic read-compute-update: read the current value, compute the
        // replacement locally, then apply it with value/version matchers so a
        // concurrent change is detected and surfaced as ConcurrentModification.
        return get(key).thenCompose(r1 -> {
            V existingValue = r1 == null ? null : r1.value();
            // if the condition evaluates to false, return existing value.
            if (!condition.test(existingValue)) {
                return CompletableFuture.completedFuture(r1);
            }

            AtomicReference<V> computedValue = new AtomicReference<>();
            // if remappingFunction throws an exception, return the exception.
            try {
                computedValue.set(remappingFunction.apply(key, existingValue));
            } catch (Exception e) {
                return Tools.exceptionalFuture(e);
            }
            if (computedValue.get() == null && r1 == null) {
                return CompletableFuture.completedFuture(null);
            }
            Match<V> valueMatcher = r1 == null ? Match.ifNull() : Match.any();
            Match<Long> versionMatcher = r1 == null ? Match.any() : Match.ifValue(r1.version());
            return updateAndGet(key, valueMatcher, versionMatcher, computedValue.get())
                    .whenComplete((r, e) -> timer.stop(e))
                    .thenApply(v -> {
                        if (v.updated()) {
                            return v.newValue();
                        } else {
                            throw new ConsistentMapException.ConcurrentModification();
                        }
                    });
        });
    }

    @Override
    public CompletableFuture<Versioned<V>> put(K key, V value) {
        checkNotNull(key, ERROR_NULL_KEY);
        checkNotNull(value, ERROR_NULL_VALUE);
        final MeteringAgent.Context timer = monitor.startTimer(PUT);
        return updateAndGet(key, Match.any(), Match.any(), value).thenApply(v -> v.oldValue())
                .whenComplete((r, e) -> timer.stop(e));
    }

    @Override
    public CompletableFuture<Versioned<V>> putAndGet(K key, V value) {
        checkNotNull(key, ERROR_NULL_KEY);
        checkNotNull(value, ERROR_NULL_VALUE);
        final MeteringAgent.Context timer = monitor.startTimer(PUT_AND_GET);
        return updateAndGet(key, Match.any(), Match.any(), value).thenApply(v -> v.newValue())
                .whenComplete((r, e) -> timer.stop(e));
    }

    @Override
    public CompletableFuture<Versioned<V>> remove(K key) {
        checkNotNull(key, ERROR_NULL_KEY);
        final MeteringAgent.Context timer = monitor.startTimer(REMOVE);
        // A null new value in updateAndGet signals removal.
        return updateAndGet(key, Match.any(), Match.any(), null).thenApply(v -> v.oldValue())
                .whenComplete((r, e) -> timer.stop(e));
    }

    @Override
    public CompletableFuture<Void> clear() {
        checkIfUnmodifiable();
        final MeteringAgent.Context timer = monitor.startTimer(CLEAR);
        return database.mapClear(name).thenApply(this::unwrapResult)
                .whenComplete((r, e) -> timer.stop(e));
    }

    @Override
    public CompletableFuture<Set<K>> keySet() {
        final MeteringAgent.Context timer = monitor.startTimer(KEY_SET);
        return database.mapKeySet(name)
                .thenApply(s -> s
                        .stream()
                        .map(this::dK)
                        .collect(Collectors.toSet()))
                .whenComplete((r, e) -> timer.stop(e));
    }

    @Override
    public CompletableFuture<Collection<Versioned<V>>> values() {
        final MeteringAgent.Context timer = monitor.startTimer(VALUES);
        return database.mapValues(name)
                .whenComplete((r, e) -> timer.stop(e))
                .thenApply(c -> c
                        .stream()
                        .map(v -> v.<V>map(serializer::decode))
                        .collect(Collectors.toList()));
    }

    @Override
    public CompletableFuture<Set<Entry<K, Versioned<V>>>> entrySet() {
        final MeteringAgent.Context timer = monitor.startTimer(ENTRY_SET);
        return database.mapEntrySet(name)
                .whenComplete((r, e) -> timer.stop(e))
                .thenApply(s -> s
                        .stream()
                        .map(this::mapRawEntry)
                        .collect(Collectors.toSet()));
    }

    @Override
    public CompletableFuture<Versioned<V>> putIfAbsent(K key, V value) {
        checkNotNull(key, ERROR_NULL_KEY);
        checkNotNull(value, ERROR_NULL_VALUE);
        final MeteringAgent.Context timer = monitor.startTimer(PUT_IF_ABSENT);
        return updateAndGet(key, Match.ifNull(), Match.any(), value)
                .whenComplete((r, e) -> timer.stop(e))
                .thenApply(v -> v.oldValue());
    }

    @Override
    public CompletableFuture<Boolean> remove(K key, V value) {
        checkNotNull(key, ERROR_NULL_KEY);
        checkNotNull(value, ERROR_NULL_VALUE);
        final MeteringAgent.Context timer = monitor.startTimer(REMOVE);
        return updateAndGet(key, Match.ifValue(value), Match.any(), null)
                .whenComplete((r, e) -> timer.stop(e))
                .thenApply(v -> v.updated());
    }

    @Override
    public CompletableFuture<Boolean> remove(K key, long version) {
        checkNotNull(key, ERROR_NULL_KEY);
        final MeteringAgent.Context timer = monitor.startTimer(REMOVE);
        return updateAndGet(key, Match.any(), Match.ifValue(version), null)
                .whenComplete((r, e) -> timer.stop(e))
                .thenApply(v -> v.updated());
    }

    @Override
    public CompletableFuture<Boolean> replace(K key, V oldValue, V newValue) {
        checkNotNull(key, ERROR_NULL_KEY);
        checkNotNull(oldValue, ERROR_NULL_VALUE);
        checkNotNull(newValue, ERROR_NULL_VALUE);
        final MeteringAgent.Context timer = monitor.startTimer(REPLACE);
        return updateAndGet(key, Match.ifValue(oldValue), Match.any(), newValue)
                .whenComplete((r, e) -> timer.stop(e))
                .thenApply(v -> v.updated());
    }

    @Override
    public CompletableFuture<Boolean> replace(K key, long oldVersion, V newValue) {
        final MeteringAgent.Context timer = monitor.startTimer(REPLACE);
        return updateAndGet(key, Match.any(), Match.ifValue(oldVersion), newValue)
                .whenComplete((r, e) -> timer.stop(e))
                .thenApply(v -> v.updated());
    }

    // Decodes one raw (hex key, byte[] value) entry into typed form.
    private Map.Entry<K, Versioned<V>> mapRawEntry(Map.Entry<String, Versioned<byte[]>> e) {
        return Maps.immutableEntry(dK(e.getKey()), e.getValue().<V>map(serializer::decode));
    }

    // Common path for all conditional mutations: encodes key/value, runs the
    // conditional update on the database, unwraps the Result (throwing
    // ConcurrentModification when the entry is locked) and decodes the outcome.
    // A null value encodes as null, which the state machine treats as removal.
    private CompletableFuture<UpdateResult<K, V>> updateAndGet(K key,
                                                               Match<V> oldValueMatch,
                                                               Match<Long> oldVersionMatch,
                                                               V value) {
        checkIfUnmodifiable();
        return database.mapUpdate(name,
                keyCache.getUnchecked(key),
                oldValueMatch.map(serializer::encode),
                oldVersionMatch,
                value == null ? null : serializer.encode(value))
                .thenApply(this::unwrapResult)
                .thenApply(r -> r.<K, V>map(this::dK, serializer::decode));
    }

    // Extracts the value from a Result, mapping LOCKED to ConcurrentModification.
    private <T> T unwrapResult(Result<T> result) {
        if (result.status() == Result.Status.LOCKED) {
            throw new ConsistentMapException.ConcurrentModification();
        } else if (result.success()) {
            return result.value();
        } else {
            throw new IllegalStateException("Must not be here");
        }
    }

    // Rejects mutations on read-only views.
    private void checkIfUnmodifiable() {
        if (readOnly) {
            throw new UnsupportedOperationException();
        }
    }

    @Override
    public void addListener(MapEventListener<K, V> listener) {
        listeners.add(listener);
    }

    @Override
    public void removeListener(MapEventListener<K, V> listener) {
        listeners.remove(listener);
    }

    /**
     * Delivers an event to all registered listeners, isolating listener
     * failures so one bad listener cannot block the others.
     *
     * @param event event to deliver; ignored when null
     */
    protected void notifyListeners(MapEvent<K, V> event) {
        if (event == null) {
            return;
        }
        listeners.forEach(listener -> {
            try {
                listener.event(event);
            } catch (Exception e) {
                log.warn("Failure notifying listener about {}", event, e);
            }
        });
    }

}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAtomicCounter.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAtomicCounter.java
new file mode 100644
index 00000000..64886e41
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAtomicCounter.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import org.onosproject.store.service.AsyncAtomicCounter;
+import org.onosproject.store.service.AtomicCounter;
+import org.onosproject.store.service.StorageException;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Default implementation for a distributed AtomicCounter backed by
+ * partitioned Raft DB.
+ * <p>
+ * The initial value will be zero.
+ */
+public class DefaultAtomicCounter implements AtomicCounter {
+
+ private static final int OPERATION_TIMEOUT_MILLIS = 5000;
+
+ private final AsyncAtomicCounter asyncCounter;
+
+ public DefaultAtomicCounter(String name,
+ Database database,
+ boolean meteringEnabled) {
+ asyncCounter = new DefaultAsyncAtomicCounter(name, database, meteringEnabled);
+ }
+
+ @Override
+ public long incrementAndGet() {
+ return complete(asyncCounter.incrementAndGet());
+ }
+
+ @Override
+ public long getAndIncrement() {
+ return complete(asyncCounter.getAndIncrement());
+ }
+
+ @Override
+ public long getAndAdd(long delta) {
+ return complete(asyncCounter.getAndAdd(delta));
+ }
+
+ @Override
+ public long addAndGet(long delta) {
+ return complete(asyncCounter.getAndAdd(delta));
+ }
+
+ @Override
+ public long get() {
+ return complete(asyncCounter.get());
+ }
+
+ private static <T> T complete(CompletableFuture<T> future) {
+ try {
+ return future.get(OPERATION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new StorageException.Interrupted();
+ } catch (TimeoutException e) {
+ throw new StorageException.Timeout();
+ } catch (ExecutionException e) {
+ throw new StorageException(e.getCause());
+ }
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAtomicCounterBuilder.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAtomicCounterBuilder.java
new file mode 100644
index 00000000..dba4443b
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAtomicCounterBuilder.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import org.onosproject.store.service.AsyncAtomicCounter;
+import org.onosproject.store.service.AtomicCounter;
+import org.onosproject.store.service.AtomicCounterBuilder;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkState;
+
+/**
+ * Default implementation of AtomicCounterBuilder.
+ */
+public class DefaultAtomicCounterBuilder implements AtomicCounterBuilder {
+
+ private String name;
+ private boolean partitionsEnabled = true;
+ private final Database partitionedDatabase;
+ private final Database inMemoryDatabase;
+ private boolean metering = true;
+
+ public DefaultAtomicCounterBuilder(Database inMemoryDatabase, Database partitionedDatabase) {
+ this.inMemoryDatabase = inMemoryDatabase;
+ this.partitionedDatabase = partitionedDatabase;
+ }
+
+ @Override
+ public AtomicCounterBuilder withName(String name) {
+ checkArgument(name != null && !name.isEmpty());
+ this.name = name;
+ return this;
+ }
+
+ @Override
+ public AtomicCounterBuilder withPartitionsDisabled() {
+ partitionsEnabled = false;
+ return this;
+ }
+
+ @Override
+ public AtomicCounter build() {
+ validateInputs();
+ Database database = partitionsEnabled ? partitionedDatabase : inMemoryDatabase;
+ return new DefaultAtomicCounter(name, database, metering);
+ }
+
+ @Override
+ public AsyncAtomicCounter buildAsyncCounter() {
+ validateInputs();
+ Database database = partitionsEnabled ? partitionedDatabase : inMemoryDatabase;
+ return new DefaultAsyncAtomicCounter(name, database, metering);
+ }
+
+ @Override
+ public AtomicCounterBuilder withMeteringDisabled() {
+ metering = false;
+ return this;
+ }
+
+ private void validateInputs() {
+ checkState(name != null, "name must be specified");
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAtomicValue.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAtomicValue.java
new file mode 100644
index 00000000..e8c93f31
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAtomicValue.java
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import org.onosproject.store.service.AtomicValue;
+import org.onosproject.store.service.AtomicValueEvent;
+import org.onosproject.store.service.AtomicValueEventListener;
+import org.onosproject.store.service.ConsistentMap;
+import org.onosproject.store.service.MapEvent;
+import org.onosproject.store.service.MapEventListener;
+import org.onosproject.store.service.Serializer;
+import org.onosproject.store.service.Versioned;
+
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArraySet;
+
+/**
+ * Default implementation of AtomicValue.
+ *
+ * @param <V> value type
+ */
public class DefaultAtomicValue<V> implements AtomicValue<V> {

    // Registered listeners; the map-level listener below is attached only
    // while this set is non-empty.
    private final Set<AtomicValueEventListener<V>> listeners = new CopyOnWriteArraySet<>();
    // The value is stored as a single entry (keyed by this value's name) in a
    // shared consistent map.
    private final ConsistentMap<String, byte[]> valueMap;
    private final String name;
    private final Serializer serializer;
    private final MapEventListener<String, byte[]> mapEventListener = new InternalMapEventListener();
    private final MeteringAgent monitor;

    // Names used by the metering agent to label per-operation timers.
    private static final String COMPONENT_NAME = "atomicValue";
    private static final String GET = "get";
    private static final String GET_AND_SET = "getAndSet";
    private static final String COMPARE_AND_SET = "compareAndSet";

    /**
     * Creates an atomic value stored as one entry of the given consistent map.
     *
     * @param valueMap shared backing map
     * @param name value name, used as the map key
     * @param meteringEnabled whether per-operation latency metering is on
     * @param serializer serializer for values
     */
    public DefaultAtomicValue(ConsistentMap<String, byte[]> valueMap,
                              String name,
                              boolean meteringEnabled,
                              Serializer serializer) {
        this.valueMap = valueMap;
        this.name = name;
        this.serializer = serializer;
        this.monitor = new MeteringAgent(COMPONENT_NAME, name, meteringEnabled);
    }

    @Override
    public boolean compareAndSet(V expect, V update) {
        final MeteringAgent.Context newTimer = monitor.startTimer(COMPARE_AND_SET);
        try {
            if (expect == null) {
                if (update == null) {
                    // NOTE(review): returns true without consulting the current
                    // value, so compareAndSet(null, null) "succeeds" even when
                    // a value is present -- confirm this is intended.
                    return true;
                }
                // Expect absent: succeed only if no value is currently set.
                return valueMap.putIfAbsent(name, serializer.encode(update)) == null;
            } else {
                if (update == null) {
                    // Setting to null means removal, conditional on the
                    // current value matching the expectation.
                    return valueMap.remove(name, serializer.encode(expect));
                }
                return valueMap.replace(name, serializer.encode(expect), serializer.encode(update));
            }
        } finally {
            newTimer.stop(null);
        }
    }

    @Override
    public V get() {
        final MeteringAgent.Context newTimer = monitor.startTimer(GET);
        try {
            Versioned<byte[]> rawValue = valueMap.get(name);
            return rawValue == null ? null : serializer.decode(rawValue.value());
        } finally {
            newTimer.stop(null);
        }
    }

    @Override
    public V getAndSet(V value) {
        final MeteringAgent.Context newTimer = monitor.startTimer(GET_AND_SET);
        try {
            // A null new value removes the entry; otherwise the encoded value
            // is stored. Either way the previous value (or null) is returned.
            Versioned<byte[]> previousValue = value == null ?
                    valueMap.remove(name) : valueMap.put(name, serializer.encode(value));
            return previousValue == null ? null : serializer.decode(previousValue.value());
        } finally {
            newTimer.stop(null);
        }
    }

    @Override
    public void set(V value) {
        getAndSet(value);
    }

    @Override
    public void addListener(AtomicValueEventListener<V> listener) {
        synchronized (listeners) {
            if (listeners.add(listener)) {
                // Attach the backing-map listener lazily, on the first
                // registration only.
                if (listeners.size() == 1) {
                    valueMap.addListener(mapEventListener);
                }
            }
        }
    }

    @Override
    public void removeListener(AtomicValueEventListener<V> listener) {
        synchronized (listeners) {
            if (listeners.remove(listener)) {
                // Detach the backing-map listener when the last registration
                // is removed.
                if (listeners.size() == 0) {
                    valueMap.removeListener(mapEventListener);
                }
            }
        }
    }

    // Translates backing-map events into atomic value events. A REMOVE maps
    // to an UPDATE event carrying a null new value.
    private class InternalMapEventListener implements MapEventListener<String, byte[]> {

        @Override
        public void event(MapEvent<String, byte[]> mapEvent) {
            V newValue = mapEvent.type() == MapEvent.Type.REMOVE ? null : serializer.decode(mapEvent.value().value());
            AtomicValueEvent<V> atomicValueEvent = new AtomicValueEvent<>(name, AtomicValueEvent.Type.UPDATE, newValue);
            listeners.forEach(l -> l.event(atomicValueEvent));
        }
    }
}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAtomicValueBuilder.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAtomicValueBuilder.java
new file mode 100644
index 00000000..b39004b3
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultAtomicValueBuilder.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.service.AtomicValue;
+import org.onosproject.store.service.AtomicValueBuilder;
+import org.onosproject.store.service.ConsistentMapBuilder;
+import org.onosproject.store.service.Serializer;
+
+/**
+ * Default implementation of AtomicValueBuilder.
+ *
+ * @param <V> value type
+ */
+public class DefaultAtomicValueBuilder<V> implements AtomicValueBuilder<V> {
+
+ private Serializer serializer;
+ private String name;
+ private ConsistentMapBuilder<String, byte[]> mapBuilder;
+ private boolean metering = true;
+
+ public DefaultAtomicValueBuilder(DatabaseManager manager) {
+ mapBuilder = manager.<String, byte[]>consistentMapBuilder()
+ .withName("onos-atomic-values")
+ .withMeteringDisabled()
+ .withSerializer(Serializer.using(KryoNamespaces.BASIC));
+ }
+
+ @Override
+ public AtomicValueBuilder<V> withName(String name) {
+ this.name = name;
+ return this;
+ }
+
+ @Override
+ public AtomicValueBuilder<V> withSerializer(Serializer serializer) {
+ this.serializer = serializer;
+ return this;
+ }
+
+ @Override
+ public AtomicValueBuilder<V> withPartitionsDisabled() {
+ mapBuilder.withPartitionsDisabled();
+ return this;
+ }
+
+ @Override
+ public AtomicValueBuilder<V> withMeteringDisabled() {
+ metering = false;
+ return this;
+ }
+
+ @Override
+ public AtomicValue<V> build() {
+ return new DefaultAtomicValue<>(mapBuilder.build(), name, metering, serializer);
+ }
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultConsistentMap.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultConsistentMap.java
new file mode 100644
index 00000000..6f7b5487
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultConsistentMap.java
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.Set;
+
+import org.onosproject.store.service.ConsistentMap;
+import org.onosproject.store.service.ConsistentMapException;
+import org.onosproject.store.service.MapEventListener;
+import org.onosproject.store.service.Versioned;
+
+/**
+ * ConsistentMap implementation that is backed by a Raft consensus
+ * based database.
+ *
+ * @param <K> type of key.
+ * @param <V> type of value.
+ */
+public class DefaultConsistentMap<K, V> implements ConsistentMap<K, V> {
+
+ private static final int OPERATION_TIMEOUT_MILLIS = 5000;
+
+ private final DefaultAsyncConsistentMap<K, V> asyncMap;
+ private Map<K, V> javaMap;
+
+ public String name() {
+ return asyncMap.name();
+ }
+
+ public DefaultConsistentMap(DefaultAsyncConsistentMap<K, V> asyncMap) {
+ this.asyncMap = asyncMap;
+ }
+
+ @Override
+ public int size() {
+ return complete(asyncMap.size());
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return complete(asyncMap.isEmpty());
+ }
+
+ @Override
+ public boolean containsKey(K key) {
+ return complete(asyncMap.containsKey(key));
+ }
+
+ @Override
+ public boolean containsValue(V value) {
+ return complete(asyncMap.containsValue(value));
+ }
+
+ @Override
+ public Versioned<V> get(K key) {
+ return complete(asyncMap.get(key));
+ }
+
+ @Override
+ public Versioned<V> computeIfAbsent(K key,
+ Function<? super K, ? extends V> mappingFunction) {
+ return complete(asyncMap.computeIfAbsent(key, mappingFunction));
+ }
+
+ @Override
+ public Versioned<V> computeIfPresent(K key,
+ BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
+ return complete(asyncMap.computeIfPresent(key, remappingFunction));
+ }
+
+ @Override
+ public Versioned<V> compute(K key,
+ BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
+ return complete(asyncMap.compute(key, remappingFunction));
+ }
+
+ @Override
+ public Versioned<V> computeIf(K key,
+ Predicate<? super V> condition,
+ BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
+ return complete(asyncMap.computeIf(key, condition, remappingFunction));
+ }
+
+ @Override
+ public Versioned<V> put(K key, V value) {
+ return complete(asyncMap.put(key, value));
+ }
+
+ @Override
+ public Versioned<V> putAndGet(K key, V value) {
+ return complete(asyncMap.putAndGet(key, value));
+ }
+
+ @Override
+ public Versioned<V> remove(K key) {
+ return complete(asyncMap.remove(key));
+ }
+
+ @Override
+ public void clear() {
+ complete(asyncMap.clear());
+ }
+
+ @Override
+ public Set<K> keySet() {
+ return complete(asyncMap.keySet());
+ }
+
+ @Override
+ public Collection<Versioned<V>> values() {
+ return complete(asyncMap.values());
+ }
+
+ @Override
+ public Set<Entry<K, Versioned<V>>> entrySet() {
+ return complete(asyncMap.entrySet());
+ }
+
+ @Override
+ public Versioned<V> putIfAbsent(K key, V value) {
+ return complete(asyncMap.putIfAbsent(key, value));
+ }
+
+ @Override
+ public boolean remove(K key, V value) {
+ return complete(asyncMap.remove(key, value));
+ }
+
+ @Override
+ public boolean remove(K key, long version) {
+ return complete(asyncMap.remove(key, version));
+ }
+
+ @Override
+ public boolean replace(K key, V oldValue, V newValue) {
+ return complete(asyncMap.replace(key, oldValue, newValue));
+ }
+
+ @Override
+ public boolean replace(K key, long oldVersion, V newValue) {
+ return complete(asyncMap.replace(key, oldVersion, newValue));
+ }
+
+ private static <T> T complete(CompletableFuture<T> future) {
+ try {
+ return future.get(OPERATION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new ConsistentMapException.Interrupted();
+ } catch (TimeoutException e) {
+ throw new ConsistentMapException.Timeout();
+ } catch (ExecutionException e) {
+ if (e.getCause() instanceof ConsistentMapException) {
+ throw (ConsistentMapException) e.getCause();
+ } else {
+ throw new ConsistentMapException(e.getCause());
+ }
+ }
+ }
+
+ @Override
+ public void addListener(MapEventListener<K, V> listener) {
+ asyncMap.addListener(listener);
+ }
+
+ @Override
+ public void removeListener(MapEventListener<K, V> listener) {
+ asyncMap.addListener(listener);
+ }
+
+ @Override
+ public Map<K, V> asJavaMap() {
+ synchronized (this) {
+ if (javaMap == null) {
+ javaMap = new ConsistentMapBackedJavaMap<>(this);
+ }
+ }
+ return javaMap;
+ }
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultConsistentMapBuilder.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultConsistentMapBuilder.java
new file mode 100644
index 00000000..0e11794e
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultConsistentMapBuilder.java
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import org.onosproject.core.ApplicationId;
+import org.onosproject.store.service.AsyncConsistentMap;
+import org.onosproject.store.service.ConsistentMap;
+import org.onosproject.store.service.ConsistentMapBuilder;
+import org.onosproject.store.service.Serializer;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkState;
+
+/**
+ * Default Consistent Map builder.
+ *
+ * @param <K> type for map key
+ * @param <V> type for map value
+ */
+public class DefaultConsistentMapBuilder<K, V> implements ConsistentMapBuilder<K, V> {
+
+ private Serializer serializer;
+ private String name;
+ private ApplicationId applicationId;
+ private boolean purgeOnUninstall = false;
+ private boolean partitionsEnabled = true;
+ private boolean readOnly = false;
+ private boolean metering = true;
+ private boolean relaxedReadConsistency = false;
+ private final DatabaseManager manager;
+
+ public DefaultConsistentMapBuilder(DatabaseManager manager) {
+ this.manager = manager;
+ }
+
+ @Override
+ public ConsistentMapBuilder<K, V> withName(String name) {
+ checkArgument(name != null && !name.isEmpty());
+ this.name = name;
+ return this;
+ }
+
+ @Override
+ public ConsistentMapBuilder<K, V> withApplicationId(ApplicationId id) {
+ checkArgument(id != null);
+ this.applicationId = id;
+ return this;
+ }
+
+ @Override
+ public ConsistentMapBuilder<K, V> withPurgeOnUninstall() {
+ purgeOnUninstall = true;
+ return this;
+ }
+
+ @Override
+ public ConsistentMapBuilder<K, V> withMeteringDisabled() {
+ metering = false;
+ return this;
+ }
+
+ @Override
+ public ConsistentMapBuilder<K, V> withSerializer(Serializer serializer) {
+ checkArgument(serializer != null);
+ this.serializer = serializer;
+ return this;
+ }
+
+ @Override
+ public ConsistentMapBuilder<K, V> withPartitionsDisabled() {
+ partitionsEnabled = false;
+ return this;
+ }
+
+ @Override
+ public ConsistentMapBuilder<K, V> withUpdatesDisabled() {
+ readOnly = true;
+ return this;
+ }
+
+ @Override
+ public ConsistentMapBuilder<K, V> withRelaxedReadConsistency() {
+ relaxedReadConsistency = true;
+ return this;
+ }
+
+ private void validateInputs() {
+ checkState(name != null, "name must be specified");
+ checkState(serializer != null, "serializer must be specified");
+ if (purgeOnUninstall) {
+ checkState(applicationId != null, "ApplicationId must be specified when purgeOnUninstall is enabled");
+ }
+ }
+
+ @Override
+ public ConsistentMap<K, V> build() {
+ return new DefaultConsistentMap<>(buildAndRegisterMap());
+ }
+
+ @Override
+ public AsyncConsistentMap<K, V> buildAsyncMap() {
+ return buildAndRegisterMap();
+ }
+
+ private DefaultAsyncConsistentMap<K, V> buildAndRegisterMap() {
+ validateInputs();
+ Database database = partitionsEnabled ? manager.partitionedDatabase : manager.inMemoryDatabase;
+ if (relaxedReadConsistency) {
+ return manager.registerMap(
+ new AsyncCachingConsistentMap<>(name,
+ applicationId,
+ database,
+ serializer,
+ readOnly,
+ purgeOnUninstall,
+ metering));
+ } else {
+ return manager.registerMap(
+ new DefaultAsyncConsistentMap<>(name,
+ applicationId,
+ database,
+ serializer,
+ readOnly,
+ purgeOnUninstall,
+ metering));
+ }
+ }
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDatabase.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDatabase.java
new file mode 100644
index 00000000..4d9776ee
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDatabase.java
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+import net.kuujo.copycat.state.StateMachine;
+import net.kuujo.copycat.resource.internal.AbstractResource;
+import net.kuujo.copycat.resource.internal.ResourceManager;
+import net.kuujo.copycat.state.internal.DefaultStateMachine;
+import net.kuujo.copycat.util.concurrent.Futures;
+import net.kuujo.copycat.util.function.TriConsumer;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+import org.onosproject.store.service.Transaction;
+import org.onosproject.store.service.Versioned;
+
+import com.google.common.collect.Sets;
+
+/**
+ * Default database.
+ */
+public class DefaultDatabase extends AbstractResource<Database> implements Database {
+    // Replicated state machine holding all map/counter/queue state for this database.
+    private final StateMachine<DatabaseState<String, byte[]>> stateMachine;
+    // Typed proxy for issuing operations against the state machine; null while closed.
+    private DatabaseProxy<String, byte[]> proxy;
+    // Consumers notified of every state machine update observed by the watcher.
+    private final Set<Consumer<StateMachineUpdate>> consumers = Sets.newCopyOnWriteArraySet();
+    private final TriConsumer<String, Object, Object> watcher = new InternalStateMachineWatcher();
+
+    @SuppressWarnings({ "unchecked", "rawtypes" })
+    public DefaultDatabase(ResourceManager context) {
+        super(context);
+        this.stateMachine = new DefaultStateMachine(context,
+                DatabaseState.class,
+                DefaultDatabaseState.class,
+                DefaultDatabase.class.getClassLoader());
+        // Register/unregister the update watcher in lock-step with the state
+        // machine's own lifecycle so consumers only see events while open.
+        this.stateMachine.addStartupTask(() -> {
+            stateMachine.registerWatcher(watcher);
+            return CompletableFuture.completedFuture(null);
+        });
+        this.stateMachine.addShutdownTask(() -> {
+            stateMachine.unregisterWatcher(watcher);
+            return CompletableFuture.completedFuture(null);
+        });
+    }
+
+    /**
+     * If the database is closed, returning a failed CompletableFuture. Otherwise, calls the given supplier to
+     * return the completed future result.
+     *
+     * @param supplier The supplier to call if the database is open.
+     * @param <T> The future result type.
+     * @return A completable future that if this database is closed is immediately failed.
+     */
+    protected <T> CompletableFuture<T> checkOpen(Supplier<CompletableFuture<T>> supplier) {
+        if (proxy == null) {
+            return Futures.exceptionalFuture(new IllegalStateException("Database closed"));
+        }
+        return supplier.get();
+    }
+
+    // ----- Map operations: thin delegations to the proxy, guarded by checkOpen. -----
+
+    @Override
+    public CompletableFuture<Set<String>> maps() {
+        return checkOpen(() -> proxy.maps());
+    }
+
+    @Override
+    public CompletableFuture<Map<String, Long>> counters() {
+        return checkOpen(() -> proxy.counters());
+    }
+
+    @Override
+    public CompletableFuture<Integer> mapSize(String mapName) {
+        return checkOpen(() -> proxy.mapSize(mapName));
+    }
+
+    @Override
+    public CompletableFuture<Boolean> mapIsEmpty(String mapName) {
+        return checkOpen(() -> proxy.mapIsEmpty(mapName));
+    }
+
+    @Override
+    public CompletableFuture<Boolean> mapContainsKey(String mapName, String key) {
+        return checkOpen(() -> proxy.mapContainsKey(mapName, key));
+    }
+
+    @Override
+    public CompletableFuture<Boolean> mapContainsValue(String mapName, byte[] value) {
+        return checkOpen(() -> proxy.mapContainsValue(mapName, value));
+    }
+
+    @Override
+    public CompletableFuture<Versioned<byte[]>> mapGet(String mapName, String key) {
+        return checkOpen(() -> proxy.mapGet(mapName, key));
+    }
+
+    @Override
+    public CompletableFuture<Result<UpdateResult<String, byte[]>>> mapUpdate(
+            String mapName, String key, Match<byte[]> valueMatch, Match<Long> versionMatch, byte[] value) {
+        return checkOpen(() -> proxy.mapUpdate(mapName, key, valueMatch, versionMatch, value));
+    }
+
+    @Override
+    public CompletableFuture<Result<Void>> mapClear(String mapName) {
+        return checkOpen(() -> proxy.mapClear(mapName));
+    }
+
+    @Override
+    public CompletableFuture<Set<String>> mapKeySet(String mapName) {
+        return checkOpen(() -> proxy.mapKeySet(mapName));
+    }
+
+    @Override
+    public CompletableFuture<Collection<Versioned<byte[]>>> mapValues(String mapName) {
+        return checkOpen(() -> proxy.mapValues(mapName));
+    }
+
+    @Override
+    public CompletableFuture<Set<Map.Entry<String, Versioned<byte[]>>>> mapEntrySet(String mapName) {
+        return checkOpen(() -> proxy.mapEntrySet(mapName));
+    }
+
+    // ----- Counter operations. -----
+
+    @Override
+    public CompletableFuture<Long> counterGet(String counterName) {
+        return checkOpen(() -> proxy.counterGet(counterName));
+    }
+
+    @Override
+    public CompletableFuture<Long> counterAddAndGet(String counterName, long delta) {
+        return checkOpen(() -> proxy.counterAddAndGet(counterName, delta));
+    }
+
+    @Override
+    public CompletableFuture<Long> counterGetAndAdd(String counterName, long delta) {
+        return checkOpen(() -> proxy.counterGetAndAdd(counterName, delta));
+    }
+
+    // ----- Queue operations. -----
+
+    @Override
+    public CompletableFuture<Long> queueSize(String queueName) {
+        return checkOpen(() -> proxy.queueSize(queueName));
+    }
+
+    @Override
+    public CompletableFuture<Void> queuePush(String queueName, byte[] entry) {
+        return checkOpen(() -> proxy.queuePush(queueName, entry));
+    }
+
+    @Override
+    public CompletableFuture<byte[]> queuePop(String queueName) {
+        return checkOpen(() -> proxy.queuePop(queueName));
+    }
+
+    @Override
+    public CompletableFuture<byte[]> queuePeek(String queueName) {
+        return checkOpen(() -> proxy.queuePeek(queueName));
+    }
+
+    // ----- Transaction operations. -----
+
+    @Override
+    public CompletableFuture<CommitResponse> prepareAndCommit(Transaction transaction) {
+        return checkOpen(() -> proxy.prepareAndCommit(transaction));
+    }
+
+    @Override
+    public CompletableFuture<Boolean> prepare(Transaction transaction) {
+        return checkOpen(() -> proxy.prepare(transaction));
+    }
+
+    @Override
+    public CompletableFuture<CommitResponse> commit(Transaction transaction) {
+        return checkOpen(() -> proxy.commit(transaction));
+    }
+
+    @Override
+    public CompletableFuture<Boolean> rollback(Transaction transaction) {
+        return checkOpen(() -> proxy.rollback(transaction));
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public synchronized CompletableFuture<Database> open() {
+        // Runs startup tasks (registering the watcher), opens the state machine,
+        // then creates the operation proxy, marking this database as open.
+        // NOTE(review): the returned future completes with null rather than this
+        // database instance — confirm callers ignore the future's value.
+        return runStartupTasks()
+                .thenCompose(v -> stateMachine.open())
+                .thenRun(() -> {
+                    this.proxy = stateMachine.createProxy(DatabaseProxy.class, this.getClass().getClassLoader());
+                })
+                .thenApply(v -> null);
+    }
+
+    @Override
+    public synchronized CompletableFuture<Void> close() {
+        // Clearing the proxy first makes subsequent operations fail fast in checkOpen.
+        proxy = null;
+        return stateMachine.close()
+                .thenCompose(v -> runShutdownTasks());
+    }
+
+    // Identity is based solely on the database name, mirroring equals() below.
+    @Override
+    public int hashCode() {
+        return name().hashCode();
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (other instanceof Database) {
+            return name().equals(((Database) other).name());
+        }
+        return false;
+    }
+
+    @Override
+    public void registerConsumer(Consumer<StateMachineUpdate> consumer) {
+        consumers.add(consumer);
+    }
+
+    @Override
+    public void unregisterConsumer(Consumer<StateMachineUpdate> consumer) {
+        consumers.remove(consumer);
+    }
+
+    // Fans each state machine update out to all registered consumers.
+    private class InternalStateMachineWatcher implements TriConsumer<String, Object, Object> {
+        @Override
+        public void accept(String name, Object input, Object output) {
+            StateMachineUpdate update = new StateMachineUpdate(name, input, output);
+            consumers.forEach(consumer -> consumer.accept(update));
+        }
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDatabaseState.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDatabaseState.java
new file mode 100644
index 00000000..9d3505bd
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDatabaseState.java
@@ -0,0 +1,368 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Queue;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+import java.util.Set;
+
+import org.onosproject.store.service.DatabaseUpdate;
+import org.onosproject.store.service.Transaction;
+import org.onosproject.store.service.Versioned;
+import com.google.common.base.Objects;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import net.kuujo.copycat.state.Initializer;
+import net.kuujo.copycat.state.StateContext;
+
+/**
+ * Default database state.
+ */
+public class DefaultDatabaseState implements DatabaseState<String, byte[]> {
+    // Monotonically increasing version stamp shared by all maps in this database.
+    private Long nextVersion;
+    // Named counters.
+    private Map<String, AtomicLong> counters;
+    // Named maps: mapName -> (key -> versioned value).
+    private Map<String, Map<String, Versioned<byte[]>>> maps;
+    // Named FIFO queues.
+    private Map<String, Queue<byte[]>> queues;
+
+    /**
+     * This locks map has a structure similar to the "tables" map above and
+     * holds all the provisional updates made during a transaction's prepare phase.
+     * The entry value is represented as the tuple: (transactionId, newValue)
+     * If newValue == null that signifies this update is attempting to
+     * delete the existing value.
+     * This map also serves as a lock on the entries that are being updated.
+     * The presence of a entry in this map indicates that element is
+     * participating in a transaction and is currently locked for updates.
+     */
+    private Map<String, Map<String, Update>> locks;
+
+    @Initializer
+    @Override
+    public void init(StateContext<DatabaseState<String, byte[]>> context) {
+        // Rehydrate each structure from the context, creating it on first run.
+        counters = context.get("counters");
+        if (counters == null) {
+            counters = Maps.newConcurrentMap();
+            context.put("counters", counters);
+        }
+        maps = context.get("maps");
+        if (maps == null) {
+            maps = Maps.newConcurrentMap();
+            context.put("maps", maps);
+        }
+        locks = context.get("locks");
+        if (locks == null) {
+            locks = Maps.newConcurrentMap();
+            context.put("locks", locks);
+        }
+        queues = context.get("queues");
+        if (queues == null) {
+            queues = Maps.newConcurrentMap();
+            context.put("queues", queues);
+        }
+        nextVersion = context.get("nextVersion");
+        if (nextVersion == null) {
+            nextVersion = new Long(0);
+            // NOTE(review): "nextVersion" is stored in the context only here;
+            // later increments rebind the boxed field (see mapUpdate) and
+            // presumably are not reflected in this context entry — confirm
+            // against StateContext's snapshot/restore semantics.
+            context.put("nextVersion", nextVersion);
+        }
+    }
+
+    @Override
+    public Set<String> maps() {
+        return ImmutableSet.copyOf(maps.keySet());
+    }
+
+    @Override
+    public Map<String, Long> counters() {
+        // Snapshot of current counter values.
+        Map<String, Long> counterMap = Maps.newHashMap();
+        counters.forEach((k, v) -> counterMap.put(k, v.get()));
+        return counterMap;
+    }
+
+    @Override
+    public int mapSize(String mapName) {
+        return getMap(mapName).size();
+    }
+
+    @Override
+    public boolean mapIsEmpty(String mapName) {
+        return getMap(mapName).isEmpty();
+    }
+
+    @Override
+    public boolean mapContainsKey(String mapName, String key) {
+        return getMap(mapName).containsKey(key);
+    }
+
+    @Override
+    public boolean mapContainsValue(String mapName, byte[] value) {
+        // Arrays.equals for content comparison of byte[] values.
+        return getMap(mapName).values().stream().anyMatch(v -> Arrays.equals(v.value(), value));
+    }
+
+    @Override
+    public Versioned<byte[]> mapGet(String mapName, String key) {
+        return getMap(mapName).get(key);
+    }
+
+
+    /**
+     * Conditionally updates (or removes, when value == null) a map entry.
+     * Returns Result.locked() if the key is under a pending transaction,
+     * otherwise an UpdateResult whose flag indicates whether a change occurred.
+     */
+    @Override
+    public Result<UpdateResult<String, byte[]>> mapUpdate(
+            String mapName,
+            String key,
+            Match<byte[]> valueMatch,
+            Match<Long> versionMatch,
+            byte[] value) {
+        if (isLockedForUpdates(mapName, key)) {
+            return Result.locked();
+        }
+        Versioned<byte[]> currentValue = getMap(mapName).get(key);
+        // Both matches must pass; a null current entry matches via null value/version.
+        if (!valueMatch.matches(currentValue == null ? null : currentValue.value()) ||
+                !versionMatch.matches(currentValue == null ? null : currentValue.version())) {
+            return Result.ok(new UpdateResult<>(false, mapName, key, currentValue, currentValue));
+        } else {
+            if (value == null) {
+                // Null value means removal; removing an absent key is a no-op.
+                if (currentValue == null) {
+                    return Result.ok(new UpdateResult<>(false, mapName, key, null, null));
+                } else {
+                    getMap(mapName).remove(key);
+                    return Result.ok(new UpdateResult<>(true, mapName, key, currentValue, null));
+                }
+            }
+            // Stamp the new value with the next global version.
+            Versioned<byte[]> newValue = new Versioned<>(value, ++nextVersion);
+            getMap(mapName).put(key, newValue);
+            return Result.ok(new UpdateResult<>(true, mapName, key, currentValue, newValue));
+        }
+    }
+
+    @Override
+    public Result<Void> mapClear(String mapName) {
+        // Refuse to clear while any key of this map is locked by a transaction.
+        if (areTransactionsInProgress(mapName)) {
+            return Result.locked();
+        }
+        getMap(mapName).clear();
+        return Result.ok(null);
+    }
+
+    @Override
+    public Set<String> mapKeySet(String mapName) {
+        return ImmutableSet.copyOf(getMap(mapName).keySet());
+    }
+
+    @Override
+    public Collection<Versioned<byte[]>> mapValues(String mapName) {
+        return ImmutableList.copyOf(getMap(mapName).values());
+    }
+
+    @Override
+    public Set<Entry<String, Versioned<byte[]>>> mapEntrySet(String mapName) {
+        // Copy entries so callers cannot mutate the live map through Entry.setValue.
+        return ImmutableSet.copyOf(getMap(mapName)
+                .entrySet()
+                .stream()
+                .map(entry -> Maps.immutableEntry(entry.getKey(), entry.getValue()))
+                .collect(Collectors.toSet()));
+    }
+
+    @Override
+    public Long counterAddAndGet(String counterName, long delta) {
+        return getCounter(counterName).addAndGet(delta);
+    }
+
+    @Override
+    public Long counterGetAndAdd(String counterName, long delta) {
+        return getCounter(counterName).getAndAdd(delta);
+    }
+
+    @Override
+    public Long counterGet(String counterName) {
+        return getCounter(counterName).get();
+    }
+
+    @Override
+    public Long queueSize(String queueName) {
+        return Long.valueOf(getQueue(queueName).size());
+    }
+
+    @Override
+    public byte[] queuePeek(String queueName) {
+        // Returns null when the queue is empty.
+        return getQueue(queueName).peek();
+    }
+
+    @Override
+    public byte[] queuePop(String queueName) {
+        // Returns null when the queue is empty.
+        return getQueue(queueName).poll();
+    }
+
+    @Override
+    public void queuePush(String queueName, byte[] entry) {
+        getQueue(queueName).offer(entry);
+    }
+
+    // Single-shot two-phase commit: prepare, then commit if prepare succeeded.
+    @Override
+    public CommitResponse prepareAndCommit(Transaction transaction) {
+        if (prepare(transaction)) {
+            return commit(transaction);
+        }
+        return CommitResponse.failure();
+    }
+
+    /**
+     * Prepare phase: fails if any touched key is locked by another transaction
+     * or any update's precondition does not hold; otherwise records all updates
+     * provisionally (locking their keys) and returns true.
+     */
+    @Override
+    public boolean prepare(Transaction transaction) {
+        if (transaction.updates().stream().anyMatch(update ->
+                isLockedByAnotherTransaction(update.mapName(),
+                        update.key(),
+                        transaction.id()))) {
+            return false;
+        }
+
+        if (transaction.updates().stream().allMatch(this::isUpdatePossible)) {
+            transaction.updates().forEach(update -> doProvisionalUpdate(update, transaction.id()));
+            return true;
+        }
+        return false;
+    }
+
+    @Override
+    public CommitResponse commit(Transaction transaction) {
+        // Applies each provisional update; Lists.transform is lazy, so results
+        // materialize as the response is consumed.
+        return CommitResponse.success(Lists.transform(transaction.updates(),
+                update -> commitProvisionalUpdate(update, transaction.id())));
+    }
+
+    @Override
+    public boolean rollback(Transaction transaction) {
+        transaction.updates().forEach(update -> undoProvisionalUpdate(update, transaction.id()));
+        return true;
+    }
+
+    // Lazily creates the named structures on first access.
+    private Map<String, Versioned<byte[]>> getMap(String mapName) {
+        return maps.computeIfAbsent(mapName, name -> Maps.newConcurrentMap());
+    }
+
+    private Map<String, Update> getLockMap(String mapName) {
+        return locks.computeIfAbsent(mapName, name -> Maps.newConcurrentMap());
+    }
+
+    private AtomicLong getCounter(String counterName) {
+        return counters.computeIfAbsent(counterName, name -> new AtomicLong(0));
+    }
+
+    private Queue<byte[]> getQueue(String queueName) {
+        return queues.computeIfAbsent(queueName, name -> new LinkedList<>());
+    }
+
+    // Evaluates a DatabaseUpdate's precondition against the current entry.
+    private boolean isUpdatePossible(DatabaseUpdate update) {
+        Versioned<byte[]> existingEntry = mapGet(update.mapName(), update.key());
+        switch (update.type()) {
+        case PUT:
+        case REMOVE:
+            return true;
+        case PUT_IF_ABSENT:
+            return existingEntry == null;
+        case PUT_IF_VERSION_MATCH:
+            return existingEntry != null && existingEntry.version() == update.currentVersion();
+        case PUT_IF_VALUE_MATCH:
+            return existingEntry != null && Arrays.equals(existingEntry.value(), update.currentValue());
+        case REMOVE_IF_VERSION_MATCH:
+            return existingEntry == null || existingEntry.version() == update.currentVersion();
+        case REMOVE_IF_VALUE_MATCH:
+            return existingEntry == null || Arrays.equals(existingEntry.value(), update.currentValue());
+        default:
+            throw new IllegalStateException("Unsupported type: " + update.type());
+        }
+    }
+
+    // Records the update in the lock map; a null value marks a pending removal.
+    private void doProvisionalUpdate(DatabaseUpdate update, long transactionId) {
+        Map<String, Update> lockMap = getLockMap(update.mapName());
+        switch (update.type()) {
+        case PUT:
+        case PUT_IF_ABSENT:
+        case PUT_IF_VERSION_MATCH:
+        case PUT_IF_VALUE_MATCH:
+            lockMap.put(update.key(), new Update(transactionId, update.value()));
+            break;
+        case REMOVE:
+        case REMOVE_IF_VERSION_MATCH:
+        case REMOVE_IF_VALUE_MATCH:
+            lockMap.put(update.key(), new Update(transactionId, null));
+            break;
+        default:
+            throw new IllegalStateException("Unsupported type: " + update.type());
+        }
+    }
+
+    /**
+     * Releases the lock and applies the provisional value unconditionally
+     * (Match.any()). Throws IllegalStateException if the lock is held by a
+     * different transaction.
+     * NOTE(review): assumes prepare() placed an entry for this key — a null
+     * provisionalUpdate here would NPE; confirm commit is only called after a
+     * successful prepare.
+     */
+    private UpdateResult<String, byte[]> commitProvisionalUpdate(DatabaseUpdate update, long transactionId) {
+        String mapName = update.mapName();
+        String key = update.key();
+        Update provisionalUpdate = getLockMap(mapName).get(key);
+        if (Objects.equal(transactionId, provisionalUpdate.transactionId())) {
+            getLockMap(mapName).remove(key);
+        } else {
+            throw new IllegalStateException("Invalid transaction Id");
+        }
+        return mapUpdate(mapName, key, Match.any(), Match.any(), provisionalUpdate.value()).value();
+    }
+
+    // Discards this transaction's provisional update (if any), releasing the lock.
+    private void undoProvisionalUpdate(DatabaseUpdate update, long transactionId) {
+        String mapName = update.mapName();
+        String key = update.key();
+        Update provisionalUpdate = getLockMap(mapName).get(key);
+        if (provisionalUpdate == null) {
+            return;
+        }
+        if (Objects.equal(transactionId, provisionalUpdate.transactionId())) {
+            getLockMap(mapName).remove(key);
+        }
+    }
+
+    private boolean isLockedByAnotherTransaction(String mapName, String key, long transactionId) {
+        Update update = getLockMap(mapName).get(key);
+        return update != null && !Objects.equal(transactionId, update.transactionId());
+    }
+
+    private boolean isLockedForUpdates(String mapName, String key) {
+        return getLockMap(mapName).containsKey(key);
+    }
+
+    private boolean areTransactionsInProgress(String mapName) {
+        return !getLockMap(mapName).isEmpty();
+    }
+
+    // Provisional update tuple: owning transaction id + pending value
+    // (null value == pending removal). Could be static; kept as-is here.
+    private class Update {
+        private final long transactionId;
+        private final byte[] value;
+
+        public Update(long txId, byte[] value) {
+            this.transactionId = txId;
+            this.value = value;
+        }
+
+        public long transactionId() {
+            return this.transactionId;
+        }
+
+        public byte[] value() {
+            return this.value;
+        }
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDistributedQueue.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDistributedQueue.java
new file mode 100644
index 00000000..5f69fde8
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDistributedQueue.java
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.Futures;
+
+import org.onlab.util.SharedExecutors;
+import org.onosproject.store.service.DistributedQueue;
+import org.onosproject.store.service.Serializer;
+
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.onosproject.store.consistent.impl.StateMachineUpdate.Target.QUEUE_PUSH;
+
+/**
+ * DistributedQueue implementation that provides FIFO ordering semantics.
+ *
+ * @param <E> queue entry type
+ */
+public class DefaultDistributedQueue<E> implements DistributedQueue<E> {
+
+ // Queue identity and the consistent database that backs it.
+ private final String name;
+ private final Database database;
+ private final Serializer serializer;
+ // Futures handed out by pop() while the queue was empty; completed later by
+ // tryPoll() when a push is observed.
+ // NOTE(review): this identity hash set is not thread-safe, yet it is mutated
+ // both from pop() caller threads and from the SharedExecutors thread inside
+ // tryPoll() — confirm external synchronization or use a concurrent set.
+ // Also, identity-set iteration order is unspecified, so blocked poppers are
+ // not necessarily served in FIFO arrival order.
+ private final Set<CompletableFuture<E>> pendingFutures = Sets.newIdentityHashSet();
+
+ // Operation labels reported to the metering agent.
+ private static final String PRIMITIVE_NAME = "distributedQueue";
+ private static final String SIZE = "size";
+ private static final String PUSH = "push";
+ private static final String POP = "pop";
+ private static final String PEEK = "peek";
+
+ private static final String ERROR_NULL_ENTRY = "Null entries are not allowed";
+ private final MeteringAgent monitor;
+
+ /**
+  * Creates a distributed queue view over the given database.
+  *
+  * @param name queue name, used to scope entries within the shared database
+  * @param database backing consistent database
+  * @param serializer serializer for queue entries
+  * @param meteringEnabled whether usage metrics are collected
+  */
+ public DefaultDistributedQueue(String name,
+ Database database,
+ Serializer serializer,
+ boolean meteringEnabled) {
+ this.name = checkNotNull(name, "queue name cannot be null");
+ this.database = checkNotNull(database, "database cannot be null");
+ this.serializer = checkNotNull(serializer, "serializer cannot be null");
+ this.monitor = new MeteringAgent(PRIMITIVE_NAME, name, meteringEnabled);
+ // Wake up locally blocked pop() calls whenever a push to this queue is
+ // observed in the replicated state machine (input[0] is the queue name).
+ this.database.registerConsumer(update -> {
+ SharedExecutors.getSingleThreadExecutor().execute(() -> {
+ if (update.target() == QUEUE_PUSH) {
+ List<Object> input = update.input();
+ String queueName = (String) input.get(0);
+ if (queueName.equals(name)) {
+ tryPoll();
+ }
+ }
+ });
+ });
+ }
+
+ /** Returns the current number of entries in the queue (blocking call). */
+ @Override
+ public long size() {
+ final MeteringAgent.Context timer = monitor.startTimer(SIZE);
+ return Futures.getUnchecked(database.queueSize(name).whenComplete((r, e) -> timer.stop(e)));
+ }
+
+ /** Appends an entry to the tail of the queue (blocking call). */
+ @Override
+ public void push(E entry) {
+ checkNotNull(entry, ERROR_NULL_ENTRY);
+ final MeteringAgent.Context timer = monitor.startTimer(PUSH);
+ Futures.getUnchecked(database.queuePush(name, serializer.encode(entry))
+ .whenComplete((r, e) -> timer.stop(e)));
+ }
+
+ /**
+  * Removes and returns the head entry, or, if the queue is empty, returns a
+  * future that completes when an entry becomes available.
+  *
+  * NOTE(review): between observing an empty poll and registering the pending
+  * future, a concurrent push could be missed; the future then stays pending
+  * until the next push triggers tryPoll(). Confirm this is acceptable.
+  */
+ @Override
+ public CompletableFuture<E> pop() {
+ final MeteringAgent.Context timer = monitor.startTimer(POP);
+ return database.queuePop(name)
+ .whenComplete((r, e) -> timer.stop(e))
+ .thenCompose(v -> {
+ if (v != null) {
+ return CompletableFuture.<E>completedFuture(serializer.decode(v));
+ }
+ CompletableFuture<E> newPendingFuture = new CompletableFuture<>();
+ pendingFutures.add(newPendingFuture);
+ return newPendingFuture;
+ });
+
+ }
+
+ /** Returns (without removing) the head entry, or null if the queue is empty. */
+ @Override
+ public E peek() {
+ final MeteringAgent.Context timer = monitor.startTimer(PEEK);
+ return Futures.getUnchecked(database.queuePeek(name)
+ .thenApply(v -> v != null ? serializer.<E>decode(v) : null)
+ .whenComplete((r, e) -> timer.stop(e)));
+ }
+
+ /** Returns the queue name. */
+ public String name() {
+ return name;
+ }
+
+ /**
+  * Attempts to satisfy pending pop() futures by polling the queue, one entry
+  * per future, stopping at the first empty poll. Runs on the SharedExecutors
+  * single thread (see the consumer registered in the constructor).
+  */
+ protected void tryPoll() {
+ Set<CompletableFuture<E>> completedFutures = Sets.newHashSet();
+ for (CompletableFuture<E> future : pendingFutures) {
+ E entry = Futures.getUnchecked(database.queuePop(name)
+ .thenApply(v -> v != null ? serializer.decode(v) : null));
+ if (entry != null) {
+ future.complete(entry);
+ completedFutures.add(future);
+ } else {
+ break;
+ }
+ }
+ pendingFutures.removeAll(completedFutures);
+ }
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDistributedQueueBuilder.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDistributedQueueBuilder.java
new file mode 100644
index 00000000..d6654e27
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDistributedQueueBuilder.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import org.onosproject.store.service.DistributedQueue;
+import org.onosproject.store.service.DistributedQueueBuilder;
+import org.onosproject.store.service.Serializer;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkState;
+
+/**
+ * Default implementation of a {@code DistributedQueueBuilder}.
+ *
+ * @param <E> queue entry type
+ */
+public class DefaultDistributedQueueBuilder<E> implements DistributedQueueBuilder<E> {
+
+ private Serializer serializer;
+ private String name;
+ // When true (default) the queue lives in the partitioned (persistent)
+ // database; when disabled, in the in-memory database.
+ private boolean persistenceEnabled = true;
+ private final DatabaseManager databaseManager;
+ private boolean metering = true;
+
+ public DefaultDistributedQueueBuilder(DatabaseManager databaseManager) {
+ this.databaseManager = databaseManager;
+ }
+
+ /** Sets the queue name; must be non-null and non-empty. */
+ @Override
+ public DistributedQueueBuilder<E> withName(String name) {
+ checkArgument(name != null && !name.isEmpty());
+ this.name = name;
+ return this;
+ }
+
+ /** Sets the (required) entry serializer. */
+ @Override
+ public DistributedQueueBuilder<E> withSerializer(Serializer serializer) {
+ checkArgument(serializer != null);
+ this.serializer = serializer;
+ return this;
+ }
+
+ /** Disables usage metering for the built queue. */
+ @Override
+ public DistributedQueueBuilder<E> withMeteringDisabled() {
+ metering = false;
+ return this;
+ }
+
+ /** Backs the queue with the in-memory database instead of the partitioned one. */
+ @Override
+ public DistributedQueueBuilder<E> withPersistenceDisabled() {
+ persistenceEnabled = false;
+ return this;
+ }
+
+ // Both name and serializer are mandatory before build().
+ private boolean validInputs() {
+ return name != null && serializer != null;
+ }
+
+ /**
+  * Builds the queue.
+  *
+  * @throws IllegalStateException if name or serializer was not supplied
+  */
+ @Override
+ public DistributedQueue<E> build() {
+ checkState(validInputs());
+ return new DefaultDistributedQueue<>(
+ name,
+ persistenceEnabled ? databaseManager.partitionedDatabase : databaseManager.inMemoryDatabase,
+ serializer,
+ metering);
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDistributedSet.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDistributedSet.java
new file mode 100644
index 00000000..677724df
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDistributedSet.java
@@ -0,0 +1,234 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.onosproject.store.service.ConsistentMap;
+import org.onosproject.store.service.DistributedSet;
+import org.onosproject.store.service.MapEvent;
+import org.onosproject.store.service.MapEventListener;
+import org.onosproject.store.service.SetEvent;
+import org.onosproject.store.service.SetEventListener;
+
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Implementation of distributed set that is backed by a ConsistentMap.
+ *
+ * @param <E> set element type
+ */
+public class DefaultDistributedSet<E> implements DistributedSet<E> {
+
+ // Operation labels reported to the metering agent.
+ private static final String CONTAINS = "contains";
+ private static final String PRIMITIVE_NAME = "distributedSet";
+ private static final String SIZE = "size";
+ private static final String IS_EMPTY = "isEmpty";
+ private static final String ITERATOR = "iterator";
+ private static final String TO_ARRAY = "toArray";
+ private static final String ADD = "add";
+ private static final String REMOVE = "remove";
+ private static final String CONTAINS_ALL = "containsAll";
+ private static final String ADD_ALL = "addAll";
+ private static final String RETAIN_ALL = "retainAll";
+ private static final String REMOVE_ALL = "removeAll";
+ private static final String CLEAR = "clear";
+
+ private final String name;
+ // Elements are the keys of the backing map; the Boolean value is a dummy.
+ private final ConsistentMap<E, Boolean> backingMap;
+ // Maps each registered set listener to the map listener that adapts its events.
+ // NOTE(review): plain identity map mutated in add/removeListener without
+ // synchronization — confirm listener registration is single-threaded.
+ private final Map<SetEventListener<E>, MapEventListener<E, Boolean>> listenerMapping = Maps.newIdentityHashMap();
+ private final MeteringAgent monitor;
+
+ public DefaultDistributedSet(String name, boolean meteringEnabled, ConsistentMap<E, Boolean> backingMap) {
+ this.name = name;
+ this.backingMap = backingMap;
+ monitor = new MeteringAgent(PRIMITIVE_NAME, name, meteringEnabled);
+ }
+
+ @Override
+ public int size() {
+ final MeteringAgent.Context timer = monitor.startTimer(SIZE);
+ try {
+ return backingMap.size();
+ } finally {
+ timer.stop(null);
+ }
+ }
+
+ @Override
+ public boolean isEmpty() {
+ final MeteringAgent.Context timer = monitor.startTimer(IS_EMPTY);
+ try {
+ return backingMap.isEmpty();
+ } finally {
+ timer.stop(null);
+ }
+ }
+
+ // NOTE(review): the unchecked cast means an argument of a foreign type throws
+ // ClassCastException rather than returning false as Set.contains specifies.
+ @SuppressWarnings("unchecked")
+ @Override
+ public boolean contains(Object o) {
+ final MeteringAgent.Context timer = monitor.startTimer(CONTAINS);
+ try {
+ return backingMap.containsKey((E) o);
+ } finally {
+ timer.stop(null);
+ }
+ }
+
+ @Override
+ public Iterator<E> iterator() {
+ final MeteringAgent.Context timer = monitor.startTimer(ITERATOR);
+ // Only iterator creation is timed, not the iteration itself.
+ try {
+ return backingMap.keySet().iterator();
+ } finally {
+ timer.stop(null);
+ }
+ }
+
+ @Override
+ public Object[] toArray() {
+ final MeteringAgent.Context timer = monitor.startTimer(TO_ARRAY);
+ try {
+ return backingMap.keySet().stream().toArray();
+ } finally {
+ timer.stop(null);
+ }
+ }
+
+ // FIXME(review): the generator ignores the requested size and always returns
+ // the caller's array; when the set has more elements than a.length this throws
+ // instead of allocating a larger array as the Collection.toArray(T[]) contract
+ // requires, and a larger a is not null-terminated at the element count.
+ @Override
+ public <T> T[] toArray(T[] a) {
+ final MeteringAgent.Context timer = monitor.startTimer(TO_ARRAY);
+ try {
+ return backingMap.keySet().stream().toArray(size -> a);
+ } finally {
+ timer.stop(null);
+ }
+ }
+
+ @Override
+ public boolean add(E e) {
+ final MeteringAgent.Context timer = monitor.startTimer(ADD);
+ try {
+ // putIfAbsent returns null only when the element was newly added.
+ return backingMap.putIfAbsent(e, true) == null;
+ } finally {
+ timer.stop(null);
+ }
+ }
+
+ // NOTE(review): same foreign-type ClassCastException caveat as contains().
+ @SuppressWarnings("unchecked")
+ @Override
+ public boolean remove(Object o) {
+ final MeteringAgent.Context timer = monitor.startTimer(REMOVE);
+ try {
+ return backingMap.remove((E) o) != null;
+ } finally {
+ timer.stop(null);
+ }
+ }
+
+ @Override
+ public boolean containsAll(Collection<?> c) {
+ final MeteringAgent.Context timer = monitor.startTimer(CONTAINS_ALL);
+ try {
+ return c.stream()
+ .allMatch(this::contains);
+ } finally {
+ timer.stop(null);
+ }
+ }
+
+ // NOTE(review): the bulk operations below are element-by-element and hence
+ // not atomic with respect to concurrent modifications of the backing map.
+ @Override
+ public boolean addAll(Collection<? extends E> c) {
+ final MeteringAgent.Context timer = monitor.startTimer(ADD_ALL);
+ try {
+ return c.stream()
+ .map(this::add)
+ .reduce(Boolean::logicalOr)
+ .orElse(false);
+ } finally {
+ timer.stop(null);
+ }
+ }
+
+ @Override
+ public boolean retainAll(Collection<?> c) {
+ final MeteringAgent.Context timer = monitor.startTimer(RETAIN_ALL);
+ try {
+ Set<?> retainSet = Sets.newHashSet(c);
+ return backingMap.keySet()
+ .stream()
+ .filter(k -> !retainSet.contains(k))
+ .map(this::remove)
+ .reduce(Boolean::logicalOr)
+ .orElse(false);
+ } finally {
+ timer.stop(null);
+ }
+ }
+
+ @Override
+ public boolean removeAll(Collection<?> c) {
+ final MeteringAgent.Context timer = monitor.startTimer(REMOVE_ALL);
+ try {
+ Set<?> removeSet = Sets.newHashSet(c);
+ return backingMap.keySet()
+ .stream()
+ .filter(removeSet::contains)
+ .map(this::remove)
+ .reduce(Boolean::logicalOr)
+ .orElse(false);
+ } finally {
+ timer.stop(null);
+ }
+ }
+
+ @Override
+ public void clear() {
+ final MeteringAgent.Context timer = monitor.startTimer(CLEAR);
+ try {
+ backingMap.clear();
+ } finally {
+ timer.stop(null);
+ }
+ }
+
+ /**
+  * Registers a set listener by adapting map INSERT/REMOVE events into set
+  * ADD/REMOVE events; duplicate registrations of the same listener are ignored.
+  */
+ @Override
+ public void addListener(SetEventListener<E> listener) {
+ MapEventListener<E, Boolean> mapEventListener = mapEvent -> {
+ if (mapEvent.type() == MapEvent.Type.INSERT) {
+ listener.event(new SetEvent<>(name, SetEvent.Type.ADD, mapEvent.key()));
+ } else if (mapEvent.type() == MapEvent.Type.REMOVE) {
+ listener.event(new SetEvent<>(name, SetEvent.Type.REMOVE, mapEvent.key()));
+ }
+ };
+ if (listenerMapping.putIfAbsent(listener, mapEventListener) == null) {
+ backingMap.addListener(mapEventListener);
+ }
+ }
+
+ /** Unregisters a previously added set listener; no-op if it was never added. */
+ @Override
+ public void removeListener(SetEventListener<E> listener) {
+ MapEventListener<E, Boolean> mapEventListener = listenerMapping.remove(listener);
+ if (mapEventListener != null) {
+ backingMap.removeListener(mapEventListener);
+ }
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDistributedSetBuilder.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDistributedSetBuilder.java
new file mode 100644
index 00000000..f7957f39
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDistributedSetBuilder.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import org.onosproject.core.ApplicationId;
+import org.onosproject.store.service.ConsistentMapBuilder;
+import org.onosproject.store.service.DistributedSet;
+import org.onosproject.store.service.Serializer;
+import org.onosproject.store.service.DistributedSetBuilder;
+
+/**
+ * Default distributed set builder.
+ *
+ * @param <E> type for set elements
+ */
+public class DefaultDistributedSetBuilder<E> implements DistributedSetBuilder<E> {
+
+ private String name;
+ // The set is realized as a ConsistentMap<E, Boolean>; most builder options
+ // simply delegate to the underlying map builder.
+ private ConsistentMapBuilder<E, Boolean> mapBuilder;
+ private boolean metering = true;
+
+ public DefaultDistributedSetBuilder(DatabaseManager manager) {
+ this.mapBuilder = manager.consistentMapBuilder();
+ // Metering is disabled on the backing map; the set meters itself via its
+ // own MeteringAgent — presumably to avoid double-counting. TODO confirm.
+ mapBuilder.withMeteringDisabled();
+ }
+
+ /** Sets the set name (also used as the backing map name). */
+ @Override
+ public DistributedSetBuilder<E> withName(String name) {
+ mapBuilder.withName(name);
+ this.name = name;
+ return this;
+ }
+
+ /** Associates an owning application id with the backing map. */
+ @Override
+ public DistributedSetBuilder<E> withApplicationId(ApplicationId id) {
+ mapBuilder.withApplicationId(id);
+ return this;
+ }
+
+ /** Purges the backing map when the owning application is uninstalled. */
+ @Override
+ public DistributedSetBuilder<E> withPurgeOnUninstall() {
+ mapBuilder.withPurgeOnUninstall();
+ return this;
+ }
+
+ /** Sets the element serializer on the backing map. */
+ @Override
+ public DistributedSetBuilder<E> withSerializer(Serializer serializer) {
+ mapBuilder.withSerializer(serializer);
+ return this;
+ }
+
+ /** Builds a read-only set (updates disabled on the backing map). */
+ @Override
+ public DistributedSetBuilder<E> withUpdatesDisabled() {
+ mapBuilder.withUpdatesDisabled();
+ return this;
+ }
+
+ /** Allows relaxed (eventually consistent) reads on the backing map. */
+ @Override
+ public DistributedSetBuilder<E> withRelaxedReadConsistency() {
+ mapBuilder.withRelaxedReadConsistency();
+ return this;
+ }
+
+ /** Backs the set with an unpartitioned map. */
+ @Override
+ public DistributedSetBuilder<E> withPartitionsDisabled() {
+ mapBuilder.withPartitionsDisabled();
+ return this;
+ }
+
+ /** Disables the set's own usage metering. */
+ @Override
+ public DistributedSetBuilder<E> withMeteringDisabled() {
+ metering = false;
+ return this;
+ }
+
+ @Override
+ public DistributedSet<E> build() {
+ return new DefaultDistributedSet<E>(name, metering, mapBuilder.build());
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultTransaction.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultTransaction.java
new file mode 100644
index 00000000..2ff7a2dc
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultTransaction.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import java.util.List;
+
+import org.onosproject.store.service.DatabaseUpdate;
+import org.onosproject.store.service.Transaction;
+
+import com.google.common.collect.ImmutableList;
+
+/**
+ * A Default transaction implementation.
+ */
+public class DefaultTransaction implements Transaction {
+
+ // Immutable value object: state transitions produce new instances (see
+ // transition() below) rather than mutating this one.
+ private final long transactionId;
+ private final List<DatabaseUpdate> updates;
+ private final State state;
+ // Wall-clock time (System.currentTimeMillis) of creation/last transition.
+ private final long lastUpdated;
+
+ /** Creates a new transaction in the initial PREPARING state. */
+ public DefaultTransaction(long transactionId, List<DatabaseUpdate> updates) {
+ this(transactionId, updates, State.PREPARING, System.currentTimeMillis());
+ }
+
+ private DefaultTransaction(long transactionId, List<DatabaseUpdate> updates, State state, long lastUpdated) {
+ this.transactionId = transactionId;
+ // Defensive immutable copy; the updates list never changes afterwards.
+ this.updates = ImmutableList.copyOf(updates);
+ this.state = state;
+ this.lastUpdated = lastUpdated;
+ }
+
+ @Override
+ public long id() {
+ return transactionId;
+ }
+
+ @Override
+ public List<DatabaseUpdate> updates() {
+ return updates;
+ }
+
+ @Override
+ public State state() {
+ return state;
+ }
+
+ /** Returns a copy of this transaction in {@code newState} with a fresh timestamp. */
+ @Override
+ public Transaction transition(State newState) {
+ return new DefaultTransaction(transactionId, updates, newState, System.currentTimeMillis());
+ }
+
+ @Override
+ public long lastUpdated() {
+ return lastUpdated;
+ }
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultTransactionContext.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultTransactionContext.java
new file mode 100644
index 00000000..b66f424b
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultTransactionContext.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+
+import static com.google.common.base.Preconditions.*;
+
+import org.onosproject.store.service.ConsistentMapBuilder;
+import org.onosproject.store.service.DatabaseUpdate;
+import org.onosproject.store.service.Serializer;
+import org.onosproject.store.service.Transaction;
+import org.onosproject.store.service.TransactionContext;
+import org.onosproject.store.service.TransactionalMap;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.util.concurrent.Futures;
+
+/**
+ * Default TransactionContext implementation.
+ */
+public class DefaultTransactionContext implements TransactionContext {
+ private static final String TX_NOT_OPEN_ERROR = "Transaction Context is not open";
+
+ // One transactional map per map name, created lazily in getTransactionalMap().
+ @SuppressWarnings("rawtypes")
+ private final Map<String, DefaultTransactionalMap> txMaps = Maps.newConcurrentMap();
+ // NOTE(review): plain boolean read/written without synchronization — confirm
+ // a context instance is confined to a single thread.
+ private boolean isOpen = false;
+ private final Database database;
+ private final long transactionId;
+ // Supplies builders for the backing ConsistentMaps of transactional maps.
+ private final Supplier<ConsistentMapBuilder> mapBuilderSupplier;
+
+ public DefaultTransactionContext(long transactionId,
+ Database database,
+ Supplier<ConsistentMapBuilder> mapBuilderSupplier) {
+ this.transactionId = transactionId;
+ this.database = checkNotNull(database);
+ this.mapBuilderSupplier = checkNotNull(mapBuilderSupplier);
+ }
+
+ @Override
+ public long transactionId() {
+ return transactionId;
+ }
+
+ /** Opens the context; may be called at most once. */
+ @Override
+ public void begin() {
+ checkState(!isOpen, "Transaction Context is already open");
+ isOpen = true;
+ }
+
+ @Override
+ public boolean isOpen() {
+ return isOpen;
+ }
+
+ /**
+  * Returns (creating on first use) the transactional map for {@code mapName},
+  * backed by a ConsistentMap built from the supplier.
+  *
+  * @throws IllegalStateException if the context is not open
+  */
+ @Override
+ @SuppressWarnings("unchecked")
+ public <K, V> TransactionalMap<K, V> getTransactionalMap(String mapName,
+ Serializer serializer) {
+ checkState(isOpen, TX_NOT_OPEN_ERROR);
+ checkNotNull(mapName);
+ checkNotNull(serializer);
+ return txMaps.computeIfAbsent(mapName, name -> new DefaultTransactionalMap<>(
+ name,
+ mapBuilderSupplier.get().withName(name).withSerializer(serializer).build(),
+ this,
+ serializer));
+ }
+
+ /**
+  * Gathers buffered updates from all transactional maps and submits them as a
+  * single atomic prepare-and-commit; aborts locally if the commit fails.
+  *
+  * NOTE(review): if prepareAndCommit throws, {@code response} remains null and
+  * abort() is skipped, so the tx maps are never rolled back (the context is
+  * closed regardless) — confirm that is intended.
+  */
+ @SuppressWarnings("unchecked")
+ @Override
+ public void commit() {
+ // TODO: rework commit implementation to be more intuitive
+ checkState(isOpen, TX_NOT_OPEN_ERROR);
+ CommitResponse response = null;
+ try {
+ List<DatabaseUpdate> updates = Lists.newLinkedList();
+ txMaps.values().forEach(m -> updates.addAll(m.prepareDatabaseUpdates()));
+ Transaction transaction = new DefaultTransaction(transactionId, updates);
+ response = Futures.getUnchecked(database.prepareAndCommit(transaction));
+ } finally {
+ if (response != null && !response.success()) {
+ abort();
+ }
+ isOpen = false;
+ }
+ }
+
+ /** Rolls back every transactional map and closes the context; no-op if closed. */
+ @Override
+ public void abort() {
+ if (isOpen) {
+ try {
+ txMaps.values().forEach(m -> m.rollback());
+ } finally {
+ isOpen = false;
+ }
+ }
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultTransactionContextBuilder.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultTransactionContextBuilder.java
new file mode 100644
index 00000000..f20bfb80
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultTransactionContextBuilder.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import org.onosproject.store.service.TransactionContext;
+import org.onosproject.store.service.TransactionContextBuilder;
+
+/**
+ * The default implementation of a transaction context builder. This builder
+ * generates a {@link DefaultTransactionContext}.
+ */
+public class DefaultTransactionContextBuilder implements TransactionContextBuilder {
+
+ // When true (default) the context uses the partitioned database; when
+ // disabled, the in-memory database and unpartitioned map builders.
+ private boolean partitionsEnabled = true;
+ private final DatabaseManager manager;
+ private final long transactionId;
+
+ public DefaultTransactionContextBuilder(DatabaseManager manager, long transactionId) {
+ this.manager = manager;
+ this.transactionId = transactionId;
+ }
+
+ @Override
+ public TransactionContextBuilder withPartitionsDisabled() {
+ partitionsEnabled = false;
+ return this;
+ }
+
+ /** Builds a DefaultTransactionContext bound to the selected database. */
+ @Override
+ public TransactionContext build() {
+ return new DefaultTransactionContext(
+ transactionId,
+ partitionsEnabled ? manager.partitionedDatabase : manager.inMemoryDatabase,
+ () -> partitionsEnabled ? manager.consistentMapBuilder()
+ : manager.consistentMapBuilder().withPartitionsDisabled());
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultTransactionalMap.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultTransactionalMap.java
new file mode 100644
index 00000000..ade70335
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultTransactionalMap.java
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.onlab.util.HexString;
+import org.onosproject.store.service.ConsistentMap;
+import org.onosproject.store.service.DatabaseUpdate;
+import org.onosproject.store.service.Serializer;
+import org.onosproject.store.service.TransactionContext;
+import org.onosproject.store.service.TransactionalMap;
+import org.onosproject.store.service.Versioned;
+
+import static com.google.common.base.Preconditions.*;
+
+import com.google.common.base.Objects;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+/**
+ * Default Transactional Map implementation that provides a repeatable reads
+ * transaction isolation level.
+ *
+ * @param <K> key type
+ * @param <V> value type.
+ */
+public class DefaultTransactionalMap<K, V> implements TransactionalMap<K, V> {
+
+ private final TransactionContext txContext;
+ private static final String TX_CLOSED_ERROR = "Transaction is closed";
+ private final ConsistentMap<K, V> backingMap;
+ private final String name;
+ private final Serializer serializer;
+ // Values read from the backing map during this transaction, cached (with
+ // their versions) to provide repeatable reads and optimistic version checks.
+ private final Map<K, Versioned<V>> readCache = Maps.newConcurrentMap();
+ // Puts buffered until commit; a key is never in both writeCache and deleteSet.
+ private final Map<K, V> writeCache = Maps.newConcurrentMap();
+ // Removals buffered until commit.
+ private final Set<K> deleteSet = Sets.newConcurrentHashSet();
+
+ private static final String ERROR_NULL_VALUE = "Null values are not allowed";
+ private static final String ERROR_NULL_KEY = "Null key is not allowed";
+
+ // Hex-encoding of serialized keys, since the database is string-keyed;
+ // soft values let the encoding cache be reclaimed under memory pressure.
+ private final LoadingCache<K, String> keyCache = CacheBuilder.newBuilder()
+ .softValues()
+ .build(new CacheLoader<K, String>() {
+
+ @Override
+ public String load(K key) {
+ return HexString.toHexString(serializer.encode(key));
+ }
+ });
+
+ // Inverse of keyCache's encoding: hex string back to a key object.
+ protected K dK(String key) {
+ return serializer.decode(HexString.fromHexString(key));
+ }
+
+ public DefaultTransactionalMap(
+ String name,
+ ConsistentMap<K, V> backingMap,
+ TransactionContext txContext,
+ Serializer serializer) {
+ this.name = name;
+ this.backingMap = backingMap;
+ this.txContext = txContext;
+ this.serializer = serializer;
+ }
+
+ /**
+  * Reads a value, honoring this transaction's buffered writes and deletes
+  * before consulting (and caching) the backing map.
+  *
+  * NOTE(review): computeIfAbsent does not store a mapping when backingMap.get
+  * returns null, so absent keys are re-read from the backing map on every
+  * get — a concurrent insert could break repeatable reads for such keys.
+  */
+ @Override
+ public V get(K key) {
+ checkState(txContext.isOpen(), TX_CLOSED_ERROR);
+ checkNotNull(key, ERROR_NULL_KEY);
+ if (deleteSet.contains(key)) {
+ return null;
+ }
+ V latest = writeCache.get(key);
+ if (latest != null) {
+ return latest;
+ } else {
+ Versioned<V> v = readCache.computeIfAbsent(key, k -> backingMap.get(k));
+ return v != null ? v.value() : null;
+ }
+ }
+
+ /** Buffers a put; returns the previous (transaction-visible) value. */
+ @Override
+ public V put(K key, V value) {
+ checkState(txContext.isOpen(), TX_CLOSED_ERROR);
+ checkNotNull(value, ERROR_NULL_VALUE);
+
+ V latest = get(key);
+ writeCache.put(key, value);
+ deleteSet.remove(key);
+ return latest;
+ }
+
+ /** Buffers a removal; returns the previous (transaction-visible) value. */
+ @Override
+ public V remove(K key) {
+ checkState(txContext.isOpen(), TX_CLOSED_ERROR);
+ V latest = get(key);
+ if (latest != null) {
+ writeCache.remove(key);
+ deleteSet.add(key);
+ }
+ return latest;
+ }
+
+ /** Removes the key only if its transaction-visible value equals {@code value}. */
+ @Override
+ public boolean remove(K key, V value) {
+ checkState(txContext.isOpen(), TX_CLOSED_ERROR);
+ checkNotNull(value, ERROR_NULL_VALUE);
+ V latest = get(key);
+ if (Objects.equal(value, latest)) {
+ remove(key);
+ return true;
+ }
+ return false;
+ }
+
+ /** Replaces the value only if the transaction-visible value equals {@code oldValue}. */
+ @Override
+ public boolean replace(K key, V oldValue, V newValue) {
+ checkState(txContext.isOpen(), TX_CLOSED_ERROR);
+ checkNotNull(oldValue, ERROR_NULL_VALUE);
+ checkNotNull(newValue, ERROR_NULL_VALUE);
+ V latest = get(key);
+ if (Objects.equal(oldValue, latest)) {
+ put(key, newValue);
+ return true;
+ }
+ return false;
+ }
+
+ /** Buffers a put only if the key has no transaction-visible value. */
+ @Override
+ public V putIfAbsent(K key, V value) {
+ checkState(txContext.isOpen(), TX_CLOSED_ERROR);
+ checkNotNull(value, ERROR_NULL_VALUE);
+ V latest = get(key);
+ if (latest == null) {
+ put(key, value);
+ }
+ return latest;
+ }
+
+ /**
+  * Translates this map's buffered deletes and writes into version-guarded
+  * database updates for the transaction's prepare/commit phase: deletes become
+  * REMOVE_IF_VERSION_MATCH, writes become PUT_IF_ABSENT for keys never seen in
+  * the backing map and PUT_IF_VERSION_MATCH for keys that were read.
+  */
+ protected List<DatabaseUpdate> prepareDatabaseUpdates() {
+ List<DatabaseUpdate> updates = Lists.newLinkedList();
+ deleteSet.forEach(key -> {
+ Versioned<V> original = readCache.get(key);
+ if (original != null) {
+ updates.add(DatabaseUpdate.newBuilder()
+ .withMapName(name)
+ .withType(DatabaseUpdate.Type.REMOVE_IF_VERSION_MATCH)
+ .withKey(keyCache.getUnchecked(key))
+ .withCurrentVersion(original.version())
+ .build());
+ }
+ });
+ writeCache.forEach((key, value) -> {
+ Versioned<V> original = readCache.get(key);
+ if (original == null) {
+ updates.add(DatabaseUpdate.newBuilder()
+ .withMapName(name)
+ .withType(DatabaseUpdate.Type.PUT_IF_ABSENT)
+ .withKey(keyCache.getUnchecked(key))
+ .withValue(serializer.encode(value))
+ .build());
+ } else {
+ updates.add(DatabaseUpdate.newBuilder()
+ .withMapName(name)
+ .withType(DatabaseUpdate.Type.PUT_IF_VERSION_MATCH)
+ .withKey(keyCache.getUnchecked(key))
+ .withCurrentVersion(original.version())
+ .withValue(serializer.encode(value))
+ .build());
+ }
+ });
+ return updates;
+ }
+
+ /**
+  * Discards all changes made to this transactional map.
+  */
+ protected void rollback() {
+ readCache.clear();
+ writeCache.clear();
+ deleteSet.clear();
+ }
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DistributedLeadershipManager.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DistributedLeadershipManager.java
new file mode 100644
index 00000000..1882b1b5
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DistributedLeadershipManager.java
@@ -0,0 +1,605 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.collect.MapDifference;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+import org.apache.commons.lang.math.RandomUtils;
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onosproject.cluster.ClusterEvent;
+import org.onosproject.cluster.ClusterEvent.Type;
+import org.onosproject.cluster.ClusterEventListener;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.Leadership;
+import org.onosproject.cluster.LeadershipEvent;
+import org.onosproject.cluster.LeadershipEventListener;
+import org.onosproject.cluster.LeadershipService;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.event.ListenerRegistry;
+import org.onosproject.event.EventDeliveryService;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.service.ConsistentMap;
+import org.onosproject.store.service.ConsistentMapException;
+import org.onosproject.store.service.MapEvent;
+import org.onosproject.store.service.Serializer;
+import org.onosproject.store.service.StorageService;
+import org.onosproject.store.service.Versioned;
+import org.slf4j.Logger;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.Set;
+import java.util.List;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static org.onlab.util.Tools.groupedThreads;
+import static org.slf4j.LoggerFactory.getLogger;
+import static org.onosproject.cluster.ControllerNode.State.ACTIVE;
+import static org.onosproject.cluster.ControllerNode.State.INACTIVE;
+
+/**
+ * Distributed implementation of LeadershipService built on top of ConsistentMap.
+ * <p>
+ * This implementation makes use of ClusterService's failure
+ * detection capabilities to detect and purge stale locks.
+ * TODO: Ensure lock safety and liveness.
+ */
+@Component(immediate = true, enabled = true)
+@Service
+public class DistributedLeadershipManager implements LeadershipService {
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected StorageService storageService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterService clusterService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterCommunicationService clusterCommunicator;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected EventDeliveryService eventDispatcher;
+
+ private final Logger log = getLogger(getClass());
+ private ScheduledExecutorService electionRunner;
+ private ScheduledExecutorService lockExecutor;
+ private ScheduledExecutorService staleLeadershipPurgeExecutor;
+ private ScheduledExecutorService leadershipRefresher;
+
+ private ConsistentMap<String, NodeId> leaderMap;
+ private ConsistentMap<String, List<NodeId>> candidateMap;
+
+ private ListenerRegistry<LeadershipEvent, LeadershipEventListener> listenerRegistry;
+ private final Map<String, Leadership> leaderBoard = Maps.newConcurrentMap();
+ private final Map<String, Leadership> candidateBoard = Maps.newConcurrentMap();
+ private final ClusterEventListener clusterEventListener = new InternalClusterEventListener();
+
+ private NodeId localNodeId;
+ private Set<String> activeTopics = Sets.newConcurrentHashSet();
+ private Map<String, CompletableFuture<Leadership>> pendingFutures = Maps.newConcurrentMap();
+
+ // The actual delay is randomly chosen from the interval [0, WAIT_BEFORE_RETRY_MILLIS)
+ private static final int WAIT_BEFORE_RETRY_MILLIS = 150;
+ private static final int DELAY_BETWEEN_LEADER_LOCK_ATTEMPTS_SEC = 2;
+ private static final int LEADERSHIP_REFRESH_INTERVAL_SEC = 2;
+ private static final int DELAY_BETWEEN_STALE_LEADERSHIP_PURGE_ATTEMPTS_SEC = 2;
+
+ private final AtomicBoolean staleLeadershipPurgeScheduled = new AtomicBoolean(false);
+
+ private static final Serializer SERIALIZER = Serializer.using(KryoNamespaces.API);
+
+    /**
+     * Activates the component: builds the leader/candidate maps, wires map
+     * listeners that translate map events into LeadershipEvents, and starts
+     * the periodic election and leader-board refresh tasks.
+     */
+    @Activate
+    public void activate() {
+        // Both maps are unpartitioned so that leadership state for a topic
+        // lives in a single strongly-consistent partition.
+        leaderMap = storageService.<String, NodeId>consistentMapBuilder()
+                .withName("onos-topic-leaders")
+                .withSerializer(SERIALIZER)
+                .withPartitionsDisabled().build();
+        candidateMap = storageService.<String, List<NodeId>>consistentMapBuilder()
+                .withName("onos-topic-candidates")
+                .withSerializer(SERIALIZER)
+                .withPartitionsDisabled().build();
+
+        leaderMap.addListener(event -> {
+            log.debug("Received {}", event);
+            // NOTE(review): leadershipEventType stays null for any MapEvent
+            // type other than INSERT/UPDATE/REMOVE — confirm LeadershipEvent
+            // tolerates a null type, or guard before dispatching.
+            LeadershipEvent.Type leadershipEventType = null;
+            if (event.type() == MapEvent.Type.INSERT || event.type() == MapEvent.Type.UPDATE) {
+                leadershipEventType = LeadershipEvent.Type.LEADER_ELECTED;
+            } else if (event.type() == MapEvent.Type.REMOVE) {
+                leadershipEventType = LeadershipEvent.Type.LEADER_BOOTED;
+            }
+            onLeadershipEvent(new LeadershipEvent(
+                    leadershipEventType,
+                    new Leadership(event.key(),
+                            event.value().value(),
+                            event.value().version(),
+                            event.value().creationTime())));
+        });
+
+        candidateMap.addListener(event -> {
+            log.debug("Received {}", event);
+            // Candidate lists are only ever replaced, never removed; a REMOVE
+            // here indicates a protocol violation.
+            if (event.type() != MapEvent.Type.INSERT && event.type() != MapEvent.Type.UPDATE) {
+                log.error("Entries must not be removed from candidate map");
+                return;
+            }
+            onLeadershipEvent(new LeadershipEvent(
+                    LeadershipEvent.Type.CANDIDATES_CHANGED,
+                    new Leadership(event.key(),
+                            event.value().value(),
+                            event.value().version(),
+                            event.value().creationTime())));
+        });
+
+        localNodeId = clusterService.getLocalNode().id();
+
+        electionRunner = Executors.newSingleThreadScheduledExecutor(
+                groupedThreads("onos/store/leadership", "election-runner"));
+        lockExecutor = Executors.newScheduledThreadPool(
+                4, groupedThreads("onos/store/leadership", "election-thread-%d"));
+        staleLeadershipPurgeExecutor = Executors.newSingleThreadScheduledExecutor(
+                groupedThreads("onos/store/leadership", "stale-leadership-evictor"));
+        leadershipRefresher = Executors.newSingleThreadScheduledExecutor(
+                groupedThreads("onos/store/leadership", "refresh-thread"));
+
+        clusterService.addListener(clusterEventListener);
+
+        // Periodically retry elections for topics this node is contesting.
+        electionRunner.scheduleWithFixedDelay(
+                this::electLeaders, 0, DELAY_BETWEEN_LEADER_LOCK_ATTEMPTS_SEC, TimeUnit.SECONDS);
+
+        // Periodically reconcile the local leader board with the shared map.
+        leadershipRefresher.scheduleWithFixedDelay(
+                this::refreshLeaderBoard, 0, LEADERSHIP_REFRESH_INTERVAL_SEC, TimeUnit.SECONDS);
+
+        listenerRegistry = new ListenerRegistry<>();
+        eventDispatcher.addSink(LeadershipEvent.class, listenerRegistry);
+
+        log.info("Started");
+    }
+
+    /**
+     * Deactivates the component: relinquishes any leaderships held by this
+     * node, detaches listeners/sinks and shuts down the executors.
+     */
+    @Deactivate
+    public void deactivate() {
+        // Withdraw only when other nodes exist to take over; on a single
+        // node there is nobody to hand leadership to.
+        if (clusterService.getNodes().size() > 1) {
+            // FIXME: Determine why this takes ~50 seconds to shutdown on a single node!
+            leaderBoard.forEach((topic, leadership) -> {
+                if (localNodeId.equals(leadership.leader())) {
+                    withdraw(topic);
+                }
+            });
+        }
+
+        clusterService.removeListener(clusterEventListener);
+        eventDispatcher.removeSink(LeadershipEvent.class);
+
+        electionRunner.shutdown();
+        lockExecutor.shutdown();
+        staleLeadershipPurgeExecutor.shutdown();
+        leadershipRefresher.shutdown();
+
+        log.info("Stopped");
+    }
+
+    @Override
+    public Map<String, Leadership> getLeaderBoard() {
+        // Hand out an immutable snapshot so callers cannot mutate the
+        // internal leader board.
+        return ImmutableMap.<String, Leadership>builder()
+                .putAll(leaderBoard)
+                .build();
+    }
+
+ @Override
+ public Map<String, List<NodeId>> getCandidates() {
+ return Maps.toMap(candidateBoard.keySet(), this::getCandidates);
+ }
+
+    @Override
+    public List<NodeId> getCandidates(String path) {
+        // Unknown topic yields an empty (immutable) candidate list.
+        Leadership leadership = candidateBoard.get(path);
+        if (leadership == null) {
+            return ImmutableList.of();
+        }
+        return ImmutableList.copyOf(leadership.candidates());
+    }
+
+    @Override
+    public NodeId getLeader(String path) {
+        // Null when the topic has no recorded leadership.
+        Leadership current = leaderBoard.get(path);
+        if (current == null) {
+            return null;
+        }
+        return current.leader();
+    }
+
+    /**
+     * Returns the current leadership info for the given topic, or null if
+     * no leadership is recorded locally.
+     *
+     * @param path topic name; must not be null
+     * @return leadership info or null
+     */
+    @Override
+    public Leadership getLeadership(String path) {
+        checkArgument(path != null);
+        return leaderBoard.get(path);
+    }
+
+    @Override
+    public Set<String> ownedTopics(NodeId nodeId) {
+        checkArgument(nodeId != null);
+        // Collect all topics whose recorded leader is the given node.
+        Set<String> owned = Sets.newHashSet();
+        leaderBoard.forEach((topic, leadership) -> {
+            if (nodeId.equals(leadership.leader())) {
+                owned.add(topic);
+            }
+        });
+        return owned;
+    }
+
+    /**
+     * Enters the leadership race for the given topic.
+     * <p>
+     * The returned future completes once this node's candidacy has been
+     * registered and a leader (not necessarily this node) is known.
+     *
+     * @param path topic to contest
+     * @return future completed with the resulting leadership
+     */
+    @Override
+    public CompletableFuture<Leadership> runForLeadership(String path) {
+        log.debug("Running for leadership for topic: {}", path);
+        CompletableFuture<Leadership> resultFuture = new CompletableFuture<>();
+        doRunForLeadership(path, resultFuture);
+        return resultFuture;
+    }
+
+    /**
+     * Registers this node as a candidate for the topic and attempts an
+     * election; retries (with random backoff) on ConsistentMap failures.
+     *
+     * @param path   topic to contest
+     * @param future future to complete once leadership is known
+     */
+    private void doRunForLeadership(String path, CompletableFuture<Leadership> future) {
+        try {
+            // Atomically append this node to the candidate list unless it is
+            // already a candidate.
+            Versioned<List<NodeId>> candidates = candidateMap.computeIf(path,
+                    currentList -> currentList == null || !currentList.contains(localNodeId),
+                    (topic, currentList) -> {
+                        if (currentList == null) {
+                            return ImmutableList.of(localNodeId);
+                        } else {
+                            List<NodeId> newList = Lists.newLinkedList();
+                            newList.addAll(currentList);
+                            newList.add(localNodeId);
+                            return newList;
+                        }
+                    });
+            log.debug("In the leadership race for topic {} with candidates {}", path, candidates);
+            activeTopics.add(path);
+            Leadership leadership = electLeader(path, candidates.value());
+            if (leadership == null) {
+                // No leader could be determined yet; the periodic election
+                // runner will complete this future later.
+                pendingFutures.put(path, future);
+            } else {
+                future.complete(leadership);
+            }
+        } catch (ConsistentMapException e) {
+            log.debug("Failed to enter topic leader race for {}. Retrying.", path, e);
+            rerunForLeadership(path, future);
+        }
+    }
+
+    /**
+     * Withdraws this node from the leadership race for the given topic,
+     * relinquishing leadership if currently held.
+     *
+     * @param path topic to withdraw from
+     * @return future completed once the withdrawal has been applied
+     */
+    @Override
+    public CompletableFuture<Void> withdraw(String path) {
+        // Deactivate the topic first so the election runner stops contesting it.
+        activeTopics.remove(path);
+        CompletableFuture<Void> resultFuture = new CompletableFuture<>();
+        doWithdraw(path, resultFuture);
+        return resultFuture;
+    }
+
+
+    /**
+     * Performs (or retries) the actual withdrawal: releases leadership if
+     * held by this node and removes this node from the candidate list.
+     * <p>
+     * If the topic has become active again (a concurrent runForLeadership),
+     * the withdrawal is stale and the future is cancelled.
+     *
+     * @param path   topic being withdrawn from
+     * @param future future to complete when done
+     */
+    private void doWithdraw(String path, CompletableFuture<Void> future) {
+        if (activeTopics.contains(path)) {
+            future.completeExceptionally(new CancellationException(String.format("%s is now an active topic", path)));
+            // BUGFIX: must stop here; the original fell through and performed
+            // the withdrawal (and completed the future again) anyway.
+            return;
+        }
+        try {
+            // Release leadership only if this node is the current leader.
+            leaderMap.computeIf(path,
+                    localNodeId::equals,
+                    (topic, leader) -> null);
+            // Drop this node from the candidate list if present.
+            candidateMap.computeIf(path,
+                    candidates -> candidates != null && candidates.contains(localNodeId),
+                    (topic, candidates) -> candidates.stream()
+                            .filter(nodeId -> !localNodeId.equals(nodeId))
+                            .collect(Collectors.toList()));
+            future.complete(null);
+        } catch (Exception e) {
+            log.debug("Failed to verify (and clear) any lock this node might be holding for {}", path, e);
+            retryWithdraw(path, future);
+        }
+    }
+
+    /**
+     * Steps down as leader for the topic without abandoning candidacy.
+     *
+     * @param path topic to step down from
+     * @return true if this node relinquished leadership; false otherwise
+     */
+    @Override
+    public boolean stepdown(String path) {
+        // Only meaningful if we are contesting the topic and currently lead it.
+        if (!activeTopics.contains(path) || !Objects.equals(localNodeId, getLeader(path))) {
+            return false;
+        }
+
+        try {
+            // computeIf returns the resulting mapping; null means the entry
+            // was removed, i.e. leadership was successfully released.
+            return leaderMap.computeIf(path,
+                    localNodeId::equals,
+                    (topic, leader) -> null) == null;
+        } catch (Exception e) {
+            log.warn("Error executing stepdown for {}", path, e);
+        }
+        return false;
+    }
+
+    /**
+     * Registers a listener for leadership events.
+     *
+     * @param listener listener to add
+     */
+    @Override
+    public void addListener(LeadershipEventListener listener) {
+        listenerRegistry.addListener(listener);
+    }
+
+    /**
+     * Unregisters a previously added leadership event listener.
+     *
+     * @param listener listener to remove
+     */
+    @Override
+    public void removeListener(LeadershipEventListener listener) {
+        listenerRegistry.removeListener(listener);
+    }
+
+    /**
+     * Promotes the given node to the head of the candidate list for the
+     * topic, making it next in line for leadership.
+     *
+     * @param path   topic
+     * @param nodeId node to promote
+     * @return true if the node is (now) the top candidate; false otherwise
+     */
+    @Override
+    public boolean makeTopCandidate(String path, NodeId nodeId) {
+        // Reorder only when the node is a candidate and not already first.
+        Versioned<List<NodeId>> candidateList = candidateMap.computeIf(path,
+                candidates -> candidates != null &&
+                        candidates.contains(nodeId) &&
+                        !nodeId.equals(Iterables.getFirst(candidates, null)),
+                (topic, candidates) -> {
+                    // Rebuild the list with nodeId first, preserving the
+                    // relative order of the remaining candidates.
+                    List<NodeId> updatedCandidates = new ArrayList<>(candidates.size());
+                    updatedCandidates.add(nodeId);
+                    candidates.stream().filter(id -> !nodeId.equals(id)).forEach(updatedCandidates::add);
+                    return updatedCandidates;
+                });
+        List<NodeId> candidates = candidateList != null ? candidateList.value() : Collections.emptyList();
+        return candidates.size() > 0 && nodeId.equals(candidates.get(0));
+    }
+
+    /**
+     * Attempts to elect a leader for the topic from the given candidates.
+     * <p>
+     * If a leader already exists it is returned as-is. Otherwise, if this
+     * node is the first ACTIVE candidate, it tries to claim leadership via
+     * a put-if-absent on the leader map.
+     *
+     * @param path       topic
+     * @param candidates candidate nodes in priority order
+     * @return resulting leadership, or null if no leader could be determined
+     */
+    private Leadership electLeader(String path, List<NodeId> candidates) {
+        Leadership currentLeadership = getLeadership(path);
+        if (currentLeadership != null) {
+            return currentLeadership;
+        } else {
+            // First ACTIVE candidate wins the right to attempt the claim.
+            NodeId topCandidate = candidates
+                    .stream()
+                    .filter(n -> clusterService.getState(n) == ACTIVE)
+                    .findFirst()
+                    .orElse(null);
+            try {
+                Versioned<NodeId> leader = localNodeId.equals(topCandidate)
+                        ? leaderMap.computeIfAbsent(path, p -> localNodeId) : leaderMap.get(path);
+                if (leader != null) {
+                    Leadership newLeadership = new Leadership(path,
+                            leader.value(),
+                            leader.version(),
+                            leader.creationTime());
+                    // Since reads only go through the local copy of leader board, we ought to update it
+                    // first before returning from this method.
+                    // This is to ensure a subsequent read will not read a stale value.
+                    onLeadershipEvent(new LeadershipEvent(LeadershipEvent.Type.LEADER_ELECTED, newLeadership));
+                    return newLeadership;
+                }
+            } catch (Exception e) {
+                log.debug("Failed to elect leader for {}", path, e);
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Periodic election pass: for every topic this node is contesting, try
+     * to elect a leader, completing any future left pending by an earlier
+     * runForLeadership call; also re-publishes candidate info locally.
+     */
+    private void electLeaders() {
+        try {
+            candidateMap.entrySet().forEach(entry -> {
+                String path = entry.getKey();
+                Versioned<List<NodeId>> candidates = entry.getValue();
+                // for active topics, check if this node can become a leader (if it isn't already)
+                if (activeTopics.contains(path)) {
+                    // Run each election off the scheduler thread to keep the
+                    // periodic pass fast.
+                    lockExecutor.submit(() -> {
+                        Leadership leadership = electLeader(path, candidates.value());
+                        if (leadership != null) {
+                            CompletableFuture<Leadership> future = pendingFutures.remove(path);
+                            if (future != null) {
+                                future.complete(leadership);
+                            }
+                        }
+                    });
+                }
+                // Raise a CANDIDATES_CHANGED event to force refresh local candidate board
+                // and also to update local listeners.
+                // Don't worry about duplicate events as they will be suppressed.
+                onLeadershipEvent(new LeadershipEvent(LeadershipEvent.Type.CANDIDATES_CHANGED,
+                        new Leadership(path,
+                                candidates.value(),
+                                candidates.version(),
+                                candidates.creationTime())));
+            });
+        } catch (Exception e) {
+            log.debug("Failure electing leaders", e);
+        }
+    }
+
+    /**
+     * Applies a leadership event to the local leader/candidate boards and,
+     * if the update was accepted (i.e. not stale), forwards the event to
+     * registered listeners.
+     * <p>
+     * Staleness is decided by epoch: an update is accepted only when its
+     * epoch is newer than (or, for LEADER_BOOTED, at least as new as) the
+     * locally recorded one — this suppresses duplicate/out-of-order events.
+     *
+     * @param leadershipEvent event to apply
+     */
+    private void onLeadershipEvent(LeadershipEvent leadershipEvent) {
+        log.trace("Leadership Event: time = {} type = {} event = {}",
+                leadershipEvent.time(), leadershipEvent.type(),
+                leadershipEvent);
+
+        Leadership leadershipUpdate = leadershipEvent.subject();
+        LeadershipEvent.Type eventType = leadershipEvent.type();
+        String topic = leadershipUpdate.topic();
+
+        AtomicBoolean updateAccepted = new AtomicBoolean(false);
+        if (eventType.equals(LeadershipEvent.Type.LEADER_ELECTED)) {
+            leaderBoard.compute(topic, (k, currentLeadership) -> {
+                if (currentLeadership == null || currentLeadership.epoch() < leadershipUpdate.epoch()) {
+                    updateAccepted.set(true);
+                    return leadershipUpdate;
+                }
+                return currentLeadership;
+            });
+        } else if (eventType.equals(LeadershipEvent.Type.LEADER_BOOTED)) {
+            leaderBoard.compute(topic, (k, currentLeadership) -> {
+                // Note the <= : a boot at the same epoch as the recorded
+                // leadership must still evict it.
+                if (currentLeadership == null || currentLeadership.epoch() <= leadershipUpdate.epoch()) {
+                    updateAccepted.set(true);
+                    // FIXME: Removing entries from leaderboard is not safe and should be visited.
+                    return null;
+                }
+                return currentLeadership;
+            });
+        } else if (eventType.equals(LeadershipEvent.Type.CANDIDATES_CHANGED)) {
+            candidateBoard.compute(topic, (k, currentInfo) -> {
+                if (currentInfo == null || currentInfo.epoch() < leadershipUpdate.epoch()) {
+                    updateAccepted.set(true);
+                    return leadershipUpdate;
+                }
+                return currentInfo;
+            });
+        } else {
+            throw new IllegalStateException("Unknown event type.");
+        }
+
+        if (updateAccepted.get()) {
+            eventDispatcher.post(leadershipEvent);
+        }
+    }
+
+    /**
+     * Retries a failed leadership-race entry after a random delay in
+     * [0, WAIT_BEFORE_RETRY_MILLIS) to avoid synchronized retries.
+     *
+     * @param path   topic to re-contest
+     * @param future future to complete once leadership is known
+     */
+    private void rerunForLeadership(String path, CompletableFuture<Leadership> future) {
+        lockExecutor.schedule(
+                () -> doRunForLeadership(path, future),
+                RandomUtils.nextInt(WAIT_BEFORE_RETRY_MILLIS),
+                TimeUnit.MILLISECONDS);
+    }
+
+    /**
+     * Retries a failed withdrawal after a random delay in
+     * [0, WAIT_BEFORE_RETRY_MILLIS) to avoid synchronized retries.
+     *
+     * @param path   topic being withdrawn from
+     * @param future future to complete once the withdrawal succeeds
+     */
+    private void retryWithdraw(String path, CompletableFuture<Void> future) {
+        lockExecutor.schedule(
+                () -> doWithdraw(path, future),
+                RandomUtils.nextInt(WAIT_BEFORE_RETRY_MILLIS),
+                TimeUnit.MILLISECONDS);
+    }
+
+    /**
+     * Schedules a stale-leadership purge, coalescing concurrent requests:
+     * the compareAndSet guard ensures at most one purge is pending at a time.
+     *
+     * @param afterDelaySec delay in seconds before the purge runs
+     */
+    private void scheduleStaleLeadershipPurge(int afterDelaySec) {
+        if (staleLeadershipPurgeScheduled.compareAndSet(false, true)) {
+            staleLeadershipPurgeExecutor.schedule(
+                    this::purgeStaleLeadership,
+                    afterDelaySec,
+                    TimeUnit.SECONDS);
+        }
+    }
+
+    /**
+     * Purges locks held by inactive nodes and evicts inactive nodes from candidacy.
+     * <p>
+     * Runs on the stale-leadership executor. Any failure flips rerunPurge so
+     * the purge is rescheduled rather than silently dropped.
+     */
+    private void purgeStaleLeadership() {
+        AtomicBoolean rerunPurge = new AtomicBoolean(false);
+        try {
+            // Clear the "scheduled" flag up-front so that a failure occurring
+            // while we run can schedule a fresh purge.
+            staleLeadershipPurgeScheduled.set(false);
+            // Release leaderships held by nodes the cluster considers INACTIVE.
+            leaderMap.entrySet()
+                    .stream()
+                    .filter(e -> clusterService.getState(e.getValue().value()) == INACTIVE)
+                    .forEach(entry -> {
+                        String path = entry.getKey();
+                        NodeId nodeId = entry.getValue().value();
+                        try {
+                            leaderMap.computeIf(path, nodeId::equals, (topic, leader) -> null);
+                        } catch (Exception e) {
+                            log.debug("Failed to purge stale lock held by {} for {}", nodeId, path, e);
+                            rerunPurge.set(true);
+                        }
+                    });
+
+            // Evict inactive nodes (and this node, if it no longer contests
+            // the topic) from each candidate list.
+            candidateMap.entrySet()
+                    .forEach(entry -> {
+                        String path = entry.getKey();
+                        Versioned<List<NodeId>> candidates = entry.getValue();
+                        List<NodeId> candidatesList = candidates != null
+                                ? candidates.value() : Collections.emptyList();
+                        List<NodeId> activeCandidatesList =
+                                candidatesList.stream()
+                                        .filter(n -> clusterService.getState(n) == ACTIVE)
+                                        .filter(n -> !localNodeId.equals(n) || activeTopics.contains(path))
+                                        .collect(Collectors.toList());
+                        if (activeCandidatesList.size() < candidatesList.size()) {
+                            Set<NodeId> removedCandidates =
+                                    Sets.difference(Sets.newHashSet(candidatesList),
+                                            Sets.newHashSet(activeCandidatesList));
+                            try {
+                                // The update re-derives the active list inside
+                                // computeIf so it operates on current data.
+                                candidateMap.computeIf(path,
+                                        c -> c.stream()
+                                                .filter(n -> clusterService.getState(n) == INACTIVE)
+                                                .count() > 0,
+                                        (topic, c) -> c.stream()
+                                                .filter(n -> clusterService.getState(n) == ACTIVE)
+                                                .filter(n -> !localNodeId.equals(n) ||
+                                                        activeTopics.contains(path))
+                                                .collect(Collectors.toList()));
+                            } catch (Exception e) {
+                                log.debug("Failed to evict inactive candidates {} from "
+                                        + "candidate list for {}", removedCandidates, path, e);
+                                rerunPurge.set(true);
+                            }
+                        }
+                    });
+        } catch (Exception e) {
+            // BUGFIX: message previously read "purging state leadership".
+            log.debug("Failure purging stale leadership.", e);
+            rerunPurge.set(true);
+        }
+
+        if (rerunPurge.get()) {
+            log.debug("Rescheduling stale leadership purge due to errors encountered in previous run");
+            scheduleStaleLeadershipPurge(DELAY_BETWEEN_STALE_LEADERSHIP_PURGE_ATTEMPTS_SEC);
+        }
+    }
+
+    /**
+     * Periodically reconciles the local leader board with the authoritative
+     * leader map: boots leaders that disappeared, adds newly elected ones,
+     * and applies updates whose epoch is newer than the local record.
+     */
+    private void refreshLeaderBoard() {
+        try {
+            // Materialize the authoritative view from the shared map.
+            Map<String, Leadership> newLeaderBoard = Maps.newHashMap();
+            leaderMap.entrySet().forEach(entry -> {
+                String path = entry.getKey();
+                Versioned<NodeId> leader = entry.getValue();
+                Leadership leadership = new Leadership(path,
+                        leader.value(),
+                        leader.version(),
+                        leader.creationTime());
+                newLeaderBoard.put(path, leadership);
+            });
+
+            // first take snapshot of current leader board.
+            Map<String, Leadership> currentLeaderBoard = ImmutableMap.copyOf(leaderBoard);
+
+            MapDifference<String, Leadership> diff = Maps.difference(currentLeaderBoard, newLeaderBoard);
+
+            // evict stale leaders
+            diff.entriesOnlyOnLeft().forEach((path, leadership) -> {
+                log.debug("Evicting {} from leaderboard. It is no longer active leader.", leadership);
+                onLeadershipEvent(new LeadershipEvent(LeadershipEvent.Type.LEADER_BOOTED, leadership));
+            });
+
+            // add missing leaders
+            diff.entriesOnlyOnRight().forEach((path, leadership) -> {
+                log.debug("Adding {} to leaderboard. It is now the active leader.", leadership);
+                onLeadershipEvent(new LeadershipEvent(LeadershipEvent.Type.LEADER_ELECTED, leadership));
+            });
+
+            // add updated leaders
+            diff.entriesDiffering().forEach((path, difference) -> {
+                Leadership current = difference.leftValue();
+                Leadership updated = difference.rightValue();
+                // Only apply strictly newer epochs; older ones are stale.
+                if (current.epoch() < updated.epoch()) {
+                    log.debug("Updated {} in leaderboard.", updated);
+                    onLeadershipEvent(new LeadershipEvent(LeadershipEvent.Type.LEADER_ELECTED, updated));
+                }
+            });
+        } catch (Exception e) {
+            log.debug("Failed to refresh leader board", e);
+        }
+    }
+
+ private class InternalClusterEventListener implements ClusterEventListener {
+
+ @Override
+ public void event(ClusterEvent event) {
+ if (event.type() == Type.INSTANCE_DEACTIVATED || event.type() == Type.INSTANCE_REMOVED) {
+ scheduleStaleLeadershipPurge(0);
+ }
+ }
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Match.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Match.java
new file mode 100644
index 00000000..5f707d62
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Match.java
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+import java.util.Arrays;
+import java.util.Objects;
+import java.util.function.Function;
+
+/**
+ * Utility class for checking matching values.
+ * <p>
+ * A Match either matches any value, or matches exactly one specified value
+ * (which may be null). byte[] values are compared by content, not identity.
+ *
+ * @param <T> type of value
+ */
+public final class Match<T> {
+
+    private final boolean matchAny;
+    private final T value;
+
+    /**
+     * Returns a Match that matches any value.
+     * @param <T> match type
+     * @return new instance
+     */
+    public static <T> Match<T> any() {
+        return new Match<>();
+    }
+
+    /**
+     * Returns a Match that matches null values.
+     * @param <T> match type
+     * @return new instance
+     */
+    public static <T> Match<T> ifNull() {
+        return ifValue(null);
+    }
+
+    /**
+     * Returns a Match that matches only specified value.
+     * @param value value to match
+     * @param <T> match type
+     * @return new instance
+     */
+    public static <T> Match<T> ifValue(T value) {
+        return new Match<>(value);
+    }
+
+    private Match() {
+        matchAny = true;
+        value = null;
+    }
+
+    private Match(T value) {
+        matchAny = false;
+        this.value = value;
+    }
+
+    /**
+     * Maps this instance to a Match of another type.
+     * @param mapper transformation function
+     * @param <V> new match type
+     * @return new instance
+     */
+    public <V> Match<V> map(Function<T, V> mapper) {
+        if (matchAny) {
+            return any();
+        } else if (value == null) {
+            return ifNull();
+        } else {
+            return ifValue(mapper.apply(value));
+        }
+    }
+
+    /**
+     * Checks if this instance matches specified value.
+     * @param other other value
+     * @return true if matches; false otherwise
+     */
+    public boolean matches(T other) {
+        if (matchAny) {
+            return true;
+        } else if (other == null) {
+            return value == null;
+        } else {
+            if (value instanceof byte[]) {
+                // BUGFIX: guard the cast; the original threw
+                // ClassCastException when other was not a byte[].
+                return other instanceof byte[] && Arrays.equals((byte[]) value, (byte[]) other);
+            }
+            return Objects.equals(value, other);
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        // BUGFIX: hash byte[] by content so hashCode stays consistent with
+        // equals()/matches(), which compare array contents.
+        if (value instanceof byte[]) {
+            return Objects.hash(matchAny, Arrays.hashCode((byte[]) value));
+        }
+        return Objects.hash(matchAny, value);
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public boolean equals(Object other) {
+        if (!(other instanceof Match)) {
+            return false;
+        }
+        Match<T> that = (Match<T>) other;
+        // Primitive comparison avoids the boxed Objects.equals of the original.
+        if (this.matchAny != that.matchAny) {
+            return false;
+        }
+        // BUGFIX: compare byte[] values by content, mirroring matches();
+        // the original used reference equality for arrays.
+        if (this.value instanceof byte[] && that.value instanceof byte[]) {
+            return Arrays.equals((byte[]) this.value, (byte[]) that.value);
+        }
+        return Objects.equals(this.value, that.value);
+    }
+
+    @Override
+    public String toString() {
+        return toStringHelper(this)
+                .add("matchAny", matchAny)
+                .add("value", value)
+                .toString();
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/MeteringAgent.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/MeteringAgent.java
new file mode 100644
index 00000000..6475bf7b
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/MeteringAgent.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Timer;
+import com.google.common.collect.Maps;
+import org.onlab.metrics.MetricsComponent;
+import org.onlab.metrics.MetricsFeature;
+import org.onlab.metrics.MetricsService;
+import org.onlab.osgi.DefaultServiceDirectory;
+
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Agent that implements usage and performance monitoring via the metrics service.
+ * <p>
+ * When metering is disabled (activated == false) all operations short-circuit
+ * and no metrics objects are created or updated.
+ */
+public class MeteringAgent {
+
+    // Aggregated exception counters: across all objects, and per object.
+    private Counter exceptionCounter;
+    private Counter perObjExceptionCounter;
+    private MetricsService metricsService;
+    private MetricsComponent metricsComponent;
+    private MetricsFeature metricsFeature;
+    // Per-operation timers keyed by operation name: per object, and aggregated.
+    private final Map<String, Timer> perObjOpTimers = Maps.newConcurrentMap();
+    private final Map<String, Timer> perOpTimers = Maps.newConcurrentMap();
+    private Timer perPrimitiveTimer;
+    private Timer perObjTimer;
+    private MetricsFeature wildcard;
+    private final boolean activated;
+    // Shared no-op context returned when metering is disabled.
+    private Context nullTimer;
+
+    /**
+     * Constructs a new MeteringAgent for a given distributed primitive.
+     * Instantiates the metrics service
+     * Initializes all the general metrics for that object
+     *
+     * @param primitiveName Type of primitive to be metered
+     * @param objName Global name of the primitive
+     * @param activated boolean flag for whether metering is enabled or not
+     */
+    public MeteringAgent(String primitiveName, String objName, boolean activated) {
+        checkNotNull(objName, "Object name cannot be null");
+        this.activated = activated;
+        nullTimer = new Context(null, "");
+        // Metrics objects are only created when metering is enabled; all
+        // metrics fields stay null otherwise (guarded by the activated flag).
+        if (this.activated) {
+            this.metricsService = DefaultServiceDirectory.getService(MetricsService.class);
+            this.metricsComponent = metricsService.registerComponent(primitiveName);
+            this.metricsFeature = metricsComponent.registerFeature(objName);
+            this.wildcard = metricsComponent.registerFeature("*");
+            this.perObjTimer = metricsService.createTimer(metricsComponent, metricsFeature, "*");
+            this.perPrimitiveTimer = metricsService.createTimer(metricsComponent, wildcard, "*");
+            this.perObjExceptionCounter = metricsService.createCounter(metricsComponent, metricsFeature, "exceptions");
+            this.exceptionCounter = metricsService.createCounter(metricsComponent, wildcard, "exceptions");
+        }
+    }
+
+    /**
+     * Initializes a specific timer for a given operation.
+     *
+     * @param op Specific operation being metered
+     * @return timer context
+     */
+    public Context startTimer(String op) {
+        if (!activated) {
+            // No-op context: its stop() also short-circuits on !activated.
+            return nullTimer;
+        }
+        // Check if timer exists, if it doesn't creates it
+        // (the lambda parameter is the map key, i.e. the operation name)
+        final Timer currTimer = perObjOpTimers.computeIfAbsent(op, timer ->
+                metricsService.createTimer(metricsComponent, metricsFeature, op));
+        perOpTimers.computeIfAbsent(op, timer -> metricsService.createTimer(metricsComponent, wildcard, op));
+        // Starts timer
+        return new Context(currTimer.time(), op);
+    }
+
+    /**
+     * Timer.Context with a specific operation.
+     */
+    public class Context {
+        private final Timer.Context context;
+        private final String operation;
+
+        /**
+         * Constructs Context.
+         *
+         * @param context context
+         * @param operation operation name
+         */
+        public Context(Timer.Context context, String operation) {
+            this.context = context;
+            this.operation = operation;
+        }
+
+        /**
+         * Stops timer given a specific context and updates all related metrics.
+         * @param e throwable
+         */
+        public void stop(Throwable e) {
+            if (!activated) {
+                return;
+            }
+            if (e == null) {
+                //Stop and updates timer with specific measurements per map, per operation
+                final long time = context.stop();
+                //updates timer with aggregated measurements per map
+                perOpTimers.get(operation).update(time, TimeUnit.NANOSECONDS);
+                //updates timer with aggregated measurements per map
+                perObjTimer.update(time, TimeUnit.NANOSECONDS);
+                //updates timer with aggregated measurements per all Consistent Maps
+                perPrimitiveTimer.update(time, TimeUnit.NANOSECONDS);
+            } else {
+                // Any exception during the metered operation increments the
+                // aggregate and per-object exception counters.
+                exceptionCounter.inc();
+                perObjExceptionCounter.inc();
+            }
+        }
+    }
+
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/PartitionedDatabase.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/PartitionedDatabase.java
new file mode 100644
index 00000000..a294681e
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/PartitionedDatabase.java
@@ -0,0 +1,386 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+
+import org.onosproject.store.service.DatabaseUpdate;
+import org.onosproject.store.service.Transaction;
+import org.onosproject.store.service.Versioned;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+import net.kuujo.copycat.Task;
+import net.kuujo.copycat.cluster.Cluster;
+import net.kuujo.copycat.resource.ResourceState;
+import static com.google.common.base.Preconditions.checkState;
+
+/**
+ * A database that partitions the keys across one or more database partitions.
+ * <p>
+ * Map keys are routed to a partition by hashing the key; counters and queues
+ * are routed by hashing their name. Transactions that span multiple
+ * partitions are handed off to a {@link TransactionManager}, which runs a
+ * two phase commit protocol.
+ */
+public class PartitionedDatabase implements Database {
+
+    private final String name;
+    private final Partitioner<String> partitioner;
+    private final List<Database> partitions;
+    private final AtomicBoolean isOpen = new AtomicBoolean(false);
+    private static final String DB_NOT_OPEN = "Partitioned Database is not open";
+    private TransactionManager transactionManager;
+
+    /**
+     * Creates a database that partitions keys across the supplied partitions.
+     * Partitions are sorted by name so that the key-to-partition mapping is
+     * identical on every node.
+     *
+     * @param name database name
+     * @param partitions underlying database partitions
+     */
+    public PartitionedDatabase(
+            String name,
+            Collection<Database> partitions) {
+        this.name = name;
+        this.partitions = partitions
+                            .stream()
+                            .sorted((db1, db2) -> db1.name().compareTo(db2.name()))
+                            .collect(Collectors.toList());
+        this.partitioner = new SimpleKeyHashPartitioner(this.partitions);
+    }
+
+    /**
+     * Returns the databases for individual partitions.
+     *
+     * @return list of database partitions
+     */
+    public List<Database> getPartitions() {
+        return partitions;
+    }
+
+    /**
+     * Returns true if the database is open.
+     *
+     * @return true if open, false otherwise
+     */
+    @Override
+    public boolean isOpen() {
+        return isOpen.get();
+    }
+
+    @Override
+    public CompletableFuture<Set<String>> maps() {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        Set<String> mapNames = Sets.newConcurrentHashSet();
+        return CompletableFuture.allOf(partitions
+                    .stream()
+                    .map(db -> db.maps().thenApply(mapNames::addAll))
+                    .toArray(CompletableFuture[]::new))
+                .thenApply(v -> mapNames);
+    }
+
+    @Override
+    public CompletableFuture<Map<String, Long>> counters() {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        Map<String, Long> counters = Maps.newConcurrentMap();
+        return CompletableFuture.allOf(partitions
+                    .stream()
+                    .map(db -> db.counters()
+                            .thenApply(m -> {
+                                counters.putAll(m);
+                                return null;
+                            }))
+                    .toArray(CompletableFuture[]::new))
+                .thenApply(v -> counters);
+    }
+
+    @Override
+    public CompletableFuture<Integer> mapSize(String mapName) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        AtomicInteger totalSize = new AtomicInteger(0);
+        return CompletableFuture.allOf(partitions
+                    .stream()
+                    .map(p -> p.mapSize(mapName).thenApply(totalSize::addAndGet))
+                    .toArray(CompletableFuture[]::new))
+                .thenApply(v -> totalSize.get());
+    }
+
+    @Override
+    public CompletableFuture<Boolean> mapIsEmpty(String mapName) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        return mapSize(mapName).thenApply(size -> size == 0);
+    }
+
+    @Override
+    public CompletableFuture<Boolean> mapContainsKey(String mapName, String key) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        return partitioner.getPartition(mapName, key).mapContainsKey(mapName, key);
+    }
+
+    @Override
+    public CompletableFuture<Boolean> mapContainsValue(String mapName, byte[] value) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        AtomicBoolean containsValue = new AtomicBoolean(false);
+        return CompletableFuture.allOf(partitions
+                    .stream()
+                    .map(p -> p.mapContainsValue(mapName, value)
+                            .thenApply(v -> containsValue.compareAndSet(false, v)))
+                    .toArray(CompletableFuture[]::new))
+                .thenApply(v -> containsValue.get());
+    }
+
+    @Override
+    public CompletableFuture<Versioned<byte[]>> mapGet(String mapName, String key) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        return partitioner.getPartition(mapName, key).mapGet(mapName, key);
+    }
+
+    @Override
+    public CompletableFuture<Result<UpdateResult<String, byte[]>>> mapUpdate(
+            String mapName, String key, Match<byte[]> valueMatch,
+            Match<Long> versionMatch, byte[] value) {
+        // Guard against use before open, consistent with every other map operation.
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        return partitioner.getPartition(mapName, key).mapUpdate(mapName, key, valueMatch, versionMatch, value);
+    }
+
+    @Override
+    public CompletableFuture<Result<Void>> mapClear(String mapName) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        AtomicBoolean isLocked = new AtomicBoolean(false);
+        return CompletableFuture.allOf(partitions
+                    .stream()
+                    .map(p -> p.mapClear(mapName)
+                            .thenApply(v -> isLocked.compareAndSet(false, Result.Status.LOCKED == v.status())))
+                    .toArray(CompletableFuture[]::new))
+                .thenApply(v -> isLocked.get() ? Result.locked() : Result.ok(null));
+    }
+
+    @Override
+    public CompletableFuture<Set<String>> mapKeySet(String mapName) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        Set<String> keySet = Sets.newConcurrentHashSet();
+        return CompletableFuture.allOf(partitions
+                    .stream()
+                    .map(p -> p.mapKeySet(mapName).thenApply(keySet::addAll))
+                    .toArray(CompletableFuture[]::new))
+                .thenApply(v -> keySet);
+    }
+
+    @Override
+    public CompletableFuture<Collection<Versioned<byte[]>>> mapValues(String mapName) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        // Thread-safe list: per-partition callbacks may complete concurrently.
+        List<Versioned<byte[]>> values = new CopyOnWriteArrayList<>();
+        return CompletableFuture.allOf(partitions
+                    .stream()
+                    .map(p -> p.mapValues(mapName).thenApply(values::addAll))
+                    .toArray(CompletableFuture[]::new))
+                .thenApply(v -> values);
+    }
+
+    @Override
+    public CompletableFuture<Set<Entry<String, Versioned<byte[]>>>> mapEntrySet(String mapName) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        Set<Entry<String, Versioned<byte[]>>> entrySet = Sets.newConcurrentHashSet();
+        return CompletableFuture.allOf(partitions
+                    .stream()
+                    .map(p -> p.mapEntrySet(mapName).thenApply(entrySet::addAll))
+                    .toArray(CompletableFuture[]::new))
+                .thenApply(v -> entrySet);
+    }
+
+    @Override
+    public CompletableFuture<Long> counterGet(String counterName) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        // Counters live wholly in one partition, keyed by the counter name.
+        return partitioner.getPartition(counterName, counterName).counterGet(counterName);
+    }
+
+    @Override
+    public CompletableFuture<Long> counterAddAndGet(String counterName, long delta) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        return partitioner.getPartition(counterName, counterName).counterAddAndGet(counterName, delta);
+    }
+
+    @Override
+    public CompletableFuture<Long> counterGetAndAdd(String counterName, long delta) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        return partitioner.getPartition(counterName, counterName).counterGetAndAdd(counterName, delta);
+    }
+
+    @Override
+    public CompletableFuture<Long> queueSize(String queueName) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        // Queues live wholly in one partition, keyed by the queue name.
+        return partitioner.getPartition(queueName, queueName).queueSize(queueName);
+    }
+
+    @Override
+    public CompletableFuture<Void> queuePush(String queueName, byte[] entry) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        return partitioner.getPartition(queueName, queueName).queuePush(queueName, entry);
+    }
+
+    @Override
+    public CompletableFuture<byte[]> queuePop(String queueName) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        return partitioner.getPartition(queueName, queueName).queuePop(queueName);
+    }
+
+    @Override
+    public CompletableFuture<byte[]> queuePeek(String queueName) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        return partitioner.getPartition(queueName, queueName).queuePeek(queueName);
+    }
+
+    @Override
+    public CompletableFuture<CommitResponse> prepareAndCommit(Transaction transaction) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        Map<Database, Transaction> subTransactions = createSubTransactions(transaction);
+        if (subTransactions.isEmpty()) {
+            // Nothing to commit.
+            return CompletableFuture.completedFuture(CommitResponse.success(ImmutableList.of()));
+        } else if (subTransactions.size() == 1) {
+            // Single partition: commit directly without two phase commit overhead.
+            Entry<Database, Transaction> entry =
+                    subTransactions.entrySet().iterator().next();
+            return entry.getKey().prepareAndCommit(entry.getValue());
+        } else {
+            // Multi-partition transaction: requires the two phase commit coordinator.
+            if (transactionManager == null) {
+                throw new IllegalStateException("TransactionManager is not initialized");
+            }
+            return transactionManager.execute(transaction);
+        }
+    }
+
+    @Override
+    public CompletableFuture<Boolean> prepare(Transaction transaction) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        Map<Database, Transaction> subTransactions = createSubTransactions(transaction);
+        AtomicBoolean status = new AtomicBoolean(true);
+        return CompletableFuture.allOf(subTransactions.entrySet()
+                    .stream()
+                    .map(entry -> entry
+                            .getKey()
+                            .prepare(entry.getValue())
+                            .thenApply(v -> status.compareAndSet(true, v)))
+                    .toArray(CompletableFuture[]::new))
+                .thenApply(v -> status.get());
+    }
+
+    @Override
+    public CompletableFuture<CommitResponse> commit(Transaction transaction) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        Map<Database, Transaction> subTransactions = createSubTransactions(transaction);
+        AtomicBoolean success = new AtomicBoolean(true);
+        // Thread-safe list: per-partition commit callbacks may run concurrently.
+        List<UpdateResult<String, byte[]>> allUpdates = new CopyOnWriteArrayList<>();
+        return CompletableFuture.allOf(subTransactions.entrySet()
+                    .stream()
+                    .map(entry -> entry.getKey().commit(entry.getValue())
+                            .thenAccept(response -> {
+                                if (response.success()) {
+                                    allUpdates.addAll(response.updates());
+                                } else {
+                                    success.set(false);
+                                }
+                            }))
+                    .toArray(CompletableFuture[]::new))
+                .thenApply(v -> success.get() ?
+                        CommitResponse.success(allUpdates) : CommitResponse.failure());
+    }
+
+    @Override
+    public CompletableFuture<Boolean> rollback(Transaction transaction) {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        Map<Database, Transaction> subTransactions = createSubTransactions(transaction);
+        return CompletableFuture.allOf(subTransactions.entrySet()
+                    .stream()
+                    .map(entry -> entry.getKey().rollback(entry.getValue()))
+                    .toArray(CompletableFuture[]::new))
+                .thenApply(v -> true);
+    }
+
+    @Override
+    public CompletableFuture<Database> open() {
+        return CompletableFuture.allOf(partitions
+                    .stream()
+                    .map(Database::open)
+                    .toArray(CompletableFuture[]::new))
+                .thenApply(v -> {
+                    isOpen.set(true);
+                    return this;
+                });
+    }
+
+    @Override
+    public CompletableFuture<Void> close() {
+        checkState(isOpen.get(), DB_NOT_OPEN);
+        return CompletableFuture.allOf(partitions
+                    .stream()
+                    .map(database -> database.close())
+                    .toArray(CompletableFuture[]::new))
+                // Mark the database closed so isOpen()/isClosed() reflect reality.
+                .thenRun(() -> isOpen.set(false));
+    }
+
+    @Override
+    public boolean isClosed() {
+        return !isOpen.get();
+    }
+
+    @Override
+    public String name() {
+        return name;
+    }
+
+    @Override
+    public Cluster cluster() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Database addStartupTask(Task<CompletableFuture<Void>> task) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Database addShutdownTask(Task<CompletableFuture<Void>> task) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public ResourceState state() {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * Splits a transaction into one sub-transaction per involved partition,
+     * grouping updates by the partition that owns each (mapName, key).
+     */
+    private Map<Database, Transaction> createSubTransactions(
+            Transaction transaction) {
+        Map<Database, List<DatabaseUpdate>> perPartitionUpdates = Maps.newHashMap();
+        for (DatabaseUpdate update : transaction.updates()) {
+            Database partition = partitioner.getPartition(update.mapName(), update.key());
+            List<DatabaseUpdate> partitionUpdates =
+                    perPartitionUpdates.computeIfAbsent(partition, k -> Lists.newLinkedList());
+            partitionUpdates.add(update);
+        }
+        Map<Database, Transaction> subTransactions = Maps.newHashMap();
+        // Sub-transactions share the parent transaction's id.
+        perPartitionUpdates.forEach((k, v) -> subTransactions.put(k, new DefaultTransaction(transaction.id(), v)));
+        return subTransactions;
+    }
+
+    /**
+     * Sets the coordinator used for multi-partition (two phase commit) transactions.
+     *
+     * @param transactionManager transaction manager
+     */
+    protected void setTransactionManager(TransactionManager transactionManager) {
+        this.transactionManager = transactionManager;
+    }
+
+    @Override
+    public void registerConsumer(Consumer<StateMachineUpdate> consumer) {
+        partitions.forEach(p -> p.registerConsumer(consumer));
+    }
+
+    @Override
+    public void unregisterConsumer(Consumer<StateMachineUpdate> consumer) {
+        partitions.forEach(p -> p.unregisterConsumer(consumer));
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Partitioner.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Partitioner.java
new file mode 100644
index 00000000..de630b90
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Partitioner.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+/**
+ * Partitioner is responsible for mapping keys to individual database partitions.
+ *
+ * @param <K> key type.
+ */
+public interface Partitioner<K> {
+
+    /**
+     * Returns the database partition that owns the specified key.
+     *
+     * @param mapName map name
+     * @param key key
+     * @return Database partition
+     */
+    Database getPartition(String mapName, K key);
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Result.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Result.java
new file mode 100644
index 00000000..856f706d
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Result.java
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+import java.util.Objects;
+
+/**
+ * Result of a database update operation.
+ *
+ * @param <V> return value type
+ */
+public final class Result<V> {
+
+    public enum Status {
+        /**
+         * Indicates a successful update.
+         */
+        OK,
+
+        /**
+         * Indicates a failure due to underlying state being locked by another transaction.
+         */
+        LOCKED
+    }
+
+    private final Status status;
+    private final V value;
+
+    /**
+     * Creates a new Result instance with the specified value with status set to Status.OK.
+     *
+     * @param <V> result value type
+     * @param value result value
+     * @return Result instance
+     */
+    public static <V> Result<V> ok(V value) {
+        return new Result<>(value, Status.OK);
+    }
+
+    /**
+     * Creates a new Result instance with status set to Status.LOCKED.
+     *
+     * @param <V> result value type
+     * @return Result instance
+     */
+    public static <V> Result<V> locked() {
+        return new Result<>(null, Status.LOCKED);
+    }
+
+    // Private: instances are created only via the ok()/locked() factories.
+    private Result(V value, Status status) {
+        this.value = value;
+        this.status = status;
+    }
+
+    /**
+     * Returns true if this result indicates a successful execution i.e status is Status.OK.
+     *
+     * @return true if successful, false otherwise
+     */
+    public boolean success() {
+        return status == Status.OK;
+    }
+
+    /**
+     * Returns the status of database update operation.
+     *
+     * @return database update status
+     */
+    public Status status() {
+        return status;
+    }
+
+    /**
+     * Returns the return value for the update.
+     *
+     * @return value returned by database update; null if the status is
+     * anything other than Status.OK
+     */
+    public V value() {
+        return value;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(value, status);
+    }
+
+    // Unchecked cast is safe: equality only uses Objects.equals on the fields.
+    @SuppressWarnings("unchecked")
+    @Override
+    public boolean equals(Object other) {
+        if (!(other instanceof Result)) {
+            return false;
+        }
+        Result<V> that = (Result<V>) other;
+        return Objects.equals(this.value, that.value) &&
+               Objects.equals(this.status, that.status);
+    }
+
+    @Override
+    public String toString() {
+        return toStringHelper(this)
+                .add("status", status)
+                .add("value", value)
+                .toString();
+    }
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/SimpleKeyHashPartitioner.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/SimpleKeyHashPartitioner.java
new file mode 100644
index 00000000..40864286
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/SimpleKeyHashPartitioner.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+import java.util.List;
+
+/**
+ * A simple Partitioner for mapping keys to database partitions.
+ * <p>
+ * This class uses a md5 hash based hashing scheme for hashing the key to
+ * a partition.
+ *
+ */
+public class SimpleKeyHashPartitioner extends DatabasePartitioner {
+
+    /**
+     * Constructs a partitioner over the given (ordered) list of partitions.
+     *
+     * @param partitions database partitions
+     */
+    public SimpleKeyHashPartitioner(List<Database> partitions) {
+        super(partitions);
+    }
+
+    @Override
+    public Database getPartition(String mapName, String key) {
+        // NOTE(review): assumes DatabasePartitioner.hash(...) returns a
+        // non-negative value; a negative hash would make this index negative.
+        return partitions.get(hash(key) % partitions.size());
+    }
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/SimpleTableHashPartitioner.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/SimpleTableHashPartitioner.java
new file mode 100644
index 00000000..8dc26e0f
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/SimpleTableHashPartitioner.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.consistent.impl;
+
+import java.util.List;
+
+/**
+ * A simple Partitioner that uses the map name hash to
+ * pick a partition.
+ * <p>
+ * This class uses a md5 hash based hashing scheme for hashing the map name to
+ * a partition. This partitioner maps all keys for a map to the same database
+ * partition.
+ */
+public class SimpleTableHashPartitioner extends DatabasePartitioner {
+
+    /**
+     * Constructs a partitioner over the given (ordered) list of partitions.
+     *
+     * @param partitions database partitions
+     */
+    public SimpleTableHashPartitioner(List<Database> partitions) {
+        super(partitions);
+    }
+
+    @Override
+    public Database getPartition(String mapName, String key) {
+        // NOTE(review): assumes DatabasePartitioner.hash(...) returns a
+        // non-negative value; a negative hash would make this index negative.
+        return partitions.get(hash(mapName) % partitions.size());
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/StateMachineUpdate.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/StateMachineUpdate.java
new file mode 100644
index 00000000..72356d0b
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/StateMachineUpdate.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+/**
+ * Representation of a state machine update.
+ * <p>
+ * Wraps the name of the operation applied to the state machine along with its
+ * input and output, and classifies the operation into a coarse {@link Target}.
+ */
+public class StateMachineUpdate {
+
+    /**
+     * Target data structure type this update is for.
+     */
+    enum Target {
+        /**
+         * Update is for a map.
+         */
+        MAP_UPDATE,
+
+        /**
+         * Update is a transaction commit.
+         */
+        TX_COMMIT,
+
+        /**
+         * Update is a queue push.
+         */
+        QUEUE_PUSH,
+
+        /**
+         * Update is for some other operation.
+         */
+        OTHER
+    }
+
+    private final String operationName;
+    private final Object input;
+    private final Object output;
+
+    /**
+     * Creates a new state machine update.
+     *
+     * @param operationName name of the state machine operation
+     * @param input operation input
+     * @param output operation output
+     */
+    public StateMachineUpdate(String operationName, Object input, Object output) {
+        this.operationName = operationName;
+        this.input = input;
+        this.output = output;
+    }
+
+    /**
+     * Classifies this update by substring-matching the operation name.
+     *
+     * @return target data structure type
+     */
+    public Target target() {
+        // FIXME: This check is brittle
+        if (operationName.contains("mapUpdate")) {
+            return Target.MAP_UPDATE;
+        } else if (operationName.contains("commit") || operationName.contains("prepareAndCommit")) {
+            return Target.TX_COMMIT;
+        } else if (operationName.contains("queuePush")) {
+            return Target.QUEUE_PUSH;
+        } else {
+            return Target.OTHER;
+        }
+    }
+
+    // Unchecked cast: callers must request the type actually stored as input.
+    @SuppressWarnings("unchecked")
+    public <T> T input() {
+        return (T) input;
+    }
+
+    // Unchecked cast: callers must request the type actually stored as output.
+    @SuppressWarnings("unchecked")
+    public <T> T output() {
+        return (T) output;
+    }
+
+    @Override
+    public String toString() {
+        return toStringHelper(this)
+                .add("name", operationName)
+                .add("input", input)
+                .add("output", output)
+                .toString();
+    }
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/TransactionManager.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/TransactionManager.java
new file mode 100644
index 00000000..fc6e58d0
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/TransactionManager.java
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.concurrent.CompletableFuture;
+import java.util.stream.Collectors;
+
+import org.onlab.util.KryoNamespace;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.service.AsyncConsistentMap;
+import org.onosproject.store.service.ConsistentMapBuilder;
+import org.onosproject.store.service.DatabaseUpdate;
+import org.onosproject.store.service.Serializer;
+import org.onosproject.store.service.Transaction;
+import org.onosproject.store.service.Versioned;
+import org.onosproject.store.service.Transaction.State;
+
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Agent that runs the two phase commit protocol.
+ * <p>
+ * Transaction state is persisted in a distributed map ("onos-transactions")
+ * so that an in-flight transaction can be resumed after a coordinator change.
+ */
+public class TransactionManager {
+
+    // Serializer namespace for the types stored in the transactions map.
+    private static final KryoNamespace KRYO_NAMESPACE = KryoNamespace.newBuilder()
+            .register(KryoNamespaces.BASIC)
+            .nextId(KryoNamespace.FLOATING_ID)
+            .register(Versioned.class)
+            .register(DatabaseUpdate.class)
+            .register(DatabaseUpdate.Type.class)
+            .register(DefaultTransaction.class)
+            .register(Transaction.State.class)
+            .build();
+
+    private final Serializer serializer = Serializer.using(Arrays.asList(KRYO_NAMESPACE));
+    private final Database database;
+    private final AsyncConsistentMap<Long, Transaction> transactions;
+
+    /**
+     * Constructs a new TransactionManager for the specified database instance.
+     *
+     * @param database database
+     * @param mapBuilder builder for ConsistentMap instances
+     */
+    public TransactionManager(Database database, ConsistentMapBuilder<Long, Transaction> mapBuilder) {
+        this.database = checkNotNull(database, "database cannot be null");
+        this.transactions = mapBuilder.withName("onos-transactions")
+                                      .withSerializer(serializer)
+                                      .buildAsyncMap();
+    }
+
+    /**
+     * Executes the specified transaction by employing a two phase commit protocol.
+     * <p>
+     * A transaction found in a terminal or intermediate state is resumed from
+     * that state rather than restarted.
+     *
+     * @param transaction transaction to commit
+     * @return transaction result. Result value true indicates a successful commit, false
+     * indicates abort
+     */
+    public CompletableFuture<CommitResponse> execute(Transaction transaction) {
+        // clean up if this transaction in already in a terminal state.
+        if (transaction.state() == Transaction.State.COMMITTED ||
+                transaction.state() == Transaction.State.ROLLEDBACK) {
+            return transactions.remove(transaction.id()).thenApply(v -> CommitResponse.success(ImmutableList.of()));
+        } else if (transaction.state() == Transaction.State.COMMITTING) {
+            // Prepare already succeeded; resume at the commit phase.
+            return commit(transaction);
+        } else if (transaction.state() == Transaction.State.ROLLINGBACK) {
+            // Prepare already failed; resume at the rollback phase.
+            return rollback(transaction).thenApply(v -> CommitResponse.success(ImmutableList.of()));
+        } else {
+            // Fresh transaction: run full two phase commit.
+            return prepare(transaction).thenCompose(v -> v ? commit(transaction) : rollback(transaction));
+        }
+    }
+
+    /**
+     * Returns all transactions in the system.
+     *
+     * @return future for a collection of transactions
+     */
+    public CompletableFuture<Collection<Transaction>> getTransactions() {
+        return transactions.values().thenApply(c -> {
+            Collection<Transaction> txns = c.stream().map(v -> v.value()).collect(Collectors.toList());
+            return txns;
+        });
+    }
+
+    // Phase 1: record the transaction, ask the database to prepare, then
+    // persist the next state (COMMITTING on success, ROLLINGBACK on failure).
+    private CompletableFuture<Boolean> prepare(Transaction transaction) {
+        return transactions.put(transaction.id(), transaction)
+                .thenCompose(v -> database.prepare(transaction))
+                .thenCompose(status -> transactions.put(
+                            transaction.id(),
+                            transaction.transition(status ? State.COMMITTING : State.ROLLINGBACK))
+                        .thenApply(v -> status));
+    }
+
+    // Phase 2 (commit path).
+    // NOTE(review): whenComplete records COMMITTED even if the commit future
+    // completed exceptionally — confirm this is the intended recovery semantics.
+    private CompletableFuture<CommitResponse> commit(Transaction transaction) {
+        return database.commit(transaction)
+                .whenComplete((r, e) -> transactions.put(
+                        transaction.id(),
+                        transaction.transition(Transaction.State.COMMITTED)));
+    }
+
+    // Phase 2 (rollback path): roll back all partitions, persist ROLLEDBACK,
+    // and report the overall transaction as failed.
+    private CompletableFuture<CommitResponse> rollback(Transaction transaction) {
+        return database.rollback(transaction)
+                .thenCompose(v -> transactions.put(
+                            transaction.id(),
+                            transaction.transition(Transaction.State.ROLLEDBACK)))
+                .thenApply(v -> CommitResponse.failure());
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/UpdateResult.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/UpdateResult.java
new file mode 100644
index 00000000..50b78dd4
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/UpdateResult.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.consistent.impl;
+
+import java.util.function.Function;
+
+import org.onosproject.store.service.MapEvent;
+import org.onosproject.store.service.Versioned;
+
+/**
+ * Result of an update operation.
+ * <p>
+ * Both old and new values are accessible along with a flag that indicates if
+ * the value was updated. If flag is false, oldValue and newValue both
+ * point to the same unmodified value.
+ *
+ * @param <K> key type
+ * @param <V> result type
+ */
+public class UpdateResult<K, V> {
+
+    private final boolean updated;
+    private final String mapName;
+    private final K key;
+    private final Versioned<V> oldValue;
+    private final Versioned<V> newValue;
+
+    /**
+     * Creates a new update result.
+     *
+     * @param updated whether the update actually took effect
+     * @param mapName name of the map the update applies to
+     * @param key updated key
+     * @param oldValue value before the update (null for an insert)
+     * @param newValue value after the update (null for a removal)
+     */
+    public UpdateResult(boolean updated, String mapName, K key, Versioned<V> oldValue, Versioned<V> newValue) {
+        this.updated = updated;
+        this.mapName = mapName;
+        this.key = key;
+        this.oldValue = oldValue;
+        this.newValue = newValue;
+    }
+
+    public boolean updated() {
+        return updated;
+    }
+
+    public String mapName() {
+        return mapName;
+    }
+
+    public K key() {
+        return key;
+    }
+
+    public Versioned<V> oldValue() {
+        return oldValue;
+    }
+
+    public Versioned<V> newValue() {
+        return newValue;
+    }
+
+    /**
+     * Returns a copy of this result with the key and values transformed.
+     *
+     * @param keyTransform function applied to the key
+     * @param valueMapper function applied to the old/new values (null-safe)
+     * @return transformed update result
+     */
+    public <K1, V1> UpdateResult<K1, V1> map(Function<K, K1> keyTransform, Function<V, V1> valueMapper) {
+        return new UpdateResult<>(updated,
+                mapName,
+                keyTransform.apply(key),
+                oldValue == null ? null : oldValue.map(valueMapper),
+                newValue == null ? null : newValue.map(valueMapper));
+    }
+
+    /**
+     * Converts this result to a MapEvent, or null if nothing was updated.
+     * <p>
+     * Event type: INSERT when there was no old value, REMOVE when there is no
+     * new value, UPDATE otherwise. REMOVE events carry the old value.
+     *
+     * @return map event, or null if the update had no effect
+     */
+    public MapEvent<K, V> toMapEvent() {
+        if (!updated) {
+            return null;
+        } else {
+            MapEvent.Type eventType = oldValue == null ?
+                    MapEvent.Type.INSERT : newValue == null ? MapEvent.Type.REMOVE : MapEvent.Type.UPDATE;
+            Versioned<V> eventValue = eventType == MapEvent.Type.REMOVE ? oldValue : newValue;
+            return new MapEvent<>(mapName(), eventType, key(), eventValue);
+        }
+    }
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/package-info.java
new file mode 100644
index 00000000..3dae86b5
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/consistent/impl/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of partitioned and distributed store facility capable of
+ * providing consistent update semantics.
+ */
+package org.onosproject.store.consistent.impl; \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/AppIdEvent.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/AppIdEvent.java
new file mode 100644
index 00000000..9f021b13
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/AppIdEvent.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.core.impl;
+
+import org.onosproject.core.ApplicationId;
+import org.onosproject.event.AbstractEvent;
+
+/**
+ * Application ID event.
+ */
+public class AppIdEvent extends AbstractEvent<AppIdEvent.Type, ApplicationId> {
+
+ public enum Type {
+ // Signifies that a new application ID has been registered.
+ APP_REGISTERED
+ }
+
+ /**
+ * Creates an event of the given type for the given application ID.
+ *
+ * @param type event type
+ * @param subject application ID that is the subject of the event
+ */
+ protected AppIdEvent(Type type, ApplicationId subject) {
+ super(type, subject);
+ }
+
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/AppIdStoreDelegate.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/AppIdStoreDelegate.java
new file mode 100644
index 00000000..6240a311
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/AppIdStoreDelegate.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.core.impl;
+
+import org.onosproject.store.StoreDelegate;
+
+/**
+ * Application ID store delegate.
+ */
+public interface AppIdStoreDelegate extends StoreDelegate<AppIdEvent> {
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/ConsistentApplicationIdStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/ConsistentApplicationIdStore.java
new file mode 100644
index 00000000..e54b0ee5
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/ConsistentApplicationIdStore.java
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.core.impl;
+
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ScheduledExecutorService;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.KryoNamespace;
+import org.onlab.util.Tools;
+import org.onosproject.core.ApplicationId;
+import org.onosproject.core.ApplicationIdStore;
+import org.onosproject.core.DefaultApplicationId;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.service.AtomicCounter;
+import org.onosproject.store.service.ConsistentMap;
+import org.onosproject.store.service.Serializer;
+import org.onosproject.store.service.StorageException;
+import org.onosproject.store.service.StorageService;
+import org.onosproject.store.service.Versioned;
+import org.slf4j.Logger;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
+
+/**
+ * ApplicationIdStore implementation on top of {@code AtomicCounter}
+ * and {@code ConsistentMap} primitives.
+ */
+@Component(immediate = true, enabled = true)
+@Service
+public class ConsistentApplicationIdStore implements ApplicationIdStore {
+
+ private final Logger log = getLogger(getClass());
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected StorageService storageService;
+
+ // Cluster-wide counter used to allocate new numeric application IDs.
+ private AtomicCounter appIdCounter;
+ // Authoritative name -> ApplicationId registry shared across the cluster.
+ private ConsistentMap<String, ApplicationId> registeredIds;
+ // Local read caches; entries are only ever added, never invalidated.
+ private Map<String, ApplicationId> nameToAppIdCache = Maps.newConcurrentMap();
+ private Map<Short, ApplicationId> idToAppIdCache = Maps.newConcurrentMap();
+
+ private static final Serializer SERIALIZER = Serializer.using(new KryoNamespace.Builder()
+ .register(KryoNamespaces.API)
+ .nextId(KryoNamespaces.BEGIN_USER_CUSTOM_ID)
+ .build());
+
+ @Activate
+ public void activate() {
+ appIdCounter = storageService.atomicCounterBuilder()
+ .withName("onos-app-id-counter")
+ .withPartitionsDisabled()
+ .build();
+
+ registeredIds = storageService.<String, ApplicationId>consistentMapBuilder()
+ .withName("onos-app-ids")
+ .withPartitionsDisabled()
+ .withSerializer(SERIALIZER)
+ .build();
+
+ primeAppIds();
+
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ // Nothing to tear down. (A ScheduledExecutorService field that was
+ // never initialized used to be shut down here, which made every
+ // deactivation throw a NullPointerException; the dead field and the
+ // shutdown call have been removed.)
+ log.info("Stopped");
+ }
+
+ @Override
+ public Set<ApplicationId> getAppIds() {
+ // TODO: Rework this when we have notification support in ConsistentMap.
+ primeAppIds();
+ return ImmutableSet.copyOf(nameToAppIdCache.values());
+ }
+
+ @Override
+ public ApplicationId getAppId(Short id) {
+ if (!idToAppIdCache.containsKey(id)) {
+ // Cache miss: refresh from the distributed registry before answering.
+ primeAppIds();
+ }
+ return idToAppIdCache.get(id);
+ }
+
+ @Override
+ public ApplicationId getAppId(String name) {
+ ApplicationId appId = nameToAppIdCache.computeIfAbsent(name, key -> {
+ Versioned<ApplicationId> existingAppId = registeredIds.get(key);
+ return existingAppId != null ? existingAppId.value() : null;
+ });
+ if (appId != null) {
+ idToAppIdCache.putIfAbsent(appId.id(), appId);
+ }
+ return appId;
+ }
+
+ @Override
+ public ApplicationId registerApplication(String name) {
+ ApplicationId appId = nameToAppIdCache.computeIfAbsent(name, key -> {
+ Versioned<ApplicationId> existingAppId = registeredIds.get(name);
+ if (existingAppId == null) {
+ // Allocate a fresh numeric ID; retried once on StorageException.
+ int id = Tools.retryable(appIdCounter::incrementAndGet, StorageException.class, 1, 2000)
+ .get()
+ .intValue();
+ DefaultApplicationId newAppId = new DefaultApplicationId(id, name);
+ // putIfAbsent resolves races with concurrent registrations on
+ // other nodes; whichever entry landed in the map first wins.
+ existingAppId = registeredIds.putIfAbsent(name, newAppId);
+ if (existingAppId != null) {
+ return existingAppId.value();
+ } else {
+ return newAppId;
+ }
+ } else {
+ return existingAppId.value();
+ }
+ });
+ idToAppIdCache.putIfAbsent(appId.id(), appId);
+ return appId;
+ }
+
+ // Populates the local caches from the distributed registry.
+ private void primeAppIds() {
+ registeredIds.values()
+ .stream()
+ .map(Versioned::value)
+ .forEach(appId -> {
+ nameToAppIdCache.putIfAbsent(appId.name(), appId);
+ idToAppIdCache.putIfAbsent(appId.id(), appId);
+ });
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/ConsistentIdBlockStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/ConsistentIdBlockStore.java
new file mode 100644
index 00000000..8913742d
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/ConsistentIdBlockStore.java
@@ -0,0 +1,64 @@
+package org.onosproject.store.core.impl;
+
+import com.google.common.collect.Maps;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.Tools;
+import org.onosproject.core.IdBlock;
+import org.onosproject.core.IdBlockStore;
+import org.onosproject.store.service.AtomicCounter;
+import org.onosproject.store.service.StorageException;
+import org.onosproject.store.service.StorageService;
+import org.slf4j.Logger;
+
+import java.util.Map;
+
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * Implementation of {@code IdBlockStore} using {@code AtomicCounter}.
+ */
+@Component(immediate = true, enabled = true)
+@Service
+public class ConsistentIdBlockStore implements IdBlockStore {
+
+ // Retry budget for transient storage failures while advancing a counter.
+ private static final int MAX_TRIES = 5;
+ private static final int RETRY_DELAY_MS = 2_000;
+
+ private final Logger log = getLogger(getClass());
+ // One distributed counter per topic, created lazily and cached locally.
+ private final Map<String, AtomicCounter> topicCounters = Maps.newConcurrentMap();
+
+ // Each allocated block spans this many contiguous IDs (1,048,576).
+ private static final long DEFAULT_BLOCK_SIZE = 0x100000L;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected StorageService storageService;
+
+ @Activate
+ public void activate() {
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ log.info("Stopped");
+ }
+
+ /**
+ * Allocates the next ID block for the given topic. The block base is
+ * obtained by atomically advancing the topic's distributed counter by
+ * DEFAULT_BLOCK_SIZE; the call is retried up to MAX_TRIES times on
+ * StorageException before propagating the failure.
+ */
+ @Override
+ public IdBlock getIdBlock(String topic) {
+ AtomicCounter counter = topicCounters
+ .computeIfAbsent(topic,
+ name -> storageService.atomicCounterBuilder()
+ .withName(name)
+ .build());
+ Long blockBase = Tools.retryable(counter::getAndAdd,
+ StorageException.class,
+ MAX_TRIES,
+ RETRY_DELAY_MS).apply(DEFAULT_BLOCK_SIZE);
+ return new IdBlock(blockBase, DEFAULT_BLOCK_SIZE);
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/LogicalClockManager.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/LogicalClockManager.java
new file mode 100644
index 00000000..ccf0f326
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/LogicalClockManager.java
@@ -0,0 +1,51 @@
+package org.onosproject.store.core.impl;
+
+import static org.slf4j.LoggerFactory.getLogger;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onosproject.store.Timestamp;
+import org.onosproject.store.impl.LogicalTimestamp;
+import org.onosproject.store.service.AtomicCounter;
+import org.onosproject.store.service.LogicalClockService;
+import org.onosproject.store.service.StorageService;
+import org.slf4j.Logger;
+
+/**
+ * LogicalClockService implementation based on an AtomicCounter.
+ */
+@Component(immediate = true, enabled = true)
+@Service
+public class LogicalClockManager implements LogicalClockService {
+
+ private final Logger log = getLogger(getClass());
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected StorageService storageService;
+
+ private static final String SYSTEM_LOGICAL_CLOCK_COUNTER_NAME = "sys-clock-counter";
+ // Cluster-wide counter backing the logical clock; created on activation.
+ private AtomicCounter atomicCounter;
+
+ @Activate
+ public void activate() {
+ atomicCounter = storageService.atomicCounterBuilder()
+ .withName(SYSTEM_LOGICAL_CLOCK_COUNTER_NAME)
+ .withPartitionsDisabled()
+ .build();
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ log.info("Stopped");
+ }
+
+ // Each call atomically increments the shared counter, so issued
+ // timestamps are unique and totally ordered across the cluster.
+ @Override
+ public Timestamp getTimestamp() {
+ return new LogicalTimestamp(atomicCounter.incrementAndGet());
+ }
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/package-info.java
new file mode 100644
index 00000000..bb758b10
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/core/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementations of distributed application ID, ID block and logical clock stores.
+ */
+package org.onosproject.store.core.impl;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceAntiEntropyAdvertisement.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceAntiEntropyAdvertisement.java
new file mode 100644
index 00000000..491d1334
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceAntiEntropyAdvertisement.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.util.Map;
+
+import org.onosproject.cluster.NodeId;
+import org.onosproject.net.DeviceId;
+import org.onosproject.store.Timestamp;
+
+
+/**
+ * Device Advertisement message.
+ */
+public class DeviceAntiEntropyAdvertisement {
+
+ // Node that produced this advertisement.
+ private final NodeId sender;
+ // Latest known timestamp per device description fragment.
+ private final Map<DeviceFragmentId, Timestamp> deviceFingerPrints;
+ // Latest known timestamp per port description fragment.
+ private final Map<PortFragmentId, Timestamp> portFingerPrints;
+ // Per-device offline timestamps — NOTE(review): exact semantics are
+ // defined by the consuming store; confirm against the device store impl.
+ private final Map<DeviceId, Timestamp> offline;
+
+
+ public DeviceAntiEntropyAdvertisement(NodeId sender,
+ Map<DeviceFragmentId, Timestamp> devices,
+ Map<PortFragmentId, Timestamp> ports,
+ Map<DeviceId, Timestamp> offline) {
+ this.sender = checkNotNull(sender);
+ this.deviceFingerPrints = checkNotNull(devices);
+ this.portFingerPrints = checkNotNull(ports);
+ this.offline = checkNotNull(offline);
+ }
+
+ public NodeId sender() {
+ return sender;
+ }
+
+ public Map<DeviceFragmentId, Timestamp> deviceFingerPrints() {
+ return deviceFingerPrints;
+ }
+
+ public Map<PortFragmentId, Timestamp> ports() {
+ return portFingerPrints;
+ }
+
+ public Map<DeviceId, Timestamp> offline() {
+ return offline;
+ }
+
+ // For serializer: fields are nulled; Kryo repopulates them on read.
+ @SuppressWarnings("unused")
+ private DeviceAntiEntropyAdvertisement() {
+ this.sender = null;
+ this.deviceFingerPrints = null;
+ this.portFingerPrints = null;
+ this.offline = null;
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceAntiEntropyRequest.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceAntiEntropyRequest.java
new file mode 100644
index 00000000..a719a770
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceAntiEntropyRequest.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.util.Collection;
+
+import org.onosproject.cluster.NodeId;
+
+/**
+ * Message to request for other peers information.
+ */
+public class DeviceAntiEntropyRequest {
+
+ // Node requesting the information.
+ private final NodeId sender;
+ // Device description fragments the sender wants from the peer.
+ private final Collection<DeviceFragmentId> devices;
+ // Port description fragments the sender wants from the peer.
+ private final Collection<PortFragmentId> ports;
+
+ public DeviceAntiEntropyRequest(NodeId sender,
+ Collection<DeviceFragmentId> devices,
+ Collection<PortFragmentId> ports) {
+
+ this.sender = checkNotNull(sender);
+ this.devices = checkNotNull(devices);
+ this.ports = checkNotNull(ports);
+ }
+
+ public NodeId sender() {
+ return sender;
+ }
+
+ public Collection<DeviceFragmentId> devices() {
+ return devices;
+ }
+
+ public Collection<PortFragmentId> ports() {
+ return ports;
+ }
+
+ // For serializer: fields are nulled; Kryo repopulates them on read.
+ @SuppressWarnings("unused")
+ private DeviceAntiEntropyRequest() {
+ this.sender = null;
+ this.devices = null;
+ this.ports = null;
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceClockManager.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceClockManager.java
new file mode 100644
index 00000000..da5bd5de
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceClockManager.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.mastership.MastershipTerm;
+import org.onosproject.mastership.MastershipTermService;
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.device.DeviceClockService;
+import org.onosproject.store.Timestamp;
+import org.onosproject.store.impl.MastershipBasedTimestamp;
+import org.slf4j.Logger;
+
+/**
+ * Clock service to issue Timestamp based on Device Mastership.
+ */
+@Component(immediate = true)
+@Service
+public class DeviceClockManager implements DeviceClockService {
+
+ private final Logger log = getLogger(getClass());
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected MastershipTermService mastershipTermService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterService clusterService;
+
+ // Identity of this node; resolved once on activation.
+ protected NodeId localNodeId;
+
+ // Monotonic tick used as the minor component of issued timestamps.
+ private final AtomicLong ticker = new AtomicLong(0);
+
+ @Activate
+ public void activate() {
+ localNodeId = clusterService.getLocalNode().id();
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ log.info("Stopped");
+ }
+
+ /**
+ * Issues a timestamp composed of (mastership term, local tick).
+ *
+ * @throws IllegalStateException if this node is not the device's master
+ */
+ @Override
+ public Timestamp getTimestamp(DeviceId deviceId) {
+ MastershipTerm term = mastershipTermService.getMastershipTerm(deviceId);
+ if (term == null || !localNodeId.equals(term.master())) {
+ throw new IllegalStateException("Requesting timestamp for " + deviceId + " without mastership");
+ }
+ return new MastershipBasedTimestamp(term.termNumber(), ticker.incrementAndGet());
+ }
+
+ // True only when this node currently holds mastership for the device.
+ @Override
+ public boolean isTimestampAvailable(DeviceId deviceId) {
+ MastershipTerm term = mastershipTermService.getMastershipTerm(deviceId);
+ return term != null && localNodeId.equals(term.master());
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceDescriptions.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceDescriptions.java
new file mode 100644
index 00000000..fd7fcd80
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceDescriptions.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.onosproject.net.DefaultAnnotations.union;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.onosproject.net.PortNumber;
+import org.onosproject.net.SparseAnnotations;
+import org.onosproject.net.device.DefaultDeviceDescription;
+import org.onosproject.net.device.DefaultPortDescription;
+import org.onosproject.net.device.DeviceDescription;
+import org.onosproject.net.device.OchPortDescription;
+import org.onosproject.net.device.OduCltPortDescription;
+import org.onosproject.net.device.OmsPortDescription;
+import org.onosproject.net.device.PortDescription;
+import org.onosproject.store.Timestamp;
+import org.onosproject.store.impl.Timestamped;
+
+/*
+ * Collection of Description of a Device and Ports, given from a Provider.
+ */
+class DeviceDescriptions {
+
+ // Most recent device description; replaced wholesale on each put.
+ private volatile Timestamped<DeviceDescription> deviceDesc;
+
+ private final ConcurrentMap<PortNumber, Timestamped<PortDescription>> portDescs;
+
+ public DeviceDescriptions(Timestamped<DeviceDescription> desc) {
+ this.deviceDesc = checkNotNull(desc);
+ this.portDescs = new ConcurrentHashMap<>();
+ }
+
+ // Returns the newest timestamp across the device and all port descriptions.
+ public Timestamp getLatestTimestamp() {
+ Timestamp latest = deviceDesc.timestamp();
+ for (Timestamped<PortDescription> desc : portDescs.values()) {
+ if (desc.timestamp().compareTo(latest) > 0) {
+ latest = desc.timestamp();
+ }
+ }
+ return latest;
+ }
+
+ public Timestamped<DeviceDescription> getDeviceDesc() {
+ return deviceDesc;
+ }
+
+ public Timestamped<PortDescription> getPortDesc(PortNumber number) {
+ return portDescs.get(number);
+ }
+
+ // Unmodifiable view over the live map; reflects concurrent updates.
+ public Map<PortNumber, Timestamped<PortDescription>> getPortDescs() {
+ return Collections.unmodifiableMap(portDescs);
+ }
+
+ /**
+ * Puts DeviceDescription, merging annotations as necessary.
+ *
+ * @param newDesc new DeviceDescription
+ */
+ public void putDeviceDesc(Timestamped<DeviceDescription> newDesc) {
+ Timestamped<DeviceDescription> oldOne = deviceDesc;
+ Timestamped<DeviceDescription> newOne = newDesc;
+ if (oldOne != null) {
+ // Union keeps old annotations that the new description does not
+ // override; the new description's timestamp is retained.
+ SparseAnnotations merged = union(oldOne.value().annotations(),
+ newDesc.value().annotations());
+ newOne = new Timestamped<DeviceDescription>(
+ new DefaultDeviceDescription(newDesc.value(), merged),
+ newDesc.timestamp());
+ }
+ deviceDesc = newOne;
+ }
+
+ /**
+ * Puts PortDescription, merging annotations as necessary.
+ *
+ * @param newDesc new PortDescription
+ */
+ public void putPortDesc(Timestamped<PortDescription> newDesc) {
+ Timestamped<PortDescription> oldOne = portDescs.get(newDesc.value().portNumber());
+ Timestamped<PortDescription> newOne = newDesc;
+ if (oldOne != null) {
+ SparseAnnotations merged = union(oldOne.value().annotations(),
+ newDesc.value().annotations());
+ newOne = null;
+ // Rebuild the description with merged annotations, preserving the
+ // concrete subtype (OMS/OCH/ODUCLT) and its type-specific fields.
+ switch (newDesc.value().type()) {
+ case OMS:
+ OmsPortDescription omsDesc = (OmsPortDescription) (newDesc.value());
+ newOne = new Timestamped<PortDescription>(
+ new OmsPortDescription(
+ omsDesc, omsDesc.minFrequency(), omsDesc.maxFrequency(), omsDesc.grid(), merged),
+ newDesc.timestamp());
+ break;
+ case OCH:
+ OchPortDescription ochDesc = (OchPortDescription) (newDesc.value());
+ newOne = new Timestamped<PortDescription>(
+ new OchPortDescription(
+ ochDesc, ochDesc.signalType(), ochDesc.isTunable(), ochDesc.lambda(), merged),
+ newDesc.timestamp());
+ break;
+ case ODUCLT:
+ OduCltPortDescription ocDesc = (OduCltPortDescription) (newDesc.value());
+ newOne = new Timestamped<PortDescription>(
+ new OduCltPortDescription(
+ ocDesc, ocDesc.signalType(), merged),
+ newDesc.timestamp());
+ break;
+ default:
+ newOne = new Timestamped<PortDescription>(
+ new DefaultPortDescription(newDesc.value(), merged),
+ newDesc.timestamp());
+ }
+ }
+ portDescs.put(newOne.value().portNumber(), newOne);
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceFragmentId.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceFragmentId.java
new file mode 100644
index 00000000..214f4c23
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceFragmentId.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import java.util.Objects;
+
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.provider.ProviderId;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Identifier for a DeviceDescription from a Provider.
+ */
+public final class DeviceFragmentId {
+ // Intentionally public: immutable identifier components used as a map key.
+ public final ProviderId providerId;
+ public final DeviceId deviceId;
+
+ public DeviceFragmentId(DeviceId deviceId, ProviderId providerId) {
+ this.providerId = providerId;
+ this.deviceId = deviceId;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(providerId, deviceId);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (!(obj instanceof DeviceFragmentId)) {
+ return false;
+ }
+ DeviceFragmentId that = (DeviceFragmentId) obj;
+ return Objects.equals(this.deviceId, that.deviceId) &&
+ Objects.equals(this.providerId, that.providerId);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(getClass())
+ .add("providerId", providerId)
+ .add("deviceId", deviceId)
+ .toString();
+ }
+
+ // for serializer: fields are nulled; Kryo repopulates them on read
+ @SuppressWarnings("unused")
+ private DeviceFragmentId() {
+ this.providerId = null;
+ this.deviceId = null;
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceInjectedEvent.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceInjectedEvent.java
new file mode 100644
index 00000000..6f93963a
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceInjectedEvent.java
@@ -0,0 +1,49 @@
+package org.onosproject.store.device.impl;
+
+import com.google.common.base.MoreObjects;
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.device.DeviceDescription;
+import org.onosproject.net.provider.ProviderId;
+
+/**
+ * Message carrying a device description together with its provider and
+ * device identifiers. NOTE(review): routing semantics (e.g. delivery to the
+ * device's master) are defined by the sending store; confirm against usage.
+ */
+public class DeviceInjectedEvent {
+ private final ProviderId providerId;
+ private final DeviceId deviceId;
+ private final DeviceDescription deviceDescription;
+
+ protected DeviceInjectedEvent(
+ ProviderId providerId,
+ DeviceId deviceId,
+ DeviceDescription deviceDescription) {
+ this.providerId = providerId;
+ this.deviceId = deviceId;
+ this.deviceDescription = deviceDescription;
+ }
+
+ public DeviceId deviceId() {
+ return deviceId;
+ }
+
+ public ProviderId providerId() {
+ return providerId;
+ }
+
+ public DeviceDescription deviceDescription() {
+ return deviceDescription;
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(getClass())
+ .add("providerId", providerId)
+ .add("deviceId", deviceId)
+ .add("deviceDescription", deviceDescription)
+ .toString();
+ }
+
+ // for serializer: fields are nulled; Kryo repopulates them on read
+ protected DeviceInjectedEvent() {
+ this.providerId = null;
+ this.deviceId = null;
+ this.deviceDescription = null;
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceKey.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceKey.java
new file mode 100644
index 00000000..0896bf18
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/DeviceKey.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import java.util.Objects;
+
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.provider.ProviderId;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Composite key for DeviceDescriptions in ECDeviceStore: a device
+ * identifier scoped by the provider that supplied the description.
+ */
+public class DeviceKey {
+    private final ProviderId providerId;
+    private final DeviceId deviceId;
+
+    public DeviceKey(ProviderId providerId, DeviceId deviceId) {
+        this.providerId = providerId;
+        this.deviceId = deviceId;
+    }
+
+    /**
+     * Returns the provider identifier component of this key.
+     */
+    public ProviderId providerId() {
+        return providerId;
+    }
+
+    /**
+     * Returns the device identifier component of this key.
+     */
+    public DeviceId deviceId() {
+        return deviceId;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(providerId, deviceId);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == this) {
+            return true;
+        }
+        if (obj instanceof DeviceKey) {
+            DeviceKey other = (DeviceKey) obj;
+            return Objects.equals(deviceId, other.deviceId)
+                    && Objects.equals(providerId, other.providerId);
+        }
+        return false;
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(getClass())
+                .add("providerId", providerId)
+                .add("deviceId", deviceId)
+                .toString();
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/ECDeviceStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/ECDeviceStore.java
new file mode 100644
index 00000000..2dae55bb
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/ECDeviceStore.java
@@ -0,0 +1,784 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Verify.verify;
+import static org.onosproject.net.DefaultAnnotations.merge;
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.packet.ChassisId;
+import org.onlab.util.KryoNamespace;
+import org.onlab.util.SharedExecutors;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.mastership.MastershipService;
+import org.onosproject.mastership.MastershipTermService;
+import org.onosproject.net.Annotations;
+import org.onosproject.net.AnnotationsUtil;
+import org.onosproject.net.DefaultAnnotations;
+import org.onosproject.net.DefaultDevice;
+import org.onosproject.net.DefaultPort;
+import org.onosproject.net.Device;
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.MastershipRole;
+import org.onosproject.net.OchPort;
+import org.onosproject.net.OduCltPort;
+import org.onosproject.net.OmsPort;
+import org.onosproject.net.Port;
+import org.onosproject.net.PortNumber;
+import org.onosproject.net.Device.Type;
+import org.onosproject.net.device.DefaultPortStatistics;
+import org.onosproject.net.device.DeviceClockService;
+import org.onosproject.net.device.DeviceDescription;
+import org.onosproject.net.device.DeviceEvent;
+import org.onosproject.net.device.DeviceStore;
+import org.onosproject.net.device.DeviceStoreDelegate;
+import org.onosproject.net.device.OchPortDescription;
+import org.onosproject.net.device.OduCltPortDescription;
+import org.onosproject.net.device.OmsPortDescription;
+import org.onosproject.net.device.PortDescription;
+import org.onosproject.net.device.PortStatistics;
+import org.onosproject.net.provider.ProviderId;
+import org.onosproject.store.AbstractStore;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.impl.MastershipBasedTimestamp;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.serializers.KryoSerializer;
+import org.onosproject.store.serializers.custom.DistributedStoreSerializers;
+import org.onosproject.store.service.DistributedSet;
+import org.onosproject.store.service.EventuallyConsistentMap;
+import org.onosproject.store.service.EventuallyConsistentMapEvent;
+import org.onosproject.store.service.Serializer;
+import org.onosproject.store.service.SetEvent;
+import org.onosproject.store.service.SetEventListener;
+import org.onosproject.store.service.WallClockTimestamp;
+
+import static org.onosproject.store.service.EventuallyConsistentMapEvent.Type.PUT;
+import static org.onosproject.store.service.EventuallyConsistentMapEvent.Type.REMOVE;
+
+import org.onosproject.store.service.EventuallyConsistentMapListener;
+import org.onosproject.store.service.StorageService;
+import org.slf4j.Logger;
+
+import static org.onosproject.net.device.DeviceEvent.Type.*;
+import static org.onosproject.store.device.impl.GossipDeviceStoreMessageSubjects.DEVICE_INJECTED;
+import static org.onosproject.store.device.impl.GossipDeviceStoreMessageSubjects.DEVICE_REMOVE_REQ;
+import static org.onosproject.store.device.impl.GossipDeviceStoreMessageSubjects.PORT_INJECTED;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.Futures;
+
+/**
+ * Manages the inventory of devices using a {@code EventuallyConsistentMap}.
+ */
+@Component(immediate = true, enabled = false)
+@Service
+public class ECDeviceStore
+ extends AbstractStore<DeviceEvent, DeviceStoreDelegate>
+ implements DeviceStore {
+
+ private final Logger log = getLogger(getClass());
+
+ private static final String DEVICE_NOT_FOUND = "Device with ID %s not found";
+
+ private final Map<DeviceId, Device> devices = Maps.newConcurrentMap();
+ private final Map<DeviceId, Map<PortNumber, Port>> devicePorts = Maps.newConcurrentMap();
+ Set<DeviceId> pendingAvailableChangeUpdates = Sets.newConcurrentHashSet();
+
+ private EventuallyConsistentMap<DeviceKey, DeviceDescription> deviceDescriptions;
+ private EventuallyConsistentMap<PortKey, PortDescription> portDescriptions;
+ private EventuallyConsistentMap<DeviceId, Map<PortNumber, PortStatistics>> devicePortStats;
+ private EventuallyConsistentMap<DeviceId, Map<PortNumber, PortStatistics>> devicePortDeltaStats;
+
+ private DistributedSet<DeviceId> availableDevices;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected StorageService storageService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected MastershipService mastershipService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected MastershipTermService mastershipTermService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected DeviceClockService deviceClockService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterCommunicationService clusterCommunicator;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterService clusterService;
+
+ private NodeId localNodeId;
+ private EventuallyConsistentMapListener<DeviceKey, DeviceDescription> deviceUpdateListener =
+ new InternalDeviceChangeEventListener();
+ private EventuallyConsistentMapListener<PortKey, PortDescription> portUpdateListener =
+ new InternalPortChangeEventListener();
+ private final EventuallyConsistentMapListener<DeviceId, Map<PortNumber, PortStatistics>> portStatsListener =
+ new InternalPortStatsListener();
+ private final SetEventListener<DeviceId> deviceStatusTracker =
+ new InternalDeviceStatusTracker();
+
+    // Serializer for cluster messages (device/port injection events).
+    protected static final KryoSerializer SERIALIZER = new KryoSerializer() {
+        @Override
+        protected void setupKryoPool() {
+            serializerPool = KryoNamespace.newBuilder()
+                    .register(DistributedStoreSerializers.STORE_COMMON)
+                    .nextId(DistributedStoreSerializers.STORE_CUSTOM_BEGIN)
+                    .register(DeviceInjectedEvent.class)
+                    .register(PortInjectedEvent.class)
+                    .build();
+        }
+    };
+
+    // Serializer for the eventually-consistent maps. DeviceKey and PortKey
+    // were previously registered twice; each type is registered exactly once.
+    protected static final KryoNamespace.Builder SERIALIZER_BUILDER = KryoNamespace.newBuilder()
+            .register(KryoNamespaces.API)
+            .register(DeviceKey.class)
+            .register(PortKey.class)
+            .register(MastershipBasedTimestamp.class);
+
+ @Activate
+ public void activate() {
+ localNodeId = clusterService.getLocalNode().id();
+
+ deviceDescriptions = storageService.<DeviceKey, DeviceDescription>eventuallyConsistentMapBuilder()
+ .withName("onos-device-descriptions")
+ .withSerializer(SERIALIZER_BUILDER)
+ .withTimestampProvider((k, v) -> {
+ try {
+ return deviceClockService.getTimestamp(k.deviceId());
+ } catch (IllegalStateException e) {
+ return null;
+ }
+ }).build();
+
+ portDescriptions = storageService.<PortKey, PortDescription>eventuallyConsistentMapBuilder()
+ .withName("onos-port-descriptions")
+ .withSerializer(SERIALIZER_BUILDER)
+ .withTimestampProvider((k, v) -> {
+ try {
+ return deviceClockService.getTimestamp(k.deviceId());
+ } catch (IllegalStateException e) {
+ return null;
+ }
+ }).build();
+
+ devicePortStats = storageService.<DeviceId, Map<PortNumber, PortStatistics>>eventuallyConsistentMapBuilder()
+ .withName("onos-port-stats")
+ .withSerializer(SERIALIZER_BUILDER)
+ .withAntiEntropyPeriod(5, TimeUnit.SECONDS)
+ .withTimestampProvider((k, v) -> new WallClockTimestamp())
+ .withTombstonesDisabled()
+ .build();
+
+ devicePortDeltaStats = storageService.<DeviceId, Map<PortNumber, PortStatistics>>
+ eventuallyConsistentMapBuilder()
+ .withName("onos-port-stats-delta")
+ .withSerializer(SERIALIZER_BUILDER)
+ .withAntiEntropyPeriod(5, TimeUnit.SECONDS)
+ .withTimestampProvider((k, v) -> new WallClockTimestamp())
+ .withTombstonesDisabled()
+ .build();
+
+ clusterCommunicator.addSubscriber(DEVICE_INJECTED,
+ SERIALIZER::decode,
+ this::injectDevice,
+ SERIALIZER::encode,
+ SharedExecutors.getPoolThreadExecutor());
+
+ clusterCommunicator.addSubscriber(PORT_INJECTED,
+ SERIALIZER::decode,
+ this::injectPort,
+ SERIALIZER::encode,
+ SharedExecutors.getPoolThreadExecutor());
+
+ availableDevices = storageService.<DeviceId>setBuilder()
+ .withName("onos-online-devices")
+ .withSerializer(Serializer.using(KryoNamespaces.API))
+ .withPartitionsDisabled()
+ .withRelaxedReadConsistency()
+ .build();
+
+ deviceDescriptions.addListener(deviceUpdateListener);
+ portDescriptions.addListener(portUpdateListener);
+ devicePortStats.addListener(portStatsListener);
+ availableDevices.addListener(deviceStatusTracker);
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ devicePortStats.removeListener(portStatsListener);
+ deviceDescriptions.removeListener(deviceUpdateListener);
+ portDescriptions.removeListener(portUpdateListener);
+ availableDevices.removeListener(deviceStatusTracker);
+ devicePortStats.destroy();
+ devicePortDeltaStats.destroy();
+ deviceDescriptions.destroy();
+ portDescriptions.destroy();
+ devices.clear();
+ devicePorts.clear();
+ clusterCommunicator.removeSubscriber(DEVICE_INJECTED);
+ clusterCommunicator.removeSubscriber(PORT_INJECTED);
+ log.info("Stopped");
+ }
+
+ @Override
+ public Iterable<Device> getDevices() {
+ // Live view over the local device cache.
+ return devices.values();
+ }
+
+ @Override
+ public int getDeviceCount() {
+ return devices.size();
+ }
+
+ @Override
+ public Device getDevice(DeviceId deviceId) {
+ // Returns null when the device is not in the local cache.
+ return devices.get(deviceId);
+ }
+
+    /**
+     * Creates or updates a device from a provider-supplied description.
+     * Only the device's master mutates the description map; any other node
+     * forwards the request to the master over the cluster communicator.
+     */
+    @Override
+    public DeviceEvent createOrUpdateDevice(ProviderId providerId,
+                                            DeviceId deviceId,
+                                            DeviceDescription deviceDescription) {
+        NodeId master = mastershipService.getMasterFor(deviceId);
+        if (localNodeId.equals(master)) {
+            deviceDescriptions.put(new DeviceKey(providerId, deviceId), deviceDescription);
+            return refreshDeviceCache(providerId, deviceId);
+        } else {
+            // No master to forward to; drop the update rather than unicasting
+            // to a null node (same guard updatePorts() applies).
+            if (master == null) {
+                return null;
+            }
+            DeviceInjectedEvent deviceInjectedEvent = new DeviceInjectedEvent(providerId, deviceId, deviceDescription);
+            return Futures.getUnchecked(
+                    clusterCommunicator.sendAndReceive(deviceInjectedEvent,
+                            DEVICE_INJECTED,
+                            SERIALIZER::encode,
+                            SERIALIZER::decode,
+                            master));
+        }
+    }
+
+    /**
+     * Recomputes the cached Device from all of its provider-supplied
+     * descriptions and derives the resulting event.
+     *
+     * @param providerId provider that triggered the refresh
+     * @param deviceId device identifier
+     * @return DEVICE_ADDED or DEVICE_UPDATED event, or null if nothing changed
+     */
+    private DeviceEvent refreshDeviceCache(ProviderId providerId, DeviceId deviceId) {
+        AtomicReference<DeviceEvent.Type> eventType = new AtomicReference<>();
+        Device device = devices.compute(deviceId, (k, existingDevice) -> {
+            Device newDevice = composeDevice(deviceId);
+            if (existingDevice == null) {
+                eventType.set(DEVICE_ADDED);
+            } else {
+                // We allow only certain attributes to trigger update
+                boolean propertiesChanged =
+                        !Objects.equals(existingDevice.hwVersion(), newDevice.hwVersion()) ||
+                        !Objects.equals(existingDevice.swVersion(), newDevice.swVersion()) ||
+                        !Objects.equals(existingDevice.providerId(), newDevice.providerId());
+                boolean annotationsChanged =
+                        !AnnotationsUtil.isEqual(existingDevice.annotations(), newDevice.annotations());
+
+                // Primary providers can respond to all changes, but ancillary ones
+                // should respond only to annotation changes.
+                if ((providerId.isAncillary() && annotationsChanged) ||
+                        (!providerId.isAncillary() && (propertiesChanged || annotationsChanged))) {
+                    // Returning newDevice from this remapping function installs
+                    // it in the map; the previous devices.replace(...) call here
+                    // re-entered the map from within compute(), which
+                    // ConcurrentHashMap explicitly forbids, and was redundant.
+                    eventType.set(DEVICE_UPDATED);
+                }
+            }
+            return newDevice;
+        });
+        if (eventType.get() != null && !providerId.isAncillary()) {
+            markOnline(deviceId);
+        }
+        return eventType.get() != null ? new DeviceEvent(eventType.get(), device) : null;
+    }
+
+ /**
+ * Returns the identifiers of all providers that supplied a description
+ * for the given device.
+ * @param deviceId device identifier
+ * @return set of provider identifiers
+ */
+ private Set<ProviderId> getAllProviders(DeviceId deviceId) {
+ return deviceDescriptions.keySet()
+ .stream()
+ .filter(deviceKey -> deviceKey.deviceId().equals(deviceId))
+ .map(deviceKey -> deviceKey.providerId())
+ .collect(Collectors.toSet());
+ }
+
+ /**
+ * Returns the primary providerId for a device: the first non-ancillary
+ * provider, falling back to any provider when only ancillary ones exist.
+ * @param deviceId device identifier
+ * @return primary providerId, or null when the device has no providers
+ */
+ private ProviderId getPrimaryProviderId(DeviceId deviceId) {
+ Set<ProviderId> allProviderIds = getAllProviders(deviceId);
+ return allProviderIds.stream()
+ .filter(p -> !p.isAncillary())
+ .findFirst()
+ .orElse(Iterables.getFirst(allProviderIds, null));
+ }
+
+ /**
+ * Returns a Device, merging descriptions from multiple Providers.
+ *
+ * @param deviceId device identifier
+ * @return Device instance
+ */
+ private Device composeDevice(DeviceId deviceId) {
+
+ ProviderId primaryProviderId = getPrimaryProviderId(deviceId);
+ DeviceDescription primaryDeviceDescription =
+ deviceDescriptions.get(new DeviceKey(primaryProviderId, deviceId));
+
+ Type type = primaryDeviceDescription.type();
+ String manufacturer = primaryDeviceDescription.manufacturer();
+ String hwVersion = primaryDeviceDescription.hwVersion();
+ String swVersion = primaryDeviceDescription.swVersion();
+ String serialNumber = primaryDeviceDescription.serialNumber();
+ ChassisId chassisId = primaryDeviceDescription.chassisId();
+ DefaultAnnotations annotations = mergeAnnotations(deviceId);
+
+ return new DefaultDevice(primaryProviderId, deviceId, type, manufacturer,
+ hwVersion, swVersion, serialNumber,
+ chassisId, annotations);
+ }
+
+ // Removes the device from the local cache and drops every provider's
+ // description for it; returns DEVICE_REMOVED, or null if it was not cached.
+ private DeviceEvent purgeDeviceCache(DeviceId deviceId) {
+ Device removedDevice = devices.remove(deviceId);
+ if (removedDevice != null) {
+ getAllProviders(deviceId).forEach(p -> deviceDescriptions.remove(new DeviceKey(p, deviceId)));
+ return new DeviceEvent(DEVICE_REMOVED, removedDevice);
+ }
+ return null;
+ }
+
+ // Adds the device to the cluster-wide online set; returns true if it was
+ // not already present.
+ private boolean markOnline(DeviceId deviceId) {
+ return availableDevices.add(deviceId);
+ }
+
+ @Override
+ public DeviceEvent markOffline(DeviceId deviceId) {
+ availableDevices.remove(deviceId);
+ // Always returns null: the availableDevices set listener
+ // (InternalDeviceStatusTracker) raises DEVICE_AVAILABILITY_CHANGED.
+ return null;
+ }
+
+ @Override
+ public List<DeviceEvent> updatePorts(ProviderId providerId,
+ DeviceId deviceId,
+ List<PortDescription> descriptions) {
+ NodeId master = mastershipService.getMasterFor(deviceId);
+ List<DeviceEvent> deviceEvents = null;
+ if (localNodeId.equals(master)) {
+ descriptions.forEach(description -> {
+ PortKey portKey = new PortKey(providerId, deviceId, description.portNumber());
+ portDescriptions.put(portKey, description);
+ });
+ deviceEvents = refreshDevicePortCache(providerId, deviceId, Optional.empty());
+ } else {
+ if (master == null) {
+ return Collections.emptyList();
+ }
+ PortInjectedEvent portInjectedEvent = new PortInjectedEvent(providerId, deviceId, descriptions);
+ deviceEvents = Futures.getUnchecked(
+ clusterCommunicator.sendAndReceive(portInjectedEvent,
+ PORT_INJECTED,
+ SERIALIZER::encode,
+ SERIALIZER::decode,
+ master));
+ }
+ return deviceEvents == null ? Collections.emptyList() : deviceEvents;
+ }
+
+ private List<DeviceEvent> refreshDevicePortCache(ProviderId providerId,
+ DeviceId deviceId,
+ Optional<PortNumber> portNumber) {
+ Device device = devices.get(deviceId);
+ checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
+ List<DeviceEvent> events = Lists.newArrayList();
+
+ Map<PortNumber, Port> ports = devicePorts.computeIfAbsent(deviceId, key -> Maps.newConcurrentMap());
+ List<PortDescription> descriptions = Lists.newArrayList();
+ portDescriptions.entrySet().forEach(e -> {
+ PortKey key = e.getKey();
+ PortDescription value = e.getValue();
+ if (key.deviceId().equals(deviceId) && key.providerId().equals(providerId)) {
+ if (portNumber.isPresent()) {
+ if (portNumber.get().equals(key.portNumber())) {
+ descriptions.add(value);
+ }
+ } else {
+ descriptions.add(value);
+ }
+ }
+ });
+
+ for (PortDescription description : descriptions) {
+ final PortNumber number = description.portNumber();
+ ports.compute(number, (k, existingPort) -> {
+ Port newPort = composePort(device, number);
+ if (existingPort == null) {
+ events.add(new DeviceEvent(PORT_ADDED, device, newPort));
+ } else {
+ if (existingPort.isEnabled() != newPort.isEnabled() ||
+ existingPort.type() != newPort.type() ||
+ existingPort.portSpeed() != newPort.portSpeed() ||
+ !AnnotationsUtil.isEqual(existingPort.annotations(), newPort.annotations())) {
+ events.add(new DeviceEvent(PORT_UPDATED, device, newPort));
+ }
+ }
+ return newPort;
+ });
+ }
+
+ return events;
+ }
+
+ /**
+ * Returns a Port, merging descriptions from multiple Providers.
+ *
+ * @param device device the port is on
+ * @param number port number
+ * @return Port instance
+ */
+ private Port composePort(Device device, PortNumber number) {
+
+ Map<ProviderId, PortDescription> descriptions = Maps.newHashMap();
+ portDescriptions.entrySet().forEach(entry -> {
+ PortKey portKey = entry.getKey();
+ if (portKey.deviceId().equals(device.id()) && portKey.portNumber().equals(number)) {
+ descriptions.put(portKey.providerId(), entry.getValue());
+ }
+ });
+ ProviderId primary = getPrimaryProviderId(device.id());
+ PortDescription primaryDescription = descriptions.get(primary);
+
+ // if no primary, assume not enabled
+ boolean isEnabled = false;
+ DefaultAnnotations annotations = DefaultAnnotations.builder().build();
+ if (primaryDescription != null) {
+ isEnabled = primaryDescription.isEnabled();
+ annotations = merge(annotations, primaryDescription.annotations());
+ }
+ Port updated = null;
+ for (Entry<ProviderId, PortDescription> e : descriptions.entrySet()) {
+ if (e.getKey().equals(primary)) {
+ continue;
+ }
+ annotations = merge(annotations, e.getValue().annotations());
+ updated = buildTypedPort(device, number, isEnabled, e.getValue(), annotations);
+ }
+ if (primaryDescription == null) {
+ return updated == null ? new DefaultPort(device, number, false, annotations) : updated;
+ }
+ return updated == null
+ ? buildTypedPort(device, number, isEnabled, primaryDescription, annotations)
+ : updated;
+ }
+
+ private Port buildTypedPort(Device device, PortNumber number, boolean isEnabled,
+ PortDescription description, Annotations annotations) {
+ switch (description.type()) {
+ case OMS:
+ OmsPortDescription omsDesc = (OmsPortDescription) description;
+ return new OmsPort(device, number, isEnabled, omsDesc.minFrequency(),
+ omsDesc.maxFrequency(), omsDesc.grid(), annotations);
+ case OCH:
+ OchPortDescription ochDesc = (OchPortDescription) description;
+ return new OchPort(device, number, isEnabled, ochDesc.signalType(),
+ ochDesc.isTunable(), ochDesc.lambda(), annotations);
+ case ODUCLT:
+ OduCltPortDescription oduDesc = (OduCltPortDescription) description;
+ return new OduCltPort(device, number, isEnabled, oduDesc.signalType(), annotations);
+ default:
+ return new DefaultPort(device, number, isEnabled, description.type(),
+ description.portSpeed(), annotations);
+ }
+ }
+
+ @Override
+ public DeviceEvent updatePortStatus(ProviderId providerId,
+ DeviceId deviceId,
+ PortDescription portDescription) {
+ portDescriptions.put(new PortKey(providerId, deviceId, portDescription.portNumber()), portDescription);
+ List<DeviceEvent> events =
+ refreshDevicePortCache(providerId, deviceId, Optional.of(portDescription.portNumber()));
+ return Iterables.getFirst(events, null);
+ }
+
+ @Override
+ public List<Port> getPorts(DeviceId deviceId) {
+ return ImmutableList.copyOf(devicePorts.getOrDefault(deviceId, Maps.newHashMap()).values());
+ }
+
+ @Override
+ public Port getPort(DeviceId deviceId, PortNumber portNumber) {
+ return devicePorts.getOrDefault(deviceId, Maps.newHashMap()).get(portNumber);
+ }
+
+ @Override
+ public DeviceEvent updatePortStatistics(ProviderId providerId,
+ DeviceId deviceId,
+ Collection<PortStatistics> newStatsCollection) {
+
+ Map<PortNumber, PortStatistics> prvStatsMap = devicePortStats.get(deviceId);
+ Map<PortNumber, PortStatistics> newStatsMap = Maps.newHashMap();
+ Map<PortNumber, PortStatistics> deltaStatsMap = Maps.newHashMap();
+
+ if (prvStatsMap != null) {
+ for (PortStatistics newStats : newStatsCollection) {
+ PortNumber port = PortNumber.portNumber(newStats.port());
+ PortStatistics prvStats = prvStatsMap.get(port);
+ DefaultPortStatistics.Builder builder = DefaultPortStatistics.builder();
+ PortStatistics deltaStats = builder.build();
+ if (prvStats != null) {
+ deltaStats = calcDeltaStats(deviceId, prvStats, newStats);
+ }
+ deltaStatsMap.put(port, deltaStats);
+ newStatsMap.put(port, newStats);
+ }
+ } else {
+ for (PortStatistics newStats : newStatsCollection) {
+ PortNumber port = PortNumber.portNumber(newStats.port());
+ newStatsMap.put(port, newStats);
+ }
+ }
+ devicePortDeltaStats.put(deviceId, deltaStatsMap);
+ devicePortStats.put(deviceId, newStatsMap);
+ // DeviceEvent returns null because of InternalPortStatsListener usage
+ return null;
+ }
+
+ /**
+ * Calculates delta statistics by subtracting previous from new statistics.
+ *
+ * @param deviceId device identifier
+ * @param prvStats previous port statistics
+ * @param newStats new port statistics
+ * @return port statistics holding the per-field deltas
+ */
+ public PortStatistics calcDeltaStats(DeviceId deviceId, PortStatistics prvStats, PortStatistics newStats) {
+ // calculate time difference; durationNano carries the sub-second
+ // remainder, so borrow one second when the nano part wrapped around
+ long deltaStatsSec, deltaStatsNano;
+ if (newStats.durationNano() < prvStats.durationNano()) {
+ deltaStatsNano = newStats.durationNano() - prvStats.durationNano() + TimeUnit.SECONDS.toNanos(1);
+ deltaStatsSec = newStats.durationSec() - prvStats.durationSec() - 1L;
+ } else {
+ deltaStatsNano = newStats.durationNano() - prvStats.durationNano();
+ deltaStatsSec = newStats.durationSec() - prvStats.durationSec();
+ }
+ DefaultPortStatistics.Builder builder = DefaultPortStatistics.builder();
+ DefaultPortStatistics deltaStats = builder.setDeviceId(deviceId)
+ .setPort(newStats.port())
+ .setPacketsReceived(newStats.packetsReceived() - prvStats.packetsReceived())
+ .setPacketsSent(newStats.packetsSent() - prvStats.packetsSent())
+ .setBytesReceived(newStats.bytesReceived() - prvStats.bytesReceived())
+ .setBytesSent(newStats.bytesSent() - prvStats.bytesSent())
+ .setPacketsRxDropped(newStats.packetsRxDropped() - prvStats.packetsRxDropped())
+ .setPacketsTxDropped(newStats.packetsTxDropped() - prvStats.packetsTxDropped())
+ .setPacketsRxErrors(newStats.packetsRxErrors() - prvStats.packetsRxErrors())
+ .setPacketsTxErrors(newStats.packetsTxErrors() - prvStats.packetsTxErrors())
+ .setDurationSec(deltaStatsSec)
+ .setDurationNano(deltaStatsNano)
+ .build();
+ return deltaStats;
+ }
+
+ @Override
+ public List<PortStatistics> getPortStatistics(DeviceId deviceId) {
+ Map<PortNumber, PortStatistics> portStats = devicePortStats.get(deviceId);
+ if (portStats == null) {
+ return Collections.emptyList();
+ }
+ return ImmutableList.copyOf(portStats.values());
+ }
+
+ @Override
+ public List<PortStatistics> getPortDeltaStatistics(DeviceId deviceId) {
+ Map<PortNumber, PortStatistics> portStats = devicePortDeltaStats.get(deviceId);
+ if (portStats == null) {
+ return Collections.emptyList();
+ }
+ return ImmutableList.copyOf(portStats.values());
+ }
+
+ @Override
+ public boolean isAvailable(DeviceId deviceId) {
+ // Availability is tracked in the cluster-wide distributed set.
+ return availableDevices.contains(deviceId);
+ }
+
+ @Override
+ public Iterable<Device> getAvailableDevices() {
+ // Map online ids to cached Devices, dropping ids not (yet) in the cache.
+ return Iterables.filter(Iterables.transform(availableDevices, devices::get), d -> d != null);
+ }
+
+ /**
+ * Removes the device, forwarding the request to the device's master when
+ * one exists; otherwise this node temporarily acquires mastership,
+ * performs the removal, and relinquishes the role afterwards.
+ *
+ * @param deviceId device identifier
+ * @return DEVICE_REMOVED event, or null when forwarded or nothing removed
+ */
+ @Override
+ public DeviceEvent removeDevice(DeviceId deviceId) {
+ NodeId master = mastershipService.getMasterFor(deviceId);
+ // If a master exists, forward the request to it; if there is none,
+ // try to become master and process the removal locally.
+ boolean relinquishAtEnd = false;
+ if (master == null) {
+ // Only relinquish later if we held no role before this call.
+ final MastershipRole myRole = mastershipService.getLocalRole(deviceId);
+ if (myRole != MastershipRole.NONE) {
+ relinquishAtEnd = true;
+ }
+ log.debug("Temporarily requesting role for {} to remove", deviceId);
+ MastershipRole role = Futures.getUnchecked(mastershipService.requestRoleFor(deviceId));
+ if (role == MastershipRole.MASTER) {
+ master = localNodeId;
+ }
+ }
+
+ if (!localNodeId.equals(master)) {
+ log.debug("{} has control of {}, forwarding remove request",
+ master, deviceId);
+
+ // Fire-and-forget unicast; failures are only logged.
+ clusterCommunicator.unicast(deviceId, DEVICE_REMOVE_REQ, SERIALIZER::encode, master)
+ .whenComplete((r, e) -> {
+ if (e != null) {
+ log.error("Failed to forward {} remove request to its master", deviceId, e);
+ }
+ });
+ return null;
+ }
+
+ // I have control..
+ DeviceEvent event = null;
+ final DeviceKey deviceKey = new DeviceKey(getPrimaryProviderId(deviceId), deviceId);
+ DeviceDescription removedDeviceDescription =
+ deviceDescriptions.remove(deviceKey);
+ if (removedDeviceDescription != null) {
+ event = purgeDeviceCache(deviceId);
+ }
+
+ if (relinquishAtEnd) {
+ log.debug("Relinquishing temporary role acquired for {}", deviceId);
+ mastershipService.relinquishMastership(deviceId);
+ }
+ return event;
+ }
+
+ private DeviceEvent injectDevice(DeviceInjectedEvent event) {
+ return createOrUpdateDevice(event.providerId(), event.deviceId(), event.deviceDescription());
+ }
+
+ private List<DeviceEvent> injectPort(PortInjectedEvent event) {
+ return updatePorts(event.providerId(), event.deviceId(), event.portDescriptions());
+ }
+
+ private DefaultAnnotations mergeAnnotations(DeviceId deviceId) {
+ ProviderId primaryProviderId = getPrimaryProviderId(deviceId);
+ DeviceDescription primaryDeviceDescription =
+ deviceDescriptions.get(new DeviceKey(primaryProviderId, deviceId));
+ DefaultAnnotations annotations = DefaultAnnotations.builder().build();
+ annotations = merge(annotations, primaryDeviceDescription.annotations());
+ for (ProviderId providerId : getAllProviders(deviceId)) {
+ if (!providerId.equals(primaryProviderId)) {
+ annotations = merge(annotations,
+ deviceDescriptions.get(new DeviceKey(providerId, deviceId)).annotations());
+ }
+ }
+ return annotations;
+ }
+
+ private class InternalDeviceStatusTracker implements SetEventListener<DeviceId> {
+ @Override
+ public void event(SetEvent<DeviceId> event) {
+ final DeviceId deviceId = event.entry();
+ final Device device = devices.get(deviceId);
+ if (device != null) {
+ notifyDelegate(new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, device));
+ } else {
+ pendingAvailableChangeUpdates.add(deviceId);
+ }
+ }
+ }
+
+ private class InternalDeviceChangeEventListener
+ implements EventuallyConsistentMapListener<DeviceKey, DeviceDescription> {
+ @Override
+ public void event(EventuallyConsistentMapEvent<DeviceKey, DeviceDescription> event) {
+ DeviceId deviceId = event.key().deviceId();
+ ProviderId providerId = event.key().providerId();
+ if (event.type() == PUT) {
+ notifyDelegate(refreshDeviceCache(providerId, deviceId));
+ if (pendingAvailableChangeUpdates.remove(deviceId)) {
+ notifyDelegate(new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, devices.get(deviceId)));
+ }
+ } else if (event.type() == REMOVE) {
+ notifyDelegate(purgeDeviceCache(deviceId));
+ }
+ }
+ }
+
+ private class InternalPortChangeEventListener
+ implements EventuallyConsistentMapListener<PortKey, PortDescription> {
+ @Override
+ public void event(EventuallyConsistentMapEvent<PortKey, PortDescription> event) {
+ DeviceId deviceId = event.key().deviceId();
+ ProviderId providerId = event.key().providerId();
+ PortNumber portNumber = event.key().portNumber();
+ if (event.type() == PUT) {
+ if (devices.containsKey(deviceId)) {
+ List<DeviceEvent> events = refreshDevicePortCache(providerId, deviceId, Optional.of(portNumber));
+ for (DeviceEvent deviceEvent : events) {
+ notifyDelegate(deviceEvent);
+ }
+ }
+ } else if (event.type() == REMOVE) {
+ log.warn("Unexpected port removed event");
+ }
+ }
+ }
+
+    /**
+     * Raises PORT_STATS_UPDATED whenever new statistics are put into the
+     * devicePortStats map for a locally cached device.
+     */
+    private class InternalPortStatsListener
+            implements EventuallyConsistentMapListener<DeviceId, Map<PortNumber, PortStatistics>> {
+        @Override
+        public void event(EventuallyConsistentMapEvent<DeviceId, Map<PortNumber, PortStatistics>> event) {
+            if (event.type() == PUT) {
+                Device device = devices.get(event.key());
+                if (device != null) {
+                    // Use notifyDelegate, as the other listeners in this store
+                    // do, instead of calling delegate.notify directly, which
+                    // throws NPE when no delegate has been set.
+                    notifyDelegate(new DeviceEvent(PORT_STATS_UPDATED, device));
+                }
+            }
+        }
+    }
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/GossipDeviceStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/GossipDeviceStore.java
new file mode 100644
index 00000000..63456433
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/GossipDeviceStore.java
@@ -0,0 +1,1670 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import com.google.common.base.Function;
+import com.google.common.collect.FluentIterable;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.packet.ChassisId;
+import org.onlab.util.KryoNamespace;
+import org.onlab.util.NewConcurrentHashMap;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.ControllerNode;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.mastership.MastershipService;
+import org.onosproject.mastership.MastershipTerm;
+import org.onosproject.mastership.MastershipTermService;
+import org.onosproject.net.Annotations;
+import org.onosproject.net.AnnotationsUtil;
+import org.onosproject.net.DefaultAnnotations;
+import org.onosproject.net.DefaultDevice;
+import org.onosproject.net.DefaultPort;
+import org.onosproject.net.Device;
+import org.onosproject.net.Device.Type;
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.MastershipRole;
+import org.onosproject.net.OchPort;
+import org.onosproject.net.OduCltPort;
+import org.onosproject.net.OmsPort;
+import org.onosproject.net.Port;
+import org.onosproject.net.PortNumber;
+import org.onosproject.net.device.DefaultPortStatistics;
+import org.onosproject.net.device.DeviceClockService;
+import org.onosproject.net.device.DeviceDescription;
+import org.onosproject.net.device.DeviceEvent;
+import org.onosproject.net.device.DeviceStore;
+import org.onosproject.net.device.DeviceStoreDelegate;
+import org.onosproject.net.device.OchPortDescription;
+import org.onosproject.net.device.OduCltPortDescription;
+import org.onosproject.net.device.OmsPortDescription;
+import org.onosproject.net.device.PortDescription;
+import org.onosproject.net.device.PortStatistics;
+import org.onosproject.net.provider.ProviderId;
+import org.onosproject.store.AbstractStore;
+import org.onosproject.store.Timestamp;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.cluster.messaging.ClusterMessage;
+import org.onosproject.store.cluster.messaging.ClusterMessageHandler;
+import org.onosproject.store.cluster.messaging.MessageSubject;
+import org.onosproject.store.impl.Timestamped;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.serializers.KryoSerializer;
+import org.onosproject.store.serializers.custom.DistributedStoreSerializers;
+import org.onosproject.store.service.EventuallyConsistentMap;
+import org.onosproject.store.service.EventuallyConsistentMapEvent;
+import org.onosproject.store.service.EventuallyConsistentMapListener;
+import org.onosproject.store.service.MultiValuedTimestamp;
+import org.onosproject.store.service.StorageService;
+import org.onosproject.store.service.WallClockTimestamp;
+import org.slf4j.Logger;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Predicates.notNull;
+import static com.google.common.base.Verify.verify;
+import static java.util.concurrent.Executors.newSingleThreadScheduledExecutor;
+import static org.apache.commons.lang3.concurrent.ConcurrentUtils.createIfAbsentUnchecked;
+import static org.onlab.util.Tools.groupedThreads;
+import static org.onlab.util.Tools.minPriority;
+import static org.onosproject.cluster.ControllerNodeToNodeId.toNodeId;
+import static org.onosproject.net.DefaultAnnotations.merge;
+import static org.onosproject.net.device.DeviceEvent.Type.*;
+import static org.onosproject.store.device.impl.GossipDeviceStoreMessageSubjects.*;
+import static org.onosproject.store.service.EventuallyConsistentMapEvent.Type.PUT;
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * Manages inventory of infrastructure devices using gossip protocol to distribute
+ * information.
+ */
+@Component(immediate = true)
+@Service
+public class GossipDeviceStore
+ extends AbstractStore<DeviceEvent, DeviceStoreDelegate>
+ implements DeviceStore {
+
+ private final Logger log = getLogger(getClass());
+
+ private static final String DEVICE_NOT_FOUND = "Device with ID %s not found";
+ // Timeout in milliseconds to process device or ports on remote master node
+ private static final int REMOTE_MASTER_TIMEOUT = 1000;
+
+ // innerMap is used to lock a Device, thus instance should never be replaced.
+ // collection of Description given from various providers
+ private final ConcurrentMap<DeviceId, Map<ProviderId, DeviceDescriptions>>
+ deviceDescs = Maps.newConcurrentMap();
+
+ // cache of Device and Ports generated by compositing descriptions from providers
+ private final ConcurrentMap<DeviceId, Device> devices = Maps.newConcurrentMap();
+ private final ConcurrentMap<DeviceId, ConcurrentMap<PortNumber, Port>> devicePorts = Maps.newConcurrentMap();
+
+ // cluster-wide port statistics (absolute and per-interval delta), replicated
+ // via eventually-consistent maps built in activate()
+ private EventuallyConsistentMap<DeviceId, Map<PortNumber, PortStatistics>> devicePortStats;
+ private EventuallyConsistentMap<DeviceId, Map<PortNumber, PortStatistics>> devicePortDeltaStats;
+ private final EventuallyConsistentMapListener<DeviceId, Map<PortNumber, PortStatistics>>
+ portStatsListener = new InternalPortStatsListener();
+
+ // to be updated under Device lock
+ private final Map<DeviceId, Timestamp> offline = Maps.newHashMap();
+ private final Map<DeviceId, Timestamp> removalRequest = Maps.newHashMap();
+
+ // available(=UP) devices
+ private final Set<DeviceId> availableDevices = Sets.newConcurrentHashSet();
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected DeviceClockService deviceClockService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected StorageService storageService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterCommunicationService clusterCommunicator;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterService clusterService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected MastershipService mastershipService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected MastershipTermService termService;
+
+
+ // Kryo serializer for all gossip messages exchanged between peers; extends
+ // the common store serializers with this store's internal event types.
+ protected static final KryoSerializer SERIALIZER = new KryoSerializer() {
+ @Override
+ protected void setupKryoPool() {
+ serializerPool = KryoNamespace.newBuilder()
+ .register(DistributedStoreSerializers.STORE_COMMON)
+ .nextId(DistributedStoreSerializers.STORE_CUSTOM_BEGIN)
+ .register(new InternalDeviceEventSerializer(), InternalDeviceEvent.class)
+ .register(new InternalDeviceOfflineEventSerializer(), InternalDeviceOfflineEvent.class)
+ .register(InternalDeviceRemovedEvent.class)
+ .register(new InternalPortEventSerializer(), InternalPortEvent.class)
+ .register(new InternalPortStatusEventSerializer(), InternalPortStatusEvent.class)
+ .register(DeviceAntiEntropyAdvertisement.class)
+ .register(DeviceFragmentId.class)
+ .register(PortFragmentId.class)
+ .register(DeviceInjectedEvent.class)
+ .register(PortInjectedEvent.class)
+ .build();
+ }
+ };
+
+ // foreground executor for handling incoming gossip messages
+ private ExecutorService executor;
+
+ // low-priority executor running the periodic anti-entropy task
+ private ScheduledExecutorService backgroundExecutor;
+
+ // TODO make these anti-entropy parameters configurable
+ private long initialDelaySec = 5;
+ private long periodSec = 5;
+
+ // Component activation: wires up gossip message subscribers, starts the
+ // anti-entropy task, and builds the replicated port-statistics maps.
+ @Activate
+ public void activate() {
+ executor = Executors.newCachedThreadPool(groupedThreads("onos/device", "fg-%d"));
+
+ backgroundExecutor =
+ newSingleThreadScheduledExecutor(minPriority(groupedThreads("onos/device", "bg-%d")));
+
+ // Foreground subscribers handle peer updates for devices and ports.
+ clusterCommunicator.addSubscriber(
+ GossipDeviceStoreMessageSubjects.DEVICE_UPDATE, new InternalDeviceEventListener(), executor);
+ clusterCommunicator.addSubscriber(
+ GossipDeviceStoreMessageSubjects.DEVICE_OFFLINE,
+ new InternalDeviceOfflineEventListener(),
+ executor);
+ clusterCommunicator.addSubscriber(DEVICE_REMOVE_REQ,
+ new InternalRemoveRequestListener(),
+ executor);
+ clusterCommunicator.addSubscriber(
+ GossipDeviceStoreMessageSubjects.DEVICE_REMOVED, new InternalDeviceRemovedEventListener(), executor);
+ clusterCommunicator.addSubscriber(
+ GossipDeviceStoreMessageSubjects.PORT_UPDATE, new InternalPortEventListener(), executor);
+ clusterCommunicator.addSubscriber(
+ GossipDeviceStoreMessageSubjects.PORT_STATUS_UPDATE, new InternalPortStatusEventListener(), executor);
+ // Anti-entropy advertisements are processed on the background executor
+ // so they cannot starve foreground event handling.
+ clusterCommunicator.addSubscriber(
+ GossipDeviceStoreMessageSubjects.DEVICE_ADVERTISE,
+ new InternalDeviceAdvertisementListener(),
+ backgroundExecutor);
+ clusterCommunicator.addSubscriber(
+ GossipDeviceStoreMessageSubjects.DEVICE_INJECTED, new DeviceInjectedEventListener(), executor);
+ clusterCommunicator.addSubscriber(
+ GossipDeviceStoreMessageSubjects.PORT_INJECTED, new PortInjectedEventListener(), executor);
+
+ // start anti-entropy thread
+ backgroundExecutor.scheduleAtFixedRate(new SendAdvertisementTask(),
+ initialDelaySec, periodSec, TimeUnit.SECONDS);
+
+ // Create a distributed map for port stats.
+ KryoNamespace.Builder deviceDataSerializer = KryoNamespace.newBuilder()
+ .register(KryoNamespaces.API)
+ .register(DefaultPortStatistics.class)
+ .register(DeviceId.class)
+ .register(MultiValuedTimestamp.class)
+ .register(WallClockTimestamp.class);
+
+ devicePortStats = storageService.<DeviceId, Map<PortNumber, PortStatistics>>eventuallyConsistentMapBuilder()
+ .withName("port-stats")
+ .withSerializer(deviceDataSerializer)
+ .withAntiEntropyPeriod(5, TimeUnit.SECONDS)
+ .withTimestampProvider((k, v) -> new WallClockTimestamp())
+ .withTombstonesDisabled()
+ .build();
+ devicePortDeltaStats = storageService.<DeviceId, Map<PortNumber, PortStatistics>>
+ eventuallyConsistentMapBuilder()
+ .withName("port-stats-delta")
+ .withSerializer(deviceDataSerializer)
+ .withAntiEntropyPeriod(5, TimeUnit.SECONDS)
+ .withTimestampProvider((k, v) -> new WallClockTimestamp())
+ .withTombstonesDisabled()
+ .build();
+ devicePortStats.addListener(portStatsListener);
+ log.info("Started");
+ }
+
+ // Component deactivation: tears down the replicated maps, stops both
+ // executors, and clears all local caches.
+ @Deactivate
+ public void deactivate() {
+ // Detach the listener registered in activate() before destroying the map;
+ // otherwise a component restart leaks the old listener registration.
+ devicePortStats.removeListener(portStatsListener);
+ devicePortStats.destroy();
+ devicePortDeltaStats.destroy();
+ executor.shutdownNow();
+
+ backgroundExecutor.shutdownNow();
+ try {
+ if (!backgroundExecutor.awaitTermination(5, TimeUnit.SECONDS)) {
+ log.error("Timeout during executor shutdown");
+ }
+ } catch (InterruptedException e) {
+ log.error("Error during executor shutdown", e);
+ // Restore the interrupt status so callers can observe it.
+ Thread.currentThread().interrupt();
+ }
+
+ deviceDescs.clear();
+ devices.clear();
+ devicePorts.clear();
+ availableDevices.clear();
+ log.info("Stopped");
+ }
+
+ // Returns the number of devices in the local composed-device cache.
+ @Override
+ public int getDeviceCount() {
+ return devices.size();
+ }
+
+ // Returns a read-only live view over all cached devices.
+ @Override
+ public Iterable<Device> getDevices() {
+ return Collections.unmodifiableCollection(devices.values());
+ }
+
+ // Returns only the devices currently marked available (=UP).
+ @Override
+ public Iterable<Device> getAvailableDevices() {
+ return FluentIterable.from(getDevices())
+ .filter(input -> isAvailable(input.id()));
+ }
+
+ // Returns the cached device, or null if this node has not seen it.
+ @Override
+ public Device getDevice(DeviceId deviceId) {
+ return devices.get(deviceId);
+ }
+
+ // Creates or updates a device. Only the master of the device applies the
+ // change locally (and gossips it to peers); a non-master forwards the raw
+ // description to the master instead.
+ @Override
+ public synchronized DeviceEvent createOrUpdateDevice(ProviderId providerId,
+ DeviceId deviceId,
+ DeviceDescription deviceDescription) {
+ NodeId localNode = clusterService.getLocalNode().id();
+ NodeId deviceNode = mastershipService.getMasterFor(deviceId);
+
+ // Process device update only if we're the master,
+ // otherwise signal the actual master.
+ DeviceEvent deviceEvent = null;
+ if (localNode.equals(deviceNode)) {
+
+ final Timestamp newTimestamp = deviceClockService.getTimestamp(deviceId);
+ final Timestamped<DeviceDescription> deltaDesc = new Timestamped<>(deviceDescription, newTimestamp);
+ final Timestamped<DeviceDescription> mergedDesc;
+ final Map<ProviderId, DeviceDescriptions> device = getOrCreateDeviceDescriptionsMap(deviceId);
+
+ // The descriptions map doubles as the per-device lock; read the merged
+ // description under the same lock the update was applied under.
+ synchronized (device) {
+ deviceEvent = createOrUpdateDeviceInternal(providerId, deviceId, deltaDesc);
+ mergedDesc = device.get(providerId).getDeviceDesc();
+ }
+
+ if (deviceEvent != null) {
+ log.debug("Notifying peers of a device update topology event for providerId: {} and deviceId: {}",
+ providerId, deviceId);
+ notifyPeers(new InternalDeviceEvent(providerId, deviceId, mergedDesc));
+ }
+
+ } else {
+ // FIXME Temporary hack for NPE (ONOS-1171).
+ // Proper fix is to implement forwarding to master on ConfigProvider
+ // redo ONOS-490
+ if (deviceNode == null) {
+ // silently ignore
+ return null;
+ }
+
+
+ DeviceInjectedEvent deviceInjectedEvent = new DeviceInjectedEvent(
+ providerId, deviceId, deviceDescription);
+
+ // TODO check unicast return value
+ clusterCommunicator.unicast(deviceInjectedEvent, DEVICE_INJECTED, SERIALIZER::encode, deviceNode);
+ /* error log:
+ log.warn("Failed to process injected device id: {} desc: {} " +
+ "(cluster messaging failed: {})",
+ deviceId, deviceDescription, e);
+ */
+ }
+
+ return deviceEvent;
+ }
+
+ // Applies a timestamped device description under the per-device lock and
+ // returns the resulting event (DEVICE_ADDED / DEVICE_UPDATED), or null if
+ // the description is outdated or produces no observable change.
+ private DeviceEvent createOrUpdateDeviceInternal(ProviderId providerId,
+ DeviceId deviceId,
+ Timestamped<DeviceDescription> deltaDesc) {
+
+ // Collection of DeviceDescriptions for a Device
+ Map<ProviderId, DeviceDescriptions> device
+ = getOrCreateDeviceDescriptionsMap(deviceId);
+
+ synchronized (device) {
+ // locking per device
+
+ // A removal request newer than this description wins.
+ if (isDeviceRemoved(deviceId, deltaDesc.timestamp())) {
+ log.debug("Ignoring outdated event: {}", deltaDesc);
+ return null;
+ }
+
+ DeviceDescriptions descs = getOrCreateProviderDeviceDescriptions(device, providerId, deltaDesc);
+
+ final Device oldDevice = devices.get(deviceId);
+ final Device newDevice;
+
+ // Identity check covers the just-created case where deltaDesc became
+ // the initial description inside getOrCreateProviderDeviceDescriptions.
+ if (deltaDesc == descs.getDeviceDesc() ||
+ deltaDesc.isNewer(descs.getDeviceDesc())) {
+ // on new device or valid update
+ descs.putDeviceDesc(deltaDesc);
+ newDevice = composeDevice(deviceId, device);
+ } else {
+ // outdated event, ignored.
+ return null;
+ }
+ if (oldDevice == null) {
+ // ADD
+ return createDevice(providerId, newDevice, deltaDesc.timestamp());
+ } else {
+ // UPDATE or ignore (no change or stale)
+ return updateDevice(providerId, oldDevice, newDevice, deltaDesc.timestamp());
+ }
+ }
+ }
+
+ // Creates the device and returns the appropriate event if necessary.
+ // Guarded by deviceDescs value (=Device lock)
+ private DeviceEvent createDevice(ProviderId providerId,
+ Device newDevice, Timestamp timestamp) {
+
+ // update composed device cache
+ Device oldDevice = devices.putIfAbsent(newDevice.id(), newDevice);
+ verify(oldDevice == null,
+ "Unexpected Device in cache. PID:%s [old=%s, new=%s]",
+ providerId, oldDevice, newDevice);
+
+ // Only primary providers affect availability; ancillary providers
+ // (e.g. annotation-only sources) must not mark a device online.
+ if (!providerId.isAncillary()) {
+ markOnline(newDevice.id(), timestamp);
+ }
+
+ return new DeviceEvent(DeviceEvent.Type.DEVICE_ADDED, newDevice, null);
+ }
+
+ // Updates the device and returns the appropriate event if necessary.
+ // Guarded by deviceDescs value (=Device lock)
+ private DeviceEvent updateDevice(ProviderId providerId,
+ Device oldDevice,
+ Device newDevice, Timestamp newTimestamp) {
+ // We allow only certain attributes to trigger update
+ boolean propertiesChanged =
+ !Objects.equals(oldDevice.hwVersion(), newDevice.hwVersion()) ||
+ !Objects.equals(oldDevice.swVersion(), newDevice.swVersion()) ||
+ !Objects.equals(oldDevice.providerId(), newDevice.providerId());
+ boolean annotationsChanged =
+ !AnnotationsUtil.isEqual(oldDevice.annotations(), newDevice.annotations());
+
+ // Primary providers can respond to all changes, but ancillary ones
+ // should respond only to annotation changes.
+ if ((providerId.isAncillary() && annotationsChanged) ||
+ (!providerId.isAncillary() && (propertiesChanged || annotationsChanged))) {
+ boolean replaced = devices.replace(newDevice.id(), oldDevice, newDevice);
+ // The if-wrapper keeps the verify() argument evaluation (including a
+ // devices.get lookup) off the success path; verify only fires when
+ // the compare-and-replace unexpectedly failed.
+ if (!replaced) {
+ verify(replaced,
+ "Replacing devices cache failed. PID:%s [expected:%s, found:%s, new=%s]",
+ providerId, oldDevice, devices.get(newDevice.id())
+ , newDevice);
+ }
+ if (!providerId.isAncillary()) {
+ boolean wasOnline = availableDevices.contains(newDevice.id());
+ markOnline(newDevice.id(), newTimestamp);
+ // A device that came back online also emits an availability event.
+ if (!wasOnline) {
+ notifyDelegateIfNotNull(new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, newDevice, null));
+ }
+ }
+
+ return new DeviceEvent(DeviceEvent.Type.DEVICE_UPDATED, newDevice, null);
+ }
+ return null;
+ }
+
+ // Marks the device offline with a fresh timestamp; on an actual state
+ // change, gossips the offline event to peers.
+ @Override
+ public DeviceEvent markOffline(DeviceId deviceId) {
+ final Timestamp timestamp = deviceClockService.getTimestamp(deviceId);
+ final DeviceEvent event = markOfflineInternal(deviceId, timestamp);
+ if (event != null) {
+ log.debug("Notifying peers of a device offline topology event for deviceId: {} {}",
+ deviceId, timestamp);
+ notifyPeers(new InternalDeviceOfflineEvent(deviceId, timestamp));
+ }
+ return event;
+ }
+
+ // Applies the offline request under the per-device lock; returns the
+ // availability-changed event, or null if the request was outdated or the
+ // device was already offline/unknown.
+ private DeviceEvent markOfflineInternal(DeviceId deviceId, Timestamp timestamp) {
+
+ Map<ProviderId, DeviceDescriptions> providerDescs
+ = getOrCreateDeviceDescriptionsMap(deviceId);
+
+ // locking device
+ synchronized (providerDescs) {
+
+ // accept off-line if given timestamp is newer than
+ // the latest Timestamp from Primary provider
+ DeviceDescriptions primDescs = getPrimaryDescriptions(providerDescs);
+ Timestamp lastTimestamp = primDescs.getLatestTimestamp();
+ if (timestamp.compareTo(lastTimestamp) <= 0) {
+ // outdated event ignore
+ return null;
+ }
+
+ // Record the offline timestamp even if the device is not cached yet,
+ // so later stale online updates are rejected by markOnline().
+ offline.put(deviceId, timestamp);
+
+ Device device = devices.get(deviceId);
+ if (device == null) {
+ return null;
+ }
+ boolean removed = availableDevices.remove(deviceId);
+ if (removed) {
+ return new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, device, null);
+ }
+ return null;
+ }
+ }
+
+ /**
+ * Marks the device as available if the given timestamp is not outdated,
+ * compared to the time the device has been marked offline.
+ *
+ * @param deviceId identifier of the device
+ * @param timestamp of the event triggering this change.
+ * @return true if availability change request was accepted and changed the state
+ */
+ // Guarded by deviceDescs value (=Device lock)
+ private boolean markOnline(DeviceId deviceId, Timestamp timestamp) {
+ // accept on-line if given timestamp is newer than
+ // the latest offline request Timestamp
+ Timestamp offlineTimestamp = offline.get(deviceId);
+ if (offlineTimestamp == null ||
+ offlineTimestamp.compareTo(timestamp) < 0) {
+
+ offline.remove(deviceId);
+ // add() returns false when the device was already available,
+ // i.e. no state change happened.
+ return availableDevices.add(deviceId);
+ }
+ return false;
+ }
+
+ // Updates the full port list of a device. Only the device's master applies
+ // the update locally (and gossips the merged result); a non-master forwards
+ // the raw descriptions to the master.
+ @Override
+ public synchronized List<DeviceEvent> updatePorts(ProviderId providerId,
+ DeviceId deviceId,
+ List<PortDescription> portDescriptions) {
+
+ NodeId localNode = clusterService.getLocalNode().id();
+ // TODO: It might be negligible, but this will have negative impact to topology discovery performance,
+ // since it will trigger distributed store read.
+ // Also, it'll probably be better if side-way communication happened on ConfigurationProvider, etc.
+ // outside Device subsystem. so that we don't have to modify both Device and Link stores.
+ // If we don't care much about topology performance, then it might be OK.
+ NodeId deviceNode = mastershipService.getMasterFor(deviceId);
+
+ // Process port update only if we're the master of the device,
+ // otherwise signal the actual master.
+ List<DeviceEvent> deviceEvents = null;
+ if (localNode.equals(deviceNode)) {
+
+ final Timestamp newTimestamp;
+ try {
+ newTimestamp = deviceClockService.getTimestamp(deviceId);
+ } catch (IllegalStateException e) {
+ log.info("Timestamp was not available for device {}", deviceId);
+ log.debug(" discarding {}", portDescriptions);
+ // Failed to generate timestamp.
+
+ // Possible situation:
+ // Device connected and became master for short period of time,
+ // but lost mastership before this instance had the chance to
+ // retrieve term information.
+
+ // Information dropped here is expected to be recoverable by
+ // device probing after mastership change
+
+ return Collections.emptyList();
+ }
+ log.debug("timestamp for {} {}", deviceId, newTimestamp);
+
+ final Timestamped<List<PortDescription>> timestampedInput
+ = new Timestamped<>(portDescriptions, newTimestamp);
+ final Timestamped<List<PortDescription>> merged;
+
+ final Map<ProviderId, DeviceDescriptions> device = getOrCreateDeviceDescriptionsMap(deviceId);
+
+ // Apply the update and read back the merged per-port descriptions
+ // under the same per-device lock, so peers receive a consistent view.
+ synchronized (device) {
+ deviceEvents = updatePortsInternal(providerId, deviceId, timestampedInput);
+ final DeviceDescriptions descs = device.get(providerId);
+ List<PortDescription> mergedList =
+ FluentIterable.from(portDescriptions)
+ .transform(new Function<PortDescription, PortDescription>() {
+ @Override
+ public PortDescription apply(PortDescription input) {
+ // lookup merged port description
+ return descs.getPortDesc(input.portNumber()).value();
+ }
+ }).toList();
+ merged = new Timestamped<>(mergedList, newTimestamp);
+ }
+
+ if (!deviceEvents.isEmpty()) {
+ log.debug("Notifying peers of a ports update topology event for providerId: {} and deviceId: {}",
+ providerId, deviceId);
+ notifyPeers(new InternalPortEvent(providerId, deviceId, merged));
+ }
+
+ } else {
+ // FIXME Temporary hack for NPE (ONOS-1171).
+ // Proper fix is to implement forwarding to master on ConfigProvider
+ // redo ONOS-490
+ if (deviceNode == null) {
+ // silently ignore
+ return Collections.emptyList();
+ }
+
+ PortInjectedEvent portInjectedEvent = new PortInjectedEvent(providerId, deviceId, portDescriptions);
+
+ //TODO check unicast return value
+ clusterCommunicator.unicast(portInjectedEvent, PORT_INJECTED, SERIALIZER::encode, deviceNode);
+ /* error log:
+ log.warn("Failed to process injected ports of device id: {} " +
+ "(cluster messaging failed: {})",
+ deviceId, e);
+ */
+ }
+
+ return deviceEvents == null ? Collections.emptyList() : deviceEvents;
+ }
+
+ // Applies a timestamped port-list update under the per-device lock and
+ // returns the resulting events (adds/updates plus removals of ports absent
+ // from the new list). Outdated input yields an empty list.
+ private List<DeviceEvent> updatePortsInternal(ProviderId providerId,
+ DeviceId deviceId,
+ Timestamped<List<PortDescription>> portDescriptions) {
+
+ Device device = devices.get(deviceId);
+ checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
+
+ Map<ProviderId, DeviceDescriptions> descsMap = deviceDescs.get(deviceId);
+ checkArgument(descsMap != null, DEVICE_NOT_FOUND, deviceId);
+
+ List<DeviceEvent> events = new ArrayList<>();
+ synchronized (descsMap) {
+
+ if (isDeviceRemoved(deviceId, portDescriptions.timestamp())) {
+ log.debug("Ignoring outdated events: {}", portDescriptions);
+ return Collections.emptyList();
+ }
+
+ DeviceDescriptions descs = descsMap.get(providerId);
+ // every provider must provide DeviceDescription.
+ checkArgument(descs != null,
+ "Device description for Device ID %s from Provider %s was not found",
+ deviceId, providerId);
+
+ Map<PortNumber, Port> ports = getPortMap(deviceId);
+
+ final Timestamp newTimestamp = portDescriptions.timestamp();
+
+ // Add new ports
+ Set<PortNumber> processed = new HashSet<>();
+ for (PortDescription portDescription : portDescriptions.value()) {
+ final PortNumber number = portDescription.portNumber();
+ processed.add(number);
+
+ final Port oldPort = ports.get(number);
+ final Port newPort;
+
+
+ final Timestamped<PortDescription> existingPortDesc = descs.getPortDesc(number);
+ // >= accepts equal timestamps: all ports in one update share the
+ // same timestamp, so same-batch descriptions must not be dropped.
+ if (existingPortDesc == null ||
+ newTimestamp.compareTo(existingPortDesc.timestamp()) >= 0) {
+ // on new port or valid update
+ // update description
+ descs.putPortDesc(new Timestamped<>(portDescription,
+ portDescriptions.timestamp()));
+ newPort = composePort(device, number, descsMap);
+ } else {
+ // outdated event, ignored.
+ continue;
+ }
+
+ events.add(oldPort == null ?
+ createPort(device, newPort, ports) :
+ updatePort(device, oldPort, newPort, ports));
+ }
+
+ // Ports missing from this update are considered gone.
+ events.addAll(pruneOldPorts(device, ports, processed));
+ }
+ // createPort/updatePort may return null for no-op updates; drop those.
+ return FluentIterable.from(events).filter(notNull()).toList();
+ }
+
+ // Creates a new port based on the port description adds it to the map and
+ // Returns corresponding event.
+ // Guarded by deviceDescs value (=Device lock)
+ private DeviceEvent createPort(Device device, Port newPort,
+ Map<PortNumber, Port> ports) {
+ ports.put(newPort.number(), newPort);
+ return new DeviceEvent(PORT_ADDED, device, newPort);
+ }
+
+ // Checks if the specified port requires update and if so, it replaces the
+ // existing entry in the map and returns corresponding event.
+ // Returns null when nothing observable changed.
+ // Guarded by deviceDescs value (=Device lock)
+ private DeviceEvent updatePort(Device device, Port oldPort,
+ Port newPort,
+ Map<PortNumber, Port> ports) {
+ // Only enabled-state, type, speed and annotation changes are observable.
+ if (oldPort.isEnabled() != newPort.isEnabled() ||
+ oldPort.type() != newPort.type() ||
+ oldPort.portSpeed() != newPort.portSpeed() ||
+ !AnnotationsUtil.isEqual(oldPort.annotations(), newPort.annotations())) {
+ ports.put(oldPort.number(), newPort);
+ return new DeviceEvent(PORT_UPDATED, device, newPort);
+ }
+ return null;
+ }
+
+ // Prunes the specified list of ports based on which ports are in the
+ // processed list and returns list of corresponding events.
+ // Guarded by deviceDescs value (=Device lock)
+ private List<DeviceEvent> pruneOldPorts(Device device,
+ Map<PortNumber, Port> ports,
+ Set<PortNumber> processed) {
+ List<DeviceEvent> events = new ArrayList<>();
+ // Iterator.remove() is used so entries can be deleted while iterating.
+ Iterator<Entry<PortNumber, Port>> iterator = ports.entrySet().iterator();
+ while (iterator.hasNext()) {
+ Entry<PortNumber, Port> e = iterator.next();
+ PortNumber portNumber = e.getKey();
+ if (!processed.contains(portNumber)) {
+ events.add(new DeviceEvent(PORT_REMOVED, device, e.getValue()));
+ iterator.remove();
+ }
+ }
+ return events;
+ }
+
+ // Gets the map of ports for the specified device; if one does not already
+ // exist, it creates and registers a new one.
+ private ConcurrentMap<PortNumber, Port> getPortMap(DeviceId deviceId) {
+ // createIfAbsentUnchecked guarantees at most one map instance per device.
+ return createIfAbsentUnchecked(devicePorts, deviceId,
+ NewConcurrentHashMap.<PortNumber, Port>ifNeeded());
+ }
+
+ /**
+ * Returns the provider-to-descriptions map for the given device, creating
+ * and registering an empty one atomically if absent. The returned instance
+ * also serves as the per-device lock, so once published it is never replaced.
+ *
+ * @param deviceId device identifier
+ * @return live descriptions map for the device
+ */
+ private Map<ProviderId, DeviceDescriptions> getOrCreateDeviceDescriptionsMap(
+ DeviceId deviceId) {
+ // computeIfAbsent provides the same first-writer-wins semantics as the
+ // original get()/putIfAbsent() sequence, in one atomic operation.
+ return deviceDescs.computeIfAbsent(deviceId, id -> new HashMap<>());
+ }
+
+ // Guarded by deviceDescs value (=Device lock)
+ // Returns the existing DeviceDescriptions for the provider, or seeds a new
+ // one from deltaDesc if this is the provider's first description.
+ private DeviceDescriptions getOrCreateProviderDeviceDescriptions(
+ Map<ProviderId, DeviceDescriptions> device,
+ ProviderId providerId, Timestamped<DeviceDescription> deltaDesc) {
+ synchronized (device) {
+ DeviceDescriptions r = device.get(providerId);
+ if (r == null) {
+ r = new DeviceDescriptions(deltaDesc);
+ device.put(providerId, r);
+ }
+ return r;
+ }
+ }
+
+ // Updates the status of a single port; on an actual change, gossips the
+ // merged port description to peers. Returns null when the update was
+ // dropped (no timestamp available) or produced no observable change.
+ @Override
+ public synchronized DeviceEvent updatePortStatus(ProviderId providerId,
+ DeviceId deviceId,
+ PortDescription portDescription) {
+ final Timestamp newTimestamp;
+ try {
+ newTimestamp = deviceClockService.getTimestamp(deviceId);
+ } catch (IllegalStateException e) {
+ log.info("Timestamp was not available for device {}", deviceId);
+ log.debug(" discarding {}", portDescription);
+ // Failed to generate timestamp. Ignoring.
+ // See updatePorts comment
+ return null;
+ }
+ final Timestamped<PortDescription> deltaDesc
+ = new Timestamped<>(portDescription, newTimestamp);
+ final DeviceEvent event;
+ final Timestamped<PortDescription> mergedDesc;
+ final Map<ProviderId, DeviceDescriptions> device = getOrCreateDeviceDescriptionsMap(deviceId);
+ // Apply and read back the merged description under the per-device lock.
+ synchronized (device) {
+ event = updatePortStatusInternal(providerId, deviceId, deltaDesc);
+ mergedDesc = device.get(providerId)
+ .getPortDesc(portDescription.portNumber());
+ }
+ if (event != null) {
+ log.debug("Notifying peers of a port status update topology event for providerId: {} and deviceId: {}",
+ providerId, deviceId);
+ notifyPeers(new InternalPortStatusEvent(providerId, deviceId, mergedDesc));
+ }
+ return event;
+ }
+
+ // Applies a timestamped single-port update under the per-device lock and
+ // returns the resulting PORT_ADDED/PORT_UPDATED event, or null for
+ // outdated or no-op updates.
+ private DeviceEvent updatePortStatusInternal(ProviderId providerId, DeviceId deviceId,
+ Timestamped<PortDescription> deltaDesc) {
+ Device device = devices.get(deviceId);
+ checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
+
+ Map<ProviderId, DeviceDescriptions> descsMap = deviceDescs.get(deviceId);
+ checkArgument(descsMap != null, DEVICE_NOT_FOUND, deviceId);
+
+ synchronized (descsMap) {
+
+ if (isDeviceRemoved(deviceId, deltaDesc.timestamp())) {
+ log.debug("Ignoring outdated event: {}", deltaDesc);
+ return null;
+ }
+
+ DeviceDescriptions descs = descsMap.get(providerId);
+ // assuming every provider must give a DeviceDescription
+ verify(descs != null,
+ "Device description for Device ID %s from Provider %s was not found",
+ deviceId, providerId);
+
+ ConcurrentMap<PortNumber, Port> ports = getPortMap(deviceId);
+ final PortNumber number = deltaDesc.value().portNumber();
+ final Port oldPort = ports.get(number);
+ final Port newPort;
+
+ // Unlike the bulk path, strictly-newer is required here: an equal
+ // timestamp means this exact update was already applied.
+ final Timestamped<PortDescription> existingPortDesc = descs.getPortDesc(number);
+ if (existingPortDesc == null ||
+ deltaDesc.isNewer(existingPortDesc)) {
+ // on new port or valid update
+ // update description
+ descs.putPortDesc(deltaDesc);
+ newPort = composePort(device, number, descsMap);
+ } else {
+ // same or outdated event, ignored.
+ log.trace("ignore same or outdated {} >= {}", existingPortDesc, deltaDesc);
+ return null;
+ }
+
+ if (oldPort == null) {
+ return createPort(device, newPort, ports);
+ } else {
+ return updatePort(device, oldPort, newPort, ports);
+ }
+ }
+ }
+
+ // Returns an immutable snapshot of the device's ports; empty if unknown.
+ @Override
+ public List<Port> getPorts(DeviceId deviceId) {
+ Map<PortNumber, Port> ports = devicePorts.get(deviceId);
+ if (ports == null) {
+ return Collections.emptyList();
+ }
+ return ImmutableList.copyOf(ports.values());
+ }
+
+ // Stores new absolute port statistics and the per-interval deltas computed
+ // against the previously stored values. Always returns null: the actual
+ // PORT_STATS_UPDATED event is emitted by InternalPortStatsListener when the
+ // eventually-consistent map replicates the write.
+ @Override
+ public DeviceEvent updatePortStatistics(ProviderId providerId, DeviceId deviceId,
+ Collection<PortStatistics> newStatsCollection) {
+
+ Map<PortNumber, PortStatistics> prvStatsMap = devicePortStats.get(deviceId);
+ Map<PortNumber, PortStatistics> newStatsMap = Maps.newHashMap();
+ Map<PortNumber, PortStatistics> deltaStatsMap = Maps.newHashMap();
+
+ if (prvStatsMap != null) {
+ for (PortStatistics newStats : newStatsCollection) {
+ PortNumber port = PortNumber.portNumber(newStats.port());
+ PortStatistics prvStats = prvStatsMap.get(port);
+ // Default delta is an all-zero record when the port is new.
+ DefaultPortStatistics.Builder builder = DefaultPortStatistics.builder();
+ PortStatistics deltaStats = builder.build();
+ if (prvStats != null) {
+ deltaStats = calcDeltaStats(deviceId, prvStats, newStats);
+ }
+ deltaStatsMap.put(port, deltaStats);
+ newStatsMap.put(port, newStats);
+ }
+ } else {
+ // First sample for this device: no deltas can be computed yet.
+ for (PortStatistics newStats : newStatsCollection) {
+ PortNumber port = PortNumber.portNumber(newStats.port());
+ newStatsMap.put(port, newStats);
+ }
+ }
+ devicePortDeltaStats.put(deviceId, deltaStatsMap);
+ devicePortStats.put(deviceId, newStatsMap);
+ // DeviceEvent returns null because of InternalPortStatsListener usage
+ return null;
+ }
+
+    /**
+     * Calculate delta statistics by subtracting previous from new statistics.
+     * <p>
+     * Duration is kept as a (seconds, nanoseconds) pair; when the nanosecond
+     * component of the new sample is smaller than the previous one, a second
+     * is borrowed so the nanosecond delta stays non-negative.
+     *
+     * @param deviceId device identifier
+     * @param prvStats previous port statistics
+     * @param newStats new port statistics
+     * @return PortStatistics representing the difference between the samples
+     */
+    public PortStatistics calcDeltaStats(DeviceId deviceId, PortStatistics prvStats, PortStatistics newStats) {
+        // calculate time difference
+        long deltaStatsSec, deltaStatsNano;
+        if (newStats.durationNano() < prvStats.durationNano()) {
+            // borrow one second so the nanosecond delta is non-negative
+            deltaStatsNano = newStats.durationNano() - prvStats.durationNano() + TimeUnit.SECONDS.toNanos(1);
+            deltaStatsSec = newStats.durationSec() - prvStats.durationSec() - 1L;
+        } else {
+            deltaStatsNano = newStats.durationNano() - prvStats.durationNano();
+            deltaStatsSec = newStats.durationSec() - prvStats.durationSec();
+        }
+        DefaultPortStatistics.Builder builder = DefaultPortStatistics.builder();
+        DefaultPortStatistics deltaStats = builder.setDeviceId(deviceId)
+                .setPort(newStats.port())
+                .setPacketsReceived(newStats.packetsReceived() - prvStats.packetsReceived())
+                .setPacketsSent(newStats.packetsSent() - prvStats.packetsSent())
+                .setBytesReceived(newStats.bytesReceived() - prvStats.bytesReceived())
+                .setBytesSent(newStats.bytesSent() - prvStats.bytesSent())
+                .setPacketsRxDropped(newStats.packetsRxDropped() - prvStats.packetsRxDropped())
+                .setPacketsTxDropped(newStats.packetsTxDropped() - prvStats.packetsTxDropped())
+                .setPacketsRxErrors(newStats.packetsRxErrors() - prvStats.packetsRxErrors())
+                .setPacketsTxErrors(newStats.packetsTxErrors() - prvStats.packetsTxErrors())
+                .setDurationSec(deltaStatsSec)
+                .setDurationNano(deltaStatsNano)
+                .build();
+        return deltaStats;
+    }
+
+    @Override
+    public List<PortStatistics> getPortStatistics(DeviceId deviceId) {
+        // Snapshot of the latest absolute counters; empty if none recorded.
+        final Map<PortNumber, PortStatistics> stats = devicePortStats.get(deviceId);
+        return stats == null ? Collections.emptyList() : ImmutableList.copyOf(stats.values());
+    }
+
+    @Override
+    public List<PortStatistics> getPortDeltaStatistics(DeviceId deviceId) {
+        // Snapshot of the latest per-port deltas; empty if none recorded.
+        final Map<PortNumber, PortStatistics> deltas = devicePortDeltaStats.get(deviceId);
+        return deltas == null ? Collections.emptyList() : ImmutableList.copyOf(deltas.values());
+    }
+
+    @Override
+    public Port getPort(DeviceId deviceId, PortNumber portNumber) {
+        // Resolve the device's port map first; an unknown device has no ports.
+        final Map<PortNumber, Port> portMap = devicePorts.get(deviceId);
+        if (portMap == null) {
+            return null;
+        }
+        return portMap.get(portNumber);
+    }
+
+    @Override
+    public boolean isAvailable(DeviceId deviceId) {
+        // A device is available iff it is currently in the online set.
+        return availableDevices.contains(deviceId);
+    }
+
+    /**
+     * Removes the specified device, coordinating with the mastership service.
+     * <p>
+     * If another node is master for the device, the request is forwarded to it
+     * and {@code null} is returned (the event surfaces once the master acts).
+     * If there is no master, this node temporarily requests the role, removes
+     * the device locally, notifies peers, and relinquishes the temporary role.
+     */
+    @Override
+    public synchronized DeviceEvent removeDevice(DeviceId deviceId) {
+        final NodeId myId = clusterService.getLocalNode().id();
+        NodeId master = mastershipService.getMasterFor(deviceId);
+
+        // if there exist a master, forward
+        // if there is no master, try to become one and process
+
+        boolean relinquishAtEnd = false;
+        if (master == null) {
+            // Remember whether we held any role before, so we only give up a
+            // role that was acquired solely for this removal.
+            final MastershipRole myRole = mastershipService.getLocalRole(deviceId);
+            if (myRole != MastershipRole.NONE) {
+                relinquishAtEnd = true;
+            }
+            log.debug("Temporarily requesting role for {} to remove", deviceId);
+            mastershipService.requestRoleFor(deviceId);
+            MastershipTerm term = termService.getMastershipTerm(deviceId);
+            if (term != null && myId.equals(term.master())) {
+                master = myId;
+            }
+        }
+
+        if (!myId.equals(master)) {
+            log.debug("{} has control of {}, forwarding remove request",
+                      master, deviceId);
+
+            // TODO check unicast return value
+            clusterCommunicator.unicast(deviceId, DEVICE_REMOVE_REQ, SERIALIZER::encode, master);
+             /* error log:
+             log.error("Failed to forward {} remove request to {}", deviceId, master, e);
+             */
+
+            // event will be triggered after master processes it.
+            return null;
+        }
+
+        // I have control..
+
+        Timestamp timestamp = deviceClockService.getTimestamp(deviceId);
+        DeviceEvent event = removeDeviceInternal(deviceId, timestamp);
+        if (event != null) {
+            log.debug("Notifying peers of a device removed topology event for deviceId: {}",
+                      deviceId);
+            notifyPeers(new InternalDeviceRemovedEvent(deviceId, timestamp));
+        }
+        if (relinquishAtEnd) {
+            log.debug("Relinquishing temporary role acquired for {}", deviceId);
+            mastershipService.relinquishMastership(deviceId);
+        }
+        return event;
+    }
+
+    /**
+     * Removes the device from the local store if the given timestamp is newer
+     * than the latest known description from the primary provider; records the
+     * removal request timestamp so late replicated updates can be rejected.
+     */
+    private DeviceEvent removeDeviceInternal(DeviceId deviceId,
+                                             Timestamp timestamp) {
+
+        Map<ProviderId, DeviceDescriptions> descs = getOrCreateDeviceDescriptionsMap(deviceId);
+        synchronized (descs) {
+            // accept removal request if given timestamp is newer than
+            // the latest Timestamp from Primary provider
+            DeviceDescriptions primDescs = getPrimaryDescriptions(descs);
+            Timestamp lastTimestamp = primDescs.getLatestTimestamp();
+            if (timestamp.compareTo(lastTimestamp) <= 0) {
+                // outdated event ignore
+                return null;
+            }
+            // record removal so isDeviceRemoved() can suppress older updates
+            removalRequest.put(deviceId, timestamp);
+
+            Device device = devices.remove(deviceId);
+            // should DEVICE_REMOVED carry removed ports?
+            Map<PortNumber, Port> ports = devicePorts.get(deviceId);
+            if (ports != null) {
+                ports.clear();
+            }
+            markOfflineInternal(deviceId, timestamp);
+            descs.clear();
+            // null device means it was already gone locally; no event to emit
+            return device == null ? null :
+                    new DeviceEvent(DeviceEvent.Type.DEVICE_REMOVED, device, null);
+        }
+    }
+
+ /**
+ * Checks if given timestamp is superseded by removal request
+ * with more recent timestamp.
+ *
+ * @param deviceId identifier of a device
+ * @param timestampToCheck timestamp of an event to check
+ * @return true if device is already removed
+ */
+ private boolean isDeviceRemoved(DeviceId deviceId, Timestamp timestampToCheck) {
+ Timestamp removalTimestamp = removalRequest.get(deviceId);
+ if (removalTimestamp != null &&
+ removalTimestamp.compareTo(timestampToCheck) >= 0) {
+ // removalRequest is more recent
+ return true;
+ }
+ return false;
+ }
+
+    /**
+     * Returns a Device, merging description given from multiple Providers.
+     * <p>
+     * Core attributes (type, versions, serial, chassis) come from the primary
+     * provider's description; only annotations are merged in from ancillary
+     * providers.
+     *
+     * @param deviceId device identifier
+     * @param providerDescs Collection of Descriptions from multiple providers
+     * @return Device instance
+     */
+    private Device composeDevice(DeviceId deviceId,
+                                 Map<ProviderId, DeviceDescriptions> providerDescs) {
+
+        checkArgument(!providerDescs.isEmpty(), "No device descriptions supplied");
+
+        ProviderId primary = pickPrimaryPID(providerDescs);
+
+        DeviceDescriptions desc = providerDescs.get(primary);
+
+        final DeviceDescription base = desc.getDeviceDesc().value();
+        Type type = base.type();
+        String manufacturer = base.manufacturer();
+        String hwVersion = base.hwVersion();
+        String swVersion = base.swVersion();
+        String serialNumber = base.serialNumber();
+        ChassisId chassisId = base.chassisId();
+        DefaultAnnotations annotations = DefaultAnnotations.builder().build();
+        annotations = merge(annotations, base.annotations());
+
+        for (Entry<ProviderId, DeviceDescriptions> e : providerDescs.entrySet()) {
+            if (e.getKey().equals(primary)) {
+                continue;
+            }
+            // Note: should keep track of Description timestamp in the future
+            // and only merge conflicting keys when timestamp is newer.
+            // Currently assuming there will never be a key conflict between
+            // providers
+
+            // annotation merging. not so efficient, should revisit later
+            annotations = merge(annotations, e.getValue().getDeviceDesc().value().annotations());
+        }
+
+        return new DefaultDevice(primary, deviceId, type, manufacturer,
+                                 hwVersion, swVersion, serialNumber,
+                                 chassisId, annotations);
+    }
+
+    /**
+     * Builds a concrete Port of the subtype matching the description's port
+     * type (OMS/OCH/ODUCLT get their specialized classes); any other type
+     * falls back to a plain DefaultPort.
+     */
+    private Port buildTypedPort(Device device, PortNumber number, boolean isEnabled,
+                                PortDescription description, Annotations annotations) {
+        switch (description.type()) {
+            case OMS:
+                OmsPortDescription omsDesc = (OmsPortDescription) description;
+                return new OmsPort(device, number, isEnabled, omsDesc.minFrequency(),
+                                   omsDesc.maxFrequency(), omsDesc.grid(), annotations);
+            case OCH:
+                OchPortDescription ochDesc = (OchPortDescription) description;
+                return new OchPort(device, number, isEnabled, ochDesc.signalType(),
+                                   ochDesc.isTunable(), ochDesc.lambda(), annotations);
+            case ODUCLT:
+                OduCltPortDescription oduDesc = (OduCltPortDescription) description;
+                return new OduCltPort(device, number, isEnabled, oduDesc.signalType(), annotations);
+            default:
+                return new DefaultPort(device, number, isEnabled, description.type(),
+                                       description.portSpeed(), annotations);
+        }
+    }
+
+    /**
+     * Returns a Port, merging description given from multiple Providers.
+     * <p>
+     * The primary provider supplies the enabled state and base annotations;
+     * ancillary descriptions are merged in, and the port object itself is
+     * rebuilt from whichever description carries the newest timestamp.
+     *
+     * @param device device the port is on
+     * @param number port number
+     * @param descsMap Collection of Descriptions from multiple providers
+     * @return Port instance
+     */
+    private Port composePort(Device device, PortNumber number,
+                             Map<ProviderId, DeviceDescriptions> descsMap) {
+
+        ProviderId primary = pickPrimaryPID(descsMap);
+        DeviceDescriptions primDescs = descsMap.get(primary);
+        // if no primary, assume not enabled
+        boolean isEnabled = false;
+        DefaultAnnotations annotations = DefaultAnnotations.builder().build();
+        Timestamp newest = null;
+        final Timestamped<PortDescription> portDesc = primDescs.getPortDesc(number);
+        if (portDesc != null) {
+            isEnabled = portDesc.value().isEnabled();
+            annotations = merge(annotations, portDesc.value().annotations());
+            newest = portDesc.timestamp();
+        }
+        Port updated = null;
+        for (Entry<ProviderId, DeviceDescriptions> e : descsMap.entrySet()) {
+            if (e.getKey().equals(primary)) {
+                continue;
+            }
+            // Note: should keep track of Description timestamp in the future
+            // and only merge conflicting keys when timestamp is newer.
+            // Currently assuming there will never be a key conflict between
+            // providers
+
+            // annotation merging. not so efficient, should revisit later
+            final Timestamped<PortDescription> otherPortDesc = e.getValue().getPortDesc(number);
+            if (otherPortDesc != null) {
+                // skip descriptions older than the newest one seen so far
+                if (newest != null && newest.isNewerThan(otherPortDesc.timestamp())) {
+                    continue;
+                }
+                annotations = merge(annotations, otherPortDesc.value().annotations());
+                PortDescription other = otherPortDesc.value();
+                updated = buildTypedPort(device, number, isEnabled, other, annotations);
+                newest = otherPortDesc.timestamp();
+            }
+        }
+        if (portDesc == null) {
+            // no primary description: use an ancillary-built port, or a bare
+            // disabled DefaultPort if no provider knows this port at all
+            return updated == null ? new DefaultPort(device, number, false, annotations) : updated;
+        }
+        PortDescription current = portDesc.value();
+        return updated == null
+                ? buildTypedPort(device, number, isEnabled, current, annotations)
+                : updated;
+    }
+
+    /**
+     * Picks the provider to treat as primary source of truth.
+     *
+     * @param providerDescs descriptions keyed by provider
+     * @return first non-ancillary ProviderId encountered, or — if every
+     *         provider is ancillary — an arbitrary (first-seen) fallback
+     */
+    private ProviderId pickPrimaryPID(
+            Map<ProviderId, DeviceDescriptions> providerDescs) {
+        ProviderId fallBackPrimary = null;
+        for (Entry<ProviderId, DeviceDescriptions> e : providerDescs.entrySet()) {
+            if (!e.getKey().isAncillary()) {
+                return e.getKey();
+            } else if (fallBackPrimary == null) {
+                // remember the first ancillary provider seen as a fallback
+                // in case there is no primary
+                fallBackPrimary = e.getKey();
+            }
+        }
+        return fallBackPrimary;
+    }
+
+    /**
+     * Returns the descriptions supplied by the primary provider
+     * (as chosen by {@link #pickPrimaryPID}), or null if the map is empty.
+     */
+    private DeviceDescriptions getPrimaryDescriptions(
+            Map<ProviderId, DeviceDescriptions> providerDescs) {
+        ProviderId pid = pickPrimaryPID(providerDescs);
+        return providerDescs.get(pid);
+    }
+
+    // Sends the given event to a single peer; the declared IOException
+    // surfaces transport problems to the caller.
+    private void unicastMessage(NodeId recipient, MessageSubject subject, Object event) throws IOException {
+        clusterCommunicator.unicast(event, subject, SERIALIZER::encode, recipient);
+    }
+
+    // Broadcasts the given event to all peers in the cluster.
+    private void broadcastMessage(MessageSubject subject, Object event) {
+        clusterCommunicator.broadcast(event, subject, SERIALIZER::encode);
+    }
+
+    // --- replication notifications: broadcast local events to all peers ---
+
+    private void notifyPeers(InternalDeviceEvent event) {
+        broadcastMessage(GossipDeviceStoreMessageSubjects.DEVICE_UPDATE, event);
+    }
+
+    private void notifyPeers(InternalDeviceOfflineEvent event) {
+        broadcastMessage(GossipDeviceStoreMessageSubjects.DEVICE_OFFLINE, event);
+    }
+
+    private void notifyPeers(InternalDeviceRemovedEvent event) {
+        broadcastMessage(GossipDeviceStoreMessageSubjects.DEVICE_REMOVED, event);
+    }
+
+    private void notifyPeers(InternalPortEvent event) {
+        broadcastMessage(GossipDeviceStoreMessageSubjects.PORT_UPDATE, event);
+    }
+
+    private void notifyPeers(InternalPortStatusEvent event) {
+        broadcastMessage(GossipDeviceStoreMessageSubjects.PORT_STATUS_UPDATE, event);
+    }
+
+    // --- anti-entropy notifications: unicast local state to a single peer.
+    // Send failures are logged and swallowed; anti-entropy retries later.
+    // Uses parameterized logging (fixes the missing space in the original
+    // "Failed to send" message and avoids eager string concatenation).
+
+    private void notifyPeer(NodeId recipient, InternalDeviceEvent event) {
+        try {
+            unicastMessage(recipient, GossipDeviceStoreMessageSubjects.DEVICE_UPDATE, event);
+        } catch (IOException e) {
+            log.error("Failed to send {} to {}", event, recipient, e);
+        }
+    }
+
+    private void notifyPeer(NodeId recipient, InternalDeviceOfflineEvent event) {
+        try {
+            unicastMessage(recipient, GossipDeviceStoreMessageSubjects.DEVICE_OFFLINE, event);
+        } catch (IOException e) {
+            log.error("Failed to send {} to {}", event, recipient, e);
+        }
+    }
+
+    private void notifyPeer(NodeId recipient, InternalDeviceRemovedEvent event) {
+        try {
+            unicastMessage(recipient, GossipDeviceStoreMessageSubjects.DEVICE_REMOVED, event);
+        } catch (IOException e) {
+            log.error("Failed to send {} to {}", event, recipient, e);
+        }
+    }
+
+    private void notifyPeer(NodeId recipient, InternalPortEvent event) {
+        try {
+            unicastMessage(recipient, GossipDeviceStoreMessageSubjects.PORT_UPDATE, event);
+        } catch (IOException e) {
+            log.error("Failed to send {} to {}", event, recipient, e);
+        }
+    }
+
+    private void notifyPeer(NodeId recipient, InternalPortStatusEvent event) {
+        try {
+            unicastMessage(recipient, GossipDeviceStoreMessageSubjects.PORT_STATUS_UPDATE, event);
+        } catch (IOException e) {
+            log.error("Failed to send {} to {}", event, recipient, e);
+        }
+    }
+
+    /**
+     * Builds an anti-entropy advertisement summarizing this node's view:
+     * latest timestamps per device fragment, per port fragment, and per
+     * device offline record.
+     */
+    private DeviceAntiEntropyAdvertisement createAdvertisement() {
+        final NodeId self = clusterService.getLocalNode().id();
+
+        final int numDevices = deviceDescs.size();
+        Map<DeviceFragmentId, Timestamp> adDevices = new HashMap<>(numDevices);
+        final int portsPerDevice = 8; // random factor to minimize reallocation
+        Map<PortFragmentId, Timestamp> adPorts = new HashMap<>(numDevices * portsPerDevice);
+        Map<DeviceId, Timestamp> adOffline = new HashMap<>(numDevices);
+
+        deviceDescs.forEach((deviceId, devDescs) -> {
+
+            // for each Device...
+            synchronized (devDescs) {
+
+                // send device offline timestamp
+                Timestamp lOffline = this.offline.get(deviceId);
+                if (lOffline != null) {
+                    adOffline.put(deviceId, lOffline);
+                }
+
+                for (Entry<ProviderId, DeviceDescriptions>
+                        prov : devDescs.entrySet()) {
+
+                    // for each Provider Descriptions...
+                    final ProviderId provId = prov.getKey();
+                    final DeviceDescriptions descs = prov.getValue();
+
+                    adDevices.put(new DeviceFragmentId(deviceId, provId),
+                                  descs.getDeviceDesc().timestamp());
+
+                    for (Entry<PortNumber, Timestamped<PortDescription>>
+                            portDesc : descs.getPortDescs().entrySet()) {
+
+                        final PortNumber number = portDesc.getKey();
+                        adPorts.put(new PortFragmentId(deviceId, provId, number),
+                                    portDesc.getValue().timestamp());
+                    }
+                }
+            }
+        });
+
+        return new DeviceAntiEntropyAdvertisement(self, adDevices, adPorts, adOffline);
+    }
+
+    /**
+     * Responds to anti-entropy advertisement message.
+     * <p/>
+     * Notify sender about out-dated information using regular replication message.
+     * Send back advertisement to sender if not in sync.
+     *
+     * @param advertisement to respond to
+     */
+    private void handleAdvertisement(DeviceAntiEntropyAdvertisement advertisement) {
+
+        final NodeId sender = advertisement.sender();
+
+        // Mutable copies: entries are removed as they are matched against
+        // local state; whatever remains afterwards is unknown locally.
+        Map<DeviceFragmentId, Timestamp> devAds = new HashMap<>(advertisement.deviceFingerPrints());
+        Map<PortFragmentId, Timestamp> portAds = new HashMap<>(advertisement.ports());
+        Map<DeviceId, Timestamp> offlineAds = new HashMap<>(advertisement.offline());
+
+        // Fragments to request
+        Collection<DeviceFragmentId> reqDevices = new ArrayList<>();
+        Collection<PortFragmentId> reqPorts = new ArrayList<>();
+
+        for (Entry<DeviceId, Map<ProviderId, DeviceDescriptions>> de : deviceDescs.entrySet()) {
+            final DeviceId deviceId = de.getKey();
+            final Map<ProviderId, DeviceDescriptions> lDevice = de.getValue();
+
+            synchronized (lDevice) {
+                // latestTimestamp across provider
+                // Note: can be null initially
+                Timestamp localLatest = offline.get(deviceId);
+
+                // handle device Ads
+                for (Entry<ProviderId, DeviceDescriptions> prov : lDevice.entrySet()) {
+                    final ProviderId provId = prov.getKey();
+                    final DeviceDescriptions lDeviceDescs = prov.getValue();
+
+                    final DeviceFragmentId devFragId = new DeviceFragmentId(deviceId, provId);
+
+
+                    Timestamped<DeviceDescription> lProvDevice = lDeviceDescs.getDeviceDesc();
+                    Timestamp advDevTimestamp = devAds.get(devFragId);
+
+                    if (advDevTimestamp == null || lProvDevice.isNewerThan(
+                            advDevTimestamp)) {
+                        // remote does not have it or outdated, suggest
+                        notifyPeer(sender, new InternalDeviceEvent(provId, deviceId, lProvDevice));
+                    } else if (!lProvDevice.timestamp().equals(advDevTimestamp)) {
+                        // local is outdated, request
+                        reqDevices.add(devFragId);
+                    }
+
+                    // handle port Ads
+                    for (Entry<PortNumber, Timestamped<PortDescription>>
+                            pe : lDeviceDescs.getPortDescs().entrySet()) {
+
+                        final PortNumber num = pe.getKey();
+                        final Timestamped<PortDescription> lPort = pe.getValue();
+
+                        final PortFragmentId portFragId = new PortFragmentId(deviceId, provId, num);
+
+                        Timestamp advPortTimestamp = portAds.get(portFragId);
+                        if (advPortTimestamp == null || lPort.isNewerThan(
+                                advPortTimestamp)) {
+                            // remote does not have it or outdated, suggest
+                            notifyPeer(sender, new InternalPortStatusEvent(provId, deviceId, lPort));
+                        } else if (!lPort.timestamp().equals(advPortTimestamp)) {
+                            // local is outdated, request
+                            log.trace("need update {} < {}", lPort.timestamp(), advPortTimestamp);
+                            reqPorts.add(portFragId);
+                        }
+
+                        // remove port Ad already processed
+                        portAds.remove(portFragId);
+                    } // end local port loop
+
+                    // remove device Ad already processed
+                    devAds.remove(devFragId);
+
+                    // find latest and update
+                    final Timestamp providerLatest = lDeviceDescs.getLatestTimestamp();
+                    if (localLatest == null ||
+                            providerLatest.compareTo(localLatest) > 0) {
+                        localLatest = providerLatest;
+                    }
+                } // end local provider loop
+
+                // checking if remote timestamp is more recent.
+                // NOTE(review): localLatest may still be null here if this
+                // device has no provider descriptions and no offline record;
+                // compareTo(null) would then NPE — confirm that state is
+                // unreachable or add a null guard.
+                Timestamp rOffline = offlineAds.get(deviceId);
+                if (rOffline != null &&
+                        rOffline.compareTo(localLatest) > 0) {
+                    // remote offline timestamp suggests that the
+                    // device is off-line
+                    markOfflineInternal(deviceId, rOffline);
+                }
+
+                Timestamp lOffline = offline.get(deviceId);
+                if (lOffline != null && rOffline == null) {
+                    // locally offline, but remote is online, suggest offline
+                    notifyPeer(sender, new InternalDeviceOfflineEvent(deviceId, lOffline));
+                }
+
+                // remove device offline Ad already processed
+                offlineAds.remove(deviceId);
+            } // end local device loop
+        } // device lock
+
+        // If there is any Ads left, request them
+        log.trace("Ads left {}, {}", devAds, portAds);
+        reqDevices.addAll(devAds.keySet());
+        reqPorts.addAll(portAds.keySet());
+
+        if (reqDevices.isEmpty() && reqPorts.isEmpty()) {
+            log.trace("Nothing to request to remote peer {}", sender);
+            return;
+        }
+
+        log.debug("Need to sync {} {}", reqDevices, reqPorts);
+
+        // 2-way Anti-Entropy for now
+        try {
+            unicastMessage(sender, DEVICE_ADVERTISE, createAdvertisement());
+        } catch (IOException e) {
+            log.error("Failed to send response advertisement to " + sender, e);
+        }
+
+// Sketch of 3-way Anti-Entropy
+//        DeviceAntiEntropyRequest request = new DeviceAntiEntropyRequest(self, reqDevices, reqPorts);
+//        ClusterMessage message = new ClusterMessage(
+//                clusterService.getLocalNode().id(),
+//                GossipDeviceStoreMessageSubjects.DEVICE_REQUEST,
+//                SERIALIZER.encode(request));
+//
+//        try {
+//            clusterCommunicator.unicast(message, advertisement.sender());
+//        } catch (IOException e) {
+//            log.error("Failed to send advertisement reply to "
+//                      + advertisement.sender(), e);
+//        }
+    }
+
+    // Convenience: many internal operations return null for no-op outcomes;
+    // only forward real events to the store delegate.
+    private void notifyDelegateIfNotNull(DeviceEvent event) {
+        if (event != null) {
+            notifyDelegate(event);
+        }
+    }
+
+    /**
+     * Periodic task that sends an anti-entropy advertisement to one randomly
+     * chosen peer. All exceptions are caught so a scheduled executor does not
+     * silently suppress subsequent runs.
+     */
+    private final class SendAdvertisementTask implements Runnable {
+
+        @Override
+        public void run() {
+            if (Thread.currentThread().isInterrupted()) {
+                log.debug("Interrupted, quitting");
+                return;
+            }
+
+            try {
+                final NodeId self = clusterService.getLocalNode().id();
+                Set<ControllerNode> nodes = clusterService.getNodes();
+
+                ImmutableList<NodeId> nodeIds = FluentIterable.from(nodes)
+                        .transform(toNodeId())
+                        .toList();
+
+                if (nodeIds.size() == 1 && nodeIds.get(0).equals(self)) {
+                    log.trace("No other peers in the cluster.");
+                    return;
+                }
+
+                // pick a random peer other than ourselves (the size check
+                // above guarantees this loop terminates)
+                NodeId peer;
+                do {
+                    int idx = RandomUtils.nextInt(0, nodeIds.size());
+                    peer = nodeIds.get(idx);
+                } while (peer.equals(self));
+
+                DeviceAntiEntropyAdvertisement ad = createAdvertisement();
+
+                if (Thread.currentThread().isInterrupted()) {
+                    log.debug("Interrupted, quitting");
+                    return;
+                }
+
+                try {
+                    unicastMessage(peer, DEVICE_ADVERTISE, ad);
+                } catch (IOException e) {
+                    log.debug("Failed to send anti-entropy advertisement to {}", peer);
+                    return;
+                }
+            } catch (Exception e) {
+                // catch all Exception to avoid Scheduled task being suppressed.
+                log.error("Exception thrown while sending advertisement", e);
+            }
+        }
+    }
+
+    /**
+     * Applies device update events replicated from peers to the local store,
+     * notifying the delegate when local state actually changed.
+     */
+    private final class InternalDeviceEventListener
+            implements ClusterMessageHandler {
+        @Override
+        public void handle(ClusterMessage message) {
+            log.debug("Received device update event from peer: {}", message.sender());
+            InternalDeviceEvent event = SERIALIZER.decode(message.payload());
+
+            ProviderId providerId = event.providerId();
+            DeviceId deviceId = event.deviceId();
+            Timestamped<DeviceDescription> deviceDescription = event.deviceDescription();
+
+            try {
+                notifyDelegateIfNotNull(createOrUpdateDeviceInternal(providerId, deviceId, deviceDescription));
+            } catch (Exception e) {
+                log.warn("Exception thrown handling device update", e);
+            }
+        }
+    }
+
+    /**
+     * Applies device offline events replicated from peers to the local store.
+     */
+    private final class InternalDeviceOfflineEventListener
+            implements ClusterMessageHandler {
+        @Override
+        public void handle(ClusterMessage message) {
+            log.debug("Received device offline event from peer: {}", message.sender());
+            InternalDeviceOfflineEvent event = SERIALIZER.decode(message.payload());
+
+            DeviceId deviceId = event.deviceId();
+            Timestamp timestamp = event.timestamp();
+
+            try {
+                notifyDelegateIfNotNull(markOfflineInternal(deviceId, timestamp));
+            } catch (Exception e) {
+                log.warn("Exception thrown handling device offline", e);
+            }
+        }
+    }
+
+    /**
+     * Handles remove requests forwarded by non-master peers; this node runs
+     * the full removeDevice() flow (it is expected to be, or become, master).
+     */
+    private final class InternalRemoveRequestListener
+            implements ClusterMessageHandler {
+        @Override
+        public void handle(ClusterMessage message) {
+            log.debug("Received device remove request from peer: {}", message.sender());
+            DeviceId did = SERIALIZER.decode(message.payload());
+
+            try {
+                removeDevice(did);
+            } catch (Exception e) {
+                log.warn("Exception thrown handling device remove", e);
+            }
+        }
+    }
+
+    /**
+     * Applies device removed events replicated from peers to the local store.
+     */
+    private final class InternalDeviceRemovedEventListener
+            implements ClusterMessageHandler {
+        @Override
+        public void handle(ClusterMessage message) {
+            log.debug("Received device removed event from peer: {}", message.sender());
+            InternalDeviceRemovedEvent event = SERIALIZER.decode(message.payload());
+
+            DeviceId deviceId = event.deviceId();
+            Timestamp timestamp = event.timestamp();
+
+            try {
+                notifyDelegateIfNotNull(removeDeviceInternal(deviceId, timestamp));
+            } catch (Exception e) {
+                log.warn("Exception thrown handling device removed", e);
+            }
+        }
+    }
+
+    /**
+     * Applies bulk port update events replicated from peers. Events for
+     * devices not yet known locally are dropped; anti-entropy recovers them.
+     */
+    private final class InternalPortEventListener
+            implements ClusterMessageHandler {
+        @Override
+        public void handle(ClusterMessage message) {
+
+            log.debug("Received port update event from peer: {}", message.sender());
+            InternalPortEvent event = SERIALIZER.decode(message.payload());
+
+            ProviderId providerId = event.providerId();
+            DeviceId deviceId = event.deviceId();
+            Timestamped<List<PortDescription>> portDescriptions = event.portDescriptions();
+
+            if (getDevice(deviceId) == null) {
+                log.debug("{} not found on this node yet, ignoring.", deviceId);
+                // Note: dropped information will be recovered by anti-entropy
+                return;
+            }
+
+            try {
+                notifyDelegate(updatePortsInternal(providerId, deviceId, portDescriptions));
+            } catch (Exception e) {
+                log.warn("Exception thrown handling port update", e);
+            }
+        }
+    }
+
+    /**
+     * Applies single-port status events replicated from peers. Events for
+     * devices not yet known locally are dropped; anti-entropy recovers them.
+     */
+    private final class InternalPortStatusEventListener
+            implements ClusterMessageHandler {
+        @Override
+        public void handle(ClusterMessage message) {
+
+            log.debug("Received port status update event from peer: {}", message.sender());
+            InternalPortStatusEvent event = SERIALIZER.decode(message.payload());
+
+            ProviderId providerId = event.providerId();
+            DeviceId deviceId = event.deviceId();
+            Timestamped<PortDescription> portDescription = event.portDescription();
+
+            if (getDevice(deviceId) == null) {
+                log.debug("{} not found on this node yet, ignoring.", deviceId);
+                // Note: dropped information will be recovered by anti-entropy
+                return;
+            }
+
+            try {
+                notifyDelegateIfNotNull(updatePortStatusInternal(providerId, deviceId, portDescription));
+            } catch (Exception e) {
+                // fixed copy-paste: this is the port *status* update path
+                log.warn("Exception thrown handling port status update", e);
+            }
+        }
+    }
+
+    /**
+     * Processes anti-entropy advertisements received from peers.
+     */
+    private final class InternalDeviceAdvertisementListener
+            implements ClusterMessageHandler {
+        @Override
+        public void handle(ClusterMessage message) {
+            log.trace("Received Device Anti-Entropy advertisement from peer: {}", message.sender());
+            DeviceAntiEntropyAdvertisement advertisement = SERIALIZER.decode(message.payload());
+            try {
+                handleAdvertisement(advertisement);
+            } catch (Exception e) {
+                log.warn("Exception thrown handling Device advertisements.", e);
+            }
+        }
+    }
+
+    /**
+     * Handles device events injected (not discovered) by ConfigProvider,
+     * forwarded from the node that received the configuration.
+     */
+    private final class DeviceInjectedEventListener
+            implements ClusterMessageHandler {
+        @Override
+        public void handle(ClusterMessage message) {
+            log.debug("Received injected device event from peer: {}", message.sender());
+            DeviceInjectedEvent event = SERIALIZER.decode(message.payload());
+
+            ProviderId providerId = event.providerId();
+            DeviceId deviceId = event.deviceId();
+            DeviceDescription deviceDescription = event.deviceDescription();
+            if (!deviceClockService.isTimestampAvailable(deviceId)) {
+                // workaround for ONOS-1208: cannot timestamp the update yet
+                log.warn("Not ready to accept update. Dropping {}", deviceDescription);
+                return;
+            }
+
+            try {
+                createOrUpdateDevice(providerId, deviceId, deviceDescription);
+            } catch (Exception e) {
+                log.warn("Exception thrown handling device injected event.", e);
+            }
+        }
+    }
+
+    /**
+     * Handles port events injected (not discovered) by ConfigProvider,
+     * forwarded from the node that received the configuration.
+     */
+    private final class PortInjectedEventListener
+            implements ClusterMessageHandler {
+        @Override
+        public void handle(ClusterMessage message) {
+            log.debug("Received injected port event from peer: {}", message.sender());
+            PortInjectedEvent event = SERIALIZER.decode(message.payload());
+
+            ProviderId providerId = event.providerId();
+            DeviceId deviceId = event.deviceId();
+            List<PortDescription> portDescriptions = event.portDescriptions();
+            if (!deviceClockService.isTimestampAvailable(deviceId)) {
+                // workaround for ONOS-1208: cannot timestamp the update yet
+                log.warn("Not ready to accept update. Dropping {}", portDescriptions);
+                return;
+            }
+
+            try {
+                updatePorts(providerId, deviceId, portDescriptions);
+            } catch (Exception e) {
+                log.warn("Exception thrown handling port injected event.", e);
+            }
+        }
+    }
+
+    /**
+     * Translates eventually-consistent-map PUTs of port statistics into
+     * PORT_STATS_UPDATED DeviceEvents for the delegate (see the null return
+     * of updatePortStatistics, which relies on this listener).
+     */
+    private class InternalPortStatsListener
+            implements EventuallyConsistentMapListener<DeviceId, Map<PortNumber, PortStatistics>> {
+        @Override
+        public void event(EventuallyConsistentMapEvent<DeviceId, Map<PortNumber, PortStatistics>> event) {
+            if (event.type() == PUT) {
+                Device device = devices.get(event.key());
+                // only notify for devices still present in the store
+                if (device != null) {
+                    delegate.notify(new DeviceEvent(PORT_STATS_UPDATED, device));
+                }
+            }
+        }
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/GossipDeviceStoreMessageSubjects.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/GossipDeviceStoreMessageSubjects.java
new file mode 100644
index 00000000..554faf91
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/GossipDeviceStoreMessageSubjects.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import org.onosproject.store.cluster.messaging.MessageSubject;
+
+/**
+ * MessageSubjects used by GossipDeviceStore peer-peer communication.
+ */
+public final class GossipDeviceStoreMessageSubjects {
+
+    // Utility holder of message subjects; not instantiable.
+    private GossipDeviceStoreMessageSubjects() {}
+
+    // Replication subjects: state changes pushed between peers.
+    public static final MessageSubject DEVICE_UPDATE = new MessageSubject("peer-device-update");
+    public static final MessageSubject DEVICE_OFFLINE = new MessageSubject("peer-device-offline");
+    public static final MessageSubject DEVICE_REMOVE_REQ = new MessageSubject("peer-device-remove-request");
+    public static final MessageSubject DEVICE_REMOVED = new MessageSubject("peer-device-removed");
+    public static final MessageSubject PORT_UPDATE = new MessageSubject("peer-port-update");
+    public static final MessageSubject PORT_STATUS_UPDATE = new MessageSubject("peer-port-status-update");
+
+    // Anti-entropy subjects.
+    public static final MessageSubject DEVICE_ADVERTISE = new MessageSubject("peer-device-advertisements");
+    // to be used with 3-way anti-entropy process
+    public static final MessageSubject DEVICE_REQUEST = new MessageSubject("peer-device-request");
+
+    // Network elements injected (not discovered) by ConfigProvider
+    public static final MessageSubject DEVICE_INJECTED = new MessageSubject("peer-device-injected");
+    public static final MessageSubject PORT_INJECTED = new MessageSubject("peer-port-injected");
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceEvent.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceEvent.java
new file mode 100644
index 00000000..6916a3ed
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceEvent.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.device.DeviceDescription;
+import org.onosproject.net.provider.ProviderId;
+import org.onosproject.store.impl.Timestamped;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Information published by GossipDeviceStore to notify peers of a device
+ * change event.
+ */
+public class InternalDeviceEvent {
+
+    private final ProviderId providerId;
+    private final DeviceId deviceId;
+    private final Timestamped<DeviceDescription> deviceDescription;
+
+    /**
+     * Creates an event carrying a timestamped device description from the
+     * given provider for peer replication.
+     */
+    protected InternalDeviceEvent(
+            ProviderId providerId,
+            DeviceId deviceId,
+            Timestamped<DeviceDescription> deviceDescription) {
+        this.providerId = providerId;
+        this.deviceId = deviceId;
+        this.deviceDescription = deviceDescription;
+    }
+
+    /** Returns the identifier of the device this event concerns. */
+    public DeviceId deviceId() {
+        return deviceId;
+    }
+
+    /** Returns the provider that produced the description. */
+    public ProviderId providerId() {
+        return providerId;
+    }
+
+    /** Returns the timestamped device description being replicated. */
+    public Timestamped<DeviceDescription> deviceDescription() {
+        return deviceDescription;
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(getClass())
+                .add("providerId", providerId)
+                .add("deviceId", deviceId)
+                .add("deviceDescription", deviceDescription)
+                .toString();
+    }
+
+    // No-arg constructor for serializer use; leaves all fields null.
+    protected InternalDeviceEvent() {
+        this.providerId = null;
+        this.deviceId = null;
+        this.deviceDescription = null;
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceEventSerializer.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceEventSerializer.java
new file mode 100644
index 00000000..d5fbde7e
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceEventSerializer.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.device.DeviceDescription;
+import org.onosproject.net.provider.ProviderId;
+import org.onosproject.store.impl.Timestamped;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+
+/**
+ * Kryo serializer that encodes and decodes {@link InternalDeviceEvent}
+ * instances field by field.
+ */
+public class InternalDeviceEventSerializer extends Serializer<InternalDeviceEvent> {
+
+    /**
+     * Creates a serializer for {@link InternalDeviceEvent}.
+     */
+    public InternalDeviceEventSerializer() {
+        // null events are never serialized
+        super(false);
+    }
+
+    @Override
+    public void write(Kryo kryo, Output output, InternalDeviceEvent event) {
+        // Field order here must mirror the read order in read(...).
+        kryo.writeClassAndObject(output, event.providerId());
+        kryo.writeClassAndObject(output, event.deviceId());
+        kryo.writeClassAndObject(output, event.deviceDescription());
+    }
+
+    @Override
+    public InternalDeviceEvent read(Kryo kryo, Input input,
+                                    Class<InternalDeviceEvent> type) {
+        final ProviderId providerId = (ProviderId) kryo.readClassAndObject(input);
+        final DeviceId deviceId = (DeviceId) kryo.readClassAndObject(input);
+        @SuppressWarnings("unchecked")
+        final Timestamped<DeviceDescription> description =
+                (Timestamped<DeviceDescription>) kryo.readClassAndObject(input);
+        return new InternalDeviceEvent(providerId, deviceId, description);
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceOfflineEvent.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceOfflineEvent.java
new file mode 100644
index 00000000..0546c139
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceOfflineEvent.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import org.onosproject.net.DeviceId;
+import org.onosproject.store.Timestamp;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Information published by GossipDeviceStore to notify peers of a device
+ * going offline.
+ */
+public class InternalDeviceOfflineEvent {
+
+    private final DeviceId deviceId;
+    private final Timestamp timestamp;
+
+    /**
+     * Creates a InternalDeviceOfflineEvent.
+     *
+     * @param deviceId identifier of device going offline.
+     * @param timestamp timestamp of when the device went offline.
+     */
+    public InternalDeviceOfflineEvent(DeviceId deviceId, Timestamp timestamp) {
+        this.deviceId = deviceId;
+        this.timestamp = timestamp;
+    }
+
+    /**
+     * Returns the identifier of the device that went offline.
+     *
+     * @return device identifier
+     */
+    public DeviceId deviceId() {
+        return deviceId;
+    }
+
+    /**
+     * Returns the timestamp of when the device went offline.
+     *
+     * @return offline timestamp
+     */
+    public Timestamp timestamp() {
+        return timestamp;
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(getClass())
+                .add("deviceId", deviceId)
+                .add("timestamp", timestamp)
+                .toString();
+    }
+
+    // Default-constructs with null fields; used only by the Kryo
+    // serializer during deserialization.
+    @SuppressWarnings("unused")
+    private InternalDeviceOfflineEvent() {
+        deviceId = null;
+        timestamp = null;
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceOfflineEventSerializer.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceOfflineEventSerializer.java
new file mode 100644
index 00000000..7f3c7bcf
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceOfflineEventSerializer.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import org.onosproject.net.DeviceId;
+import org.onosproject.store.Timestamp;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+
+/**
+ * Kryo serializer that encodes and decodes {@link InternalDeviceOfflineEvent}
+ * instances field by field.
+ */
+public class InternalDeviceOfflineEventSerializer extends Serializer<InternalDeviceOfflineEvent> {
+
+    /**
+     * Creates a serializer for {@link InternalDeviceOfflineEvent}.
+     */
+    public InternalDeviceOfflineEventSerializer() {
+        // null events are never serialized
+        super(false);
+    }
+
+    @Override
+    public void write(Kryo kryo, Output output, InternalDeviceOfflineEvent event) {
+        // Field order here must mirror the read order in read(...).
+        kryo.writeClassAndObject(output, event.deviceId());
+        kryo.writeClassAndObject(output, event.timestamp());
+    }
+
+    @Override
+    public InternalDeviceOfflineEvent read(Kryo kryo, Input input,
+                                           Class<InternalDeviceOfflineEvent> type) {
+        // Constructor arguments are evaluated left-to-right, matching write order.
+        return new InternalDeviceOfflineEvent(
+                (DeviceId) kryo.readClassAndObject(input),
+                (Timestamp) kryo.readClassAndObject(input));
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceRemovedEvent.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceRemovedEvent.java
new file mode 100644
index 00000000..e9f4f06a
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalDeviceRemovedEvent.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import org.onosproject.net.DeviceId;
+import org.onosproject.store.Timestamp;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Information published by GossipDeviceStore to notify peers of a device
+ * being administratively removed.
+ */
+public class InternalDeviceRemovedEvent {
+
+    private final DeviceId deviceId;
+    private final Timestamp timestamp;
+
+    /**
+     * Creates a InternalDeviceRemovedEvent.
+     *
+     * @param deviceId identifier of the removed device.
+     * @param timestamp timestamp of when the device was administratively removed.
+     */
+    public InternalDeviceRemovedEvent(DeviceId deviceId, Timestamp timestamp) {
+        this.deviceId = deviceId;
+        this.timestamp = timestamp;
+    }
+
+    /**
+     * Returns the identifier of the removed device.
+     *
+     * @return device identifier
+     */
+    public DeviceId deviceId() {
+        return deviceId;
+    }
+
+    /**
+     * Returns the timestamp of when the device was removed.
+     *
+     * @return removal timestamp
+     */
+    public Timestamp timestamp() {
+        return timestamp;
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(getClass())
+                .add("deviceId", deviceId)
+                .add("timestamp", timestamp)
+                .toString();
+    }
+
+    // Default-constructs with null fields; used only by the Kryo
+    // serializer during deserialization.
+    @SuppressWarnings("unused")
+    private InternalDeviceRemovedEvent() {
+        deviceId = null;
+        timestamp = null;
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalPortEvent.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalPortEvent.java
new file mode 100644
index 00000000..f92fb115
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalPortEvent.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import java.util.List;
+
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.device.PortDescription;
+import org.onosproject.net.provider.ProviderId;
+import org.onosproject.store.impl.Timestamped;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Information published by GossipDeviceStore to notify peers of a port
+ * change event.
+ */
+public class InternalPortEvent {
+
+    private final ProviderId providerId;
+    private final DeviceId deviceId;
+    private final Timestamped<List<PortDescription>> portDescriptions;
+
+    /**
+     * Creates an InternalPortEvent.
+     *
+     * @param providerId provider which observed the port change
+     * @param deviceId identifier of the device whose ports changed
+     * @param portDescriptions timestamped descriptions of the ports
+     */
+    protected InternalPortEvent(
+            ProviderId providerId,
+            DeviceId deviceId,
+            Timestamped<List<PortDescription>> portDescriptions) {
+        this.providerId = providerId;
+        this.deviceId = deviceId;
+        this.portDescriptions = portDescriptions;
+    }
+
+    /**
+     * Returns the identifier of the device whose ports changed.
+     *
+     * @return device identifier
+     */
+    public DeviceId deviceId() {
+        return deviceId;
+    }
+
+    /**
+     * Returns the provider which observed the port change.
+     *
+     * @return provider identifier
+     */
+    public ProviderId providerId() {
+        return providerId;
+    }
+
+    /**
+     * Returns the timestamped port descriptions carried by this event.
+     *
+     * @return timestamped list of port descriptions
+     */
+    public Timestamped<List<PortDescription>> portDescriptions() {
+        return portDescriptions;
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(getClass())
+                .add("providerId", providerId)
+                .add("deviceId", deviceId)
+                .add("portDescriptions", portDescriptions)
+                .toString();
+    }
+
+    // Default-constructs with all-null fields; used only by the Kryo
+    // serializer during deserialization.
+    protected InternalPortEvent() {
+        this.providerId = null;
+        this.deviceId = null;
+        this.portDescriptions = null;
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalPortEventSerializer.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalPortEventSerializer.java
new file mode 100644
index 00000000..0acd703f
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalPortEventSerializer.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import java.util.List;
+
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.device.PortDescription;
+import org.onosproject.net.provider.ProviderId;
+import org.onosproject.store.impl.Timestamped;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+
+/**
+ * Kryo serializer that encodes and decodes {@link InternalPortEvent}
+ * instances field by field.
+ */
+public class InternalPortEventSerializer extends Serializer<InternalPortEvent> {
+
+    /**
+     * Creates a serializer for {@link InternalPortEvent}.
+     */
+    public InternalPortEventSerializer() {
+        // null events are never serialized
+        super(false);
+    }
+
+    @Override
+    public void write(Kryo kryo, Output output, InternalPortEvent event) {
+        // Field order here must mirror the read order in read(...).
+        kryo.writeClassAndObject(output, event.providerId());
+        kryo.writeClassAndObject(output, event.deviceId());
+        kryo.writeClassAndObject(output, event.portDescriptions());
+    }
+
+    @Override
+    public InternalPortEvent read(Kryo kryo, Input input,
+                                  Class<InternalPortEvent> type) {
+        final ProviderId providerId = (ProviderId) kryo.readClassAndObject(input);
+        final DeviceId deviceId = (DeviceId) kryo.readClassAndObject(input);
+        @SuppressWarnings("unchecked")
+        final Timestamped<List<PortDescription>> descriptions =
+                (Timestamped<List<PortDescription>>) kryo.readClassAndObject(input);
+        return new InternalPortEvent(providerId, deviceId, descriptions);
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalPortStatusEvent.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalPortStatusEvent.java
new file mode 100644
index 00000000..f1781693
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalPortStatusEvent.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.device.PortDescription;
+import org.onosproject.net.provider.ProviderId;
+import org.onosproject.store.impl.Timestamped;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Information published by GossipDeviceStore to notify peers of a port
+ * status change event.
+ */
+public class InternalPortStatusEvent {
+
+    private final ProviderId providerId;
+    private final DeviceId deviceId;
+    private final Timestamped<PortDescription> portDescription;
+
+    /**
+     * Creates an InternalPortStatusEvent.
+     *
+     * @param providerId provider which observed the status change
+     * @param deviceId identifier of the device the port belongs to
+     * @param portDescription timestamped description of the port
+     */
+    protected InternalPortStatusEvent(
+            ProviderId providerId,
+            DeviceId deviceId,
+            Timestamped<PortDescription> portDescription) {
+        this.providerId = providerId;
+        this.deviceId = deviceId;
+        this.portDescription = portDescription;
+    }
+
+    /**
+     * Returns the identifier of the device the port belongs to.
+     *
+     * @return device identifier
+     */
+    public DeviceId deviceId() {
+        return deviceId;
+    }
+
+    /**
+     * Returns the provider which observed the status change.
+     *
+     * @return provider identifier
+     */
+    public ProviderId providerId() {
+        return providerId;
+    }
+
+    /**
+     * Returns the timestamped port description carried by this event.
+     *
+     * @return timestamped port description
+     */
+    public Timestamped<PortDescription> portDescription() {
+        return portDescription;
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(getClass())
+                .add("providerId", providerId)
+                .add("deviceId", deviceId)
+                .add("portDescription", portDescription)
+                .toString();
+    }
+
+    // Default-constructs with all-null fields; used only by the Kryo
+    // serializer during deserialization.
+    protected InternalPortStatusEvent() {
+        this.providerId = null;
+        this.deviceId = null;
+        this.portDescription = null;
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalPortStatusEventSerializer.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalPortStatusEventSerializer.java
new file mode 100644
index 00000000..32ee3915
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/InternalPortStatusEventSerializer.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.device.PortDescription;
+import org.onosproject.net.provider.ProviderId;
+import org.onosproject.store.impl.Timestamped;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+
+/**
+ * Kryo serializer that encodes and decodes {@link InternalPortStatusEvent}
+ * instances field by field.
+ */
+public class InternalPortStatusEventSerializer extends Serializer<InternalPortStatusEvent> {
+
+    /**
+     * Creates a serializer for {@link InternalPortStatusEvent}.
+     */
+    public InternalPortStatusEventSerializer() {
+        // null events are never serialized
+        super(false);
+    }
+
+    @Override
+    public void write(Kryo kryo, Output output, InternalPortStatusEvent event) {
+        // Field order here must mirror the read order in read(...).
+        kryo.writeClassAndObject(output, event.providerId());
+        kryo.writeClassAndObject(output, event.deviceId());
+        kryo.writeClassAndObject(output, event.portDescription());
+    }
+
+    @Override
+    public InternalPortStatusEvent read(Kryo kryo, Input input,
+                                        Class<InternalPortStatusEvent> type) {
+        final ProviderId providerId = (ProviderId) kryo.readClassAndObject(input);
+        final DeviceId deviceId = (DeviceId) kryo.readClassAndObject(input);
+        @SuppressWarnings("unchecked")
+        final Timestamped<PortDescription> description =
+                (Timestamped<PortDescription>) kryo.readClassAndObject(input);
+        return new InternalPortStatusEvent(providerId, deviceId, description);
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/PortFragmentId.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/PortFragmentId.java
new file mode 100644
index 00000000..ed0ccaa1
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/PortFragmentId.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import java.util.Objects;
+
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.PortNumber;
+import org.onosproject.net.provider.ProviderId;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Identifier for PortDescription from a Provider.
+ */
+public final class PortFragmentId {
+    public final ProviderId providerId;
+    public final DeviceId deviceId;
+    public final PortNumber portNumber;
+
+    /**
+     * Creates a port fragment identifier.
+     *
+     * @param deviceId device to which the port belongs
+     * @param providerId provider which supplied the port description
+     * @param portNumber number of the port on the device
+     */
+    public PortFragmentId(DeviceId deviceId, ProviderId providerId,
+                          PortNumber portNumber) {
+        this.providerId = providerId;
+        this.deviceId = deviceId;
+        this.portNumber = portNumber;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(providerId, deviceId, portNumber);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (!(obj instanceof PortFragmentId)) {
+            return false;
+        }
+        PortFragmentId that = (PortFragmentId) obj;
+        return Objects.equals(this.deviceId, that.deviceId) &&
+               Objects.equals(this.portNumber, that.portNumber) &&
+               Objects.equals(this.providerId, that.providerId);
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(getClass())
+                .add("providerId", providerId)
+                .add("deviceId", deviceId)
+                .add("portNumber", portNumber)
+                .toString();
+    }
+
+    // Default-constructs with null fields; used only by the Kryo
+    // serializer during deserialization.
+    @SuppressWarnings("unused")
+    private PortFragmentId() {
+        this.providerId = null;
+        this.deviceId = null;
+        this.portNumber = null;
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/PortInjectedEvent.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/PortInjectedEvent.java
new file mode 100644
index 00000000..c80f8105
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/PortInjectedEvent.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import com.google.common.base.MoreObjects;
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.device.PortDescription;
+import org.onosproject.net.provider.ProviderId;
+
+import java.util.List;
+
+/**
+ * Information published to notify peers of ports being injected for
+ * a device.
+ */
+public class PortInjectedEvent {
+
+    // final, matching the immutability of the sibling Internal*Event classes
+    private final ProviderId providerId;
+    private final DeviceId deviceId;
+    private final List<PortDescription> portDescriptions;
+
+    /**
+     * Creates a PortInjectedEvent.
+     *
+     * @param providerId provider which supplied the port descriptions
+     * @param deviceId identifier of the device the ports belong to
+     * @param portDescriptions descriptions of the injected ports
+     */
+    protected PortInjectedEvent(ProviderId providerId, DeviceId deviceId, List<PortDescription> portDescriptions) {
+        this.providerId = providerId;
+        this.deviceId = deviceId;
+        this.portDescriptions = portDescriptions;
+    }
+
+    /**
+     * Returns the identifier of the device the ports belong to.
+     *
+     * @return device identifier
+     */
+    public DeviceId deviceId() {
+        return deviceId;
+    }
+
+    /**
+     * Returns the provider which supplied the port descriptions.
+     *
+     * @return provider identifier
+     */
+    public ProviderId providerId() {
+        return providerId;
+    }
+
+    /**
+     * Returns the descriptions of the injected ports.
+     *
+     * @return list of port descriptions
+     */
+    public List<PortDescription> portDescriptions() {
+        return portDescriptions;
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(getClass())
+                .add("providerId", providerId)
+                .add("deviceId", deviceId)
+                .add("portDescriptions", portDescriptions)
+                .toString();
+    }
+
+    // Default-constructs with all-null fields; used only by the Kryo
+    // serializer during deserialization.
+    protected PortInjectedEvent() {
+        this.providerId = null;
+        this.deviceId = null;
+        this.portDescriptions = null;
+    }
+
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/PortKey.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/PortKey.java
new file mode 100644
index 00000000..62b09952
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/PortKey.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.device.impl;
+
+import java.util.Objects;
+
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.PortNumber;
+import org.onosproject.net.provider.ProviderId;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Key for PortDescriptions in ECDeviceStore.
+ */
+public class PortKey {
+    private final ProviderId providerId;
+    private final DeviceId deviceId;
+    private final PortNumber portNumber;
+
+    /**
+     * Creates a port key.
+     *
+     * @param providerId provider which supplied the port description
+     * @param deviceId device to which the port belongs
+     * @param portNumber number of the port on the device
+     */
+    public PortKey(ProviderId providerId, DeviceId deviceId, PortNumber portNumber) {
+        this.providerId = providerId;
+        this.deviceId = deviceId;
+        this.portNumber = portNumber;
+    }
+
+    /**
+     * Returns the provider identifier component of this key.
+     *
+     * @return provider identifier
+     */
+    public ProviderId providerId() {
+        return providerId;
+    }
+
+    /**
+     * Returns the device identifier component of this key.
+     *
+     * @return device identifier
+     */
+    public DeviceId deviceId() {
+        return deviceId;
+    }
+
+    /**
+     * Returns the port number component of this key.
+     *
+     * @return port number
+     */
+    public PortNumber portNumber() {
+        return portNumber;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(providerId, deviceId, portNumber);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (!(obj instanceof PortKey)) {
+            return false;
+        }
+        PortKey that = (PortKey) obj;
+        return Objects.equals(this.deviceId, that.deviceId) &&
+                Objects.equals(this.providerId, that.providerId) &&
+                Objects.equals(this.portNumber, that.portNumber);
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(getClass())
+                .add("providerId", providerId)
+                .add("deviceId", deviceId)
+                .add("portNumber", portNumber)
+                .toString();
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/package-info.java
new file mode 100644
index 00000000..29df62ec
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/device/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of distributed device store using p2p synchronization protocol.
+ */
+package org.onosproject.store.device.impl;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/AntiEntropyAdvertisement.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/AntiEntropyAdvertisement.java
new file mode 100644
index 00000000..d783fe22
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/AntiEntropyAdvertisement.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.ecmap;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.ImmutableMap;
+
+import org.onosproject.cluster.NodeId;
+
+import java.util.Map;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Anti-entropy advertisement message for eventually consistent map.
+ * Carries a per-key digest of the sender's entries so that a peer can
+ * detect divergence.
+ */
+public class AntiEntropyAdvertisement<K> {
+
+    private final NodeId sender;
+    private final Map<K, MapValue.Digest> digest;
+
+    /**
+     * Creates a new anti entropy advertisement message.
+     *
+     * @param sender node identifier of the advertising peer
+     * @param digest per-key digests of the sender's map entries
+     */
+    public AntiEntropyAdvertisement(NodeId sender,
+                                    Map<K, MapValue.Digest> digest) {
+        this.sender = checkNotNull(sender);
+        // Snapshot the digests so the advertisement is immutable.
+        this.digest = ImmutableMap.copyOf(checkNotNull(digest));
+    }
+
+    /**
+     * Returns the sender's node ID.
+     *
+     * @return node identifier of the advertising peer
+     */
+    public NodeId sender() {
+        return sender;
+    }
+
+    /**
+     * Returns the digest for map entries.
+     *
+     * @return immutable mapping from key to associated digest
+     */
+    public Map<K, MapValue.Digest> digest() {
+        return digest;
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(getClass())
+                .add("sender", sender)
+                .add("totalEntries", digest.size())
+                .toString();
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/EventuallyConsistentMapBuilderImpl.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/EventuallyConsistentMapBuilderImpl.java
new file mode 100644
index 00000000..a553ffff
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/EventuallyConsistentMapBuilderImpl.java
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.ecmap;
+
+import org.onlab.util.KryoNamespace;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.store.Timestamp;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.service.EventuallyConsistentMap;
+import org.onosproject.store.service.EventuallyConsistentMapBuilder;
+
+import java.util.Collection;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BiFunction;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Eventually consistent map builder.
+ */
+public class EventuallyConsistentMapBuilderImpl<K, V>
+ implements EventuallyConsistentMapBuilder<K, V> {
+ private final ClusterService clusterService;
+ private final ClusterCommunicationService clusterCommunicator;
+
+ private String name;
+ private KryoNamespace.Builder serializerBuilder;
+ private ExecutorService eventExecutor;
+ private ExecutorService communicationExecutor;
+ private ScheduledExecutorService backgroundExecutor;
+ private BiFunction<K, V, Timestamp> timestampProvider;
+ private BiFunction<K, V, Collection<NodeId>> peerUpdateFunction;
+ private boolean tombstonesDisabled = false;
+ private long antiEntropyPeriod = 5;
+ private TimeUnit antiEntropyTimeUnit = TimeUnit.SECONDS;
+ private boolean convergeFaster = false;
+ private boolean persistent = false;
+
+ /**
+ * Creates a new eventually consistent map builder.
+ *
+ * @param clusterService cluster service
+ * @param clusterCommunicator cluster communication service
+ */
+ public EventuallyConsistentMapBuilderImpl(ClusterService clusterService,
+ ClusterCommunicationService clusterCommunicator) {
+ this.clusterService = checkNotNull(clusterService);
+ this.clusterCommunicator = checkNotNull(clusterCommunicator);
+ }
+
+ @Override
+ public EventuallyConsistentMapBuilder<K, V> withName(String name) {
+ this.name = checkNotNull(name);
+ return this;
+ }
+
+ @Override
+ public EventuallyConsistentMapBuilder<K, V> withSerializer(
+ KryoNamespace.Builder serializerBuilder) {
+ this.serializerBuilder = checkNotNull(serializerBuilder);
+ return this;
+ }
+
+ @Override
+ public EventuallyConsistentMapBuilder<K, V> withTimestampProvider(
+ BiFunction<K, V, Timestamp> timestampProvider) {
+ this.timestampProvider = checkNotNull(timestampProvider);
+ return this;
+ }
+
+ @Override
+ public EventuallyConsistentMapBuilder<K, V> withEventExecutor(ExecutorService executor) {
+ this.eventExecutor = checkNotNull(executor);
+ return this;
+ }
+
+ @Override
+ public EventuallyConsistentMapBuilder<K, V> withCommunicationExecutor(
+ ExecutorService executor) {
+ communicationExecutor = checkNotNull(executor);
+ return this;
+ }
+
+ @Override
+ public EventuallyConsistentMapBuilder<K, V> withBackgroundExecutor(ScheduledExecutorService executor) {
+ this.backgroundExecutor = checkNotNull(executor);
+ return this;
+ }
+
+ @Override
+ public EventuallyConsistentMapBuilder<K, V> withPeerUpdateFunction(
+ BiFunction<K, V, Collection<NodeId>> peerUpdateFunction) {
+ this.peerUpdateFunction = checkNotNull(peerUpdateFunction);
+ return this;
+ }
+
+ @Override
+ public EventuallyConsistentMapBuilder<K, V> withTombstonesDisabled() {
+ tombstonesDisabled = true;
+ return this;
+ }
+
+ @Override
+ public EventuallyConsistentMapBuilder<K, V> withAntiEntropyPeriod(long period, TimeUnit unit) {
+ checkArgument(period > 0, "anti-entropy period must be greater than 0");
+ antiEntropyPeriod = period;
+ antiEntropyTimeUnit = checkNotNull(unit);
+ return this;
+ }
+
+ @Override
+ public EventuallyConsistentMapBuilder<K, V> withFasterConvergence() {
+ convergeFaster = true;
+ return this;
+ }
+
+ @Override
+ public EventuallyConsistentMapBuilder<K, V> withPersistence() {
+ persistent = true;
+ return this;
+ }
+
+ @Override
+ public EventuallyConsistentMap<K, V> build() {
+ checkNotNull(name, "name is a mandatory parameter");
+ checkNotNull(serializerBuilder, "serializerBuilder is a mandatory parameter");
+ checkNotNull(timestampProvider, "timestampProvider is a mandatory parameter");
+
+ return new EventuallyConsistentMapImpl<>(name,
+ clusterService,
+ clusterCommunicator,
+ serializerBuilder,
+ timestampProvider,
+ peerUpdateFunction,
+ eventExecutor,
+ communicationExecutor,
+ backgroundExecutor,
+ tombstonesDisabled,
+ antiEntropyPeriod,
+ antiEntropyTimeUnit,
+ convergeFaster,
+ persistent);
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/EventuallyConsistentMapImpl.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/EventuallyConsistentMapImpl.java
new file mode 100644
index 00000000..2859b62f
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/EventuallyConsistentMapImpl.java
@@ -0,0 +1,678 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.ecmap;
+
+import com.google.common.collect.Collections2;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.onlab.util.AbstractAccumulator;
+import org.onlab.util.KryoNamespace;
+import org.onlab.util.SlidingWindowCounter;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.ControllerNode;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.store.Timestamp;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.cluster.messaging.MessageSubject;
+import org.onosproject.store.impl.LogicalTimestamp;
+import org.onosproject.store.service.WallClockTimestamp;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.serializers.KryoSerializer;
+import org.onosproject.store.service.EventuallyConsistentMap;
+import org.onosproject.store.service.EventuallyConsistentMapEvent;
+import org.onosproject.store.service.EventuallyConsistentMapListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.onosproject.store.service.EventuallyConsistentMapEvent.Type.PUT;
+import static org.onosproject.store.service.EventuallyConsistentMapEvent.Type.REMOVE;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.Timer;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BiFunction;
+import java.util.stream.Collectors;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.concurrent.Executors.newSingleThreadScheduledExecutor;
+import static org.onlab.util.BoundedThreadPool.newFixedThreadPool;
+import static org.onlab.util.Tools.groupedThreads;
+
+/**
+ * Distributed Map implementation which uses optimistic replication and gossip
+ * based techniques to provide an eventually consistent data store.
+ */
+public class EventuallyConsistentMapImpl<K, V>
+ implements EventuallyConsistentMap<K, V> {
+
+ private static final Logger log = LoggerFactory.getLogger(EventuallyConsistentMapImpl.class);
+
+ private final Map<K, MapValue<V>> items;
+
+ private final ClusterService clusterService;
+ private final ClusterCommunicationService clusterCommunicator;
+ private final KryoSerializer serializer;
+ private final NodeId localNodeId;
+
+ private final BiFunction<K, V, Timestamp> timestampProvider;
+
+ private final MessageSubject updateMessageSubject;
+ private final MessageSubject antiEntropyAdvertisementSubject;
+
+ private final Set<EventuallyConsistentMapListener<K, V>> listeners
+ = Sets.newCopyOnWriteArraySet();
+
+ private final ExecutorService executor;
+ private final ScheduledExecutorService backgroundExecutor;
+ private final BiFunction<K, V, Collection<NodeId>> peerUpdateFunction;
+
+ private final ExecutorService communicationExecutor;
+ private final Map<NodeId, EventAccumulator> senderPending;
+
+ private final String mapName;
+
+ private volatile boolean destroyed = false;
+ private static final String ERROR_DESTROYED = " map is already destroyed";
+ private final String destroyedMessage;
+
+ private static final String ERROR_NULL_KEY = "Key cannot be null";
+ private static final String ERROR_NULL_VALUE = "Null values are not allowed";
+
+ private final long initialDelaySec = 5;
+ private final boolean lightweightAntiEntropy;
+ private final boolean tombstonesDisabled;
+
+ private static final int WINDOW_SIZE = 5;
+ private static final int HIGH_LOAD_THRESHOLD = 0;
+ private static final int LOAD_WINDOW = 2;
+ private SlidingWindowCounter counter = new SlidingWindowCounter(WINDOW_SIZE);
+
+ private final boolean persistent;
+ private final PersistentStore<K, V> persistentStore;
+
+ /**
+ * Creates a new eventually consistent map shared amongst multiple instances.
+ * <p>
+ * See {@link org.onosproject.store.service.EventuallyConsistentMapBuilder}
+ * for more description of the parameters expected by the map.
+ * </p>
+ *
+ * @param mapName a String identifier for the map.
+ * @param clusterService the cluster service
+ * @param clusterCommunicator the cluster communications service
+ * @param serializerBuilder a Kryo namespace builder that can serialize
+ * both K and V
+ * @param timestampProvider provider of timestamps for K and V
+ * @param peerUpdateFunction function that provides a set of nodes to immediately
+ * update to when there writes to the map
+ * @param eventExecutor executor to use for processing incoming
+ * events from peers
+ * @param communicationExecutor executor to use for sending events to peers
+ * @param backgroundExecutor executor to use for background anti-entropy
+ * tasks
+ * @param tombstonesDisabled true if this map should not maintain
+ * tombstones
+ * @param antiEntropyPeriod period that the anti-entropy task should run
+ * @param antiEntropyTimeUnit time unit for anti-entropy period
+ * @param convergeFaster make anti-entropy try to converge faster
+ * @param persistent persist data to disk
+ */
+ EventuallyConsistentMapImpl(String mapName,
+ ClusterService clusterService,
+ ClusterCommunicationService clusterCommunicator,
+ KryoNamespace.Builder serializerBuilder,
+ BiFunction<K, V, Timestamp> timestampProvider,
+ BiFunction<K, V, Collection<NodeId>> peerUpdateFunction,
+ ExecutorService eventExecutor,
+ ExecutorService communicationExecutor,
+ ScheduledExecutorService backgroundExecutor,
+ boolean tombstonesDisabled,
+ long antiEntropyPeriod,
+ TimeUnit antiEntropyTimeUnit,
+ boolean convergeFaster,
+ boolean persistent) {
+ this.mapName = mapName;
+ items = Maps.newConcurrentMap();
+ senderPending = Maps.newConcurrentMap();
+ destroyedMessage = mapName + ERROR_DESTROYED;
+
+ this.clusterService = clusterService;
+ this.clusterCommunicator = clusterCommunicator;
+ this.localNodeId = clusterService.getLocalNode().id();
+
+ this.serializer = createSerializer(serializerBuilder);
+
+ this.timestampProvider = timestampProvider;
+
+ if (peerUpdateFunction != null) {
+ this.peerUpdateFunction = peerUpdateFunction;
+ } else {
+ this.peerUpdateFunction = (key, value) -> clusterService.getNodes().stream()
+ .map(ControllerNode::id)
+ .filter(nodeId -> !nodeId.equals(localNodeId))
+ .collect(Collectors.toList());
+ }
+
+ if (eventExecutor != null) {
+ this.executor = eventExecutor;
+ } else {
+ // should be a normal executor; it's used for receiving messages
+ this.executor =
+ Executors.newFixedThreadPool(8, groupedThreads("onos/ecm", mapName + "-fg-%d"));
+ }
+
+ if (communicationExecutor != null) {
+ this.communicationExecutor = communicationExecutor;
+ } else {
+ // sending executor; should be capped
+ //TODO this probably doesn't need to be bounded anymore
+ this.communicationExecutor =
+ newFixedThreadPool(8, groupedThreads("onos/ecm", mapName + "-publish-%d"));
+ }
+
+ this.persistent = persistent;
+
+ if (this.persistent) {
+ String dataDirectory = System.getProperty("karaf.data", "./data");
+ String filename = dataDirectory + "/" + "mapdb-ecm-" + mapName;
+
+ ExecutorService dbExecutor =
+ newFixedThreadPool(1, groupedThreads("onos/ecm", mapName + "-dbwriter"));
+
+ persistentStore = new MapDbPersistentStore<>(filename, dbExecutor, serializer);
+ persistentStore.readInto(items);
+ } else {
+ this.persistentStore = null;
+ }
+
+ if (backgroundExecutor != null) {
+ this.backgroundExecutor = backgroundExecutor;
+ } else {
+ this.backgroundExecutor =
+ newSingleThreadScheduledExecutor(groupedThreads("onos/ecm", mapName + "-bg-%d"));
+ }
+
+ // start anti-entropy thread
+ this.backgroundExecutor.scheduleAtFixedRate(this::sendAdvertisement,
+ initialDelaySec, antiEntropyPeriod,
+ antiEntropyTimeUnit);
+
+ updateMessageSubject = new MessageSubject("ecm-" + mapName + "-update");
+ clusterCommunicator.addSubscriber(updateMessageSubject,
+ serializer::decode,
+ this::processUpdates,
+ this.executor);
+
+ antiEntropyAdvertisementSubject = new MessageSubject("ecm-" + mapName + "-anti-entropy");
+ clusterCommunicator.addSubscriber(antiEntropyAdvertisementSubject,
+ serializer::decode,
+ this::handleAntiEntropyAdvertisement,
+ this.backgroundExecutor);
+
+ this.tombstonesDisabled = tombstonesDisabled;
+ this.lightweightAntiEntropy = !convergeFaster;
+ }
+
+ private KryoSerializer createSerializer(KryoNamespace.Builder builder) {
+ return new KryoSerializer() {
+ @Override
+ protected void setupKryoPool() {
+ // Add the map's internal helper classes to the user-supplied serializer
+ serializerPool = builder
+ .register(KryoNamespaces.BASIC)
+ .nextId(KryoNamespaces.BEGIN_USER_CUSTOM_ID)
+ .register(LogicalTimestamp.class)
+ .register(WallClockTimestamp.class)
+ .register(AntiEntropyAdvertisement.class)
+ .register(UpdateEntry.class)
+ .register(MapValue.class)
+ .register(MapValue.Digest.class)
+ .build();
+ }
+ };
+ }
+
+ @Override
+ public int size() {
+ checkState(!destroyed, destroyedMessage);
+ // TODO: Maintain a separate counter for tracking live elements in map.
+ return Maps.filterValues(items, MapValue::isAlive).size();
+ }
+
+ @Override
+ public boolean isEmpty() {
+ checkState(!destroyed, destroyedMessage);
+ return size() == 0;
+ }
+
+ @Override
+ public boolean containsKey(K key) {
+ checkState(!destroyed, destroyedMessage);
+ checkNotNull(key, ERROR_NULL_KEY);
+ return get(key) != null;
+ }
+
+ @Override
+ public boolean containsValue(V value) {
+ checkState(!destroyed, destroyedMessage);
+ checkNotNull(value, ERROR_NULL_VALUE);
+ return items.values()
+ .stream()
+ .filter(MapValue::isAlive)
+ .anyMatch(v -> value.equals(v.get()));
+ }
+
+ @Override
+ public V get(K key) {
+ checkState(!destroyed, destroyedMessage);
+ checkNotNull(key, ERROR_NULL_KEY);
+
+ MapValue<V> value = items.get(key);
+ return (value == null || value.isTombstone()) ? null : value.get();
+ }
+
+ @Override
+ public void put(K key, V value) {
+ checkState(!destroyed, destroyedMessage);
+ checkNotNull(key, ERROR_NULL_KEY);
+ checkNotNull(value, ERROR_NULL_VALUE);
+
+ MapValue<V> newValue = new MapValue<>(value, timestampProvider.apply(key, value));
+ if (putInternal(key, newValue)) {
+ notifyPeers(new UpdateEntry<>(key, newValue), peerUpdateFunction.apply(key, value));
+ notifyListeners(new EventuallyConsistentMapEvent<>(mapName, PUT, key, value));
+ }
+ }
+
+ @Override
+ public V remove(K key) {
+ checkState(!destroyed, destroyedMessage);
+ checkNotNull(key, ERROR_NULL_KEY);
+ return removeAndNotify(key, null);
+ }
+
+ @Override
+ public void remove(K key, V value) {
+ checkState(!destroyed, destroyedMessage);
+ checkNotNull(key, ERROR_NULL_KEY);
+ checkNotNull(value, ERROR_NULL_VALUE);
+ removeAndNotify(key, value);
+ }
+
+ private V removeAndNotify(K key, V value) {
+ Timestamp timestamp = timestampProvider.apply(key, value);
+ Optional<MapValue<V>> tombstone = tombstonesDisabled || timestamp == null
+ ? Optional.empty() : Optional.of(MapValue.tombstone(timestamp));
+ MapValue<V> previousValue = removeInternal(key, Optional.ofNullable(value), tombstone);
+ if (previousValue != null) {
+ notifyPeers(new UpdateEntry<>(key, tombstone.orElse(null)),
+ peerUpdateFunction.apply(key, previousValue.get()));
+ if (previousValue.isAlive()) {
+ notifyListeners(new EventuallyConsistentMapEvent<>(mapName, REMOVE, key, previousValue.get()));
+ }
+ }
+ return previousValue != null ? previousValue.get() : null;
+ }
+
+ private MapValue<V> removeInternal(K key, Optional<V> value, Optional<MapValue<V>> tombstone) {
+ checkState(!destroyed, destroyedMessage);
+ checkNotNull(key, ERROR_NULL_KEY);
+ checkNotNull(value, ERROR_NULL_VALUE);
+ tombstone.ifPresent(v -> checkState(v.isTombstone()));
+
+ counter.incrementCount();
+ AtomicBoolean updated = new AtomicBoolean(false);
+ AtomicReference<MapValue<V>> previousValue = new AtomicReference<>();
+ items.compute(key, (k, existing) -> {
+ boolean valueMatches = true;
+ if (value.isPresent() && existing != null && existing.isAlive()) {
+ valueMatches = Objects.equals(value.get(), existing.get());
+ }
+ if (existing == null) {
+ log.debug("ECMap Remove: Existing value for key {} is already null", k);
+ }
+ if (valueMatches) {
+ if (existing == null) {
+ updated.set(tombstone.isPresent());
+ } else {
+ updated.set(!tombstone.isPresent() || tombstone.get().isNewerThan(existing));
+ }
+ }
+ if (updated.get()) {
+ previousValue.set(existing);
+ return tombstone.orElse(null);
+ } else {
+ return existing;
+ }
+ });
+ if (updated.get()) {
+ if (persistent) {
+ if (tombstone.isPresent()) {
+ persistentStore.update(key, tombstone.get());
+ } else {
+ persistentStore.remove(key);
+ }
+ }
+ }
+ return previousValue.get();
+ }
+
+ @Override
+ public V compute(K key, BiFunction<K, V, V> recomputeFunction) {
+ checkState(!destroyed, destroyedMessage);
+ checkNotNull(key, ERROR_NULL_KEY);
+ checkNotNull(recomputeFunction, "Recompute function cannot be null");
+
+ AtomicBoolean updated = new AtomicBoolean(false);
+ AtomicReference<MapValue<V>> previousValue = new AtomicReference<>();
+ MapValue<V> computedValue = items.compute(key, (k, mv) -> {
+ previousValue.set(mv);
+ V newRawValue = recomputeFunction.apply(key, mv == null ? null : mv.get());
+ MapValue<V> newValue = new MapValue<>(newRawValue, timestampProvider.apply(key, newRawValue));
+ if (mv == null || newValue.isNewerThan(mv)) {
+ updated.set(true);
+ return newValue;
+ } else {
+ return mv;
+ }
+ });
+ if (updated.get()) {
+ notifyPeers(new UpdateEntry<>(key, computedValue), peerUpdateFunction.apply(key, computedValue.get()));
+ EventuallyConsistentMapEvent.Type updateType = computedValue.isTombstone() ? REMOVE : PUT;
+ V value = computedValue.isTombstone()
+ ? previousValue.get() == null ? null : previousValue.get().get()
+ : computedValue.get();
+ if (value != null) {
+ notifyListeners(new EventuallyConsistentMapEvent<>(mapName, updateType, key, value));
+ }
+ }
+ return computedValue.get();
+ }
+
+ @Override
+ public void putAll(Map<? extends K, ? extends V> m) {
+ checkState(!destroyed, destroyedMessage);
+ m.forEach(this::put);
+ }
+
+ @Override
+ public void clear() {
+ checkState(!destroyed, destroyedMessage);
+ Maps.filterValues(items, MapValue::isAlive)
+ .forEach((k, v) -> remove(k));
+ }
+
+ @Override
+ public Set<K> keySet() {
+ checkState(!destroyed, destroyedMessage);
+ return Maps.filterValues(items, MapValue::isAlive)
+ .keySet();
+ }
+
+ @Override
+ public Collection<V> values() {
+ checkState(!destroyed, destroyedMessage);
+ return Collections2.transform(Maps.filterValues(items, MapValue::isAlive).values(), MapValue::get);
+ }
+
+ @Override
+ public Set<Map.Entry<K, V>> entrySet() {
+ checkState(!destroyed, destroyedMessage);
+ return Maps.filterValues(items, MapValue::isAlive)
+ .entrySet()
+ .stream()
+ .map(e -> Pair.of(e.getKey(), e.getValue().get()))
+ .collect(Collectors.toSet());
+ }
+
+ /**
+ * Returns true if newValue was accepted i.e. map is updated.
+ * @param key key
+ * @param newValue proposed new value
+ * @return true if update happened; false if map already contains a more recent value for the key
+ */
+ private boolean putInternal(K key, MapValue<V> newValue) {
+ checkState(!destroyed, destroyedMessage);
+ checkNotNull(key, ERROR_NULL_KEY);
+ checkNotNull(newValue, ERROR_NULL_VALUE);
+ checkState(newValue.isAlive());
+ counter.incrementCount();
+ AtomicBoolean updated = new AtomicBoolean(false);
+ items.compute(key, (k, existing) -> {
+ if (existing == null || newValue.isNewerThan(existing)) {
+ updated.set(true);
+ return newValue;
+ }
+ return existing;
+ });
+ if (updated.get() && persistent) {
+ persistentStore.update(key, newValue);
+ }
+ return updated.get();
+ }
+
+ @Override
+ public void addListener(EventuallyConsistentMapListener<K, V> listener) {
+ checkState(!destroyed, destroyedMessage);
+
+ listeners.add(checkNotNull(listener));
+ }
+
+ @Override
+ public void removeListener(EventuallyConsistentMapListener<K, V> listener) {
+ checkState(!destroyed, destroyedMessage);
+
+ listeners.remove(checkNotNull(listener));
+ }
+
+ @Override
+ public void destroy() {
+ destroyed = true;
+
+ executor.shutdown();
+ backgroundExecutor.shutdown();
+ communicationExecutor.shutdown();
+
+ listeners.clear();
+
+ clusterCommunicator.removeSubscriber(updateMessageSubject);
+ clusterCommunicator.removeSubscriber(antiEntropyAdvertisementSubject);
+ }
+
+ private void notifyListeners(EventuallyConsistentMapEvent<K, V> event) {
+ listeners.forEach(listener -> listener.event(event));
+ }
+
+ private void notifyPeers(UpdateEntry<K, V> event, Collection<NodeId> peers) {
+ queueUpdate(event, peers);
+ }
+
+ private void queueUpdate(UpdateEntry<K, V> event, Collection<NodeId> peers) {
+ if (peers == null) {
+ // we have no friends :(
+ return;
+ }
+ peers.forEach(node ->
+ senderPending.computeIfAbsent(node, unusedKey -> new EventAccumulator(node)).add(event)
+ );
+ }
+
+ private boolean underHighLoad() {
+ return counter.get(LOAD_WINDOW) > HIGH_LOAD_THRESHOLD;
+ }
+
+ private void sendAdvertisement() {
+ try {
+ if (underHighLoad() || destroyed) {
+ return;
+ }
+ pickRandomActivePeer().ifPresent(this::sendAdvertisementToPeer);
+ } catch (Exception e) {
+ // Catch all exceptions to avoid scheduled task being suppressed.
+ log.error("Exception thrown while sending advertisement", e);
+ }
+ }
+
+ private Optional<NodeId> pickRandomActivePeer() {
+ List<NodeId> activePeers = clusterService.getNodes()
+ .stream()
+ .map(ControllerNode::id)
+ .filter(id -> !localNodeId.equals(id))
+ .filter(id -> clusterService.getState(id) == ControllerNode.State.ACTIVE)
+ .collect(Collectors.toList());
+ Collections.shuffle(activePeers);
+ return activePeers.isEmpty() ? Optional.empty() : Optional.of(activePeers.get(0));
+ }
+
+ private void sendAdvertisementToPeer(NodeId peer) {
+ clusterCommunicator.unicast(createAdvertisement(),
+ antiEntropyAdvertisementSubject,
+ serializer::encode,
+ peer)
+ .whenComplete((result, error) -> {
+ if (error != null) {
+ log.debug("Failed to send anti-entropy advertisement to {}", peer, error);
+ }
+ });
+ }
+
+ private AntiEntropyAdvertisement<K> createAdvertisement() {
+ return new AntiEntropyAdvertisement<K>(localNodeId,
+ ImmutableMap.copyOf(Maps.transformValues(items, MapValue::digest)));
+ }
+
+ private void handleAntiEntropyAdvertisement(AntiEntropyAdvertisement<K> ad) {
+ if (destroyed || underHighLoad()) {
+ return;
+ }
+ try {
+ log.debug("Received anti-entropy advertisement from {} for {} with {} entries in it",
+ mapName, ad.sender(), ad.digest().size());
+ antiEntropyCheckLocalItems(ad).forEach(this::notifyListeners);
+
+ if (!lightweightAntiEntropy) {
+ // if remote ad has any entries that the local copy is missing, actively sync
+ // TODO: Missing keys is not the way local copy can be behind.
+ if (Sets.difference(ad.digest().keySet(), items.keySet()).size() > 0) {
+ // TODO: Send ad for missing keys and for entries that are stale
+ sendAdvertisementToPeer(ad.sender());
+ }
+ }
+ } catch (Exception e) {
+ log.warn("Error handling anti-entropy advertisement", e);
+ }
+ }
+
+ /**
+ * Processes anti-entropy ad from peer by taking following actions:
+ * 1. If peer has an old entry, updates peer.
+ * 2. If peer indicates an entry is removed and has a more recent
+ * timestamp than the local entry, update local state.
+ */
+ private List<EventuallyConsistentMapEvent<K, V>> antiEntropyCheckLocalItems(
+ AntiEntropyAdvertisement<K> ad) {
+ final List<EventuallyConsistentMapEvent<K, V>> externalEvents = Lists.newLinkedList();
+ final NodeId sender = ad.sender();
+ items.forEach((key, localValue) -> {
+ MapValue.Digest remoteValueDigest = ad.digest().get(key);
+ if (remoteValueDigest == null || localValue.isNewerThan(remoteValueDigest.timestamp())) {
+ // local value is more recent, push to sender
+ queueUpdate(new UpdateEntry<>(key, localValue), ImmutableList.of(sender));
+ }
+ if (remoteValueDigest != null
+ && remoteValueDigest.isNewerThan(localValue.digest())
+ && remoteValueDigest.isTombstone()) {
+ MapValue<V> tombstone = MapValue.tombstone(remoteValueDigest.timestamp());
+ MapValue<V> previousValue = removeInternal(key,
+ Optional.empty(),
+ Optional.of(tombstone));
+ if (previousValue != null && previousValue.isAlive()) {
+ externalEvents.add(new EventuallyConsistentMapEvent<>(mapName, REMOVE, key, previousValue.get()));
+ }
+ }
+ });
+ return externalEvents;
+ }
+
+ private void processUpdates(Collection<UpdateEntry<K, V>> updates) {
+ if (destroyed) {
+ return;
+ }
+ updates.forEach(update -> {
+ final K key = update.key();
+ final MapValue<V> value = update.value();
+ if (value == null || value.isTombstone()) {
+ MapValue<V> previousValue = removeInternal(key, Optional.empty(), Optional.ofNullable(value));
+ if (previousValue != null && previousValue.isAlive()) {
+ notifyListeners(new EventuallyConsistentMapEvent<>(mapName, REMOVE, key, previousValue.get()));
+ }
+ } else if (putInternal(key, value)) {
+ notifyListeners(new EventuallyConsistentMapEvent<>(mapName, PUT, key, value.get()));
+ }
+ });
+ }
+
+ // TODO pull this into the class if this gets pulled out...
+ private static final int DEFAULT_MAX_EVENTS = 1000;
+ private static final int DEFAULT_MAX_IDLE_MS = 10;
+ private static final int DEFAULT_MAX_BATCH_MS = 50;
+ private static final Timer TIMER = new Timer("onos-ecm-sender-events");
+
+ private final class EventAccumulator extends AbstractAccumulator<UpdateEntry<K, V>> {
+
+ private final NodeId peer;
+
+ private EventAccumulator(NodeId peer) {
+ super(TIMER, DEFAULT_MAX_EVENTS, DEFAULT_MAX_BATCH_MS, DEFAULT_MAX_IDLE_MS);
+ this.peer = peer;
+ }
+
+ @Override
+ public void processItems(List<UpdateEntry<K, V>> items) {
+ Map<K, UpdateEntry<K, V>> map = Maps.newHashMap();
+ items.forEach(item -> map.compute(item.key(), (key, existing) ->
+ item.isNewerThan(existing) ? item : existing));
+ communicationExecutor.submit(() -> {
+ clusterCommunicator.unicast(ImmutableList.copyOf(map.values()),
+ updateMessageSubject,
+ serializer::encode,
+ peer)
+ .whenComplete((result, error) -> {
+ if (error != null) {
+ log.debug("Failed to send to {}", peer, error);
+ }
+ });
+ });
+ }
+ }
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/MapDbPersistentStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/MapDbPersistentStore.java
new file mode 100644
index 00000000..e62a2d5c
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/MapDbPersistentStore.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.ecmap;
+
+import org.mapdb.DB;
+import org.mapdb.DBMaker;
+import org.mapdb.Hasher;
+import org.mapdb.Serializer;
+import org.onosproject.store.serializers.KryoSerializer;
+
+import java.io.File;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * MapDB based implementation of a persistent store.
+ */
+class MapDbPersistentStore<K, V> implements PersistentStore<K, V> {
+
+ private final ExecutorService executor;
+ private final KryoSerializer serializer;
+
+ private final DB database;
+
+ private final Map<byte[], byte[]> items;
+
+ /**
+ * Creates a new MapDB based persistent store.
+ *
+ * @param filename filename of the database on disk
+ * @param executor executor to use for tasks that write to the disk
+ * @param serializer serializer for keys and values
+ */
+ MapDbPersistentStore(String filename, ExecutorService executor,
+ KryoSerializer serializer) {
+ this.executor = checkNotNull(executor);
+ this.serializer = checkNotNull(serializer);
+
+ File databaseFile = new File(filename);
+
+ database = DBMaker.newFileDB(databaseFile).make();
+
+ items = database.createHashMap("items")
+ .keySerializer(Serializer.BYTE_ARRAY)
+ .valueSerializer(Serializer.BYTE_ARRAY)
+ .hasher(Hasher.BYTE_ARRAY)
+ .makeOrGet();
+ }
+
+ @Override
+ public void readInto(Map<K, MapValue<V>> items) {
+ this.items.forEach((keyBytes, valueBytes) ->
+ items.put(serializer.decode(keyBytes),
+ serializer.decode(valueBytes)));
+ }
+
+ @Override
+ public void update(K key, MapValue<V> value) {
+ executor.submit(() -> updateInternal(key, value));
+ }
+
+ @Override
+ public void remove(K key) {
+ executor.submit(() -> removeInternal(key));
+ }
+
+ private void updateInternal(K key, MapValue<V> newValue) {
+ byte[] keyBytes = serializer.encode(key);
+
+ items.compute(keyBytes, (k, existingBytes) -> {
+ MapValue<V> existing = existingBytes == null ? null :
+ serializer.decode(existingBytes);
+ if (existing == null || newValue.isNewerThan(existing)) {
+ return serializer.encode(newValue);
+ } else {
+ return existingBytes;
+ }
+ });
+ database.commit();
+ }
+
+ private void removeInternal(K key) {
+ byte[] keyBytes = serializer.encode(key);
+ items.remove(keyBytes);
+ database.commit();
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/MapValue.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/MapValue.java
new file mode 100644
index 00000000..bb69b472
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/MapValue.java
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.ecmap;
+
+import org.onosproject.store.Timestamp;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.base.Objects;
+
+/**
+ * Representation of a value in EventuallyConsistentMap.
+ *
+ * @param <V> value type
+ */
+public class MapValue<V> implements Comparable<MapValue<V>> {
+    // Logical timestamp ordering this value against concurrent updates.
+    private final Timestamp timestamp;
+    // Payload; null denotes a tombstone (a logically removed entry).
+    private final V value;
+
+    /**
+     * Creates a tombstone value with the specified timestamp.
+     *
+     * @param timestamp timestamp for tombstone
+     * @param <U> value type
+     * @return tombstone MapValue
+     */
+    public static <U> MapValue<U> tombstone(Timestamp timestamp) {
+        return new MapValue<>(null, timestamp);
+    }
+
+    /**
+     * Creates a map value with the given payload and timestamp.
+     *
+     * @param value payload; null creates a tombstone
+     * @param timestamp timestamp of the update producing this value
+     */
+    public MapValue(V value, Timestamp timestamp) {
+        this.value = value;
+        this.timestamp = timestamp;
+    }
+
+    /**
+     * Returns true if this value is a tombstone (carries no live payload).
+     *
+     * @return true if this value is a tombstone
+     */
+    public boolean isTombstone() {
+        return value == null;
+    }
+
+    /**
+     * Returns true if this value carries a live (non-null) payload.
+     *
+     * @return true if this value is alive
+     */
+    public boolean isAlive() {
+        return value != null;
+    }
+
+    /**
+     * Returns the timestamp of this value.
+     *
+     * @return the timestamp
+     */
+    public Timestamp timestamp() {
+        return timestamp;
+    }
+
+    /**
+     * Returns the payload, or null if this value is a tombstone.
+     *
+     * @return the payload or null
+     */
+    public V get() {
+        return value;
+    }
+
+    // Ordering is by timestamp only; consistent with isNewerThan.
+    @Override
+    public int compareTo(MapValue<V> o) {
+        return this.timestamp.compareTo(o.timestamp);
+    }
+
+    /**
+     * Returns true if this value's timestamp is newer than the other's.
+     *
+     * @param other value to compare against
+     * @return true if this value is newer
+     */
+    public boolean isNewerThan(MapValue<V> other) {
+        return timestamp.isNewerThan(other.timestamp);
+    }
+
+    /**
+     * Returns true if this value's timestamp is newer than the given timestamp.
+     *
+     * @param timestamp timestamp to compare against
+     * @return true if this value is newer
+     */
+    public boolean isNewerThan(Timestamp timestamp) {
+        return this.timestamp.isNewerThan(timestamp);
+    }
+
+    /**
+     * Returns a lightweight digest (timestamp plus tombstone flag) of this
+     * value for use during anti-entropy exchanges.
+     *
+     * @return digest of this value
+     */
+    public Digest digest() {
+        return new Digest(timestamp, isTombstone());
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hashCode(timestamp, value);
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public boolean equals(Object other) {
+        if (other instanceof MapValue) {
+            MapValue<V> that = (MapValue<V>) other;
+            return Objects.equal(this.timestamp, that.timestamp) &&
+                    Objects.equal(this.value, that.value);
+        }
+        return false;
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(getClass())
+                .add("timestamp", timestamp)
+                .add("value", value)
+                .toString();
+    }
+
+    // Default constructor for serializer use only.
+    @SuppressWarnings("unused")
+    private MapValue() {
+        this.timestamp = null;
+        this.value = null;
+    }
+
+    /**
+     * Digest or summary of a MapValue for use during Anti-Entropy exchanges.
+     */
+    public static class Digest {
+        private final Timestamp timestamp;
+        private final boolean isTombstone;
+
+        /**
+         * Creates a digest.
+         *
+         * @param timestamp timestamp of the summarized value
+         * @param isTombstone whether the summarized value is a tombstone
+         */
+        public Digest(Timestamp timestamp, boolean isTombstone) {
+            this.timestamp = timestamp;
+            this.isTombstone = isTombstone;
+        }
+
+        /**
+         * Returns the timestamp of the summarized value.
+         *
+         * @return the timestamp
+         */
+        public Timestamp timestamp() {
+            return timestamp;
+        }
+
+        /**
+         * Returns whether the summarized value is a tombstone.
+         *
+         * @return true if the value is a tombstone
+         */
+        public boolean isTombstone() {
+            return isTombstone;
+        }
+
+        /**
+         * Returns true if this digest's timestamp is newer than the other's.
+         *
+         * @param other digest to compare against
+         * @return true if this digest is newer
+         */
+        public boolean isNewerThan(Digest other) {
+            return timestamp.isNewerThan(other.timestamp);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hashCode(timestamp, isTombstone);
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            if (other instanceof Digest) {
+                Digest that = (Digest) other;
+                return Objects.equal(this.timestamp, that.timestamp) &&
+                        Objects.equal(this.isTombstone, that.isTombstone);
+            }
+            return false;
+        }
+
+        @Override
+        public String toString() {
+            return MoreObjects.toStringHelper(getClass())
+                    .add("timestamp", timestamp)
+                    .add("isTombstone", isTombstone)
+                    .toString();
+        }
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/PersistentStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/PersistentStore.java
new file mode 100644
index 00000000..e85987a7
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/PersistentStore.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.onosproject.store.ecmap;
+
+import java.util.Map;
+
+/**
+ * A persistent store for an eventually consistent map.
+ *
+ * @param <K> key type
+ * @param <V> value type
+ */
+interface PersistentStore<K, V> {
+
+    /**
+     * Reads the contents of the disk into the given map.
+     *
+     * @param items items map
+     */
+    void readInto(Map<K, MapValue<V>> items);
+
+    /**
+     * Updates a key,value pair in the persistent store.
+     *
+     * @param key the key
+     * @param value the value
+     */
+    void update(K key, MapValue<V> value);
+
+    /**
+     * Removes a key from persistent store.
+     *
+     * @param key the key to remove
+     */
+    void remove(K key);
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/UpdateEntry.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/UpdateEntry.java
new file mode 100644
index 00000000..53683b98
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/UpdateEntry.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.ecmap;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Describes a single update event in an EventuallyConsistentMap.
+ */
+final class UpdateEntry<K, V> {
+    private final K key;
+    // May be null when the update carries no value.
+    private final MapValue<V> value;
+
+    /**
+     * Creates a new update entry.
+     *
+     * @param key key of the entry; must not be null
+     * @param value value of the entry; may be null
+     */
+    public UpdateEntry(K key, MapValue<V> value) {
+        this.key = checkNotNull(key);
+        this.value = value;
+    }
+
+    /**
+     * Returns the key.
+     *
+     * @return the key
+     */
+    public K key() {
+        return key;
+    }
+
+    /**
+     * Returns the value of the entry.
+     *
+     * @return the value; may be null
+     */
+    public MapValue<V> value() {
+        return value;
+    }
+
+    /**
+     * Returns if this entry is newer than other entry.
+     * A null other entry or a null other value always compares as older;
+     * a null value on this entry never compares as newer.
+     *
+     * @param other other entry
+     * @return true if this entry is newer; false otherwise
+     */
+    public boolean isNewerThan(UpdateEntry<K, V> other) {
+        return other == null || other.value == null || (value != null && value.isNewerThan(other.value));
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(getClass())
+                .add("key", key())
+                .add("value", value)
+                .toString();
+    }
+
+    // Default constructor for serializer use only.
+    @SuppressWarnings("unused")
+    private UpdateEntry() {
+        this.key = null;
+        this.value = null;
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/package-info.java
new file mode 100644
index 00000000..81fd2868
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/ecmap/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Distributed map with eventually-consistent update semantics and gossip
+ * based anti-entropy mechanism.
+ */
+package org.onosproject.store.ecmap; \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/ReplicaInfo.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/ReplicaInfo.java
new file mode 100644
index 00000000..6011c16c
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/ReplicaInfo.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.flow;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.util.Collections;
+import java.util.List;
+
+import org.onosproject.cluster.NodeId;
+
+import com.google.common.base.Objects;
+import com.google.common.base.Optional;
+
+/**
+ * Class to represent placement information about Master/Backup copy.
+ */
+public final class ReplicaInfo {
+
+    // Absent when the device currently has no master.
+    private final Optional<NodeId> master;
+    private final List<NodeId> backups;
+
+    /**
+     * Creates a ReplicaInfo instance.
+     *
+     * @param master NodeId of the node where the master copy should be;
+     *               may be null when there is no master
+     * @param backups list of NodeId, where backup copies should be placed
+     */
+    public ReplicaInfo(NodeId master, List<NodeId> backups) {
+        this.master = Optional.fromNullable(master);
+        // NOTE(review): the backup list is stored as-is, not defensively
+        // copied — callers appear expected to pass an unshared list; confirm.
+        this.backups = checkNotNull(backups);
+    }
+
+    /**
+     * Returns the NodeId, if there is a Node where the master copy should be.
+     *
+     * @return NodeId, where the master copy should be placed
+     */
+    public Optional<NodeId> master() {
+        return master;
+    }
+
+    /**
+     * Returns the collection of NodeId, where backup copies should be placed.
+     *
+     * @return collection of NodeId, where backup copies should be placed
+     */
+    public List<NodeId> backups() {
+        return backups;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hashCode(master, backups);
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (!(other instanceof ReplicaInfo)) {
+            return false;
+        }
+        ReplicaInfo that = (ReplicaInfo) other;
+        return Objects.equal(this.master, that.master) &&
+                Objects.equal(this.backups, that.backups);
+    }
+
+    // Default constructor for Serializer use only.
+    private ReplicaInfo() {
+        this.master = Optional.absent();
+        this.backups = Collections.emptyList();
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/ReplicaInfoEvent.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/ReplicaInfoEvent.java
new file mode 100644
index 00000000..5eafc7ed
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/ReplicaInfoEvent.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.flow;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import org.onosproject.event.AbstractEvent;
+import org.onosproject.net.DeviceId;
+
+/**
+ * Describes a device replicainfo event.
+ */
+public class ReplicaInfoEvent extends AbstractEvent<ReplicaInfoEvent.Type, DeviceId> {
+
+    private final ReplicaInfo replicaInfo;
+
+    /**
+     * Types of Replica info event.
+     */
+    public enum Type {
+        /**
+         * Event to notify that master placement should be changed.
+         */
+        MASTER_CHANGED,
+
+        /**
+         * Event to notify that backup placements should be changed.
+         */
+        BACKUPS_CHANGED,
+    }
+
+    /**
+     * Creates an event of a given type and for the specified device,
+     * and replica info.
+     *
+     * @param type replicainfo event type
+     * @param device event device subject
+     * @param replicaInfo replicainfo
+     */
+    public ReplicaInfoEvent(Type type, DeviceId device, ReplicaInfo replicaInfo) {
+        super(type, device);
+        this.replicaInfo = checkNotNull(replicaInfo);
+    }
+
+    /**
+     * Returns the current replica information for the subject.
+     *
+     * @return replica information for the subject
+     */
+    public ReplicaInfo replicaInfo() {
+        return replicaInfo;
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/ReplicaInfoEventListener.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/ReplicaInfoEventListener.java
new file mode 100644
index 00000000..b6761d1d
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/ReplicaInfoEventListener.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.flow;
+
+import org.onosproject.event.EventListener;
+
+/**
+ * Entity capable of receiving Replica placement information-related events.
+ */
+public interface ReplicaInfoEventListener extends EventListener<ReplicaInfoEvent> {
+
+    // Marker interface: inherits event(ReplicaInfoEvent) from EventListener.
+}
+
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/ReplicaInfoService.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/ReplicaInfoService.java
new file mode 100644
index 00000000..bf60f931
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/ReplicaInfoService.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.flow;
+
+import org.onosproject.net.DeviceId;
+
+/**
+ * Service to return where the replica should be placed.
+ */
+public interface ReplicaInfoService {
+
+    /**
+     * Returns the placement information for given Device.
+     *
+     * @param deviceId identifier of the device
+     * @return placement information
+     */
+    ReplicaInfo getReplicaInfoFor(DeviceId deviceId);
+
+    /**
+     * Adds the specified replica placement info change listener.
+     *
+     * @param listener the replica placement info change listener
+     */
+    void addListener(ReplicaInfoEventListener listener);
+
+    /**
+     * Removes the specified replica placement info change listener.
+     *
+     * @param listener the replica placement info change listener
+     */
+    void removeListener(ReplicaInfoEventListener listener);
+
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/FlowStoreMessageSubjects.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/FlowStoreMessageSubjects.java
new file mode 100644
index 00000000..041053cf
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/FlowStoreMessageSubjects.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.flow.impl;
+
+import org.onosproject.store.cluster.messaging.MessageSubject;
+
+/**
+ * MessageSubjects used by DistributedFlowRuleStore peer-peer communication.
+ */
+public final class FlowStoreMessageSubjects {
+    // Utility class: no instantiation.
+    private FlowStoreMessageSubjects() {}
+
+    /** Forwards a batch of flow operations to a peer. */
+    public static final MessageSubject APPLY_BATCH_FLOWS
+        = new MessageSubject("peer-forward-apply-batch");
+
+    /** Queries a peer for a single flow entry. */
+    public static final MessageSubject GET_FLOW_ENTRY
+        = new MessageSubject("peer-forward-get-flow-entry");
+
+    /** Queries a peer for all flow entries of a device. */
+    public static final MessageSubject GET_DEVICE_FLOW_ENTRIES
+        = new MessageSubject("peer-forward-get-device-flow-entries");
+
+    /** Requests removal of a flow entry on a peer. */
+    public static final MessageSubject REMOVE_FLOW_ENTRY
+        = new MessageSubject("peer-forward-remove-flow-entry");
+
+    /** Notifies a peer that a forwarded batch has completed. */
+    public static final MessageSubject REMOTE_APPLY_COMPLETED
+        = new MessageSubject("peer-apply-completed");
+
+    /** Carries a flow table backup between peers. */
+    public static final MessageSubject FLOW_TABLE_BACKUP
+        = new MessageSubject("peer-flow-table-backup");
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/NewDistributedFlowRuleStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/NewDistributedFlowRuleStore.java
new file mode 100644
index 00000000..de7a3ac3
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/NewDistributedFlowRuleStore.java
@@ -0,0 +1,789 @@
+ /*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.flow.impl;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.Futures;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Modified;
+import org.apache.felix.scr.annotations.Property;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.KryoNamespace;
+import org.onlab.util.Tools;
+import org.onosproject.cfg.ComponentConfigService;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.core.CoreService;
+import org.onosproject.core.IdGenerator;
+import org.onosproject.mastership.MastershipService;
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.device.DeviceService;
+import org.onosproject.net.flow.CompletedBatchOperation;
+import org.onosproject.net.flow.DefaultFlowEntry;
+import org.onosproject.net.flow.FlowEntry;
+import org.onosproject.net.flow.FlowEntry.FlowEntryState;
+import org.onosproject.net.flow.FlowId;
+import org.onosproject.net.flow.FlowRule;
+import org.onosproject.net.flow.FlowRuleBatchEntry;
+import org.onosproject.net.flow.FlowRuleBatchEntry.FlowRuleOperation;
+import org.onosproject.net.flow.FlowRuleBatchEvent;
+import org.onosproject.net.flow.FlowRuleBatchOperation;
+import org.onosproject.net.flow.FlowRuleBatchRequest;
+import org.onosproject.net.flow.FlowRuleEvent;
+import org.onosproject.net.flow.FlowRuleEvent.Type;
+import org.onosproject.net.flow.FlowRuleService;
+import org.onosproject.net.flow.FlowRuleStore;
+import org.onosproject.net.flow.FlowRuleStoreDelegate;
+import org.onosproject.net.flow.StoredFlowEntry;
+import org.onosproject.store.AbstractStore;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.cluster.messaging.ClusterMessage;
+import org.onosproject.store.cluster.messaging.ClusterMessageHandler;
+import org.onosproject.store.flow.ReplicaInfoEvent;
+import org.onosproject.store.flow.ReplicaInfoEventListener;
+import org.onosproject.store.flow.ReplicaInfoService;
+import org.onosproject.store.serializers.KryoSerializer;
+import org.onosproject.store.serializers.StoreSerializer;
+import org.onosproject.store.serializers.custom.DistributedStoreSerializers;
+import org.osgi.service.component.ComponentContext;
+import org.slf4j.Logger;
+
+import java.util.Collections;
+import java.util.Dictionary;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+
+import static com.google.common.base.Strings.isNullOrEmpty;
+import static org.onlab.util.Tools.get;
+import static org.onlab.util.Tools.groupedThreads;
+import static org.onosproject.net.flow.FlowRuleEvent.Type.RULE_REMOVED;
+import static org.onosproject.store.flow.impl.FlowStoreMessageSubjects.*;
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * Manages inventory of flow rules using a distributed state management protocol.
+ */
+@Component(immediate = true, enabled = true)
+@Service
+public class NewDistributedFlowRuleStore
+ extends AbstractStore<FlowRuleBatchEvent, FlowRuleStoreDelegate>
+ implements FlowRuleStore {
+
+ private final Logger log = getLogger(getClass());
+
+ private static final int MESSAGE_HANDLER_THREAD_POOL_SIZE = 8;
+ private static final boolean DEFAULT_BACKUP_ENABLED = true;
+ private static final int DEFAULT_BACKUP_PERIOD_MILLIS = 2000;
+ private static final long FLOW_RULE_STORE_TIMEOUT_MILLIS = 5000;
+ // number of devices whose flow entries will be backed up in one communication round
+ private static final int FLOW_TABLE_BACKUP_BATCH_SIZE = 1;
+
+ @Property(name = "msgHandlerPoolSize", intValue = MESSAGE_HANDLER_THREAD_POOL_SIZE,
+ label = "Number of threads in the message handler pool")
+ private int msgHandlerPoolSize = MESSAGE_HANDLER_THREAD_POOL_SIZE;
+
+ @Property(name = "backupEnabled", boolValue = DEFAULT_BACKUP_ENABLED,
+ label = "Indicates whether backups are enabled or not")
+ private boolean backupEnabled = DEFAULT_BACKUP_ENABLED;
+
+ @Property(name = "backupPeriod", intValue = DEFAULT_BACKUP_PERIOD_MILLIS,
+ label = "Delay in ms between successive backup runs")
+ private int backupPeriod = DEFAULT_BACKUP_PERIOD_MILLIS;
+
+ private InternalFlowTable flowTable = new InternalFlowTable();
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ReplicaInfoService replicaInfoManager;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterCommunicationService clusterCommunicator;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterService clusterService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected DeviceService deviceService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected CoreService coreService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ComponentConfigService configService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected MastershipService mastershipService;
+
+    // Batch operations forwarded to a remote master, keyed by batch id,
+    // awaiting a REMOTE_APPLY_COMPLETED response.
+    private Map<Long, NodeId> pendingResponses = Maps.newConcurrentMap();
+    private ExecutorService messageHandlingExecutor;
+
+    // Periodic task pushing flow table backups; null while backups are disabled.
+    private ScheduledFuture<?> backupTask;
+    private final ScheduledExecutorService backupSenderExecutor =
+            Executors.newSingleThreadScheduledExecutor(groupedThreads("onos/flow", "backup-sender"));
+
+    // Serializer for all peer-to-peer flow store messages.
+    protected static final StoreSerializer SERIALIZER = new KryoSerializer() {
+        @Override
+        protected void setupKryoPool() {
+            serializerPool = KryoNamespace.newBuilder()
+                    .register(DistributedStoreSerializers.STORE_COMMON)
+                    .nextId(DistributedStoreSerializers.STORE_CUSTOM_BEGIN)
+                    .build();
+        }
+    };
+
+    private IdGenerator idGenerator;
+    // Identity of the local cluster node, cached at activation.
+    private NodeId local;
+
+    @Activate
+    public void activate(ComponentContext context) {
+        configService.registerProperties(getClass());
+
+        idGenerator = coreService.getIdGenerator(FlowRuleService.FLOW_OP_TOPIC);
+
+        local = clusterService.getLocalNode().id();
+
+        // Dedicated pool for handling inter-node flow store messages.
+        messageHandlingExecutor = Executors.newFixedThreadPool(
+                msgHandlerPoolSize, groupedThreads("onos/store/flow", "message-handlers"));
+
+        registerMessageHandlers(messageHandlingExecutor);
+
+        if (backupEnabled) {
+            // Track replica placement changes and periodically push backups.
+            replicaInfoManager.addListener(flowTable);
+            backupTask = backupSenderExecutor.scheduleWithFixedDelay(
+                    flowTable::backup,
+                    0,
+                    backupPeriod,
+                    TimeUnit.MILLISECONDS);
+        }
+
+        logConfig("Started");
+    }
+
+    @Deactivate
+    public void deactivate(ComponentContext context) {
+        if (backupEnabled) {
+            // Stop tracking replica changes and halt the periodic backup task.
+            replicaInfoManager.removeListener(flowTable);
+            backupTask.cancel(true);
+        }
+        configService.unregisterProperties(getClass(), false);
+        unregisterMessageHandlers();
+        messageHandlingExecutor.shutdownNow();
+        backupSenderExecutor.shutdownNow();
+        log.info("Stopped");
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Modified
+    public void modified(ComponentContext context) {
+        if (context == null) {
+            // No configuration context: revert to default backup setting.
+            backupEnabled = DEFAULT_BACKUP_ENABLED;
+            logConfig("Default config");
+            return;
+        }
+
+        Dictionary properties = context.getProperties();
+        int newPoolSize;
+        boolean newBackupEnabled;
+        int newBackupPeriod;
+        try {
+            String s = get(properties, "msgHandlerPoolSize");
+            newPoolSize = isNullOrEmpty(s) ? msgHandlerPoolSize : Integer.parseInt(s.trim());
+
+            s = get(properties, "backupEnabled");
+            newBackupEnabled = isNullOrEmpty(s) ? backupEnabled : Boolean.parseBoolean(s.trim());
+
+            s = get(properties, "backupPeriod");
+            newBackupPeriod = isNullOrEmpty(s) ? backupPeriod : Integer.parseInt(s.trim());
+
+        } catch (NumberFormatException | ClassCastException e) {
+            // Malformed property values: fall back to compile-time defaults.
+            newPoolSize = MESSAGE_HANDLER_THREAD_POOL_SIZE;
+            newBackupEnabled = DEFAULT_BACKUP_ENABLED;
+            newBackupPeriod = DEFAULT_BACKUP_PERIOD_MILLIS;
+        }
+
+        boolean restartBackupTask = false;
+        if (newBackupEnabled != backupEnabled) {
+            backupEnabled = newBackupEnabled;
+            if (!backupEnabled) {
+                // Backups turned off: stop listening and cancel the running task.
+                replicaInfoManager.removeListener(flowTable);
+                if (backupTask != null) {
+                    backupTask.cancel(false);
+                    backupTask = null;
+                }
+            } else {
+                replicaInfoManager.addListener(flowTable);
+            }
+            restartBackupTask = backupEnabled;
+        }
+        if (newBackupPeriod != backupPeriod) {
+            backupPeriod = newBackupPeriod;
+            restartBackupTask = backupEnabled;
+        }
+        if (restartBackupTask) {
+            if (backupTask != null) {
+                // cancel previously running task
+                backupTask.cancel(false);
+            }
+            backupTask = backupSenderExecutor.scheduleWithFixedDelay(
+                    flowTable::backup,
+                    0,
+                    backupPeriod,
+                    TimeUnit.MILLISECONDS);
+        }
+        if (newPoolSize != msgHandlerPoolSize) {
+            msgHandlerPoolSize = newPoolSize;
+            ExecutorService oldMsgHandler = messageHandlingExecutor;
+            messageHandlingExecutor = Executors.newFixedThreadPool(
+                    msgHandlerPoolSize, groupedThreads("onos/store/flow", "message-handlers"));
+
+            // replace previously registered handlers.
+            registerMessageHandlers(messageHandlingExecutor);
+            oldMsgHandler.shutdown();
+        }
+        logConfig("Reconfigured");
+    }
+
+    // Subscribes a handler for each peer-to-peer flow store message subject.
+    // Note: the original registered REMOVE_FLOW_ENTRY twice; the duplicate
+    // subscription is removed here.
+    private void registerMessageHandlers(ExecutorService executor) {
+
+        clusterCommunicator.addSubscriber(APPLY_BATCH_FLOWS, new OnStoreBatch(), executor);
+        clusterCommunicator.<FlowRuleBatchEvent>addSubscriber(
+                REMOTE_APPLY_COMPLETED, SERIALIZER::decode, this::notifyDelegate, executor);
+        clusterCommunicator.addSubscriber(
+                GET_FLOW_ENTRY, SERIALIZER::decode, flowTable::getFlowEntry, SERIALIZER::encode, executor);
+        clusterCommunicator.addSubscriber(
+                GET_DEVICE_FLOW_ENTRIES, SERIALIZER::decode, flowTable::getFlowEntries, SERIALIZER::encode, executor);
+        clusterCommunicator.addSubscriber(
+                REMOVE_FLOW_ENTRY, SERIALIZER::decode, this::removeFlowRuleInternal, SERIALIZER::encode, executor);
+        clusterCommunicator.addSubscriber(
+                FLOW_TABLE_BACKUP, SERIALIZER::decode, flowTable::onBackupReceipt, SERIALIZER::encode, executor);
+    }
+
+    // Drops every peer-to-peer subscription added at registration time.
+    private void unregisterMessageHandlers() {
+        clusterCommunicator.removeSubscriber(REMOVE_FLOW_ENTRY);
+        clusterCommunicator.removeSubscriber(GET_DEVICE_FLOW_ENTRIES);
+        clusterCommunicator.removeSubscriber(GET_FLOW_ENTRY);
+        clusterCommunicator.removeSubscriber(APPLY_BATCH_FLOWS);
+        clusterCommunicator.removeSubscriber(REMOTE_APPLY_COMPLETED);
+        clusterCommunicator.removeSubscriber(FLOW_TABLE_BACKUP);
+    }
+
+    // Logs the current component configuration with the given prefix.
+    private void logConfig(String prefix) {
+        log.info("{} with msgHandlerPoolSize = {}; backupEnabled = {}, backupPeriod = {}",
+                 prefix, msgHandlerPoolSize, backupEnabled, backupPeriod);
+    }
+
+    // This is not an efficient operation on a distributed sharded
+    // flow store. We need to revisit the need for this operation or at least
+    // make it device specific.
+    @Override
+    public int getFlowRuleCount() {
+        // Sums per-device entry counts; each non-local device triggers a
+        // cluster round-trip through getFlowEntries.
+        AtomicInteger sum = new AtomicInteger(0);
+        deviceService.getDevices().forEach(device -> sum.addAndGet(Iterables.size(getFlowEntries(device.id()))));
+        return sum.get();
+    }
+
+    @Override
+    public FlowEntry getFlowEntry(FlowRule rule) {
+        NodeId master = mastershipService.getMasterFor(rule.deviceId());
+
+        if (master == null) {
+            log.debug("Failed to getFlowEntry: No master for {}", rule.deviceId());
+            return null;
+        }
+
+        // The master holds the authoritative copy; answer locally if we are it.
+        if (Objects.equal(local, master)) {
+            return flowTable.getFlowEntry(rule);
+        }
+
+        log.trace("Forwarding getFlowEntry to {}, which is the primary (master) for device {}",
+                  master, rule.deviceId());
+
+        // Remote lookup; yields null if the master does not reply within the timeout.
+        return Tools.futureGetOrElse(clusterCommunicator.sendAndReceive(rule,
+                                    FlowStoreMessageSubjects.GET_FLOW_ENTRY,
+                                    SERIALIZER::encode,
+                                    SERIALIZER::decode,
+                                    master),
+                                    FLOW_RULE_STORE_TIMEOUT_MILLIS,
+                                    TimeUnit.MILLISECONDS,
+                                    null);
+    }
+
+    /**
+     * Returns all flow entries for the given device, answering from the local
+     * shard when this node is the device's master and otherwise fetching them
+     * from the master. An empty list is returned when there is no master or
+     * the remote call times out.
+     */
+    @Override
+    public Iterable<FlowEntry> getFlowEntries(DeviceId deviceId) {
+        final NodeId master = mastershipService.getMasterFor(deviceId);
+        if (master == null) {
+            log.debug("Failed to getFlowEntries: No master for {}", deviceId);
+            return Collections.emptyList();
+        }
+        if (Objects.equal(local, master)) {
+            // This node owns the device's shard; no RPC needed.
+            return flowTable.getFlowEntries(deviceId);
+        }
+        log.trace("Forwarding getFlowEntries to {}, which is the primary (master) for device {}",
+                master, deviceId);
+        return Tools.futureGetOrElse(
+                clusterCommunicator.sendAndReceive(deviceId,
+                        FlowStoreMessageSubjects.GET_DEVICE_FLOW_ENTRIES,
+                        SERIALIZER::encode,
+                        SERIALIZER::decode,
+                        master),
+                FLOW_RULE_STORE_TIMEOUT_MILLIS,
+                TimeUnit.MILLISECONDS,
+                Collections.emptyList());
+    }
+
+ @Override
+ public void storeFlowRule(FlowRule rule) {
+ storeBatch(new FlowRuleBatchOperation(
+ Collections.singletonList(new FlowRuleBatchEntry(FlowRuleOperation.ADD, rule)),
+ rule.deviceId(), idGenerator.getNewId()));
+ }
+
+ /**
+ * Applies a batch of flow operations, executing locally when this node
+ * masters the device and otherwise forwarding the batch to the master.
+ * The delegate is always notified of completion, including the degenerate
+ * empty-batch and no-master cases.
+ */
+ @Override
+ public void storeBatch(FlowRuleBatchOperation operation) {
+ // An empty batch trivially succeeds.
+ if (operation.getOperations().isEmpty()) {
+ notifyDelegate(FlowRuleBatchEvent.completed(
+ new FlowRuleBatchRequest(operation.id(), Collections.emptySet()),
+ new CompletedBatchOperation(true, Collections.emptySet(), operation.deviceId())));
+ return;
+ }
+
+ DeviceId deviceId = operation.deviceId();
+ NodeId master = mastershipService.getMasterFor(deviceId);
+
+ if (master == null) {
+ log.warn("No master for {} : flows will be marked for removal", deviceId);
+
+ // Record the operations locally (e.g. mark removals pending) even
+ // though no device programming can happen without a master.
+ updateStoreInternal(operation);
+
+ notifyDelegate(FlowRuleBatchEvent.completed(
+ new FlowRuleBatchRequest(operation.id(), Collections.emptySet()),
+ new CompletedBatchOperation(true, Collections.emptySet(), operation.deviceId())));
+ return;
+ }
+
+ if (Objects.equal(local, master)) {
+ storeBatchInternal(operation);
+ return;
+ }
+
+ log.trace("Forwarding storeBatch to {}, which is the primary (master) for device {}",
+ master, deviceId);
+
+ // Fire-and-forget unicast; on send failure report the whole batch as
+ // failed so callers do not wait forever.
+ clusterCommunicator.unicast(operation,
+ APPLY_BATCH_FLOWS,
+ SERIALIZER::encode,
+ master)
+ .whenComplete((result, error) -> {
+ if (error != null) {
+ log.warn("Failed to storeBatch: {} to {}", operation, master, error);
+
+ Set<FlowRule> allFailures = operation.getOperations()
+ .stream()
+ .map(op -> op.target())
+ .collect(Collectors.toSet());
+
+ notifyDelegate(FlowRuleBatchEvent.completed(
+ new FlowRuleBatchRequest(operation.id(), Collections.emptySet()),
+ new CompletedBatchOperation(false, allFailures, deviceId)));
+ }
+ });
+ }
+
+    /**
+     * Applies a batch against the local flow table and notifies the delegate.
+     * If every operation turned out to be a no-op (e.g. REMOVE of an unknown
+     * rule), the batch completes immediately as successful; otherwise the
+     * surviving operations are handed to the delegate as a request.
+     */
+    private void storeBatchInternal(FlowRuleBatchOperation operation) {
+        final DeviceId did = operation.deviceId();
+        // Mutate the local table first; only operations that actually changed
+        // state survive into currentOps.
+        Set<FlowRuleBatchEntry> currentOps = updateStoreInternal(operation);
+        if (currentOps.isEmpty()) {
+            batchOperationComplete(FlowRuleBatchEvent.completed(
+                    new FlowRuleBatchRequest(operation.id(), Collections.emptySet()),
+                    new CompletedBatchOperation(true, Collections.emptySet(), did)));
+            return;
+        }
+        notifyDelegate(FlowRuleBatchEvent.requested(
+                new FlowRuleBatchRequest(operation.id(), currentOps), operation.deviceId()));
+    }
+
+ /**
+ * Records the batch's operations in the local flow table and returns the
+ * subset that had an effect (ADDs always; REMOVEs only when the target
+ * entry exists and could be marked PENDING_REMOVE; MODIFY is unimplemented).
+ */
+ private Set<FlowRuleBatchEntry> updateStoreInternal(FlowRuleBatchOperation operation) {
+ return operation.getOperations().stream().map(
+ op -> {
+ StoredFlowEntry entry;
+ switch (op.operator()) {
+ case ADD:
+ entry = new DefaultFlowEntry(op.target());
+ // always add requested FlowRule
+ // Note: 2 equal FlowEntry may have different treatment
+ flowTable.remove(entry.deviceId(), entry);
+ flowTable.add(entry);
+
+ return op;
+ case REMOVE:
+ // Soft-delete: mark pending so the rule is reconciled later.
+ entry = flowTable.getFlowEntry(op.target());
+ if (entry != null) {
+ entry.setState(FlowEntryState.PENDING_REMOVE);
+ return op;
+ }
+ break;
+ case MODIFY:
+ //TODO: figure this out at some point
+ break;
+ default:
+ log.warn("Unknown flow operation operator: {}", op.operator());
+ }
+ // Mapped to null for no-ops; filtered out below.
+ return null;
+ }
+ ).filter(op -> op != null).collect(Collectors.toSet());
+ }
+
+ @Override
+ public void deleteFlowRule(FlowRule rule) {
+ storeBatch(
+ new FlowRuleBatchOperation(
+ Collections.singletonList(
+ new FlowRuleBatchEntry(
+ FlowRuleOperation.REMOVE,
+ rule)), rule.deviceId(), idGenerator.getNewId()));
+ }
+
+    /**
+     * Records a rule statistics/state update reported by the device; only
+     * honored when this node is the current master for the rule's device.
+     * Returns the resulting event, or null when this node is not the master.
+     */
+    @Override
+    public FlowRuleEvent addOrUpdateFlowRule(FlowEntry rule) {
+        NodeId master = mastershipService.getMasterFor(rule.deviceId());
+        if (!Objects.equal(local, master)) {
+            log.warn("Tried to update FlowRule {} state,"
+                    + " while the Node was not the master.", rule);
+            return null;
+        }
+        return addOrUpdateFlowRuleInternal(rule);
+    }
+
+ /**
+ * Copies the reported statistics onto the stored entry, promoting a
+ * PENDING_ADD entry to ADDED. If the rule is unknown it is inserted and
+ * null is returned (no event).
+ */
+ private FlowRuleEvent addOrUpdateFlowRuleInternal(FlowEntry rule) {
+ // check if this new rule is an update to an existing entry
+ StoredFlowEntry stored = flowTable.getFlowEntry(rule);
+ if (stored != null) {
+ // Refresh mutable stats on the stored instance in place.
+ stored.setBytes(rule.bytes());
+ stored.setLife(rule.life());
+ stored.setPackets(rule.packets());
+ if (stored.state() == FlowEntryState.PENDING_ADD) {
+ stored.setState(FlowEntryState.ADDED);
+ return new FlowRuleEvent(Type.RULE_ADDED, rule);
+ }
+ return new FlowRuleEvent(Type.RULE_UPDATED, rule);
+ }
+
+ // TODO: Confirm if this behavior is correct. See SimpleFlowRuleStore
+ // TODO: also update backup if the behavior is correct.
+ flowTable.add(rule);
+ return null;
+ }
+
+ /**
+ * Removes the given flow entry, locally when this node masters the device
+ * and otherwise via a blocking RPC to the master. Returns the removal
+ * event, or null when the entry was absent or no master exists.
+ */
+ @Override
+ public FlowRuleEvent removeFlowRule(FlowEntry rule) {
+ final DeviceId deviceId = rule.deviceId();
+ NodeId master = mastershipService.getMasterFor(deviceId);
+
+ if (Objects.equal(local, master)) {
+ // bypass and handle it locally
+ return removeFlowRuleInternal(rule);
+ }
+
+ if (master == null) {
+ log.warn("Failed to removeFlowRule: No master for {}", deviceId);
+ // TODO: revisit if this should be null (="no-op") or Exception
+ return null;
+ }
+
+ log.trace("Forwarding removeFlowRule to {}, which is the master for device {}",
+ master, deviceId);
+
+ // Blocks up to FLOW_RULE_STORE_TIMEOUT_MILLIS and rethrows failures as
+ // RuntimeException — unlike the null-fallback used by getFlowEntry.
+ // NOTE(review): Guava's Futures.get(..., Class) is deprecated in later
+ // Guava releases; consider Tools.futureGetOrElse for consistency.
+ return Futures.get(clusterCommunicator.sendAndReceive(
+ rule,
+ REMOVE_FLOW_ENTRY,
+ SERIALIZER::encode,
+ SERIALIZER::decode,
+ master),
+ FLOW_RULE_STORE_TIMEOUT_MILLIS,
+ TimeUnit.MILLISECONDS,
+ RuntimeException.class);
+ }
+
+    /**
+     * Deletes the entry from the local table, returning a RULE_REMOVED event
+     * or null when the entry was not present.
+     */
+    private FlowRuleEvent removeFlowRuleInternal(FlowEntry rule) {
+        final DeviceId deviceId = rule.deviceId();
+        // One could instead mark the rule as removed and keep it in the store.
+        if (flowTable.remove(deviceId, rule)) {
+            return new FlowRuleEvent(RULE_REMOVED, rule);
+        }
+        return null;
+    }
+
+ /**
+ * Completes a batch: delivered to the local delegate when the batch
+ * originated here, otherwise relayed back to the node that sent it.
+ */
+ @Override
+ public void batchOperationComplete(FlowRuleBatchEvent event) {
+ //FIXME: need a per device pending response
+ NodeId nodeId = pendingResponses.remove(event.subject().batchId());
+ if (nodeId == null) {
+ // Batch was initiated locally; notify the local delegate.
+ notifyDelegate(event);
+ } else {
+ // TODO check unicast return value
+ clusterCommunicator.unicast(event, REMOTE_APPLY_COMPLETED, SERIALIZER::encode, nodeId);
+ //error log: log.warn("Failed to respond to peer for batch operation result");
+ }
+ }
+
+ /**
+ * Handles APPLY_BATCH_FLOWS messages forwarded by peers: applies the batch
+ * locally when this node still masters the device, otherwise responds
+ * immediately with an all-failed result so the sender can react.
+ */
+ private final class OnStoreBatch implements ClusterMessageHandler {
+
+ @Override
+ public void handle(final ClusterMessage message) {
+ FlowRuleBatchOperation operation = SERIALIZER.decode(message.payload());
+ log.debug("received batch request {}", operation);
+
+ final DeviceId deviceId = operation.deviceId();
+ NodeId master = mastershipService.getMasterFor(deviceId);
+ if (!Objects.equal(local, master)) {
+ Set<FlowRule> failures = new HashSet<>(operation.size());
+ for (FlowRuleBatchEntry op : operation.getOperations()) {
+ failures.add(op.target());
+ }
+ CompletedBatchOperation allFailed = new CompletedBatchOperation(false, failures, deviceId);
+ // This node is no longer the master, respond as all failed.
+ // TODO: we might want to wrap response in envelope
+ // to distinguish sw programming failure and hand over
+ // it make sense in the latter case to retry immediately.
+ message.respond(SERIALIZER.encode(allFailed));
+ return;
+ }
+
+ // Remember who asked so batchOperationComplete can reply to them.
+ pendingResponses.put(operation.id(), message.sender());
+ storeBatchInternal(operation);
+ }
+ }
+
+ private class InternalFlowTable implements ReplicaInfoEventListener {
+
+ private final Map<DeviceId, Map<FlowId, Set<StoredFlowEntry>>>
+ flowEntries = Maps.newConcurrentMap();
+
+ private final Map<DeviceId, Long> lastBackupTimes = Maps.newConcurrentMap();
+ private final Map<DeviceId, Long> lastUpdateTimes = Maps.newConcurrentMap();
+ private final Map<DeviceId, NodeId> lastBackupNodes = Maps.newConcurrentMap();
+
+ @Override
+ public void event(ReplicaInfoEvent event) {
+ if (!backupEnabled) {
+ return;
+ }
+ if (event.type() == ReplicaInfoEvent.Type.BACKUPS_CHANGED) {
+ DeviceId deviceId = event.subject();
+ NodeId master = mastershipService.getMasterFor(deviceId);
+ if (!Objects.equal(local, master)) {
+ // ignore since this event is for a device this node does not manage.
+ return;
+ }
+ NodeId newBackupNode = getBackupNode(deviceId);
+ NodeId currentBackupNode = lastBackupNodes.get(deviceId);
+ if (Objects.equal(newBackupNode, currentBackupNode)) {
+ // ignore since backup location hasn't changed.
+ return;
+ }
+ if (currentBackupNode != null && newBackupNode == null) {
+ // Current backup node is most likely down and no alternate backup node
+ // has been chosen. Clear current backup location so that we can resume
+ // backups when either current backup comes online or a different backup node
+ // is chosen.
+ log.warn("Lost backup location {} for deviceId {} and no alternate backup node exists. "
+ + "Flows can be lost if the master goes down", currentBackupNode, deviceId);
+ lastBackupNodes.remove(deviceId);
+ lastBackupTimes.remove(deviceId);
+ return;
+ // TODO: Pick any available node as backup and ensure hand-off occurs when
+ // a new master is elected.
+ }
+ log.debug("Backup location for {} has changed from {} to {}.",
+ deviceId, currentBackupNode, newBackupNode);
+ backupSenderExecutor.schedule(() -> backupFlowEntries(newBackupNode, Sets.newHashSet(deviceId)),
+ 0,
+ TimeUnit.SECONDS);
+ }
+ }
+
+ private void sendBackups(NodeId nodeId, Set<DeviceId> deviceIds) {
+ // split up the devices into smaller batches and send them separately.
+ Iterables.partition(deviceIds, FLOW_TABLE_BACKUP_BATCH_SIZE)
+ .forEach(ids -> backupFlowEntries(nodeId, Sets.newHashSet(ids)));
+ }
+
+ private void backupFlowEntries(NodeId nodeId, Set<DeviceId> deviceIds) {
+ if (deviceIds.isEmpty()) {
+ return;
+ }
+ log.debug("Sending flowEntries for devices {} to {} as backup.", deviceIds, nodeId);
+ Map<DeviceId, Map<FlowId, Set<StoredFlowEntry>>> deviceFlowEntries =
+ Maps.newConcurrentMap();
+ deviceIds.forEach(id -> deviceFlowEntries.put(id, ImmutableMap.copyOf(getFlowTable(id))));
+ clusterCommunicator.<Map<DeviceId, Map<FlowId, Set<StoredFlowEntry>>>, Set<DeviceId>>sendAndReceive(
+ deviceFlowEntries,
+ FLOW_TABLE_BACKUP,
+ SERIALIZER::encode,
+ SERIALIZER::decode,
+ nodeId)
+ .whenComplete((backedupDevices, error) -> {
+ Set<DeviceId> devicesNotBackedup = error != null ?
+ deviceFlowEntries.keySet() :
+ Sets.difference(deviceFlowEntries.keySet(), backedupDevices);
+ if (devicesNotBackedup.size() > 0) {
+ log.warn("Failed to backup devices: {}. Reason: {}",
+ devicesNotBackedup, error.getMessage());
+ }
+ if (backedupDevices != null) {
+ backedupDevices.forEach(id -> {
+ lastBackupTimes.put(id, System.currentTimeMillis());
+ lastBackupNodes.put(id, nodeId);
+ });
+ }
+ });
+ }
+
+ /**
+ * Returns the flow table for specified device.
+ *
+ * @param deviceId identifier of the device
+ * @return Map representing Flow Table of given device.
+ */
+ private Map<FlowId, Set<StoredFlowEntry>> getFlowTable(DeviceId deviceId) {
+ return flowEntries.computeIfAbsent(deviceId, id -> Maps.newConcurrentMap());
+ }
+
+ private Set<StoredFlowEntry> getFlowEntriesInternal(DeviceId deviceId, FlowId flowId) {
+ return getFlowTable(deviceId).computeIfAbsent(flowId, id -> Sets.newCopyOnWriteArraySet());
+ }
+
+ private StoredFlowEntry getFlowEntryInternal(FlowRule rule) {
+ Set<StoredFlowEntry> flowEntries = getFlowEntriesInternal(rule.deviceId(), rule.id());
+ return flowEntries.stream()
+ .filter(entry -> Objects.equal(entry, rule))
+ .findAny()
+ .orElse(null);
+ }
+
+ private Set<FlowEntry> getFlowEntriesInternal(DeviceId deviceId) {
+ Set<FlowEntry> result = Sets.newHashSet();
+ getFlowTable(deviceId).values().forEach(result::addAll);
+ return result;
+ }
+
+ public StoredFlowEntry getFlowEntry(FlowRule rule) {
+ return getFlowEntryInternal(rule);
+ }
+
+ public Set<FlowEntry> getFlowEntries(DeviceId deviceId) {
+ return getFlowEntriesInternal(deviceId);
+ }
+
+ public void add(FlowEntry rule) {
+ getFlowEntriesInternal(rule.deviceId(), rule.id()).add((StoredFlowEntry) rule);
+ lastUpdateTimes.put(rule.deviceId(), System.currentTimeMillis());
+ }
+
+ public boolean remove(DeviceId deviceId, FlowEntry rule) {
+ try {
+ return getFlowEntriesInternal(deviceId, rule.id()).remove(rule);
+ } finally {
+ lastUpdateTimes.put(deviceId, System.currentTimeMillis());
+ }
+ }
+
+ private NodeId getBackupNode(DeviceId deviceId) {
+ List<NodeId> deviceStandbys = replicaInfoManager.getReplicaInfoFor(deviceId).backups();
+ // pick the standby which is most likely to become next master
+ return deviceStandbys.isEmpty() ? null : deviceStandbys.get(0);
+ }
+
+ private void backup() {
+ if (!backupEnabled) {
+ return;
+ }
+ try {
+ // determine the set of devices that we need to backup during this run.
+ Set<DeviceId> devicesToBackup = mastershipService.getDevicesOf(local)
+ .stream()
+ .filter(deviceId -> {
+ Long lastBackupTime = lastBackupTimes.get(deviceId);
+ Long lastUpdateTime = lastUpdateTimes.get(deviceId);
+ NodeId lastBackupNode = lastBackupNodes.get(deviceId);
+ NodeId newBackupNode = getBackupNode(deviceId);
+ return lastBackupTime == null
+ || !Objects.equal(lastBackupNode, newBackupNode)
+ || (lastUpdateTime != null && lastUpdateTime > lastBackupTime);
+ })
+ .collect(Collectors.toSet());
+
+ // compute a mapping from node to the set of devices whose flow entries it should backup
+ Map<NodeId, Set<DeviceId>> devicesToBackupByNode = Maps.newHashMap();
+ devicesToBackup.forEach(deviceId -> {
+ NodeId backupLocation = getBackupNode(deviceId);
+ if (backupLocation != null) {
+ devicesToBackupByNode.computeIfAbsent(backupLocation, nodeId -> Sets.newHashSet())
+ .add(deviceId);
+ }
+ });
+ // send the device flow entries to their respective backup nodes
+ devicesToBackupByNode.forEach(this::sendBackups);
+ } catch (Exception e) {
+ log.error("Backup failed.", e);
+ }
+ }
+
+ private Set<DeviceId> onBackupReceipt(Map<DeviceId, Map<FlowId, Set<StoredFlowEntry>>> flowTables) {
+ log.debug("Received flowEntries for {} to backup", flowTables.keySet());
+ Set<DeviceId> backedupDevices = Sets.newHashSet();
+ try {
+ flowTables.forEach((deviceId, deviceFlowTable) -> {
+ // Only process those devices are that not managed by the local node.
+ if (!Objects.equal(local, mastershipService.getMasterFor(deviceId))) {
+ Map<FlowId, Set<StoredFlowEntry>> backupFlowTable = getFlowTable(deviceId);
+ backupFlowTable.clear();
+ backupFlowTable.putAll(deviceFlowTable);
+ backedupDevices.add(deviceId);
+ }
+ });
+ } catch (Exception e) {
+ log.warn("Failure processing backup request", e);
+ }
+ return backedupDevices;
+ }
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/ReplicaInfoManager.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/ReplicaInfoManager.java
new file mode 100644
index 00000000..ebb487bf
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/ReplicaInfoManager.java
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.flow.impl;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.cluster.RoleInfo;
+import org.onosproject.event.EventDeliveryService;
+import org.onosproject.event.ListenerRegistry;
+import org.onosproject.mastership.MastershipEvent;
+import org.onosproject.mastership.MastershipListener;
+import org.onosproject.mastership.MastershipService;
+import org.onosproject.net.DeviceId;
+import org.onosproject.store.flow.ReplicaInfo;
+import org.onosproject.store.flow.ReplicaInfoEvent;
+import org.onosproject.store.flow.ReplicaInfoEventListener;
+import org.onosproject.store.flow.ReplicaInfoService;
+import org.slf4j.Logger;
+
+import java.util.Collections;
+import java.util.List;
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.onosproject.store.flow.ReplicaInfoEvent.Type.BACKUPS_CHANGED;
+import static org.onosproject.store.flow.ReplicaInfoEvent.Type.MASTER_CHANGED;
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * Manages replica placement information.
+ * Translates mastership (master/backups) changes into ReplicaInfo events
+ * delivered through the event dispatcher to registered listeners.
+ */
+@Component(immediate = true)
+@Service
+public class ReplicaInfoManager implements ReplicaInfoService {
+
+ private final Logger log = getLogger(getClass());
+
+ private final MastershipListener mastershipListener = new InternalMastershipListener();
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected EventDeliveryService eventDispatcher;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected MastershipService mastershipService;
+
+ protected final ListenerRegistry<ReplicaInfoEvent, ReplicaInfoEventListener>
+ listenerRegistry = new ListenerRegistry<>();
+
+ // Registers the event sink before subscribing to mastership events so no
+ // translated event is dropped.
+ @Activate
+ public void activate() {
+ eventDispatcher.addSink(ReplicaInfoEvent.class, listenerRegistry);
+ mastershipService.addListener(mastershipListener);
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ eventDispatcher.removeSink(ReplicaInfoEvent.class);
+ mastershipService.removeListener(mastershipListener);
+ log.info("Stopped");
+ }
+
+ // Computed fresh from the mastership service on every call; not cached.
+ @Override
+ public ReplicaInfo getReplicaInfoFor(DeviceId deviceId) {
+ return buildFromRoleInfo(mastershipService.getNodesFor(deviceId));
+ }
+
+ @Override
+ public void addListener(ReplicaInfoEventListener listener) {
+ listenerRegistry.addListener(checkNotNull(listener));
+ }
+
+ @Override
+ public void removeListener(ReplicaInfoEventListener listener) {
+ listenerRegistry.removeListener(checkNotNull(listener));
+ }
+
+ // Normalizes a possibly-null backups list into an immutable ReplicaInfo.
+ private static ReplicaInfo buildFromRoleInfo(RoleInfo roles) {
+ List<NodeId> backups = roles.backups() == null ?
+ Collections.emptyList() : ImmutableList.copyOf(roles.backups());
+ return new ReplicaInfo(roles.master(), backups);
+ }
+
+ /**
+ * Forwards MASTER_CHANGED / BACKUPS_CHANGED mastership events as
+ * ReplicaInfo events; all other mastership event types are ignored.
+ */
+ final class InternalMastershipListener implements MastershipListener {
+
+ @Override
+ public void event(MastershipEvent event) {
+ final ReplicaInfo replicaInfo = buildFromRoleInfo(event.roleInfo());
+ switch (event.type()) {
+ case MASTER_CHANGED:
+ eventDispatcher.post(new ReplicaInfoEvent(MASTER_CHANGED,
+ event.subject(),
+ replicaInfo));
+ break;
+ case BACKUPS_CHANGED:
+ eventDispatcher.post(new ReplicaInfoEvent(BACKUPS_CHANGED,
+ event.subject(),
+ replicaInfo));
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/package-info.java
new file mode 100644
index 00000000..b3de23db
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of the distributed flow rule store using p2p synchronization
+ * protocol.
+ */
+package org.onosproject.store.flow.impl;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/package-info.java
new file mode 100644
index 00000000..10dd24e3
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flow/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Definitions of events and messages pertaining to replication of flow entries.
+ */
+package org.onosproject.store.flow;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flowobjective/impl/DistributedFlowObjectiveStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flowobjective/impl/DistributedFlowObjectiveStore.java
new file mode 100644
index 00000000..e8ea24fa
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flowobjective/impl/DistributedFlowObjectiveStore.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.flowobjective.impl;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.KryoNamespace;
+import org.onosproject.net.behaviour.DefaultNextGroup;
+import org.onosproject.net.behaviour.NextGroup;
+import org.onosproject.net.flowobjective.FlowObjectiveStore;
+import org.onosproject.net.flowobjective.FlowObjectiveStoreDelegate;
+import org.onosproject.net.flowobjective.ObjectiveEvent;
+import org.onosproject.store.AbstractStore;
+import org.onosproject.store.service.AtomicCounter;
+import org.onosproject.store.service.ConsistentMap;
+import org.onosproject.store.service.Serializer;
+import org.onosproject.store.service.StorageService;
+import org.onosproject.store.service.Versioned;
+import org.slf4j.Logger;
+
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * Manages the inventory of created next groups.
+ * Backed by a ConsistentMap keyed by next-objective id, with a distributed
+ * atomic counter supplying new ids.
+ */
+@Component(immediate = true, enabled = true)
+@Service
+public class DistributedFlowObjectiveStore
+ extends AbstractStore<ObjectiveEvent, FlowObjectiveStoreDelegate>
+ implements FlowObjectiveStore {
+
+ private final Logger log = getLogger(getClass());
+
+ private ConsistentMap<Integer, byte[]> nextGroups;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected StorageService storageService;
+
+ private AtomicCounter nextIds;
+
+ @Activate
+ public void activate() {
+ // Groups are stored as raw serialized bytes; only byte[] and the
+ // Versioned wrapper need Kryo registration.
+ nextGroups = storageService.<Integer, byte[]>consistentMapBuilder()
+ .withName("flowobjective-groups")
+ .withSerializer(Serializer.using(
+ new KryoNamespace.Builder()
+ .register(byte[].class)
+ .register(Versioned.class)
+ .build()))
+ .build();
+
+ nextIds = storageService.atomicCounterBuilder()
+ .withName("next-objective-counter")
+ .build();
+
+ log.info("Started");
+ }
+
+
+ @Deactivate
+ public void deactivate() {
+ log.info("Stopped");
+ }
+
+
+ // NOTE(review): putIfAbsent means an existing group for nextId is never
+ // overwritten, yet the ADD event is emitted regardless — confirm intended.
+ @Override
+ public void putNextGroup(Integer nextId, NextGroup group) {
+ nextGroups.putIfAbsent(nextId, group.data());
+ notifyDelegate(new ObjectiveEvent(ObjectiveEvent.Type.ADD, nextId));
+ }
+
+ @Override
+ public NextGroup getNextGroup(Integer nextId) {
+ Versioned<byte[]> versionGroup = nextGroups.get(nextId);
+ if (versionGroup != null) {
+ return new DefaultNextGroup(versionGroup.value());
+ }
+ return null;
+ }
+
+ // Narrowing cast: ids are expected to stay within int range — TODO confirm.
+ @Override
+ public int allocateNextId() {
+ return (int) nextIds.incrementAndGet();
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flowobjective/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flowobjective/impl/package-info.java
new file mode 100644
index 00000000..49acd878
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/flowobjective/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of distributed store for the flow objective state.
+ */
+package org.onosproject.store.flowobjective.impl; \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/group/impl/DistributedGroupStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/group/impl/DistributedGroupStore.java
new file mode 100644
index 00000000..cf48dcb8
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/group/impl/DistributedGroupStore.java
@@ -0,0 +1,1304 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.group.impl;
+
+import com.google.common.collect.FluentIterable;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Sets;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.KryoNamespace;
+import org.onlab.util.NewConcurrentHashMap;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.core.DefaultApplicationId;
+import org.onosproject.core.DefaultGroupId;
+import org.onosproject.core.GroupId;
+import org.onosproject.mastership.MastershipService;
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.MastershipRole;
+import org.onosproject.net.PortNumber;
+import org.onosproject.net.flow.DefaultTrafficTreatment;
+import org.onosproject.net.flow.FlowRule;
+import org.onosproject.net.flow.instructions.Instructions;
+import org.onosproject.net.flow.instructions.L0ModificationInstruction;
+import org.onosproject.net.flow.instructions.L2ModificationInstruction;
+import org.onosproject.net.flow.instructions.L3ModificationInstruction;
+import org.onosproject.net.group.DefaultGroup;
+import org.onosproject.net.group.DefaultGroupBucket;
+import org.onosproject.net.group.DefaultGroupDescription;
+import org.onosproject.net.group.DefaultGroupKey;
+import org.onosproject.net.group.Group;
+import org.onosproject.net.group.Group.GroupState;
+import org.onosproject.net.group.GroupBucket;
+import org.onosproject.net.group.GroupBuckets;
+import org.onosproject.net.group.GroupDescription;
+import org.onosproject.net.group.GroupEvent;
+import org.onosproject.net.group.GroupEvent.Type;
+import org.onosproject.net.group.GroupKey;
+import org.onosproject.net.group.GroupOperation;
+import org.onosproject.net.group.GroupStore;
+import org.onosproject.net.group.GroupStoreDelegate;
+import org.onosproject.net.group.StoredGroupBucketEntry;
+import org.onosproject.net.group.StoredGroupEntry;
+import org.onosproject.store.AbstractStore;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.service.MultiValuedTimestamp;
+import org.onosproject.store.serializers.DeviceIdSerializer;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.serializers.URISerializer;
+import org.onosproject.store.service.EventuallyConsistentMap;
+import org.onosproject.store.service.EventuallyConsistentMapBuilder;
+import org.onosproject.store.service.EventuallyConsistentMapEvent;
+import org.onosproject.store.service.EventuallyConsistentMapListener;
+import org.onosproject.store.service.StorageService;
+import org.slf4j.Logger;
+
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+
+import static org.apache.commons.lang3.concurrent.ConcurrentUtils.createIfAbsentUnchecked;
+import static org.onlab.util.Tools.groupedThreads;
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * Manages inventory of group entries using trivial in-memory implementation.
+ */
+@Component(immediate = true)
+@Service
+public class DistributedGroupStore
+ extends AbstractStore<GroupEvent, GroupStoreDelegate>
+ implements GroupStore {
+
    private final Logger log = getLogger(getClass());

    // Placeholder id assigned to groups queued while the device audit is
    // still pending; replaced by a real id once the audit completes.
    private final int dummyId = 0xffffffff;
    private final GroupId dummyGroupId = new DefaultGroupId(dummyId);

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected ClusterCommunicationService clusterCommunicator;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected ClusterService clusterService;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected StorageService storageService;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected MastershipService mastershipService;

    // Per device group table with (device id + app cookie) as key
    private EventuallyConsistentMap<GroupStoreKeyMapKey,
        StoredGroupEntry> groupStoreEntriesByKey = null;
    // Per device group table with (device id + group id) as key
    private final ConcurrentMap<DeviceId, ConcurrentMap<GroupId, StoredGroupEntry>>
        groupEntriesById = new ConcurrentHashMap<>();
    // Requests received while a device's initial audit was still pending.
    private EventuallyConsistentMap<GroupStoreKeyMapKey,
        StoredGroupEntry> auditPendingReqQueue = null;
    // Groups reported by the device but not created through this store.
    private final ConcurrentMap<DeviceId, ConcurrentMap<GroupId, Group>>
        extraneousGroupEntriesById = new ConcurrentHashMap<>();
    private ExecutorService messageHandlingExecutor;
    private static final int MESSAGE_HANDLER_THREAD_POOL_SIZE = 1;

    // Per-device initial-audit completion flag; all access is guarded by
    // synchronizing on this map (see deviceInitialAuditCompleted/Status).
    private final HashMap<DeviceId, Boolean> deviceAuditStatus =
        new HashMap<DeviceId, Boolean>();

    // Source of candidate values for getFreeGroupIdValue().
    private final AtomicInteger groupIdGen = new AtomicInteger();

    private KryoNamespace.Builder kryoBuilder = null;

    // Monotonic counter used as the tie-breaker in MultiValuedTimestamp.
    private final AtomicLong sequenceNumber = new AtomicLong(0);
+
    @Activate
    public void activate() {
        // Register every type that crosses the wire or lands in the
        // eventually-consistent maps. Registration order must stay stable
        // across nodes so Kryo type ids line up cluster-wide.
        kryoBuilder = new KryoNamespace.Builder()
            .register(DefaultGroup.class,
                      DefaultGroupBucket.class,
                      DefaultGroupDescription.class,
                      DefaultGroupKey.class,
                      GroupDescription.Type.class,
                      Group.GroupState.class,
                      GroupBuckets.class,
                      DefaultGroupId.class,
                      GroupStoreMessage.class,
                      GroupStoreMessage.Type.class,
                      UpdateType.class,
                      GroupStoreMessageSubjects.class,
                      MultiValuedTimestamp.class,
                      GroupStoreKeyMapKey.class,
                      GroupStoreIdMapKey.class,
                      GroupStoreMapKey.class
            )
            .register(new URISerializer(), URI.class)
            .register(new DeviceIdSerializer(), DeviceId.class)
            .register(PortNumber.class)
            .register(DefaultApplicationId.class)
            .register(DefaultTrafficTreatment.class,
                      Instructions.DropInstruction.class,
                      Instructions.OutputInstruction.class,
                      Instructions.GroupInstruction.class,
                      Instructions.TableTypeTransition.class,
                      FlowRule.Type.class,
                      L0ModificationInstruction.class,
                      L0ModificationInstruction.L0SubType.class,
                      L0ModificationInstruction.ModLambdaInstruction.class,
                      L2ModificationInstruction.class,
                      L2ModificationInstruction.L2SubType.class,
                      L2ModificationInstruction.ModEtherInstruction.class,
                      L2ModificationInstruction.PushHeaderInstructions.class,
                      L2ModificationInstruction.ModVlanIdInstruction.class,
                      L2ModificationInstruction.ModVlanPcpInstruction.class,
                      L2ModificationInstruction.ModMplsLabelInstruction.class,
                      L2ModificationInstruction.ModMplsTtlInstruction.class,
                      L3ModificationInstruction.class,
                      L3ModificationInstruction.L3SubType.class,
                      L3ModificationInstruction.ModIPInstruction.class,
                      L3ModificationInstruction.ModIPv6FlowLabelInstruction.class,
                      L3ModificationInstruction.ModTtlInstruction.class,
                      org.onlab.packet.MplsLabel.class
            )
            .register(org.onosproject.cluster.NodeId.class)
            .register(KryoNamespaces.BASIC)
            .register(KryoNamespaces.MISC);

        // Single-threaded pool: remote group-op requests for this node are
        // processed serially, preserving arrival order.
        messageHandlingExecutor = Executors.
                newFixedThreadPool(MESSAGE_HANDLER_THREAD_POOL_SIZE,
                                   groupedThreads("onos/store/group",
                                                  "message-handlers"));

        // Receive group add/update/delete requests relayed here by
        // non-master nodes (see storeGroupDescription and friends).
        clusterCommunicator.addSubscriber(GroupStoreMessageSubjects.REMOTE_GROUP_OP_REQUEST,
                                          kryoBuilder.build()::deserialize,
                                          this::process,
                                          messageHandlingExecutor);

        log.debug("Creating EC map groupstorekeymap");
        EventuallyConsistentMapBuilder<GroupStoreKeyMapKey, StoredGroupEntry>
                keyMapBuilder = storageService.eventuallyConsistentMapBuilder();

        // Authoritative (device id + app cookie) -> group entry map;
        // timestamps pair wall-clock time with a local sequence number to
        // break same-millisecond ties.
        groupStoreEntriesByKey = keyMapBuilder
                .withName("groupstorekeymap")
                .withSerializer(kryoBuilder)
                .withTimestampProvider((k, v) -> new MultiValuedTimestamp<>(System.currentTimeMillis(),
                                                                            sequenceNumber.getAndIncrement()))
                .build();
        groupStoreEntriesByKey.addListener(new GroupStoreKeyMapListener());
        log.debug("Current size of groupstorekeymap:{}",
                  groupStoreEntriesByKey.size());

        log.debug("Creating EC map pendinggroupkeymap");
        EventuallyConsistentMapBuilder<GroupStoreKeyMapKey, StoredGroupEntry>
                auditMapBuilder = storageService.eventuallyConsistentMapBuilder();

        // Holds requests queued while a device audit is pending; drained by
        // deviceInitialAuditCompleted().
        auditPendingReqQueue = auditMapBuilder
                .withName("pendinggroupkeymap")
                .withSerializer(kryoBuilder)
                .withTimestampProvider((k, v) -> new MultiValuedTimestamp<>(System.currentTimeMillis(),
                                                                            sequenceNumber.getAndIncrement()))
                .build();
        log.debug("Current size of pendinggroupkeymap:{}",
                  auditPendingReqQueue.size());

        log.info("Started");
    }
+
+ @Deactivate
+ public void deactivate() {
+ groupStoreEntriesByKey.destroy();
+ auditPendingReqQueue.destroy();
+ log.info("Stopped");
+ }
+
+ private static NewConcurrentHashMap<GroupId, Group>
+ lazyEmptyExtraneousGroupIdTable() {
+ return NewConcurrentHashMap.<GroupId, Group>ifNeeded();
+ }
+
+ private static NewConcurrentHashMap<GroupId, StoredGroupEntry>
+ lazyEmptyGroupIdTable() {
+ return NewConcurrentHashMap.<GroupId, StoredGroupEntry>ifNeeded();
+ }
+
+ /**
+ * Returns the group store eventual consistent key map.
+ *
+ * @return Map representing group key table.
+ */
+ private EventuallyConsistentMap<GroupStoreKeyMapKey, StoredGroupEntry>
+ getGroupStoreKeyMap() {
+ return groupStoreEntriesByKey;
+ }
+
+ /**
+ * Returns the group id table for specified device.
+ *
+ * @param deviceId identifier of the device
+ * @return Map representing group key table of given device.
+ */
+ private ConcurrentMap<GroupId, StoredGroupEntry> getGroupIdTable(DeviceId deviceId) {
+ return createIfAbsentUnchecked(groupEntriesById,
+ deviceId, lazyEmptyGroupIdTable());
+ }
+
+ /**
+ * Returns the pending group request table.
+ *
+ * @return Map representing group key table.
+ */
+ private EventuallyConsistentMap<GroupStoreKeyMapKey, StoredGroupEntry>
+ getPendingGroupKeyTable() {
+ return auditPendingReqQueue;
+ }
+
+ /**
+ * Returns the extraneous group id table for specified device.
+ *
+ * @param deviceId identifier of the device
+ * @return Map representing group key table of given device.
+ */
+ private ConcurrentMap<GroupId, Group>
+ getExtraneousGroupIdTable(DeviceId deviceId) {
+ return createIfAbsentUnchecked(extraneousGroupEntriesById,
+ deviceId,
+ lazyEmptyExtraneousGroupIdTable());
+ }
+
+ /**
+ * Returns the number of groups for the specified device in the store.
+ *
+ * @return number of groups for the specified device
+ */
+ @Override
+ public int getGroupCount(DeviceId deviceId) {
+ return (getGroups(deviceId) != null) ?
+ Iterables.size(getGroups(deviceId)) : 0;
+ }
+
+ /**
+ * Returns the groups associated with a device.
+ *
+ * @param deviceId the device ID
+ *
+ * @return the group entries
+ */
+ @Override
+ public Iterable<Group> getGroups(DeviceId deviceId) {
+ // flatten and make iterator unmodifiable
+ log.debug("getGroups: for device {} total number of groups {}",
+ deviceId, getGroupStoreKeyMap().values().size());
+ return FluentIterable.from(getGroupStoreKeyMap().values())
+ .filter(input -> input.deviceId().equals(deviceId))
+ .transform(input -> input);
+ }
+
+ private Iterable<StoredGroupEntry> getStoredGroups(DeviceId deviceId) {
+ // flatten and make iterator unmodifiable
+ log.debug("getGroups: for device {} total number of groups {}",
+ deviceId, getGroupStoreKeyMap().values().size());
+ return FluentIterable.from(getGroupStoreKeyMap().values())
+ .filter(input -> input.deviceId().equals(deviceId));
+ }
+
+ /**
+ * Returns the stored group entry.
+ *
+ * @param deviceId the device ID
+ * @param appCookie the group key
+ *
+ * @return a group associated with the key
+ */
+ @Override
+ public Group getGroup(DeviceId deviceId, GroupKey appCookie) {
+ return getStoredGroupEntry(deviceId, appCookie);
+ }
+
+ private StoredGroupEntry getStoredGroupEntry(DeviceId deviceId,
+ GroupKey appCookie) {
+ return getGroupStoreKeyMap().get(new GroupStoreKeyMapKey(deviceId,
+ appCookie));
+ }
+
+ @Override
+ public Group getGroup(DeviceId deviceId, GroupId groupId) {
+ return getStoredGroupEntry(deviceId, groupId);
+ }
+
+ private StoredGroupEntry getStoredGroupEntry(DeviceId deviceId,
+ GroupId groupId) {
+ return getGroupIdTable(deviceId).get(groupId);
+ }
+
+ private int getFreeGroupIdValue(DeviceId deviceId) {
+ int freeId = groupIdGen.incrementAndGet();
+
+ while (true) {
+ Group existing = getGroup(deviceId, new DefaultGroupId(freeId));
+ if (existing == null) {
+ existing = (
+ extraneousGroupEntriesById.get(deviceId) != null) ?
+ extraneousGroupEntriesById.get(deviceId).
+ get(new DefaultGroupId(freeId)) :
+ null;
+ }
+ if (existing != null) {
+ freeId = groupIdGen.incrementAndGet();
+ } else {
+ break;
+ }
+ }
+ log.debug("getFreeGroupIdValue: Next Free ID is {}", freeId);
+ return freeId;
+ }
+
+ /**
+ * Stores a new group entry using the information from group description.
+ *
+ * @param groupDesc group description to be used to create group entry
+ */
+ @Override
+ public void storeGroupDescription(GroupDescription groupDesc) {
+ log.debug("In storeGroupDescription");
+ // Check if a group is existing with the same key
+ if (getGroup(groupDesc.deviceId(), groupDesc.appCookie()) != null) {
+ log.warn("Group already exists with the same key {}",
+ groupDesc.appCookie());
+ return;
+ }
+
+ // Check if group to be created by a remote instance
+ if (mastershipService.getLocalRole(groupDesc.deviceId()) != MastershipRole.MASTER) {
+ log.debug("storeGroupDescription: Device {} local role is not MASTER",
+ groupDesc.deviceId());
+ if (mastershipService.getMasterFor(groupDesc.deviceId()) == null) {
+ log.error("No Master for device {}..."
+ + "Can not perform add group operation",
+ groupDesc.deviceId());
+ //TODO: Send Group operation failure event
+ return;
+ }
+ GroupStoreMessage groupOp = GroupStoreMessage.
+ createGroupAddRequestMsg(groupDesc.deviceId(),
+ groupDesc);
+
+ clusterCommunicator.unicast(groupOp,
+ GroupStoreMessageSubjects.REMOTE_GROUP_OP_REQUEST,
+ m -> kryoBuilder.build().serialize(m),
+ mastershipService.getMasterFor(groupDesc.deviceId())).whenComplete((result, error) -> {
+ if (error != null) {
+ log.warn("Failed to send request to master: {} to {}",
+ groupOp,
+ mastershipService.getMasterFor(groupDesc.deviceId()));
+ //TODO: Send Group operation failure event
+ } else {
+ log.debug("Sent Group operation request for device {} "
+ + "to remote MASTER {}",
+ groupDesc.deviceId(),
+ mastershipService.getMasterFor(groupDesc.deviceId()));
+ }
+ });
+ return;
+ }
+
+ log.debug("Store group for device {} is getting handled locally",
+ groupDesc.deviceId());
+ storeGroupDescriptionInternal(groupDesc);
+ }
+
+ private Group getMatchingExtraneousGroupbyId(DeviceId deviceId, Integer groupId) {
+ ConcurrentMap<GroupId, Group> extraneousMap =
+ extraneousGroupEntriesById.get(deviceId);
+ if (extraneousMap == null) {
+ return null;
+ }
+ return extraneousMap.get(new DefaultGroupId(groupId));
+ }
+
+ private Group getMatchingExtraneousGroupbyBuckets(DeviceId deviceId,
+ GroupBuckets buckets) {
+ ConcurrentMap<GroupId, Group> extraneousMap =
+ extraneousGroupEntriesById.get(deviceId);
+ if (extraneousMap == null) {
+ return null;
+ }
+
+ for (Group extraneousGroup:extraneousMap.values()) {
+ if (extraneousGroup.buckets().equals(buckets)) {
+ return extraneousGroup;
+ }
+ }
+ return null;
+ }
+
+ private void storeGroupDescriptionInternal(GroupDescription groupDesc) {
+ // Check if a group is existing with the same key
+ if (getGroup(groupDesc.deviceId(), groupDesc.appCookie()) != null) {
+ return;
+ }
+
+ if (deviceAuditStatus.get(groupDesc.deviceId()) == null) {
+ // Device group audit has not completed yet
+ // Add this group description to pending group key table
+ // Create a group entry object with Dummy Group ID
+ log.debug("storeGroupDescriptionInternal: Device {} AUDIT pending...Queuing Group ADD request",
+ groupDesc.deviceId());
+ StoredGroupEntry group = new DefaultGroup(dummyGroupId, groupDesc);
+ group.setState(GroupState.WAITING_AUDIT_COMPLETE);
+ EventuallyConsistentMap<GroupStoreKeyMapKey, StoredGroupEntry> pendingKeyTable =
+ getPendingGroupKeyTable();
+ pendingKeyTable.put(new GroupStoreKeyMapKey(groupDesc.deviceId(),
+ groupDesc.appCookie()),
+ group);
+ return;
+ }
+
+ Group matchingExtraneousGroup = null;
+ if (groupDesc.givenGroupId() != null) {
+ //Check if there is a extraneous group existing with the same Id
+ matchingExtraneousGroup = getMatchingExtraneousGroupbyId(
+ groupDesc.deviceId(), groupDesc.givenGroupId());
+ if (matchingExtraneousGroup != null) {
+ log.debug("storeGroupDescriptionInternal: Matching extraneous group found in Device {} for group id {}",
+ groupDesc.deviceId(),
+ groupDesc.givenGroupId());
+ //Check if the group buckets matches with user provided buckets
+ if (matchingExtraneousGroup.buckets().equals(groupDesc.buckets())) {
+ //Group is already existing with the same buckets and Id
+ // Create a group entry object
+ log.debug("storeGroupDescriptionInternal: Buckets also matching in Device {} for group id {}",
+ groupDesc.deviceId(),
+ groupDesc.givenGroupId());
+ StoredGroupEntry group = new DefaultGroup(
+ matchingExtraneousGroup.id(), groupDesc);
+ // Insert the newly created group entry into key and id maps
+ getGroupStoreKeyMap().
+ put(new GroupStoreKeyMapKey(groupDesc.deviceId(),
+ groupDesc.appCookie()), group);
+ // Ensure it also inserted into group id based table to
+ // avoid any chances of duplication in group id generation
+ getGroupIdTable(groupDesc.deviceId()).
+ put(matchingExtraneousGroup.id(), group);
+ addOrUpdateGroupEntry(matchingExtraneousGroup);
+ removeExtraneousGroupEntry(matchingExtraneousGroup);
+ return;
+ } else {
+ //Group buckets are not matching. Update group
+ //with user provided buckets.
+ //TODO
+ log.debug("storeGroupDescriptionInternal: Buckets are not matching in Device {} for group id {}",
+ groupDesc.deviceId(),
+ groupDesc.givenGroupId());
+ }
+ }
+ } else {
+ //Check if there is an extraneous group with user provided buckets
+ matchingExtraneousGroup = getMatchingExtraneousGroupbyBuckets(
+ groupDesc.deviceId(), groupDesc.buckets());
+ if (matchingExtraneousGroup != null) {
+ //Group is already existing with the same buckets.
+ //So reuse this group.
+ log.debug("storeGroupDescriptionInternal: Matching extraneous group found in Device {}",
+ groupDesc.deviceId());
+ //Create a group entry object
+ StoredGroupEntry group = new DefaultGroup(
+ matchingExtraneousGroup.id(), groupDesc);
+ // Insert the newly created group entry into key and id maps
+ getGroupStoreKeyMap().
+ put(new GroupStoreKeyMapKey(groupDesc.deviceId(),
+ groupDesc.appCookie()), group);
+ // Ensure it also inserted into group id based table to
+ // avoid any chances of duplication in group id generation
+ getGroupIdTable(groupDesc.deviceId()).
+ put(matchingExtraneousGroup.id(), group);
+ addOrUpdateGroupEntry(matchingExtraneousGroup);
+ removeExtraneousGroupEntry(matchingExtraneousGroup);
+ return;
+ } else {
+ //TODO: Check if there are any empty groups that can be used here
+ log.debug("storeGroupDescriptionInternal: No matching extraneous groups found in Device {}",
+ groupDesc.deviceId());
+ }
+ }
+
+ GroupId id = null;
+ if (groupDesc.givenGroupId() == null) {
+ // Get a new group identifier
+ id = new DefaultGroupId(getFreeGroupIdValue(groupDesc.deviceId()));
+ } else {
+ id = new DefaultGroupId(groupDesc.givenGroupId());
+ }
+ // Create a group entry object
+ StoredGroupEntry group = new DefaultGroup(id, groupDesc);
+ // Insert the newly created group entry into key and id maps
+ getGroupStoreKeyMap().
+ put(new GroupStoreKeyMapKey(groupDesc.deviceId(),
+ groupDesc.appCookie()), group);
+ // Ensure it also inserted into group id based table to
+ // avoid any chances of duplication in group id generation
+ getGroupIdTable(groupDesc.deviceId()).
+ put(id, group);
+ log.debug("storeGroupDescriptionInternal: Processing Group ADD request for Id {} in device {}",
+ id,
+ groupDesc.deviceId());
+ notifyDelegate(new GroupEvent(GroupEvent.Type.GROUP_ADD_REQUESTED,
+ group));
+ }
+
+ /**
+ * Updates the existing group entry with the information
+ * from group description.
+ *
+ * @param deviceId the device ID
+ * @param oldAppCookie the current group key
+ * @param type update type
+ * @param newBuckets group buckets for updates
+ * @param newAppCookie optional new group key
+ */
+ @Override
+ public void updateGroupDescription(DeviceId deviceId,
+ GroupKey oldAppCookie,
+ UpdateType type,
+ GroupBuckets newBuckets,
+ GroupKey newAppCookie) {
+ // Check if group update to be done by a remote instance
+ if (mastershipService.getMasterFor(deviceId) != null &&
+ mastershipService.getLocalRole(deviceId) != MastershipRole.MASTER) {
+ log.debug("updateGroupDescription: Device {} local role is not MASTER",
+ deviceId);
+ if (mastershipService.getMasterFor(deviceId) == null) {
+ log.error("No Master for device {}..."
+ + "Can not perform update group operation",
+ deviceId);
+ //TODO: Send Group operation failure event
+ return;
+ }
+ GroupStoreMessage groupOp = GroupStoreMessage.
+ createGroupUpdateRequestMsg(deviceId,
+ oldAppCookie,
+ type,
+ newBuckets,
+ newAppCookie);
+
+ clusterCommunicator.unicast(groupOp,
+ GroupStoreMessageSubjects.REMOTE_GROUP_OP_REQUEST,
+ m -> kryoBuilder.build().serialize(m),
+ mastershipService.getMasterFor(deviceId)).whenComplete((result, error) -> {
+ if (error != null) {
+ log.warn("Failed to send request to master: {} to {}",
+ groupOp,
+ mastershipService.getMasterFor(deviceId), error);
+ }
+ //TODO: Send Group operation failure event
+ });
+ return;
+ }
+ log.debug("updateGroupDescription for device {} is getting handled locally",
+ deviceId);
+ updateGroupDescriptionInternal(deviceId,
+ oldAppCookie,
+ type,
+ newBuckets,
+ newAppCookie);
+ }
+
    /**
     * Applies a bucket update to an existing group on this (master) node.
     * Builds a new group entry carrying the merged bucket list, moves it to
     * PENDING_UPDATE, re-puts it in the key map and raises
     * GROUP_UPDATE_REQUESTED. No-ops when the key is unknown or the update
     * leaves the buckets unchanged.
     */
    private void updateGroupDescriptionInternal(DeviceId deviceId,
                                                GroupKey oldAppCookie,
                                                UpdateType type,
                                                GroupBuckets newBuckets,
                                                GroupKey newAppCookie) {
        // Check if a group is existing with the provided key
        Group oldGroup = getGroup(deviceId, oldAppCookie);
        if (oldGroup == null) {
            log.warn("updateGroupDescriptionInternal: Group not found...strange");
            return;
        }

        // null means the ADD/REMOVE produced no change in the bucket list.
        List<GroupBucket> newBucketList = getUpdatedBucketList(oldGroup,
                                                               type,
                                                               newBuckets);
        if (newBucketList != null) {
            // Create a new group object from the old group
            GroupBuckets updatedBuckets = new GroupBuckets(newBucketList);
            // Key change is optional; keep the old cookie when none given.
            GroupKey newCookie = (newAppCookie != null) ? newAppCookie : oldAppCookie;
            GroupDescription updatedGroupDesc = new DefaultGroupDescription(
                    oldGroup.deviceId(),
                    oldGroup.type(),
                    updatedBuckets,
                    newCookie,
                    oldGroup.givenGroupId(),
                    oldGroup.appId());
            StoredGroupEntry newGroup = new DefaultGroup(oldGroup.id(),
                                                         updatedGroupDesc);
            log.debug("updateGroupDescriptionInternal: group entry {} in device {} moving from {} to PENDING_UPDATE",
                      oldGroup.id(),
                      oldGroup.deviceId(),
                      oldGroup.state());
            newGroup.setState(GroupState.PENDING_UPDATE);
            // Carry the old entry's statistics over to the replacement.
            newGroup.setLife(oldGroup.life());
            newGroup.setPackets(oldGroup.packets());
            newGroup.setBytes(oldGroup.bytes());
            //Update the group entry in groupkey based map.
            //Update to groupid based map will happen in the
            //groupkey based map update listener
            log.debug("updateGroupDescriptionInternal with type {}: Group updated with buckets",
                      type);
            getGroupStoreKeyMap().
                put(new GroupStoreKeyMapKey(newGroup.deviceId(),
                                            newGroup.appCookie()), newGroup);
            notifyDelegate(new GroupEvent(Type.GROUP_UPDATE_REQUESTED, newGroup));
        } else {
            log.warn("updateGroupDescriptionInternal with type {}: No "
                    + "change in the buckets in update", type);
        }
    }
+
+ private List<GroupBucket> getUpdatedBucketList(Group oldGroup,
+ UpdateType type,
+ GroupBuckets buckets) {
+ GroupBuckets oldBuckets = oldGroup.buckets();
+ List<GroupBucket> newBucketList = new ArrayList<GroupBucket>(
+ oldBuckets.buckets());
+ boolean groupDescUpdated = false;
+
+ if (type == UpdateType.ADD) {
+ // Check if the any of the new buckets are part of
+ // the old bucket list
+ for (GroupBucket addBucket:buckets.buckets()) {
+ if (!newBucketList.contains(addBucket)) {
+ newBucketList.add(addBucket);
+ groupDescUpdated = true;
+ }
+ }
+ } else if (type == UpdateType.REMOVE) {
+ // Check if the to be removed buckets are part of the
+ // old bucket list
+ for (GroupBucket removeBucket:buckets.buckets()) {
+ if (newBucketList.contains(removeBucket)) {
+ newBucketList.remove(removeBucket);
+ groupDescUpdated = true;
+ }
+ }
+ }
+
+ if (groupDescUpdated) {
+ return newBucketList;
+ } else {
+ return null;
+ }
+ }
+
+ /**
+ * Triggers deleting the existing group entry.
+ *
+ * @param deviceId the device ID
+ * @param appCookie the group key
+ */
+ @Override
+ public void deleteGroupDescription(DeviceId deviceId,
+ GroupKey appCookie) {
+ // Check if group to be deleted by a remote instance
+ if (mastershipService.
+ getLocalRole(deviceId) != MastershipRole.MASTER) {
+ log.debug("deleteGroupDescription: Device {} local role is not MASTER",
+ deviceId);
+ if (mastershipService.getMasterFor(deviceId) == null) {
+ log.error("No Master for device {}..."
+ + "Can not perform delete group operation",
+ deviceId);
+ //TODO: Send Group operation failure event
+ return;
+ }
+ GroupStoreMessage groupOp = GroupStoreMessage.
+ createGroupDeleteRequestMsg(deviceId,
+ appCookie);
+
+ clusterCommunicator.unicast(groupOp,
+ GroupStoreMessageSubjects.REMOTE_GROUP_OP_REQUEST,
+ m -> kryoBuilder.build().serialize(m),
+ mastershipService.getMasterFor(deviceId)).whenComplete((result, error) -> {
+ if (error != null) {
+ log.warn("Failed to send request to master: {} to {}",
+ groupOp,
+ mastershipService.getMasterFor(deviceId), error);
+ }
+ //TODO: Send Group operation failure event
+ });
+ return;
+ }
+ log.debug("deleteGroupDescription in device {} is getting handled locally",
+ deviceId);
+ deleteGroupDescriptionInternal(deviceId, appCookie);
+ }
+
+ private void deleteGroupDescriptionInternal(DeviceId deviceId,
+ GroupKey appCookie) {
+ // Check if a group is existing with the provided key
+ StoredGroupEntry existing = getStoredGroupEntry(deviceId, appCookie);
+ if (existing == null) {
+ return;
+ }
+
+ log.debug("deleteGroupDescriptionInternal: group entry {} in device {} moving from {} to PENDING_DELETE",
+ existing.id(),
+ existing.deviceId(),
+ existing.state());
+ synchronized (existing) {
+ existing.setState(GroupState.PENDING_DELETE);
+ }
+ log.debug("deleteGroupDescriptionInternal: in device {} issuing GROUP_REMOVE_REQUESTED",
+ deviceId);
+ notifyDelegate(new GroupEvent(Type.GROUP_REMOVE_REQUESTED, existing));
+ }
+
+ /**
+ * Stores a new group entry, or updates an existing entry.
+ *
+ * @param group group entry
+ */
+ @Override
+ public void addOrUpdateGroupEntry(Group group) {
+ // check if this new entry is an update to an existing entry
+ StoredGroupEntry existing = getStoredGroupEntry(group.deviceId(),
+ group.id());
+ GroupEvent event = null;
+
+ if (existing != null) {
+ log.debug("addOrUpdateGroupEntry: updating group entry {} in device {}",
+ group.id(),
+ group.deviceId());
+ synchronized (existing) {
+ for (GroupBucket bucket:group.buckets().buckets()) {
+ Optional<GroupBucket> matchingBucket =
+ existing.buckets().buckets()
+ .stream()
+ .filter((existingBucket)->(existingBucket.equals(bucket)))
+ .findFirst();
+ if (matchingBucket.isPresent()) {
+ ((StoredGroupBucketEntry) matchingBucket.
+ get()).setPackets(bucket.packets());
+ ((StoredGroupBucketEntry) matchingBucket.
+ get()).setBytes(bucket.bytes());
+ } else {
+ log.warn("addOrUpdateGroupEntry: No matching "
+ + "buckets to update stats");
+ }
+ }
+ existing.setLife(group.life());
+ existing.setPackets(group.packets());
+ existing.setBytes(group.bytes());
+ if ((existing.state() == GroupState.PENDING_ADD) ||
+ (existing.state() == GroupState.PENDING_ADD_RETRY)) {
+ log.debug("addOrUpdateGroupEntry: group entry {} in device {} moving from {} to ADDED",
+ existing.id(),
+ existing.deviceId(),
+ existing.state());
+ existing.setState(GroupState.ADDED);
+ existing.setIsGroupStateAddedFirstTime(true);
+ event = new GroupEvent(Type.GROUP_ADDED, existing);
+ } else {
+ log.debug("addOrUpdateGroupEntry: group entry {} in device {} moving from {} to ADDED",
+ existing.id(),
+ existing.deviceId(),
+ GroupState.PENDING_UPDATE);
+ existing.setState(GroupState.ADDED);
+ existing.setIsGroupStateAddedFirstTime(false);
+ event = new GroupEvent(Type.GROUP_UPDATED, existing);
+ }
+ //Re-PUT map entries to trigger map update events
+ getGroupStoreKeyMap().
+ put(new GroupStoreKeyMapKey(existing.deviceId(),
+ existing.appCookie()), existing);
+ }
+ } else {
+ log.warn("addOrUpdateGroupEntry: Group update "
+ + "happening for a non-existing entry in the map");
+ }
+
+ if (event != null) {
+ notifyDelegate(event);
+ }
+ }
+
+ /**
+ * Removes the group entry from store.
+ *
+ * @param group group entry
+ */
+ @Override
+ public void removeGroupEntry(Group group) {
+ StoredGroupEntry existing = getStoredGroupEntry(group.deviceId(),
+ group.id());
+
+ if (existing != null) {
+ log.debug("removeGroupEntry: removing group entry {} in device {}",
+ group.id(),
+ group.deviceId());
+ //Removal from groupid based map will happen in the
+ //map update listener
+ getGroupStoreKeyMap().remove(new GroupStoreKeyMapKey(existing.deviceId(),
+ existing.appCookie()));
+ notifyDelegate(new GroupEvent(Type.GROUP_REMOVED, existing));
+ } else {
+ log.warn("removeGroupEntry for {} in device{} is "
+ + "not existing in our maps",
+ group.id(),
+ group.deviceId());
+ }
+ }
+
    /**
     * Records completion (or reset) of a device's initial group audit.
     * On completion, replays every request queued in the pending table for
     * that device through storeGroupDescriptionInternal and removes it from
     * the queue. Access to deviceAuditStatus is serialized on the map.
     */
    @Override
    public void deviceInitialAuditCompleted(DeviceId deviceId,
                                            boolean completed) {
        synchronized (deviceAuditStatus) {
            if (completed) {
                log.debug("AUDIT completed for device {}",
                          deviceId);
                deviceAuditStatus.put(deviceId, true);
                // Execute all pending group requests; snapshot first so the
                // per-entry remove below does not disturb the iteration.
                List<StoredGroupEntry> pendingGroupRequests =
                        getPendingGroupKeyTable().values()
                                .stream()
                                .filter(g -> g.deviceId().equals(deviceId))
                                .collect(Collectors.toList());
                log.debug("processing pending group add requests for device {} and number of pending requests {}",
                          deviceId,
                          pendingGroupRequests.size());
                for (Group group : pendingGroupRequests) {
                    // Rebuild the description from the queued entry (the
                    // queued dummy id is discarded by the store path).
                    GroupDescription tmp = new DefaultGroupDescription(
                            group.deviceId(),
                            group.type(),
                            group.buckets(),
                            group.appCookie(),
                            group.givenGroupId(),
                            group.appId());
                    storeGroupDescriptionInternal(tmp);
                    getPendingGroupKeyTable().
                        remove(new GroupStoreKeyMapKey(deviceId, group.appCookie()));
                }
            } else {
                // Reset only clears a previously-set "audited" flag; an
                // unknown device stays unknown (null).
                Boolean audited = deviceAuditStatus.get(deviceId);
                if (audited != null && audited) {
                    log.debug("Clearing AUDIT status for device {}", deviceId);
                    deviceAuditStatus.put(deviceId, false);
                }
            }
        }
    }
+
+ @Override
+ public boolean deviceInitialAuditStatus(DeviceId deviceId) {
+ synchronized (deviceAuditStatus) {
+ Boolean audited = deviceAuditStatus.get(deviceId);
+ return audited != null && audited;
+ }
+ }
+
+    @Override
+    public void groupOperationFailed(DeviceId deviceId, GroupOperation operation) {
+
+        // Look up the stored entry that the failed operation referred to.
+        StoredGroupEntry existing = getStoredGroupEntry(deviceId,
+                                                        operation.groupId());
+
+        if (existing == null) {
+            log.warn("No group entry with ID {} found ", operation.groupId());
+            return;
+        }
+
+        // Fix: the two concatenated fragments previously rendered as
+        // "failedfor group" in the log output (missing space).
+        log.warn("groupOperationFailed: group operation {} failed "
+                + "for group {} in device {}",
+                operation.opType(),
+                existing.id(),
+                existing.deviceId());
+        switch (operation.opType()) {
+            case ADD:
+                if (existing.state() == GroupState.PENDING_ADD) {
+                    //TODO: Need to add support for passing the group
+                    //operation failure reason from group provider.
+                    //If the error type is anything other than GROUP_EXISTS,
+                    //then the GROUP_ADD_FAILED event should be raised even
+                    //in PENDING_ADD_RETRY state also.
+                    notifyDelegate(new GroupEvent(Type.GROUP_ADD_FAILED, existing));
+                    log.warn("groupOperationFailed: cleaningup "
+                            + "group {} from store in device {}....",
+                            existing.id(),
+                            existing.deviceId());
+                    //Removal from groupid based map will happen in the
+                    //map update listener
+                    getGroupStoreKeyMap().remove(new GroupStoreKeyMapKey(existing.deviceId(),
+                                                                         existing.appCookie()));
+                }
+                break;
+            case MODIFY:
+                notifyDelegate(new GroupEvent(Type.GROUP_UPDATE_FAILED, existing));
+                break;
+            case DELETE:
+                notifyDelegate(new GroupEvent(Type.GROUP_REMOVE_FAILED, existing));
+                break;
+            default:
+                log.warn("Unknown group operation type {}", operation.opType());
+        }
+    }
+
+    @Override
+    public void addOrUpdateExtraneousGroupEntry(Group group) {
+        log.debug("add/update extraneous group entry {} in device {}",
+                  group.id(),
+                  group.deviceId());
+        // Extraneous groups are retained (never aged out here) so that a
+        // later request with an identical bucket set can re-use them.
+        getExtraneousGroupIdTable(group.deviceId()).put(group.id(), group);
+    }
+
+    @Override
+    public void removeExtraneousGroupEntry(Group group) {
+        log.debug("remove extraneous group entry {} of device {} from store",
+                  group.id(),
+                  group.deviceId());
+        // Drop the data-plane-only entry from this device's extraneous table.
+        getExtraneousGroupIdTable(group.deviceId()).remove(group.id());
+    }
+
+ @Override
+ public Iterable<Group> getExtraneousGroups(DeviceId deviceId) {
+ // flatten and make iterator unmodifiable
+ return FluentIterable.from(
+ getExtraneousGroupIdTable(deviceId).values());
+ }
+
+ /**
+ * Map handler to receive any events when the group key map is updated.
+ */
+    private class GroupStoreKeyMapListener implements
+            EventuallyConsistentMapListener<GroupStoreKeyMapKey, StoredGroupEntry> {
+
+        /**
+         * Reacts to PUT/REMOVE events on the key-indexed group map, keeps the
+         * id-indexed table in sync and raises the corresponding GroupEvent.
+         */
+        @Override
+        public void event(EventuallyConsistentMapEvent<GroupStoreKeyMapKey,
+                                  StoredGroupEntry> mapEvent) {
+            GroupEvent groupEvent = null;
+            GroupStoreKeyMapKey key = mapEvent.key();
+            StoredGroupEntry group = mapEvent.value();
+            if ((key == null) && (group == null)) {
+                log.error("GroupStoreKeyMapListener: Received "
+                                  + "event {} with null entry", mapEvent.type());
+                return;
+            } else if (group == null) {
+                // Events (typically REMOVE) may carry no value; recover the
+                // entry from the id-indexed table via the application cookie.
+                // Fix: findFirst().get() threw NoSuchElementException when no
+                // match existed, making the null check below unreachable —
+                // orElse(null) lets the error branch actually run.
+                group = getGroupIdTable(key.deviceId()).values()
+                        .stream()
+                        .filter((storedGroup) -> (storedGroup.appCookie().equals(key.appCookie)))
+                        .findFirst()
+                        .orElse(null);
+                if (group == null) {
+                    log.error("GroupStoreKeyMapListener: Received "
+                                      + "event {} with null entry... can not process", mapEvent.type());
+                    return;
+                }
+            }
+            log.trace("received groupid map event {} for id {} in device {}",
+                      mapEvent.type(),
+                      group.id(),
+                      key.deviceId());
+            if (mapEvent.type() == EventuallyConsistentMapEvent.Type.PUT) {
+                // Update the group ID table
+                getGroupIdTable(group.deviceId()).put(group.id(), group);
+                if (mapEvent.value().state() == Group.GroupState.ADDED) {
+                    // First ADDED transition → GROUP_ADDED; subsequent ones
+                    // are treated as updates.
+                    if (mapEvent.value().isGroupStateAddedFirstTime()) {
+                        groupEvent = new GroupEvent(Type.GROUP_ADDED,
+                                                    mapEvent.value());
+                        log.trace("Received first time GROUP_ADDED state update for id {} in device {}",
+                                  group.id(),
+                                  group.deviceId());
+                    } else {
+                        groupEvent = new GroupEvent(Type.GROUP_UPDATED,
+                                                    mapEvent.value());
+                        log.trace("Received following GROUP_ADDED state update for id {} in device {}",
+                                  group.id(),
+                                  group.deviceId());
+                    }
+                }
+            } else if (mapEvent.type() == EventuallyConsistentMapEvent.Type.REMOVE) {
+                groupEvent = new GroupEvent(Type.GROUP_REMOVED, group);
+                // Remove the entry from the group ID table
+                getGroupIdTable(group.deviceId()).remove(group.id(), group);
+            }
+
+            if (groupEvent != null) {
+                notifyDelegate(groupEvent);
+            }
+        }
+    }
+
+    /**
+     * Handles a group operation forwarded by a peer node; only executed when
+     * this node is MASTER for the target device.
+     */
+    private void process(GroupStoreMessage groupOp) {
+        log.debug("Received remote group operation {} request for device {}",
+                  groupOp.type(),
+                  groupOp.deviceId());
+        if (!mastershipService.isLocalMaster(groupOp.deviceId())) {
+            log.warn("This node is not MASTER for device {}", groupOp.deviceId());
+            return;
+        }
+        switch (groupOp.type()) {
+            case ADD:
+                storeGroupDescriptionInternal(groupOp.groupDesc());
+                break;
+            case UPDATE:
+                updateGroupDescriptionInternal(groupOp.deviceId(),
+                                               groupOp.appCookie(),
+                                               groupOp.updateType(),
+                                               groupOp.updateBuckets(),
+                                               groupOp.newAppCookie());
+                break;
+            case DELETE:
+                deleteGroupDescriptionInternal(groupOp.deviceId(),
+                                               groupOp.appCookie());
+                break;
+            default:
+                break;
+        }
+    }
+
+ /**
+ * Flattened map key to be used to store group entries.
+ */
+    protected static class GroupStoreMapKey {
+        // Device the keyed entry belongs to; sole equality component here.
+        private final DeviceId deviceId;
+
+        public GroupStoreMapKey(DeviceId deviceId) {
+            this.deviceId = deviceId;
+        }
+
+        public DeviceId deviceId() {
+            return deviceId;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) {
+                return true;
+            }
+            // NOTE(review): instanceof-based equality is asymmetric across the
+            // subclasses below — a GroupStoreMapKey can compare equal to a
+            // GroupStoreKeyMapKey while the reverse comparison is false.
+            // Confirm that keys of different concrete types are never mixed
+            // within the same map.
+            if (!(o instanceof GroupStoreMapKey)) {
+                return false;
+            }
+            GroupStoreMapKey that = (GroupStoreMapKey) o;
+            return this.deviceId.equals(that.deviceId);
+        }
+
+        @Override
+        public int hashCode() {
+            int result = 17;
+
+            result = 31 * result + Objects.hash(this.deviceId);
+
+            return result;
+        }
+    }
+
+    protected static class GroupStoreKeyMapKey extends GroupStoreMapKey {
+        // Application cookie identifying the group within the device.
+        private final GroupKey appCookie;
+        public GroupStoreKeyMapKey(DeviceId deviceId,
+                                   GroupKey appCookie) {
+            super(deviceId);
+            this.appCookie = appCookie;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) {
+                return true;
+            }
+            // NOTE(review): combined with the superclass instanceof check this
+            // equals is asymmetric w.r.t. GroupStoreMapKey — see note there.
+            if (!(o instanceof GroupStoreKeyMapKey)) {
+                return false;
+            }
+            GroupStoreKeyMapKey that = (GroupStoreKeyMapKey) o;
+            return (super.equals(that) &&
+                    this.appCookie.equals(that.appCookie));
+        }
+
+        @Override
+        public int hashCode() {
+            int result = 17;
+
+            result = 31 * result + super.hashCode() + Objects.hash(this.appCookie);
+
+            return result;
+        }
+    }
+
+    protected static class GroupStoreIdMapKey extends GroupStoreMapKey {
+        // Numeric group identifier within the device.
+        private final GroupId groupId;
+        public GroupStoreIdMapKey(DeviceId deviceId,
+                                  GroupId groupId) {
+            super(deviceId);
+            this.groupId = groupId;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) {
+                return true;
+            }
+            // NOTE(review): asymmetric w.r.t. GroupStoreMapKey — see the note
+            // on the superclass equals().
+            if (!(o instanceof GroupStoreIdMapKey)) {
+                return false;
+            }
+            GroupStoreIdMapKey that = (GroupStoreIdMapKey) o;
+            return (super.equals(that) &&
+                    this.groupId.equals(that.groupId));
+        }
+
+        @Override
+        public int hashCode() {
+            int result = 17;
+
+            result = 31 * result + super.hashCode() + Objects.hash(this.groupId);
+
+            return result;
+        }
+    }
+
+    /**
+     * Reconciles the groups reported by the data plane against the store
+     * (group AUDIT): matches entries present on both sides, flags extraneous
+     * data-plane groups, re-triggers missing store groups and finally marks
+     * the device's initial audit as completed.
+     *
+     * @param deviceId     device whose groups are being audited
+     * @param groupEntries group entries as reported by the southbound
+     */
+    @Override
+    public void pushGroupMetrics(DeviceId deviceId,
+                                 Collection<Group> groupEntries) {
+        boolean deviceInitialAuditStatus =
+                deviceInitialAuditStatus(deviceId);
+        Set<Group> southboundGroupEntries =
+                Sets.newHashSet(groupEntries);
+        Set<StoredGroupEntry> storedGroupEntries =
+                Sets.newHashSet(getStoredGroups(deviceId));
+        Set<Group> extraneousStoredEntries =
+                Sets.newHashSet(getExtraneousGroups(deviceId));
+
+        log.trace("pushGroupMetrics: Displaying all ({}) southboundGroupEntries for device {}",
+                  southboundGroupEntries.size(),
+                  deviceId);
+        // Enhanced-for is sufficient here; no removal happens in these loops.
+        for (Group group : southboundGroupEntries) {
+            log.trace("Group {} in device {}", group, deviceId);
+        }
+
+        log.trace("Displaying all ({}) stored group entries for device {}",
+                  storedGroupEntries.size(),
+                  deviceId);
+        for (Group group : storedGroupEntries) {
+            log.trace("Stored Group {} for device {}", group, deviceId);
+        }
+
+        // Explicit iterator retained: entries matched on both sides are
+        // removed from southboundGroupEntries while iterating.
+        for (Iterator<Group> it2 = southboundGroupEntries.iterator(); it2.hasNext();) {
+            Group group = it2.next();
+            if (storedGroupEntries.remove(group)) {
+                // we both have the group, let's update some info then.
+                log.trace("Group AUDIT: group {} exists in both planes for device {}",
+                          group.id(), deviceId);
+                groupAdded(group);
+                it2.remove();
+            }
+        }
+        for (Group group : southboundGroupEntries) {
+            // Hoisted the duplicate getGroup() lookup into a local.
+            Group storedGroup = getGroup(group.deviceId(), group.id());
+            if (storedGroup != null) {
+                // There is a group existing with the same id
+                // It is possible that group update is
+                // in progress while we got a stale info from switch
+                if (!storedGroupEntries.remove(storedGroup)) {
+                    // Fix: added missing space after "state:" in the message.
+                    log.warn("Group AUDIT: Inconsistent state: "
+                            + "Group exists in ID based table while "
+                            + "not present in key based table");
+                }
+            } else {
+                // there are groups in the switch that aren't in the store
+                log.debug("Group AUDIT: extraneous group {} exists in data plane for device {}",
+                          group.id(), deviceId);
+                extraneousStoredEntries.remove(group);
+                extraneousGroup(group);
+            }
+        }
+        for (Group group : storedGroupEntries) {
+            // there are groups in the store that aren't in the switch
+            log.debug("Group AUDIT: group {} missing in data plane for device {}",
+                      group.id(), deviceId);
+            groupMissing(group);
+        }
+        for (Group group : extraneousStoredEntries) {
+            // there are groups in the extraneous store that
+            // aren't in the switch
+            // Fix: corrected "extransoeus" typo in the log message.
+            log.debug("Group AUDIT: clearing extraneous group {} from store for device {}",
+                      group.id(), deviceId);
+            removeExtraneousGroupEntry(group);
+        }
+
+        if (!deviceInitialAuditStatus) {
+            log.debug("Group AUDIT: Setting device {} initial AUDIT completed",
+                      deviceId);
+            deviceInitialAuditCompleted(deviceId, true);
+        }
+    }
+
+    /**
+     * Handles a group that exists in the store but was not reported by the
+     * data plane: confirms pending deletes, or re-triggers installation by
+     * moving the entry to PENDING_ADD_RETRY.
+     *
+     * @param group store group entry missing from the device
+     */
+    private void groupMissing(Group group) {
+        switch (group.state()) {
+            case PENDING_DELETE:
+                log.debug("Group {} delete confirmation from device {}",
+                          group, group.deviceId());
+                removeGroupEntry(group);
+                break;
+            case ADDED:
+            case PENDING_ADD:
+            case PENDING_ADD_RETRY:
+            case PENDING_UPDATE:
+                log.debug("Group {} is in store but not on device {}",
+                          group, group.deviceId());
+                StoredGroupEntry existing =
+                        getStoredGroupEntry(group.deviceId(), group.id());
+                // Fix: guard against a missing key-table entry; the original
+                // dereferenced 'existing' unconditionally and could NPE when
+                // the key-indexed and id-indexed tables were out of sync.
+                if (existing == null) {
+                    log.warn("groupMissing: group entry {} not found in key table for device {}",
+                             group.id(), group.deviceId());
+                    break;
+                }
+                log.debug("groupMissing: group entry {} in device {} moving from {} to PENDING_ADD_RETRY",
+                          existing.id(),
+                          existing.deviceId(),
+                          existing.state());
+                existing.setState(Group.GroupState.PENDING_ADD_RETRY);
+                //Re-PUT map entries to trigger map update events
+                getGroupStoreKeyMap().
+                        put(new GroupStoreKeyMapKey(existing.deviceId(),
+                                                    existing.appCookie()), existing);
+                notifyDelegate(new GroupEvent(GroupEvent.Type.GROUP_ADD_REQUESTED,
+                                              group));
+                break;
+            default:
+                log.debug("Group {} has not been installed.", group);
+                break;
+        }
+    }
+
+    // Records a group seen on the device but absent from the store.
+    private void extraneousGroup(Group group) {
+        log.debug("Group {} is on device {} but not in store.", group, group.deviceId());
+        addOrUpdateExtraneousGroupEntry(group);
+    }
+
+    // Refreshes the store entry for a group confirmed present on the device.
+    private void groupAdded(Group group) {
+        log.trace("Group {} Added or Updated in device {}", group, group.deviceId());
+        addOrUpdateGroupEntry(group);
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/group/impl/GroupStoreMessage.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/group/impl/GroupStoreMessage.java
new file mode 100644
index 00000000..b82754b9
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/group/impl/GroupStoreMessage.java
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.group.impl;
+
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.group.GroupBuckets;
+import org.onosproject.net.group.GroupDescription;
+import org.onosproject.net.group.GroupKey;
+import org.onosproject.net.group.GroupStore.UpdateType;
+
+/**
+ * Format of the Group store message that is used to
+ * communicate with the peer nodes in the cluster.
+ */
+public final class GroupStoreMessage {
+
+    // Field order intentionally preserved from the original declaration.
+    private final DeviceId deviceId;
+    private final GroupKey appCookie;
+    private final GroupDescription groupDesc;
+    private final UpdateType updateType;
+    private final GroupBuckets updateBuckets;
+    private final GroupKey newAppCookie;
+    private final Type type;
+
+    /**
+     * Type of group store request.
+     */
+    public enum Type {
+        ADD,
+        UPDATE,
+        DELETE
+    }
+
+    // Instances are created only through the static factory methods below;
+    // fields that do not apply to a given request type are left null.
+    private GroupStoreMessage(Type type,
+                              DeviceId deviceId,
+                              GroupKey appCookie,
+                              GroupDescription groupDesc,
+                              UpdateType updateType,
+                              GroupBuckets updateBuckets,
+                              GroupKey newAppCookie) {
+        this.type = type;
+        this.deviceId = deviceId;
+        this.appCookie = appCookie;
+        this.groupDesc = groupDesc;
+        this.updateType = updateType;
+        this.updateBuckets = updateBuckets;
+        this.newAppCookie = newAppCookie;
+    }
+
+    /**
+     * Creates a group store message for group ADD request.
+     *
+     * @param deviceId device identifier in which group to be added
+     * @param desc group creation parameters
+     * @return constructed group store message
+     */
+    public static GroupStoreMessage createGroupAddRequestMsg(DeviceId deviceId,
+                                                             GroupDescription desc) {
+        return new GroupStoreMessage(Type.ADD, deviceId, null, desc, null, null, null);
+    }
+
+    /**
+     * Creates a group store message for group UPDATE request.
+     *
+     * @param deviceId the device ID
+     * @param appCookie the current group key
+     * @param updateType update (add or delete) type
+     * @param updateBuckets group buckets for updates
+     * @param newAppCookie optional new group key
+     * @return constructed group store message
+     */
+    public static GroupStoreMessage createGroupUpdateRequestMsg(DeviceId deviceId,
+                                                                GroupKey appCookie,
+                                                                UpdateType updateType,
+                                                                GroupBuckets updateBuckets,
+                                                                GroupKey newAppCookie) {
+        return new GroupStoreMessage(Type.UPDATE, deviceId, appCookie, null,
+                                     updateType, updateBuckets, newAppCookie);
+    }
+
+    /**
+     * Creates a group store message for group DELETE request.
+     *
+     * @param deviceId the device ID
+     * @param appCookie the group key
+     * @return constructed group store message
+     */
+    public static GroupStoreMessage createGroupDeleteRequestMsg(DeviceId deviceId,
+                                                                GroupKey appCookie) {
+        return new GroupStoreMessage(Type.DELETE, deviceId, appCookie, null,
+                                     null, null, null);
+    }
+
+    /**
+     * Returns the type of this group operation.
+     *
+     * @return group message type
+     */
+    public Type type() {
+        return type;
+    }
+
+    /**
+     * Returns the device identifier of this group request.
+     *
+     * @return device identifier
+     */
+    public DeviceId deviceId() {
+        return deviceId;
+    }
+
+    /**
+     * Returns the application cookie associated with this group request.
+     *
+     * @return application cookie
+     */
+    public GroupKey appCookie() {
+        return appCookie;
+    }
+
+    /**
+     * Returns the group create parameters associated with this group request.
+     *
+     * @return group create parameters
+     */
+    public GroupDescription groupDesc() {
+        return groupDesc;
+    }
+
+    /**
+     * Returns the update group operation type.
+     *
+     * @return update operation type
+     */
+    public UpdateType updateType() {
+        return updateType;
+    }
+
+    /**
+     * Returns the group buckets to be updated as part of this group request.
+     *
+     * @return group buckets to be updated
+     */
+    public GroupBuckets updateBuckets() {
+        return updateBuckets;
+    }
+
+    /**
+     * Returns the new application cookie associated with this group operation.
+     *
+     * @return new application cookie
+     */
+    public GroupKey newAppCookie() {
+        return newAppCookie;
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/group/impl/GroupStoreMessageSubjects.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/group/impl/GroupStoreMessageSubjects.java
new file mode 100644
index 00000000..dbee22c7
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/group/impl/GroupStoreMessageSubjects.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.group.impl;
+
+import org.onosproject.store.cluster.messaging.MessageSubject;
+
+/**
+ * Message subjects used by the distributed group store for peer-to-peer communication.
+ */
+public final class GroupStoreMessageSubjects {
+ private GroupStoreMessageSubjects() {}
+
+ public static final MessageSubject REMOTE_GROUP_OP_REQUEST
+ = new MessageSubject("peer-forward-group-op-req");
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/group/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/group/impl/package-info.java
new file mode 100644
index 00000000..35e3b251
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/group/impl/package-info.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Implementation of the group store.
+ */
+package org.onosproject.store.group.impl;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/host/impl/ECHostStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/host/impl/ECHostStore.java
new file mode 100644
index 00000000..d8b9daca
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/host/impl/ECHostStore.java
@@ -0,0 +1,267 @@
+package org.onosproject.store.host.impl;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.onosproject.net.DefaultAnnotations.merge;
+import static org.onosproject.net.host.HostEvent.Type.HOST_ADDED;
+import static org.onosproject.net.host.HostEvent.Type.HOST_REMOVED;
+import static org.onosproject.net.host.HostEvent.Type.HOST_UPDATED;
+import static org.onosproject.store.service.EventuallyConsistentMapEvent.Type.PUT;
+import static org.onosproject.store.service.EventuallyConsistentMapEvent.Type.REMOVE;
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Objects;
+import java.util.Set;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.packet.IpAddress;
+import org.onlab.packet.MacAddress;
+import org.onlab.packet.VlanId;
+import org.onlab.util.KryoNamespace;
+import org.onosproject.net.Annotations;
+import org.onosproject.net.ConnectPoint;
+import org.onosproject.net.DefaultAnnotations;
+import org.onosproject.net.DefaultHost;
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.Host;
+import org.onosproject.net.HostId;
+import org.onosproject.net.host.HostDescription;
+import org.onosproject.net.host.HostEvent;
+import org.onosproject.net.host.HostStore;
+import org.onosproject.net.host.HostStoreDelegate;
+import org.onosproject.net.host.PortAddresses;
+import org.onosproject.net.host.HostEvent.Type;
+import org.onosproject.net.provider.ProviderId;
+import org.onosproject.store.AbstractStore;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.service.EventuallyConsistentMap;
+import org.onosproject.store.service.EventuallyConsistentMapEvent;
+import org.onosproject.store.service.EventuallyConsistentMapListener;
+import org.onosproject.store.service.LogicalClockService;
+import org.onosproject.store.service.StorageService;
+import org.slf4j.Logger;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableMultimap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Multimaps;
+import com.google.common.collect.SetMultimap;
+import com.google.common.collect.Sets;
+
+/**
+ * Manages the inventory of hosts using a {@code EventuallyConsistentMap}.
+ */
+@Component(immediate = true)
+@Service
+public class ECHostStore
+    extends AbstractStore<HostEvent, HostStoreDelegate>
+    implements HostStore {
+
+    private final Logger log = getLogger(getClass());
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected StorageService storageService;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected LogicalClockService clockService;
+
+    // Hosts tracked by their location
+    // Local (non-replicated) index; kept in sync with 'hosts' by
+    // HostLocationTracker and, for moves, by updateHost() directly.
+    private final SetMultimap<ConnectPoint, Host> locations =
+            Multimaps.synchronizedSetMultimap(
+                    HashMultimap.<ConnectPoint, Host>create());
+
+    // Local index of port address bindings; not replicated across nodes.
+    private final SetMultimap<ConnectPoint, PortAddresses> portAddresses =
+            Multimaps.synchronizedSetMultimap(
+                    HashMultimap.<ConnectPoint, PortAddresses>create());
+
+    // Primary replicated state: host inventory shared across the cluster.
+    private EventuallyConsistentMap<HostId, DefaultHost> hosts;
+
+    private EventuallyConsistentMapListener<HostId, DefaultHost> hostLocationTracker =
+            new HostLocationTracker();
+
+    @Activate
+    public void activate() {
+        KryoNamespace.Builder hostSerializer = KryoNamespace.newBuilder()
+                .register(KryoNamespaces.API);
+
+        hosts = storageService.<HostId, DefaultHost>eventuallyConsistentMapBuilder()
+                .withName("onos-hosts")
+                .withSerializer(hostSerializer)
+                .withTimestampProvider((k, v) -> clockService.getTimestamp())
+                .build();
+
+        hosts.addListener(hostLocationTracker);
+
+        log.info("Started");
+    }
+
+    @Deactivate
+    public void deactivate() {
+        hosts.removeListener(hostLocationTracker);
+        hosts.destroy();
+        locations.clear();
+        portAddresses.clear();
+
+        log.info("Stopped");
+    }
+
+    @Override
+    public HostEvent createOrUpdateHost(ProviderId providerId,
+                                        HostId hostId,
+                                        HostDescription hostDescription) {
+        DefaultHost currentHost = hosts.get(hostId);
+        if (currentHost == null) {
+            // New host: the 'locations' index is populated indirectly by
+            // HostLocationTracker when the PUT event fires.
+            DefaultHost newhost = new DefaultHost(
+                    providerId,
+                    hostId,
+                    hostDescription.hwAddress(),
+                    hostDescription.vlan(),
+                    hostDescription.location(),
+                    ImmutableSet.copyOf(hostDescription.ipAddress()),
+                    hostDescription.annotations());
+            hosts.put(hostId, newhost);
+            return new HostEvent(HOST_ADDED, newhost);
+        }
+        return updateHost(providerId, hostId, hostDescription, currentHost);
+    }
+
+    @Override
+    public HostEvent removeHost(HostId hostId) {
+        // Location cleanup happens in HostLocationTracker on REMOVE.
+        Host host = hosts.remove(hostId);
+        return host != null ? new HostEvent(HOST_REMOVED, host) : null;
+    }
+
+    @Override
+    public int getHostCount() {
+        return hosts.size();
+    }
+
+    @Override
+    public Iterable<Host> getHosts() {
+        // Snapshot copy so callers never see concurrent modifications.
+        return ImmutableSet.copyOf(hosts.values());
+    }
+
+    @Override
+    public Host getHost(HostId hostId) {
+        return hosts.get(hostId);
+    }
+
+    @Override
+    public Set<Host> getHosts(VlanId vlanId) {
+        return filter(hosts.values(), host -> Objects.equals(host.vlan(), vlanId));
+    }
+
+    @Override
+    public Set<Host> getHosts(MacAddress mac) {
+        return filter(hosts.values(), host -> Objects.equals(host.mac(), mac));
+    }
+
+    @Override
+    public Set<Host> getHosts(IpAddress ip) {
+        return filter(hosts.values(), host -> host.ipAddresses().contains(ip));
+    }
+
+    @Override
+    public Set<Host> getConnectedHosts(ConnectPoint connectPoint) {
+        return ImmutableSet.copyOf(locations.get(connectPoint));
+    }
+
+    @Override
+    public Set<Host> getConnectedHosts(DeviceId deviceId) {
+        // Copy the multimap first to iterate without holding its lock.
+        return ImmutableMultimap.copyOf(locations)
+                .entries()
+                .stream()
+                .filter(entry -> entry.getKey().deviceId().equals(deviceId))
+                .map(entry -> entry.getValue())
+                .collect(Collectors.toSet());
+    }
+
+    @Override
+    public void updateAddressBindings(PortAddresses addresses) {
+        portAddresses.put(addresses.connectPoint(), addresses);
+    }
+
+    @Override
+    public void removeAddressBindings(PortAddresses addresses) {
+        portAddresses.remove(addresses.connectPoint(), addresses);
+    }
+
+    @Override
+    public void clearAddressBindings(ConnectPoint connectPoint) {
+        portAddresses.removeAll(connectPoint);
+    }
+
+    @Override
+    public Set<PortAddresses> getAddressBindings() {
+        return ImmutableSet.copyOf(portAddresses.values());
+    }
+
+    @Override
+    public Set<PortAddresses> getAddressBindingsForPort(ConnectPoint connectPoint) {
+        synchronized (portAddresses) {
+            // NOTE(review): SetMultimap.get() returns an empty collection, not
+            // null, so the null branch below appears unreachable — confirm and
+            // simplify if so.
+            Set<PortAddresses> addresses = portAddresses.get(connectPoint);
+            return addresses == null ? Collections.emptySet() : ImmutableSet.copyOf(addresses);
+        }
+    }
+
+    // Filters the host inventory by the given predicate into a fresh set.
+    private Set<Host> filter(Collection<DefaultHost> collection, Predicate<DefaultHost> predicate) {
+        return collection.stream().filter(predicate).collect(Collectors.toSet());
+    }
+
+    // checks for type of update to host, sends appropriate event
+    private HostEvent updateHost(ProviderId providerId,
+                                 HostId hostId,
+                                 HostDescription descr,
+                                 DefaultHost currentHost) {
+
+        final boolean hostMoved = !currentHost.location().equals(descr.location());
+        if (hostMoved ||
+                !currentHost.ipAddresses().containsAll(descr.ipAddress()) ||
+                !descr.annotations().keys().isEmpty()) {
+
+            // IP addresses accumulate; the description only ever adds.
+            Set<IpAddress> addresses = Sets.newHashSet(currentHost.ipAddresses());
+            addresses.addAll(descr.ipAddress());
+            Annotations annotations = merge((DefaultAnnotations) currentHost.annotations(),
+                                            descr.annotations());
+
+            DefaultHost updatedHost = new DefaultHost(providerId, currentHost.id(),
+                                                      currentHost.mac(), currentHost.vlan(),
+                                                      descr.location(),
+                                                      addresses,
+                                                      annotations);
+
+            // TODO: We need a way to detect conflicting changes and abort update.
+            // NOTE(review): 'locations' is updated both here and again by
+            // HostLocationTracker when hosts.put() fires. The second put is a
+            // no-op on the SetMultimap, but only this path removes the old
+            // location on a move — confirm this split of responsibility is
+            // intentional.
+            hosts.put(hostId, updatedHost);
+            locations.remove(currentHost.location(), currentHost);
+            locations.put(updatedHost.location(), updatedHost);
+
+            HostEvent.Type eventType = hostMoved ? Type.HOST_MOVED : Type.HOST_UPDATED;
+            return new HostEvent(eventType, updatedHost);
+        }
+        return null;
+    }
+
+    // Keeps the local 'locations' index aligned with the replicated map and
+    // notifies delegates of host add/update/remove.
+    private class HostLocationTracker implements EventuallyConsistentMapListener<HostId, DefaultHost> {
+        @Override
+        public void event(EventuallyConsistentMapEvent<HostId, DefaultHost> event) {
+            DefaultHost host = checkNotNull(event.value());
+            if (event.type() == PUT) {
+                // put() returns false if the (location, host) pair already
+                // existed, which distinguishes add from update here.
+                // NOTE(review): a previous location is not removed on this
+                // path — moves rely on updateHost() doing the removal.
+                boolean isNew = locations.put(host.location(), host);
+                notifyDelegate(new HostEvent(isNew ? HOST_ADDED : HOST_UPDATED, host));
+            } else if (event.type() == REMOVE) {
+                if (locations.remove(host.location(), host)) {
+                    notifyDelegate(new HostEvent(HOST_REMOVED, host));
+                }
+
+            }
+        }
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/host/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/host/impl/package-info.java
new file mode 100644
index 00000000..635b1131
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/host/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of the distributed host store using p2p synchronization protocol.
+ */
+package org.onosproject.store.host.impl;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/impl/LogicalTimestamp.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/impl/LogicalTimestamp.java
new file mode 100644
index 00000000..5ae8b4f4
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/impl/LogicalTimestamp.java
@@ -0,0 +1,68 @@
+package org.onosproject.store.impl;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+import java.util.Objects;
+
+import org.onosproject.store.Timestamp;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.ComparisonChain;
+
+/**
+ * Timestamp based on logical sequence value.
+ * <p>
+ * LogicalTimestamps are ordered by their sequence values.
+ */
+public class LogicalTimestamp implements Timestamp {
+
+    // Monotonically increasing logical sequence value.
+    private final long value;
+
+    public LogicalTimestamp(long value) {
+        this.value = value;
+    }
+
+    /**
+     * Returns the sequence value.
+     *
+     * @return sequence value
+     */
+    public long value() {
+        return this.value;
+    }
+
+    @Override
+    public int compareTo(Timestamp o) {
+        checkArgument(o instanceof LogicalTimestamp,
+                      "Must be LogicalTimestamp", o);
+        LogicalTimestamp that = (LogicalTimestamp) o;
+        // Long.compare yields the same -1/0/1 result as the original
+        // ComparisonChain over a single long field.
+        return Long.compare(this.value, that.value);
+    }
+
+    @Override
+    public int hashCode() {
+        // Kept as Objects.hash to preserve the exact hash value.
+        return Objects.hash(value);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (!(obj instanceof LogicalTimestamp)) {
+            return false;
+        }
+        LogicalTimestamp that = (LogicalTimestamp) obj;
+        return this.value == that.value;
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(getClass())
+                .add("value", value)
+                .toString();
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/impl/MastershipBasedTimestamp.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/impl/MastershipBasedTimestamp.java
new file mode 100644
index 00000000..15b3c3c6
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/impl/MastershipBasedTimestamp.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.impl;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+import java.util.Objects;
+
+import org.onosproject.store.Timestamp;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.ComparisonChain;
+
+/**
+ * A logical timestamp that derives its value from two things:
+ * <ul>
+ * <li> The current mastership term of the device.</li>
+ * <li> The value of the counter used for tracking topology events observed from
+ * the device during that current time of a device. </li>
+ * </ul>
+ */
+public final class MastershipBasedTimestamp implements Timestamp {
+
+ private final long termNumber;
+ private final long sequenceNumber;
+
+ /**
+ * Default version tuple.
+ *
+ * @param termNumber the mastership termNumber
+ * @param sequenceNumber the sequenceNumber number within the termNumber
+ */
+ public MastershipBasedTimestamp(long termNumber, long sequenceNumber) {
+ this.termNumber = termNumber;
+ this.sequenceNumber = sequenceNumber;
+ }
+
+ @Override
+ public int compareTo(Timestamp o) {
+ checkArgument(o instanceof MastershipBasedTimestamp,
+ "Must be MastershipBasedTimestamp", o);
+ MastershipBasedTimestamp that = (MastershipBasedTimestamp) o;
+
+ return ComparisonChain.start()
+ .compare(this.termNumber, that.termNumber)
+ .compare(this.sequenceNumber, that.sequenceNumber)
+ .result();
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(termNumber, sequenceNumber);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (!(obj instanceof MastershipBasedTimestamp)) {
+ return false;
+ }
+ MastershipBasedTimestamp that = (MastershipBasedTimestamp) obj;
+ return Objects.equals(this.termNumber, that.termNumber) &&
+ Objects.equals(this.sequenceNumber, that.sequenceNumber);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(getClass())
+ .add("termNumber", termNumber)
+ .add("sequenceNumber", sequenceNumber)
+ .toString();
+ }
+
+ /**
+ * Returns the termNumber.
+ *
+ * @return termNumber
+ */
+ public long termNumber() {
+ return termNumber;
+ }
+
+ /**
+ * Returns the sequenceNumber number.
+ *
+ * @return sequenceNumber
+ */
+ public long sequenceNumber() {
+ return sequenceNumber;
+ }
+
+ // Default constructor for serialization
+
+ /**
+ * @deprecated in Cardinal Release
+ */
+ @Deprecated
+ protected MastershipBasedTimestamp() {
+ this.termNumber = -1;
+ this.sequenceNumber = -1;
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/impl/Timestamped.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/impl/Timestamped.java
new file mode 100644
index 00000000..ae7267b8
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/impl/Timestamped.java
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.impl;
+
+import com.google.common.base.MoreObjects;
+import org.onosproject.store.Timestamp;
+
+import java.util.Objects;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
/**
 * Wrapper class to store Timestamped value.
 * <p>
 * NOTE: {@link #equals(Object)} and {@link #hashCode()} are based on the
 * timestamp alone — the wrapped value is not considered. Two instances with
 * equal timestamps but different values therefore compare equal; presumably
 * this supports timestamp-based replacement semantics — confirm with callers.
 *
 * @param <T> Timestamped value type
 */
public final class Timestamped<T> {

    private final Timestamp timestamp;
    private final T value;

    /**
     * Creates a time stamped value.
     *
     * @param value to be timestamp
     * @param timestamp the timestamp
     */
    public Timestamped(T value, Timestamp timestamp) {
        this.value = checkNotNull(value);
        this.timestamp = checkNotNull(timestamp);
    }

    /**
     * Returns the value.
     *
     * @return value
     */
    public T value() {
        return value;
    }

    /**
     * Returns the time stamp.
     *
     * @return time stamp
     */
    public Timestamp timestamp() {
        return timestamp;
    }

    /**
     * Tests if this timestamped value is newer than the other.
     *
     * @param other timestamped value
     * @return true if this instance is newer.
     */
    public boolean isNewer(Timestamped<T> other) {
        return isNewerThan(checkNotNull(other).timestamp());
    }

    /**
     * Tests if this timestamp is newer than the specified timestamp.
     *
     * @param other timestamp to compare against
     * @return true if this instance is newer
     */
    public boolean isNewerThan(Timestamp other) {
        return timestamp.isNewerThan(other);
    }

    @Override
    public int hashCode() {
        // Hash derives from the timestamp only, consistent with equals below.
        return timestamp.hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof Timestamped)) {
            return false;
        }
        @SuppressWarnings("unchecked")
        Timestamped<T> that = (Timestamped<T>) obj;
        // Only the timestamp is compared; the wrapped value is ignored.
        return Objects.equals(this.timestamp, that.timestamp);
    }

    @Override
    public String toString() {
        return MoreObjects.toStringHelper(getClass())
                .add("timestamp", timestamp)
                .add("value", value)
                .toString();
    }

    // Default constructor for serialization
    /**
     * @deprecated in Cardinal Release
     */
    @Deprecated
    private Timestamped() {
        this.value = null;
        this.timestamp = null;
    }
}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/impl/package-info.java
new file mode 100644
index 00000000..03786fac
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Common facilities for use by various distributed stores.
+ */
+package org.onosproject.store.impl;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/intent/impl/GossipIntentStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/intent/impl/GossipIntentStore.java
new file mode 100644
index 00000000..fa3a0751
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/intent/impl/GossipIntentStore.java
@@ -0,0 +1,334 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.intent.impl;
+
+import com.google.common.collect.ImmutableList;
+
+import org.apache.commons.lang.math.RandomUtils;
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.KryoNamespace;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.ControllerNode;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.net.intent.Intent;
+import org.onosproject.net.intent.IntentData;
+import org.onosproject.net.intent.IntentEvent;
+import org.onosproject.net.intent.IntentState;
+import org.onosproject.net.intent.IntentStore;
+import org.onosproject.net.intent.IntentStoreDelegate;
+import org.onosproject.net.intent.Key;
+import org.onosproject.net.intent.PartitionService;
+import org.onosproject.store.AbstractStore;
+import org.onosproject.store.service.MultiValuedTimestamp;
+import org.onosproject.store.service.WallClockTimestamp;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.service.EventuallyConsistentMap;
+import org.onosproject.store.service.EventuallyConsistentMapEvent;
+import org.onosproject.store.service.EventuallyConsistentMapListener;
+import org.onosproject.store.service.StorageService;
+import org.slf4j.Logger;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.onosproject.net.intent.IntentState.PURGE_REQ;
+import static org.slf4j.LoggerFactory.getLogger;
+
/**
 * Manages inventory of Intents in a distributed data store that uses optimistic
 * replication and gossip based techniques.
 * <p>
 * Two eventually-consistent maps are maintained: {@code currentMap} holds the
 * current state of each intent keyed by intent key, and {@code pendingMap}
 * holds requested operations awaiting processing by the partition master.
 */
//FIXME we should listen for leadership changes. if the local instance has just
// ... become a leader, scan the pending map and process those
@Component(immediate = true, enabled = true)
@Service
public class GossipIntentStore
        extends AbstractStore<IntentEvent, IntentStoreDelegate>
        implements IntentStore {

    private final Logger log = getLogger(getClass());

    // Map of intent key => current intent state
    private EventuallyConsistentMap<Key, IntentData> currentMap;

    // Map of intent key => pending intent operation
    private EventuallyConsistentMap<Key, IntentData> pendingMap;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected ClusterService clusterService;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected StorageService storageService;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected PartitionService partitionService;

    // Monotonic tie-breaker appended to the intent version when timestamping
    // writes to currentMap, so successive writes with equal versions still order.
    private final AtomicLong sequenceNumber = new AtomicLong(0);

    @Activate
    public void activate() {
        // Serializer shared by both maps; registers the timestamp types
        // produced by the timestamp providers below.
        KryoNamespace.Builder intentSerializer = KryoNamespace.newBuilder()
                .register(KryoNamespaces.API)
                .register(IntentData.class)
                .register(MultiValuedTimestamp.class)
                .register(WallClockTimestamp.class);

        currentMap = storageService.<Key, IntentData>eventuallyConsistentMapBuilder()
                .withName("intent-current")
                .withSerializer(intentSerializer)
                .withTimestampProvider((key, intentData) ->
                        new MultiValuedTimestamp<>(intentData.version(),
                                sequenceNumber.getAndIncrement()))
                .withPeerUpdateFunction((key, intentData) -> getPeerNodes(key, intentData))
                .build();

        pendingMap = storageService.<Key, IntentData>eventuallyConsistentMapBuilder()
                .withName("intent-pending")
                .withSerializer(intentSerializer)
                // NOTE(review): pending entries break version ties with
                // System.nanoTime() rather than the shared sequence counter
                // used for the current map — confirm this is intentional.
                .withTimestampProvider((key, intentData) -> new MultiValuedTimestamp<>(intentData.version(),
                        System.nanoTime()))
                .withPeerUpdateFunction((key, intentData) -> getPeerNodes(key, intentData))
                .build();

        currentMap.addListener(new InternalCurrentListener());
        pendingMap.addListener(new InternalPendingListener());

        log.info("Started");
    }

    @Deactivate
    public void deactivate() {
        currentMap.destroy();
        pendingMap.destroy();

        log.info("Stopped");
    }

    @Override
    public long getIntentCount() {
        return currentMap.size();
    }

    @Override
    public Iterable<Intent> getIntents() {
        return currentMap.values().stream()
                .map(IntentData::intent)
                .collect(Collectors.toList());
    }

    @Override
    public Iterable<IntentData> getIntentData(boolean localOnly, long olderThan) {
        if (localOnly || olderThan > 0) {
            long now = System.currentTimeMillis();
            final WallClockTimestamp time = new WallClockTimestamp(now - olderThan);
            return currentMap.values().stream()
                    .filter(data -> data.version().isOlderThan(time) &&
                            (!localOnly || isMaster(data.key())))
                    .collect(Collectors.toList());
        }
        // No filtering requested; expose the map's values directly.
        return currentMap.values();
    }

    @Override
    public IntentState getIntentState(Key intentKey) {
        IntentData data = currentMap.get(intentKey);
        if (data != null) {
            return data.state();
        }
        // Unknown intent key.
        return null;
    }

    @Override
    public List<Intent> getInstallableIntents(Key intentKey) {
        IntentData data = currentMap.get(intentKey);
        if (data != null) {
            return data.installables();
        }
        // Unknown intent key.
        return null;
    }



    @Override
    public void write(IntentData newData) {
        checkNotNull(newData);

        IntentData currentData = currentMap.get(newData.key());
        if (IntentData.isUpdateAcceptable(currentData, newData)) {
            // Only the master is modifying the current state. Therefore assume
            // this always succeeds
            if (newData.state() == PURGE_REQ) {
                currentMap.remove(newData.key(), currentData);
            } else {
                // Store a defensive copy so later caller mutation is not visible.
                currentMap.put(newData.key(), new IntentData(newData));
            }

            // if current.put succeeded
            pendingMap.remove(newData.key(), newData);
        }
    }

    // Chooses the peers an update for this intent is pushed to: the partition
    // master and the originating node exchange updates with each other, falling
    // back to a random peer when either side is unknown so gossip still spreads.
    private Collection<NodeId> getPeerNodes(Key key, IntentData data) {
        NodeId master = partitionService.getLeader(key);
        NodeId origin = (data != null) ? data.origin() : null;
        if (master == null || origin == null) {
            log.debug("Intent {} missing master and/or origin; master = {}, origin = {}",
                    key, master, origin);
        }

        NodeId me = clusterService.getLocalNode().id();
        boolean isMaster = Objects.equals(master, me);
        boolean isOrigin = Objects.equals(origin, me);
        if (isMaster && isOrigin) {
            return getRandomNode();
        } else if (isMaster) {
            return origin != null ? ImmutableList.of(origin) : getRandomNode();
        } else if (isOrigin) {
            return master != null ? ImmutableList.of(master) : getRandomNode();
        } else {
            log.warn("No master or origin for intent {}", key);
            return master != null ? ImmutableList.of(master) : getRandomNode();
        }
    }

    // Picks one random peer other than the local node; returns null when this
    // is the only node in the cluster.
    private List<NodeId> getRandomNode() {
        NodeId me = clusterService.getLocalNode().id();
        List<NodeId> nodes = clusterService.getNodes().stream()
                .map(ControllerNode::id)
                .filter(node -> !Objects.equals(node, me))
                .collect(Collectors.toList());
        if (nodes.size() == 0) {
            return null;
        }
        return ImmutableList.of(nodes.get(RandomUtils.nextInt(nodes.size())));
    }

    @Override
    public void batchWrite(Iterable<IntentData> updates) {
        updates.forEach(this::write);
    }

    @Override
    public Intent getIntent(Key key) {
        IntentData data = currentMap.get(key);
        if (data != null) {
            return data.intent();
        }
        return null;
    }

    @Override
    public IntentData getIntentData(Key key) {
        IntentData current = currentMap.get(key);
        if (current == null) {
            return null;
        }
        // Defensive copy so callers cannot mutate the stored entry.
        return new IntentData(current);
    }

    @Override
    public void addPending(IntentData data) {
        checkNotNull(data);

        if (data.version() == null) {
            // Stamp requests that arrive without a version.
            data.setVersion(new WallClockTimestamp());
        }
        // Record which node the request entered the cluster on.
        data.setOrigin(clusterService.getLocalNode().id());
        pendingMap.put(data.key(), new IntentData(data));
    }

    @Override
    public boolean isMaster(Key intentKey) {
        return partitionService.isMine(intentKey);
    }

    @Override
    public Iterable<Intent> getPending() {
        return pendingMap.values().stream()
                .map(IntentData::intent)
                .collect(Collectors.toList());
    }

    @Override
    public Iterable<IntentData> getPendingData() {
        return pendingMap.values();
    }

    @Override
    public Iterable<IntentData> getPendingData(boolean localOnly, long olderThan) {
        long now = System.currentTimeMillis();
        final WallClockTimestamp time = new WallClockTimestamp(now - olderThan);
        return pendingMap.values().stream()
                .filter(data -> data.version().isOlderThan(time) &&
                        (!localOnly || isMaster(data.key())))
                .collect(Collectors.toList());
    }

    // IntentEvent.getEvent may yield null for states with no event mapping;
    // only forward non-null events to the delegate.
    private void notifyDelegateIfNotNull(IntentEvent event) {
        if (event != null) {
            notifyDelegate(event);
        }
    }

    // Reacts to updates of the current map.
    private final class InternalCurrentListener implements
            EventuallyConsistentMapListener<Key, IntentData> {
        @Override
        public void event(EventuallyConsistentMapEvent<Key, IntentData> event) {
            IntentData intentData = event.value();

            if (event.type() == EventuallyConsistentMapEvent.Type.PUT) {
                // The current intents map has been updated. If we are master for
                // this intent's partition, notify the Manager that it should
                // emit notifications about updated tracked resources.
                if (delegate != null && isMaster(event.value().intent().key())) {
                    delegate.onUpdate(new IntentData(intentData)); // copy for safety, likely unnecessary
                }
                notifyDelegateIfNotNull(IntentEvent.getEvent(intentData));
            }
        }
    }

    // Reacts to updates of the pending map.
    private final class InternalPendingListener implements
            EventuallyConsistentMapListener<Key, IntentData> {
        @Override
        public void event(
                EventuallyConsistentMapEvent<Key, IntentData> event) {
            if (event.type() == EventuallyConsistentMapEvent.Type.PUT) {
                // The pending intents map has been updated. If we are master for
                // this intent's partition, notify the Manager that it should do
                // some work.
                if (isMaster(event.value().intent().key())) {
                    if (delegate != null) {
                        delegate.process(new IntentData(event.value()));
                    }
                }

                notifyDelegateIfNotNull(IntentEvent.getEvent(event.value()));
            }
        }
    }

}
+
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/intent/impl/PartitionId.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/intent/impl/PartitionId.java
new file mode 100644
index 00000000..885361f0
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/intent/impl/PartitionId.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.intent.impl;
+
+import com.google.common.base.MoreObjects;
+
+import java.util.Objects;
+
+/**
+ * Identifies a partition of the intent keyspace which will be assigned to and
+ * processed by a single ONOS instance at a time.
+ */
+public class PartitionId {
+ private final int id;
+
+ /**
+ * Creates a new partition ID.
+ *
+ * @param id the partition ID
+ */
+ PartitionId(int id) {
+ this.id = id;
+ }
+
+ /**
+ * Returns the integer ID value.
+ *
+ * @return ID value
+ */
+ public int value() {
+ return id;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof PartitionId)) {
+ return false;
+ }
+
+ PartitionId that = (PartitionId) o;
+ return Objects.equals(this.id, that.id);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(id);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(getClass())
+ .add("partition ID", id)
+ .toString();
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/intent/impl/PartitionManager.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/intent/impl/PartitionManager.java
new file mode 100644
index 00000000..09108d28
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/intent/impl/PartitionManager.java
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.intent.impl;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onosproject.cluster.ClusterEvent;
+import org.onosproject.cluster.ClusterEventListener;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.ControllerNode;
+import org.onosproject.cluster.Leadership;
+import org.onosproject.cluster.LeadershipEvent;
+import org.onosproject.cluster.LeadershipEventListener;
+import org.onosproject.cluster.LeadershipService;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.event.EventDeliveryService;
+import org.onosproject.event.ListenerRegistry;
+import org.onosproject.net.intent.Key;
+import org.onosproject.net.intent.PartitionEvent;
+import org.onosproject.net.intent.PartitionEventListener;
+import org.onosproject.net.intent.PartitionService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+
+/**
+ * Manages the assignment of intent keyspace partitions to instances.
+ */
+@Component(immediate = true)
+@Service
+public class PartitionManager implements PartitionService {
+
+ private static final Logger log = LoggerFactory.getLogger(PartitionManager.class);
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected LeadershipService leadershipService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterService clusterService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected EventDeliveryService eventDispatcher;
+
+ protected final AtomicBoolean rebalanceScheduled = new AtomicBoolean(false);
+
+ static final int NUM_PARTITIONS = 14;
+ private static final int BACKOFF_TIME = 2;
+ private static final int CHECK_PARTITION_BALANCE_PERIOD_SEC = 10;
+ private static final int RETRY_AFTER_DELAY_SEC = 5;
+
+ private static final String ELECTION_PREFIX = "intent-partition-";
+
+ private ListenerRegistry<PartitionEvent, PartitionEventListener> listenerRegistry;
+ private LeadershipEventListener leaderListener = new InternalLeadershipListener();
+ private ClusterEventListener clusterListener = new InternalClusterEventListener();
+
+ private ScheduledExecutorService executor = Executors
+ .newScheduledThreadPool(1);
+
+ @Activate
+ public void activate() {
+ leadershipService.addListener(leaderListener);
+ clusterService.addListener(clusterListener);
+
+ listenerRegistry = new ListenerRegistry<>();
+ eventDispatcher.addSink(PartitionEvent.class, listenerRegistry);
+
+ for (int i = 0; i < NUM_PARTITIONS; i++) {
+ leadershipService.runForLeadership(getPartitionPath(i));
+ }
+
+ executor.scheduleAtFixedRate(() -> scheduleRebalance(0), 0,
+ CHECK_PARTITION_BALANCE_PERIOD_SEC, TimeUnit.SECONDS);
+ }
+
+ @Deactivate
+ public void deactivate() {
+ executor.shutdownNow();
+
+ eventDispatcher.removeSink(PartitionEvent.class);
+ leadershipService.removeListener(leaderListener);
+ clusterService.removeListener(clusterListener);
+ }
+
+ /**
+ * Sets the specified executor to be used for scheduling background tasks.
+ *
+ * @param executor scheduled executor service for background tasks
+ * @return this PartitionManager
+ */
+ public PartitionManager withScheduledExecutor(ScheduledExecutorService executor) {
+ this.executor = executor;
+ return this;
+ }
+
+ private String getPartitionPath(int i) {
+ return ELECTION_PREFIX + i;
+ }
+
+ private String getPartitionPath(PartitionId id) {
+ return getPartitionPath(id.value());
+ }
+
+ private PartitionId getPartitionForKey(Key intentKey) {
+ int partition = Math.abs((int) intentKey.hash()) % NUM_PARTITIONS;
+ //TODO investigate Guava consistent hash method
+ // ... does it add significant computational complexity? is it worth it?
+ //int partition = consistentHash(intentKey.hash(), NUM_PARTITIONS);
+ PartitionId id = new PartitionId(partition);
+ return id;
+ }
+
+ @Override
+ public boolean isMine(Key intentKey) {
+ return Objects.equals(leadershipService.getLeader(getPartitionPath(getPartitionForKey(intentKey))),
+ clusterService.getLocalNode().id());
+ }
+
+ @Override
+ public NodeId getLeader(Key intentKey) {
+ return leadershipService.getLeader(getPartitionPath(getPartitionForKey(intentKey)));
+ }
+
+ @Override
+ public void addListener(PartitionEventListener listener) {
+ listenerRegistry.addListener(listener);
+ }
+
+ @Override
+ public void removeListener(PartitionEventListener listener) {
+ listenerRegistry.removeListener(listener);
+ }
+
+ protected void doRebalance() {
+ rebalanceScheduled.set(false);
+ try {
+ rebalance();
+ } catch (Exception e) {
+ log.warn("Exception caught during rebalance task. Will retry in " + RETRY_AFTER_DELAY_SEC + " seconds", e);
+ scheduleRebalance(RETRY_AFTER_DELAY_SEC);
+ }
+ }
+
+ /**
+ * Determine whether we have more than our fair share of partitions, and if
+ * so, relinquish leadership of some of them for a little while to let
+ * other instances take over.
+ */
+ private void rebalance() {
+ int activeNodes = (int) clusterService.getNodes()
+ .stream()
+ .filter(node -> ControllerNode.State.ACTIVE == clusterService.getState(node.id()))
+ .count();
+
+ int myShare = (int) Math.ceil((double) NUM_PARTITIONS / activeNodes);
+
+ List<Leadership> myPartitions = leadershipService.getLeaderBoard().values()
+ .stream()
+ .filter(l -> clusterService.getLocalNode().id().equals(l.leader()))
+ .filter(l -> l.topic().startsWith(ELECTION_PREFIX))
+ .collect(Collectors.toList());
+
+ int relinquish = myPartitions.size() - myShare;
+
+ if (relinquish <= 0) {
+ return;
+ }
+
+ for (int i = 0; i < relinquish; i++) {
+ String topic = myPartitions.get(i).topic();
+ leadershipService.withdraw(topic);
+
+ executor.schedule(() -> recontest(topic),
+ BACKOFF_TIME, TimeUnit.SECONDS);
+ }
+ }
+
+ private void scheduleRebalance(int afterDelaySec) {
+ if (rebalanceScheduled.compareAndSet(false, true)) {
+ executor.schedule(this::doRebalance, afterDelaySec, TimeUnit.SECONDS);
+ }
+ }
+
+ /**
+ * Try and recontest for leadership of a partition.
+ *
+ * @param path topic name to recontest
+ */
+ private void recontest(String path) {
+ leadershipService.runForLeadership(path);
+ }
+
+ private final class InternalLeadershipListener implements LeadershipEventListener {
+
+ @Override
+ public void event(LeadershipEvent event) {
+ Leadership leadership = event.subject();
+
+ if (Objects.equals(leadership.leader(), clusterService.getLocalNode().id()) &&
+ leadership.topic().startsWith(ELECTION_PREFIX)) {
+
+ // See if we need to let some partitions go
+ scheduleRebalance(0);
+
+ eventDispatcher.post(new PartitionEvent(PartitionEvent.Type.LEADER_CHANGED,
+ leadership.topic()));
+ }
+ }
+ }
+
+ private final class InternalClusterEventListener implements
+ ClusterEventListener {
+
+ @Override
+ public void event(ClusterEvent event) {
+ scheduleRebalance(0);
+ }
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/intent/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/intent/impl/package-info.java
new file mode 100644
index 00000000..a8db8ff2
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/intent/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of distributed intent store.
+ */
+package org.onosproject.store.intent.impl;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/ECLinkStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/ECLinkStore.java
new file mode 100644
index 00000000..4577086c
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/ECLinkStore.java
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.link.impl;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.onosproject.net.DefaultAnnotations.merge;
+import static org.onosproject.net.DefaultAnnotations.union;
+import static org.onosproject.net.Link.State.ACTIVE;
+import static org.onosproject.net.Link.State.INACTIVE;
+import static org.onosproject.net.Link.Type.DIRECT;
+import static org.onosproject.net.Link.Type.INDIRECT;
+import static org.onosproject.net.LinkKey.linkKey;
+import static org.onosproject.net.link.LinkEvent.Type.LINK_ADDED;
+import static org.onosproject.net.link.LinkEvent.Type.LINK_REMOVED;
+import static org.onosproject.net.link.LinkEvent.Type.LINK_UPDATED;
+import static org.onosproject.store.service.EventuallyConsistentMapEvent.Type.PUT;
+import static org.onosproject.store.service.EventuallyConsistentMapEvent.Type.REMOVE;
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.KryoNamespace;
+import org.onlab.util.SharedExecutors;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.mastership.MastershipService;
+import org.onosproject.net.AnnotationKeys;
+import org.onosproject.net.AnnotationsUtil;
+import org.onosproject.net.ConnectPoint;
+import org.onosproject.net.DefaultAnnotations;
+import org.onosproject.net.DefaultLink;
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.Link;
+import org.onosproject.net.LinkKey;
+import org.onosproject.net.Link.Type;
+import org.onosproject.net.device.DeviceClockService;
+import org.onosproject.net.link.DefaultLinkDescription;
+import org.onosproject.net.link.LinkDescription;
+import org.onosproject.net.link.LinkEvent;
+import org.onosproject.net.link.LinkStore;
+import org.onosproject.net.link.LinkStoreDelegate;
+import org.onosproject.net.provider.ProviderId;
+import org.onosproject.store.AbstractStore;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.cluster.messaging.MessageSubject;
+import org.onosproject.store.impl.MastershipBasedTimestamp;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.serializers.KryoSerializer;
+import org.onosproject.store.serializers.custom.DistributedStoreSerializers;
+import org.onosproject.store.service.EventuallyConsistentMap;
+import org.onosproject.store.service.EventuallyConsistentMapEvent;
+import org.onosproject.store.service.EventuallyConsistentMapListener;
+import org.onosproject.store.service.StorageService;
+import org.slf4j.Logger;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Maps;
+import com.google.common.util.concurrent.Futures;
+
+/**
+ * Manages the inventory of links using a {@code EventuallyConsistentMap}.
+ *
+ * Per-provider link descriptions are shared cluster-wide through an
+ * eventually consistent map; the local {@code links} cache holds the link
+ * composed from all providers' descriptions and serves all read operations.
+ */
+@Component(immediate = true, enabled = true)
+@Service
+public class ECLinkStore
+ extends AbstractStore<LinkEvent, LinkStoreDelegate>
+ implements LinkStore {
+
+ private final Logger log = getLogger(getClass());
+
+ // Local composed-link cache, keyed by (src, dst) link key.
+ private final Map<LinkKey, Link> links = Maps.newConcurrentMap();
+ // Replicated map of per-provider link descriptions.
+ private EventuallyConsistentMap<Provided<LinkKey>, LinkDescription> linkDescriptions;
+
+ // Subject used to forward a link update to the master of the destination device.
+ private static final MessageSubject LINK_INJECT_MESSAGE = new MessageSubject("inject-link-request");
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected StorageService storageService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected MastershipService mastershipService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected DeviceClockService deviceClockService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterCommunicationService clusterCommunicator;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterService clusterService;
+
+ // Keeps the local links cache in sync with updates to linkDescriptions.
+ private EventuallyConsistentMapListener<Provided<LinkKey>, LinkDescription> linkTracker =
+ new InternalLinkTracker();
+
+ // Serializer for the link-inject request/response messages.
+ protected static final KryoSerializer SERIALIZER = new KryoSerializer() {
+ @Override
+ protected void setupKryoPool() {
+ serializerPool = KryoNamespace.newBuilder()
+ .register(DistributedStoreSerializers.STORE_COMMON)
+ .nextId(DistributedStoreSerializers.STORE_CUSTOM_BEGIN)
+ .register(Provided.class)
+ .build();
+ }
+ };
+
+ @Activate
+ public void activate() {
+ KryoNamespace.Builder serializer = KryoNamespace.newBuilder()
+ .register(KryoNamespaces.API)
+ .register(MastershipBasedTimestamp.class)
+ .register(Provided.class);
+
+ linkDescriptions = storageService.<Provided<LinkKey>, LinkDescription>eventuallyConsistentMapBuilder()
+ .withName("onos-link-descriptions")
+ .withSerializer(serializer)
+ .withTimestampProvider((k, v) -> {
+ try {
+ // Timestamps are taken from the destination device's logical clock.
+ return v == null ? null : deviceClockService.getTimestamp(v.dst().deviceId());
+ } catch (IllegalStateException e) {
+ // No timestamp available yet (e.g. mastership not settled).
+ return null;
+ }
+ }).build();
+
+ clusterCommunicator.addSubscriber(LINK_INJECT_MESSAGE,
+ SERIALIZER::decode,
+ this::injectLink,
+ SERIALIZER::encode,
+ SharedExecutors.getPoolThreadExecutor());
+
+ linkDescriptions.addListener(linkTracker);
+
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ linkDescriptions.removeListener(linkTracker);
+ linkDescriptions.destroy();
+ links.clear();
+ clusterCommunicator.removeSubscriber(LINK_INJECT_MESSAGE);
+
+ log.info("Stopped");
+ }
+
+ @Override
+ public int getLinkCount() {
+ return links.size();
+ }
+
+ @Override
+ public Iterable<Link> getLinks() {
+ return links.values();
+ }
+
+ @Override
+ public Set<Link> getDeviceEgressLinks(DeviceId deviceId) {
+ return filter(links.values(), link -> deviceId.equals(link.src().deviceId()));
+ }
+
+ @Override
+ public Set<Link> getDeviceIngressLinks(DeviceId deviceId) {
+ return filter(links.values(), link -> deviceId.equals(link.dst().deviceId()));
+ }
+
+ @Override
+ public Link getLink(ConnectPoint src, ConnectPoint dst) {
+ return links.get(linkKey(src, dst));
+ }
+
+ @Override
+ public Set<Link> getEgressLinks(ConnectPoint src) {
+ return filter(links.values(), link -> src.equals(link.src()));
+ }
+
+ @Override
+ public Set<Link> getIngressLinks(ConnectPoint dst) {
+ return filter(links.values(), link -> dst.equals(link.dst()));
+ }
+
+ @Override
+ public LinkEvent createOrUpdateLink(ProviderId providerId,
+ LinkDescription linkDescription) {
+ final DeviceId dstDeviceId = linkDescription.dst().deviceId();
+ final NodeId dstNodeId = mastershipService.getMasterFor(dstDeviceId);
+
+ // Process link update only if we're the master of the destination node,
+ // otherwise signal the actual master.
+ if (clusterService.getLocalNode().id().equals(dstNodeId)) {
+ LinkKey linkKey = linkKey(linkDescription.src(), linkDescription.dst());
+ Provided<LinkKey> internalLinkKey = new Provided<>(linkKey, providerId);
+ linkDescriptions.compute(internalLinkKey, (k, v) -> createOrUpdateLinkInternal(v , linkDescription));
+ return refreshLinkCache(linkKey);
+ } else {
+ // No master for the destination device yet: drop silently.
+ if (dstNodeId == null) {
+ return null;
+ }
+ // Forward to the master and wait for the resulting event.
+ // NOTE(review): Futures.getUnchecked surfaces timeouts/failures as
+ // unchecked exceptions to the caller — confirm this is intended.
+ return Futures.getUnchecked(clusterCommunicator.sendAndReceive(new Provided<>(linkDescription, providerId),
+ LINK_INJECT_MESSAGE,
+ SERIALIZER::encode,
+ SERIALIZER::decode,
+ dstNodeId));
+ }
+ }
+
+ // Merges an incoming description into the current one for the same provider.
+ private LinkDescription createOrUpdateLinkInternal(LinkDescription current, LinkDescription updated) {
+ if (current != null) {
+ // we only allow transition from INDIRECT -> DIRECT
+ return new DefaultLinkDescription(
+ current.src(),
+ current.dst(),
+ current.type() == DIRECT ? DIRECT : updated.type(),
+ union(current.annotations(), updated.annotations()));
+ }
+ return updated;
+ }
+
+ // Recomposes the cached Link for linkKey and returns LINK_ADDED/LINK_UPDATED,
+ // or null when the composed link is unchanged.
+ private LinkEvent refreshLinkCache(LinkKey linkKey) {
+ AtomicReference<LinkEvent.Type> eventType = new AtomicReference<>();
+ Link link = links.compute(linkKey, (key, existingLink) -> {
+ Link newLink = composeLink(linkKey);
+ if (existingLink == null) {
+ eventType.set(LINK_ADDED);
+ return newLink;
+ } else if (existingLink.state() != newLink.state() ||
+ (existingLink.type() == INDIRECT && newLink.type() == DIRECT) ||
+ !AnnotationsUtil.isEqual(existingLink.annotations(), newLink.annotations())) {
+ eventType.set(LINK_UPDATED);
+ return newLink;
+ } else {
+ return existingLink;
+ }
+ });
+ return eventType.get() != null ? new LinkEvent(eventType.get(), link) : null;
+ }
+
+ // All providers that have contributed a description for the given link.
+ private Set<ProviderId> getAllProviders(LinkKey linkKey) {
+ return linkDescriptions.keySet()
+ .stream()
+ .filter(key -> key.key().equals(linkKey))
+ .map(key -> key.providerId())
+ .collect(Collectors.toSet());
+ }
+
+ // Primary (non-ancillary) provider if any, else an arbitrary one, else null.
+ private ProviderId getBaseProviderId(LinkKey linkKey) {
+ Set<ProviderId> allProviders = getAllProviders(linkKey);
+ if (allProviders.size() > 0) {
+ return allProviders.stream()
+ .filter(p -> !p.isAncillary())
+ .findFirst()
+ .orElse(Iterables.getFirst(allProviders, null));
+ }
+ return null;
+ }
+
+ // Builds the externally visible Link by merging every provider's annotations
+ // onto the base provider's description. Fails (NPE) if no provider remains.
+ private Link composeLink(LinkKey linkKey) {
+
+ ProviderId baseProviderId = checkNotNull(getBaseProviderId(linkKey));
+ LinkDescription base = linkDescriptions.get(new Provided<>(linkKey, baseProviderId));
+
+ ConnectPoint src = base.src();
+ ConnectPoint dst = base.dst();
+ Type type = base.type();
+ AtomicReference<DefaultAnnotations> annotations = new AtomicReference<>(DefaultAnnotations.builder().build());
+ annotations.set(merge(annotations.get(), base.annotations()));
+
+ getAllProviders(linkKey).stream()
+ .map(p -> new Provided<>(linkKey, p))
+ .forEach(key -> {
+ annotations.set(merge(annotations.get(),
+ linkDescriptions.get(key).annotations()));
+ });
+
+ // A link is durable when some provider annotated it with DURABLE=true.
+ boolean isDurable = Objects.equals(annotations.get().value(AnnotationKeys.DURABLE), "true");
+ return new DefaultLink(baseProviderId, src, dst, type, ACTIVE, isDurable, annotations.get());
+ }
+
+ // Updates, if necessary the specified link and returns the appropriate event.
+ // Guarded by linkDescs value (=locking each Link)
+ private LinkEvent updateLink(LinkKey key, Link oldLink, Link newLink) {
+ // Note: INDIRECT -> DIRECT transition only
+ // so that BDDP discovered Link will not overwrite LDDP Link
+ if (oldLink.state() != newLink.state() ||
+ (oldLink.type() == INDIRECT && newLink.type() == DIRECT) ||
+ !AnnotationsUtil.isEqual(oldLink.annotations(), newLink.annotations())) {
+
+ links.put(key, newLink);
+ return new LinkEvent(LINK_UPDATED, newLink);
+ }
+ return null;
+ }
+
+ @Override
+ public LinkEvent removeOrDownLink(ConnectPoint src, ConnectPoint dst) {
+ Link link = getLink(src, dst);
+ if (link == null) {
+ return null;
+ }
+
+ // Durable links are only marked INACTIVE, never removed.
+ if (link.isDurable()) {
+ // FIXME: this will not sync link state!!!
+ return link.state() == INACTIVE ? null :
+ updateLink(linkKey(link.src(), link.dst()), link,
+ new DefaultLink(link.providerId(),
+ link.src(), link.dst(),
+ link.type(), INACTIVE,
+ link.isDurable(),
+ link.annotations()));
+ }
+ return removeLink(src, dst);
+ }
+
+ @Override
+ public LinkEvent removeLink(ConnectPoint src, ConnectPoint dst) {
+ final LinkKey linkKey = LinkKey.linkKey(src, dst);
+ ProviderId primaryProviderId = getBaseProviderId(linkKey);
+ // Stop if there is no base provider.
+ if (primaryProviderId == null) {
+ return null;
+ }
+ LinkDescription removedLinkDescription =
+ linkDescriptions.remove(new Provided<>(linkKey, primaryProviderId));
+ if (removedLinkDescription != null) {
+ return purgeLinkCache(linkKey);
+ }
+ return null;
+ }
+
+ // Drops the cached link and every remaining provider description for it.
+ private LinkEvent purgeLinkCache(LinkKey linkKey) {
+ Link removedLink = links.remove(linkKey);
+ if (removedLink != null) {
+ getAllProviders(linkKey).forEach(p -> linkDescriptions.remove(new Provided<>(linkKey, p)));
+ return new LinkEvent(LINK_REMOVED, removedLink);
+ }
+ return null;
+ }
+
+ private Set<Link> filter(Collection<Link> links, Predicate<Link> predicate) {
+ return links.stream().filter(predicate).collect(Collectors.toSet());
+ }
+
+ // Handler for LINK_INJECT_MESSAGE: applies a link update forwarded by a
+ // non-master peer; replies null when the update is dropped.
+ private LinkEvent injectLink(Provided<LinkDescription> linkInjectRequest) {
+ log.trace("Received request to inject link {}", linkInjectRequest);
+
+ ProviderId providerId = linkInjectRequest.providerId();
+ LinkDescription linkDescription = linkInjectRequest.key();
+
+ final DeviceId deviceId = linkDescription.dst().deviceId();
+ if (!deviceClockService.isTimestampAvailable(deviceId)) {
+ // workaround for ONOS-1208
+ log.warn("Not ready to accept update. Dropping {}", linkInjectRequest);
+ return null;
+ }
+ return createOrUpdateLink(providerId, linkDescription);
+ }
+
+ // Propagates replicated-map events into the local cache and to the delegate.
+ private class InternalLinkTracker implements EventuallyConsistentMapListener<Provided<LinkKey>, LinkDescription> {
+ @Override
+ public void event(EventuallyConsistentMapEvent<Provided<LinkKey>, LinkDescription> event) {
+ // NOTE(review): refreshLinkCache/purgeLinkCache may return null when
+ // nothing changed — confirm notifyDelegate tolerates a null event.
+ if (event.type() == PUT) {
+ notifyDelegate(refreshLinkCache(event.key().key()));
+ } else if (event.type() == REMOVE) {
+ notifyDelegate(purgeLinkCache(event.key().key()));
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/GossipLinkStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/GossipLinkStore.java
new file mode 100644
index 00000000..767ede54
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/GossipLinkStore.java
@@ -0,0 +1,903 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.link.impl;
+
+import com.google.common.base.Function;
+import com.google.common.collect.FluentIterable;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Multimaps;
+import com.google.common.collect.SetMultimap;
+import com.google.common.collect.Sets;
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.KryoNamespace;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.ControllerNode;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.mastership.MastershipService;
+import org.onosproject.net.AnnotationKeys;
+import org.onosproject.net.AnnotationsUtil;
+import org.onosproject.net.ConnectPoint;
+import org.onosproject.net.DefaultAnnotations;
+import org.onosproject.net.DefaultLink;
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.Link;
+import org.onosproject.net.Link.Type;
+import org.onosproject.net.LinkKey;
+import org.onosproject.net.SparseAnnotations;
+import org.onosproject.net.device.DeviceClockService;
+import org.onosproject.net.link.DefaultLinkDescription;
+import org.onosproject.net.link.LinkDescription;
+import org.onosproject.net.link.LinkEvent;
+import org.onosproject.net.link.LinkStore;
+import org.onosproject.net.link.LinkStoreDelegate;
+import org.onosproject.net.provider.ProviderId;
+import org.onosproject.store.AbstractStore;
+import org.onosproject.store.Timestamp;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.cluster.messaging.ClusterMessage;
+import org.onosproject.store.cluster.messaging.ClusterMessageHandler;
+import org.onosproject.store.cluster.messaging.MessageSubject;
+import org.onosproject.store.impl.Timestamped;
+import org.onosproject.store.serializers.KryoSerializer;
+import org.onosproject.store.serializers.custom.DistributedStoreSerializers;
+import org.slf4j.Logger;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Predicates.notNull;
+import static com.google.common.collect.Multimaps.synchronizedSetMultimap;
+import static java.util.concurrent.Executors.newSingleThreadScheduledExecutor;
+import static org.onlab.util.Tools.groupedThreads;
+import static org.onlab.util.Tools.minPriority;
+import static org.onosproject.cluster.ControllerNodeToNodeId.toNodeId;
+import static org.onosproject.net.DefaultAnnotations.merge;
+import static org.onosproject.net.DefaultAnnotations.union;
+import static org.onosproject.net.Link.State.ACTIVE;
+import static org.onosproject.net.Link.State.INACTIVE;
+import static org.onosproject.net.Link.Type.DIRECT;
+import static org.onosproject.net.Link.Type.INDIRECT;
+import static org.onosproject.net.LinkKey.linkKey;
+import static org.onosproject.net.link.LinkEvent.Type.*;
+import static org.onosproject.store.link.impl.GossipLinkStoreMessageSubjects.LINK_ANTI_ENTROPY_ADVERTISEMENT;
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * Manages inventory of infrastructure links in distributed data store
+ * that uses optimistic replication and gossip based techniques.
+ */
+@Component(immediate = true, enabled = false)
+@Service
+public class GossipLinkStore
+ extends AbstractStore<LinkEvent, LinkStoreDelegate>
+ implements LinkStore {
+
+ // Timeout in milliseconds to process links on remote master node
+ private static final int REMOTE_MASTER_TIMEOUT = 1000;
+
+ private final Logger log = getLogger(getClass());
+
+ // Link inventory
+ private final ConcurrentMap<LinkKey, Map<ProviderId, Timestamped<LinkDescription>>> linkDescs =
+ new ConcurrentHashMap<>();
+
+ // Link instance cache
+ private final ConcurrentMap<LinkKey, Link> links = new ConcurrentHashMap<>();
+
+ // Egress and ingress link sets
+ private final SetMultimap<DeviceId, LinkKey> srcLinks = createSynchronizedHashMultiMap();
+ private final SetMultimap<DeviceId, LinkKey> dstLinks = createSynchronizedHashMultiMap();
+
+ // Remove links
+ private final Map<LinkKey, Timestamp> removedLinks = new ConcurrentHashMap<>();
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected DeviceClockService deviceClockService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterCommunicationService clusterCommunicator;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterService clusterService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected MastershipService mastershipService;
+
+ protected static final KryoSerializer SERIALIZER = new KryoSerializer() {
+ @Override
+ protected void setupKryoPool() {
+ serializerPool = KryoNamespace.newBuilder()
+ .register(DistributedStoreSerializers.STORE_COMMON)
+ .nextId(DistributedStoreSerializers.STORE_CUSTOM_BEGIN)
+ .register(InternalLinkEvent.class)
+ .register(InternalLinkRemovedEvent.class)
+ .register(LinkAntiEntropyAdvertisement.class)
+ .register(LinkFragmentId.class)
+ .register(LinkInjectedEvent.class)
+ .build();
+ }
+ };
+
+ private ExecutorService executor;
+
+ private ScheduledExecutorService backgroundExecutors;
+
+ /**
+ * Registers cluster message subscribers and starts the periodic
+ * anti-entropy advertisement task.
+ */
+ @Activate
+ public void activate() {
+
+ executor = Executors.newCachedThreadPool(groupedThreads("onos/link", "fg-%d"));
+
+ backgroundExecutors =
+ newSingleThreadScheduledExecutor(minPriority(groupedThreads("onos/link", "bg-%d")));
+
+ clusterCommunicator.addSubscriber(
+ GossipLinkStoreMessageSubjects.LINK_UPDATE,
+ new InternalLinkEventListener(), executor);
+ clusterCommunicator.addSubscriber(
+ GossipLinkStoreMessageSubjects.LINK_REMOVED,
+ new InternalLinkRemovedEventListener(), executor);
+ clusterCommunicator.addSubscriber(
+ GossipLinkStoreMessageSubjects.LINK_ANTI_ENTROPY_ADVERTISEMENT,
+ new InternalLinkAntiEntropyAdvertisementListener(), backgroundExecutors);
+ clusterCommunicator.addSubscriber(
+ GossipLinkStoreMessageSubjects.LINK_INJECTED,
+ new LinkInjectedEventListener(), executor);
+
+ long initialDelaySec = 5;
+ long periodSec = 5;
+ // start anti-entropy thread
+ backgroundExecutors.scheduleAtFixedRate(new SendAdvertisementTask(),
+ initialDelaySec, periodSec, TimeUnit.SECONDS);
+
+ log.info("Started");
+ }
+
+ /**
+ * Stops the executors and clears all in-memory link state.
+ */
+ // NOTE(review): the cluster message subscribers added in activate() are
+ // not removed here — confirm whether removeSubscriber calls are needed.
+ @Deactivate
+ public void deactivate() {
+
+ executor.shutdownNow();
+
+ backgroundExecutors.shutdownNow();
+ try {
+ if (!backgroundExecutors.awaitTermination(5, TimeUnit.SECONDS)) {
+ log.error("Timeout during executor shutdown");
+ }
+ } catch (InterruptedException e) {
+ // NOTE(review): interrupt status is swallowed here; consider
+ // Thread.currentThread().interrupt() to preserve it.
+ log.error("Error during executor shutdown", e);
+ }
+
+ linkDescs.clear();
+ links.clear();
+ srcLinks.clear();
+ dstLinks.clear();
+ log.info("Stopped");
+ }
+
+ // Returns the number of links currently in the local cache.
+ @Override
+ public int getLinkCount() {
+ return links.size();
+ }
+
+ // Returns a read-only view of all cached links.
+ @Override
+ public Iterable<Link> getLinks() {
+ return Collections.unmodifiableCollection(links.values());
+ }
+
+ // Links whose source connect point is on the given device.
+ @Override
+ public Set<Link> getDeviceEgressLinks(DeviceId deviceId) {
+ // lock for iteration
+ synchronized (srcLinks) {
+ return FluentIterable.from(srcLinks.get(deviceId))
+ .transform(lookupLink())
+ .filter(notNull())
+ .toSet();
+ }
+ }
+
+ // Links whose destination connect point is on the given device.
+ @Override
+ public Set<Link> getDeviceIngressLinks(DeviceId deviceId) {
+ // lock for iteration
+ synchronized (dstLinks) {
+ return FluentIterable.from(dstLinks.get(deviceId))
+ .transform(lookupLink())
+ .filter(notNull())
+ .toSet();
+ }
+ }
+
+ // Looks up the cached link for the (src, dst) pair; null if absent.
+ @Override
+ public Link getLink(ConnectPoint src, ConnectPoint dst) {
+ return links.get(linkKey(src, dst));
+ }
+
+ // Links originating exactly at the given connect point.
+ @Override
+ public Set<Link> getEgressLinks(ConnectPoint src) {
+ Set<Link> egress = new HashSet<>();
+ //
+ // Change `srcLinks` to ConcurrentMap<DeviceId, (Concurrent)Set>
+ // to remove this synchronized block, if we hit a performance issue.
+ // SetMultimap#get returns a wrapped collection to provide a modifiable view.
+ // And the wrapped collection is not concurrent access safe.
+ //
+ // Our use case here does not require the returned collection to be modifiable,
+ // so the wrapped collection forces us to lock the whole multiset,
+ // for a benefit we don't need.
+ //
+ // Same applies to `dstLinks`
+ synchronized (srcLinks) {
+ for (LinkKey linkKey : srcLinks.get(src.deviceId())) {
+ if (linkKey.src().equals(src)) {
+ Link link = links.get(linkKey);
+ if (link != null) {
+ egress.add(link);
+ } else {
+ log.debug("Egress link for {} was null, skipped", linkKey);
+ }
+ }
+ }
+ }
+ return egress;
+ }
+
+ // Links terminating exactly at the given connect point.
+ // See the locking rationale documented in getEgressLinks.
+ @Override
+ public Set<Link> getIngressLinks(ConnectPoint dst) {
+ Set<Link> ingress = new HashSet<>();
+ synchronized (dstLinks) {
+ for (LinkKey linkKey : dstLinks.get(dst.deviceId())) {
+ if (linkKey.dst().equals(dst)) {
+ Link link = links.get(linkKey);
+ if (link != null) {
+ ingress.add(link);
+ } else {
+ log.debug("Ingress link for {} was null, skipped", linkKey);
+ }
+ }
+ }
+ }
+ return ingress;
+ }
+
+ /**
+ * Creates or updates a link description from the given provider.
+ *
+ * Applied locally when this node masters the destination device and the
+ * resulting event (if any) is gossiped to peers; otherwise the update is
+ * forwarded to the actual master via LINK_INJECTED.
+ */
+ @Override
+ public LinkEvent createOrUpdateLink(ProviderId providerId,
+ LinkDescription linkDescription) {
+
+ final DeviceId dstDeviceId = linkDescription.dst().deviceId();
+ final NodeId localNode = clusterService.getLocalNode().id();
+ final NodeId dstNode = mastershipService.getMasterFor(dstDeviceId);
+
+ // Process link update only if we're the master of the destination node,
+ // otherwise signal the actual master.
+ LinkEvent linkEvent = null;
+ if (localNode.equals(dstNode)) {
+
+ Timestamp newTimestamp = deviceClockService.getTimestamp(dstDeviceId);
+
+ final Timestamped<LinkDescription> deltaDesc = new Timestamped<>(linkDescription, newTimestamp);
+
+ LinkKey key = linkKey(linkDescription.src(), linkDescription.dst());
+ final Timestamped<LinkDescription> mergedDesc;
+ Map<ProviderId, Timestamped<LinkDescription>> map = getOrCreateLinkDescriptions(key);
+
+ // Lock the per-link description map while merging.
+ synchronized (map) {
+ linkEvent = createOrUpdateLinkInternal(providerId, deltaDesc);
+ mergedDesc = map.get(providerId);
+ }
+
+ if (linkEvent != null) {
+ log.debug("Notifying peers of a link update topology event from providerId: "
+ + "{} between src: {} and dst: {}",
+ providerId, linkDescription.src(), linkDescription.dst());
+ notifyPeers(new InternalLinkEvent(providerId, mergedDesc));
+ }
+
+ } else {
+ // FIXME Temporary hack for NPE (ONOS-1171).
+ // Proper fix is to implement forwarding to master on ConfigProvider
+ // redo ONOS-490
+ if (dstNode == null) {
+ // silently ignore
+ return null;
+ }
+
+
+ LinkInjectedEvent linkInjectedEvent = new LinkInjectedEvent(providerId, linkDescription);
+
+ // TODO check unicast return value
+ clusterCommunicator.unicast(linkInjectedEvent,
+ GossipLinkStoreMessageSubjects.LINK_INJECTED,
+ SERIALIZER::encode,
+ dstNode);
+ }
+
+ // Null when forwarded remotely or when nothing changed locally.
+ return linkEvent;
+ }
+
+ /**
+ * Marks a durable link INACTIVE, or removes a non-durable link entirely.
+ */
+ @Override
+ public LinkEvent removeOrDownLink(ConnectPoint src, ConnectPoint dst) {
+ Link link = getLink(src, dst);
+ if (link == null) {
+ return null;
+ }
+
+ if (link.isDurable()) {
+ // FIXME: this is not the right thing to call for the gossip store; will not sync link state!!!
+ return link.state() == INACTIVE ? null :
+ updateLink(linkKey(link.src(), link.dst()), link,
+ new DefaultLink(link.providerId(),
+ link.src(), link.dst(),
+ link.type(), INACTIVE,
+ link.isDurable(),
+ link.annotations()));
+ }
+ return removeLink(src, dst);
+ }
+
+ // Merges a timestamped description into the per-link provider map and
+ // recomposes the cached link, returning LINK_ADDED/LINK_UPDATED or null
+ // when the request is stale or produces no visible change.
+ private LinkEvent createOrUpdateLinkInternal(
+ ProviderId providerId,
+ Timestamped<LinkDescription> linkDescription) {
+
+ final LinkKey key = linkKey(linkDescription.value().src(),
+ linkDescription.value().dst());
+ Map<ProviderId, Timestamped<LinkDescription>> descs = getOrCreateLinkDescriptions(key);
+
+ synchronized (descs) {
+ // if the link was previously removed, we should proceed if and
+ // only if this request is more recent.
+ Timestamp linkRemovedTimestamp = removedLinks.get(key);
+ if (linkRemovedTimestamp != null) {
+ if (linkDescription.isNewerThan(linkRemovedTimestamp)) {
+ // Newer than the tombstone: resurrect the link.
+ removedLinks.remove(key);
+ } else {
+ log.trace("Link {} was already removed ignoring.", key);
+ return null;
+ }
+ }
+
+ final Link oldLink = links.get(key);
+ // update description
+ createOrUpdateLinkDescription(descs, providerId, linkDescription);
+ final Link newLink = composeLink(descs);
+ if (oldLink == null) {
+ return createLink(key, newLink);
+ }
+ return updateLink(key, oldLink, newLink);
+ }
+ }
+
+ // Merges an incoming timestamped description into descs for one provider,
+ // ignoring it when the local copy is newer; returns the previous mapping
+ // (Map#put semantics) or null when the update was ignored.
+ // Guarded by linkDescs value (=locking each Link)
+ private Timestamped<LinkDescription> createOrUpdateLinkDescription(
+ Map<ProviderId, Timestamped<LinkDescription>> descs,
+ ProviderId providerId,
+ Timestamped<LinkDescription> linkDescription) {
+
+ // merge existing annotations
+ Timestamped<LinkDescription> existingLinkDescription = descs.get(providerId);
+ if (existingLinkDescription != null && existingLinkDescription.isNewer(linkDescription)) {
+ log.trace("local info is more up-to-date, ignoring {}.", linkDescription);
+ return null;
+ }
+ Timestamped<LinkDescription> newLinkDescription = linkDescription;
+ if (existingLinkDescription != null) {
+ // we only allow transition from INDIRECT -> DIRECT
+ final Type newType;
+ if (existingLinkDescription.value().type() == DIRECT) {
+ newType = DIRECT;
+ } else {
+ newType = linkDescription.value().type();
+ }
+ SparseAnnotations merged = union(existingLinkDescription.value().annotations(),
+ linkDescription.value().annotations());
+ newLinkDescription = new Timestamped<>(
+ new DefaultLinkDescription(
+ linkDescription.value().src(),
+ linkDescription.value().dst(),
+ newType, merged),
+ linkDescription.timestamp());
+ }
+ return descs.put(providerId, newLinkDescription);
+ }
+
+ // Creates and stores the link and returns the appropriate event,
+ // also indexing it in the per-device egress/ingress multimaps.
+ // Guarded by linkDescs value (=locking each Link)
+ private LinkEvent createLink(LinkKey key, Link newLink) {
+ links.put(key, newLink);
+ srcLinks.put(newLink.src().deviceId(), key);
+ dstLinks.put(newLink.dst().deviceId(), key);
+ return new LinkEvent(LINK_ADDED, newLink);
+ }
+
+ // Updates, if necessary, the specified link and returns the appropriate
+ // event, or null when nothing observable changed.
+ // Guarded by linkDescs value (=locking each Link)
+ private LinkEvent updateLink(LinkKey key, Link oldLink, Link newLink) {
+ // Note: INDIRECT -> DIRECT transition only
+ // so that BDDP discovered Link will not overwrite LDDP Link
+ if (oldLink.state() != newLink.state() ||
+ (oldLink.type() == INDIRECT && newLink.type() == DIRECT) ||
+ !AnnotationsUtil.isEqual(oldLink.annotations(), newLink.annotations())) {
+
+ links.put(key, newLink);
+ // strictly speaking following can be omitted
+ srcLinks.put(oldLink.src().deviceId(), key);
+ dstLinks.put(oldLink.dst().deviceId(), key);
+ return new LinkEvent(LINK_UPDATED, newLink);
+ }
+ return null;
+ }
+
+ /**
+ * Removes the link, timestamped with the destination device's clock,
+ * and gossips the removal to peers when something was actually removed.
+ */
+ @Override
+ public LinkEvent removeLink(ConnectPoint src, ConnectPoint dst) {
+ final LinkKey key = linkKey(src, dst);
+
+ DeviceId dstDeviceId = dst.deviceId();
+ Timestamp timestamp = null;
+ try {
+ timestamp = deviceClockService.getTimestamp(dstDeviceId);
+ } catch (IllegalStateException e) {
+ log.debug("Failed to remove link {}, was not the master", key);
+ // there are times when this is called before mastership
+ // handoff correctly completes.
+ return null;
+ }
+
+ LinkEvent event = removeLinkInternal(key, timestamp);
+
+ if (event != null) {
+ log.debug("Notifying peers of a link removed topology event for a link "
+ + "between src: {} and dst: {}", src, dst);
+ notifyPeers(new InternalLinkRemovedEvent(key, timestamp));
+ }
+ return event;
+ }
+
+ // Returns the first non-ancillary provider's timestamped description,
+ // or null when only ancillary providers (or none) are present.
+ private static Timestamped<LinkDescription> getPrimaryDescription(
+ Map<ProviderId, Timestamped<LinkDescription>> linkDescriptions) {
+
+ // lock for iteration over the shared per-link map
+ synchronized (linkDescriptions) {
+ for (Entry<ProviderId, Timestamped<LinkDescription>>
+ e : linkDescriptions.entrySet()) {
+
+ if (!e.getKey().isAncillary()) {
+ return e.getValue();
+ }
+ }
+ }
+ return null;
+ }
+
+
+ // TODO: consider slicing out as Timestamp utils
+ /**
+ * Checks if a timestamp is more recent than a timestamped object.
+ *
+ * @param timestamp timestamp to check for being more recent than the other
+ * @param timestamped object to be tested against
+ * @return true if {@code timestamp} is more recent than {@code timestamped}
+ * or {@code timestamped} is null
+ */
+ private static boolean isMoreRecent(Timestamp timestamp, Timestamped<?> timestamped) {
+ checkNotNull(timestamp);
+ if (timestamped == null) {
+ return true;
+ }
+ return timestamp.compareTo(timestamped.timestamp()) > 0;
+ }
+
    /**
     * Removes the link identified by {@code key} if {@code timestamp} is
     * newer than the latest description from the primary provider.
     * Always records {@code timestamp} as a tombstone when a removal is
     * accepted (or when the link was never seen), so stale re-adds and
     * duplicate removals can be rejected later.
     *
     * @param key identity of the link to remove
     * @param timestamp time of the removal request
     * @return LINK_REMOVED event, or null if nothing was removed locally
     */
    private LinkEvent removeLinkInternal(LinkKey key, Timestamp timestamp) {
        Map<ProviderId, Timestamped<LinkDescription>> linkDescriptions
            = getOrCreateLinkDescriptions(key);

        synchronized (linkDescriptions) {
            if (linkDescriptions.isEmpty()) {
                // never seen such link before. keeping timestamp for record
                removedLinks.put(key, timestamp);
                return null;
            }
            // accept removal request if given timestamp is newer than
            // the latest Timestamp from Primary provider
            Timestamped<LinkDescription> prim = getPrimaryDescription(linkDescriptions);
            if (!isMoreRecent(timestamp, prim)) {
                // outdated remove request, ignore
                return null;
            }
            // record tombstone, then drop the composed link and all
            // provider fragments for it
            removedLinks.put(key, timestamp);
            Link link = links.remove(key);
            linkDescriptions.clear();
            if (link != null) {
                srcLinks.remove(link.src().deviceId(), key);
                dstLinks.remove(link.dst().deviceId(), key);
                return new LinkEvent(LINK_REMOVED, link);
            }
            // fragments existed but no composed link was present
            return null;
        }
    }
+
+ /**
+ * Creates concurrent readable, synchronized HashMultimap.
+ *
+ * @return SetMultimap
+ */
+ private static <K, V> SetMultimap<K, V> createSynchronizedHashMultiMap() {
+ return synchronizedSetMultimap(
+ Multimaps.newSetMultimap(new ConcurrentHashMap<K, Collection<V>>(),
+ () -> Sets.newConcurrentHashSet()));
+ }
+
    /**
     * @return primary ProviderId, or the first ancillary one encountered
     *         if no primary exists (null if {@code linkDescriptions} is empty)
     */
    private static ProviderId pickBaseProviderId(
            Map<ProviderId, Timestamped<LinkDescription>> linkDescriptions) {

        ProviderId fallBackPrimary = null;
        for (Entry<ProviderId, Timestamped<LinkDescription>> e : linkDescriptions.entrySet()) {
            if (!e.getKey().isAncillary()) {
                // found primary
                return e.getKey();
            } else if (fallBackPrimary == null) {
                // remember the first ancillary seen as a fallback in case
                // there is no primary (map iteration order is unspecified)
                fallBackPrimary = e.getKey();
            }
        }
        return fallBackPrimary;
    }
+
    // Guarded by linkDescs value (=locking each Link)
    /**
     * Composes a single Link from all provider descriptions of one link,
     * using the primary provider's description as the base and merging
     * annotations from every other provider on top.
     * NOTE(review): assumes {@code descs} is non-empty — an empty map would
     * make {@code base} null and NPE below; confirm all callers guarantee this.
     */
    private Link composeLink(Map<ProviderId, Timestamped<LinkDescription>> descs) {
        ProviderId baseProviderId = pickBaseProviderId(descs);
        Timestamped<LinkDescription> base = descs.get(baseProviderId);

        ConnectPoint src = base.value().src();
        ConnectPoint dst = base.value().dst();
        Type type = base.value().type();
        DefaultAnnotations annotations = DefaultAnnotations.builder().build();
        annotations = merge(annotations, base.value().annotations());

        for (Entry<ProviderId, Timestamped<LinkDescription>> e : descs.entrySet()) {
            if (baseProviderId.equals(e.getKey())) {
                continue;
            }

            // Note: In the long run we should keep track of Description timestamp
            // and only merge conflicting keys when timestamp is newer
            // Currently assuming there will never be a key conflict between
            // providers

            // annotation merging. not so efficient, should revisit later
            annotations = merge(annotations, e.getValue().value().annotations());
        }

        // a link is durable iff its merged annotations say DURABLE=true
        boolean isDurable = Objects.equals(annotations.value(AnnotationKeys.DURABLE), "true");
        return new DefaultLink(baseProviderId, src, dst, type, ACTIVE, isDurable, annotations);
    }
+
+ private Map<ProviderId, Timestamped<LinkDescription>> getOrCreateLinkDescriptions(LinkKey key) {
+ Map<ProviderId, Timestamped<LinkDescription>> r;
+ r = linkDescs.get(key);
+ if (r != null) {
+ return r;
+ }
+ r = new HashMap<>();
+ final Map<ProviderId, Timestamped<LinkDescription>> concurrentlyAdded;
+ concurrentlyAdded = linkDescs.putIfAbsent(key, r);
+ if (concurrentlyAdded != null) {
+ return concurrentlyAdded;
+ } else {
+ return r;
+ }
+ }
+
+ private final Function<LinkKey, Link> lookupLink = new LookupLink();
+
+ /**
+ * Returns a Function to lookup Link instance using LinkKey from cache.
+ *
+ * @return lookup link function
+ */
+ private Function<LinkKey, Link> lookupLink() {
+ return lookupLink;
+ }
+
+ private final class LookupLink implements Function<LinkKey, Link> {
+ @Override
+ public Link apply(LinkKey input) {
+ if (input == null) {
+ return null;
+ } else {
+ return links.get(input);
+ }
+ }
+ }
+
+ private void notifyDelegateIfNotNull(LinkEvent event) {
+ if (event != null) {
+ notifyDelegate(event);
+ }
+ }
+
    // Broadcasts the given event to all peers under the given subject,
    // serializing it with the store serializer.
    private void broadcastMessage(MessageSubject subject, Object event) {
        clusterCommunicator.broadcast(event, subject, SERIALIZER::encode);
    }
+
    // Sends the given event to a single peer; transport failures propagate
    // to the caller as IOException.
    private void unicastMessage(NodeId recipient, MessageSubject subject, Object event) throws IOException {
        clusterCommunicator.unicast(event, subject, SERIALIZER::encode, recipient);
    }
+
    // Broadcasts a link add/update event to all peers.
    private void notifyPeers(InternalLinkEvent event) {
        broadcastMessage(GossipLinkStoreMessageSubjects.LINK_UPDATE, event);
    }
+
    // Broadcasts a link removed event to all peers.
    private void notifyPeers(InternalLinkRemovedEvent event) {
        broadcastMessage(GossipLinkStoreMessageSubjects.LINK_REMOVED, event);
    }
+
+ // notify peer, silently ignoring error
+ private void notifyPeer(NodeId peer, InternalLinkEvent event) {
+ try {
+ unicastMessage(peer, GossipLinkStoreMessageSubjects.LINK_UPDATE, event);
+ } catch (IOException e) {
+ log.debug("Failed to notify peer {} with message {}", peer, event);
+ }
+ }
+
+ // notify peer, silently ignoring error
+ private void notifyPeer(NodeId peer, InternalLinkRemovedEvent event) {
+ try {
+ unicastMessage(peer, GossipLinkStoreMessageSubjects.LINK_REMOVED, event);
+ } catch (IOException e) {
+ log.debug("Failed to notify peer {} with message {}", peer, event);
+ }
+ }
+
    /**
     * Periodic task that sends an anti-entropy advertisement of local link
     * state to one randomly chosen peer.
     */
    private final class SendAdvertisementTask implements Runnable {

        @Override
        public void run() {
            if (Thread.currentThread().isInterrupted()) {
                log.debug("Interrupted, quitting");
                return;
            }

            try {
                final NodeId self = clusterService.getLocalNode().id();
                Set<ControllerNode> nodes = clusterService.getNodes();

                ImmutableList<NodeId> nodeIds = FluentIterable.from(nodes)
                        .transform(toNodeId())
                        .toList();

                if (nodeIds.size() == 1 && nodeIds.get(0).equals(self)) {
                    log.trace("No other peers in the cluster.");
                    return;
                }

                // pick a random peer other than self; terminates because
                // we just verified at least one other node exists
                NodeId peer;
                do {
                    int idx = RandomUtils.nextInt(0, nodeIds.size());
                    peer = nodeIds.get(idx);
                } while (peer.equals(self));

                LinkAntiEntropyAdvertisement ad = createAdvertisement();

                // re-check interruption after building the (possibly large)
                // advertisement snapshot
                if (Thread.currentThread().isInterrupted()) {
                    log.debug("Interrupted, quitting");
                    return;
                }

                try {
                    unicastMessage(peer, LINK_ANTI_ENTROPY_ADVERTISEMENT, ad);
                } catch (IOException e) {
                    log.debug("Failed to send anti-entropy advertisement to {}", peer);
                    return;
                }
            } catch (Exception e) {
                // catch all Exception to avoid Scheduled task being suppressed.
                log.error("Exception thrown while sending advertisement", e);
            }
        }
    }
+
+ private LinkAntiEntropyAdvertisement createAdvertisement() {
+ final NodeId self = clusterService.getLocalNode().id();
+
+ Map<LinkFragmentId, Timestamp> linkTimestamps = new HashMap<>(linkDescs.size());
+ Map<LinkKey, Timestamp> linkTombstones = new HashMap<>(removedLinks.size());
+
+ linkDescs.forEach((linkKey, linkDesc) -> {
+ synchronized (linkDesc) {
+ for (Map.Entry<ProviderId, Timestamped<LinkDescription>> e : linkDesc.entrySet()) {
+ linkTimestamps.put(new LinkFragmentId(linkKey, e.getKey()), e.getValue().timestamp());
+ }
+ }
+ });
+
+ linkTombstones.putAll(removedLinks);
+
+ return new LinkAntiEntropyAdvertisement(self, linkTimestamps, linkTombstones);
+ }
+
    /**
     * Reconciles local link state against a peer's advertisement.
     * For each local fragment: if the peer's knowledge is missing or older,
     * push our description to the peer; if the peer is strictly newer, note
     * that we are outdated and reply with our own advertisement at the end.
     * Remote tombstones newer than our latest knowledge remove links locally.
     *
     * @param ad advertisement received from a peer
     */
    private void handleAntiEntropyAdvertisement(LinkAntiEntropyAdvertisement ad) {

        final NodeId sender = ad.sender();
        boolean localOutdated = false;

        for (Entry<LinkKey, Map<ProviderId, Timestamped<LinkDescription>>>
                l : linkDescs.entrySet()) {

            final LinkKey key = l.getKey();
            final Map<ProviderId, Timestamped<LinkDescription>> link = l.getValue();
            synchronized (link) {
                // seed "latest local knowledge" with our tombstone, if any
                Timestamp localLatest = removedLinks.get(key);

                for (Entry<ProviderId, Timestamped<LinkDescription>> p : link.entrySet()) {
                    final ProviderId providerId = p.getKey();
                    final Timestamped<LinkDescription> pDesc = p.getValue();

                    final LinkFragmentId fragId = new LinkFragmentId(key, providerId);
                    // remote timestamp for this fragment; fall back to the
                    // peer's tombstone when it has no live fragment
                    Timestamp remoteTimestamp = ad.linkTimestamps().get(fragId);
                    if (remoteTimestamp == null) {
                        remoteTimestamp = ad.linkTombstones().get(key);
                    }
                    if (remoteTimestamp == null ||
                        pDesc.isNewerThan(remoteTimestamp)) {
                        // I have more recent link description. update peer.
                        notifyPeer(sender, new InternalLinkEvent(providerId, pDesc));
                    } else {
                        final Timestamp remoteLive = ad.linkTimestamps().get(fragId);
                        if (remoteLive != null &&
                            remoteLive.compareTo(pDesc.timestamp()) > 0) {
                            // I have something outdated
                            localOutdated = true;
                        }
                    }

                    // search local latest along the way
                    if (localLatest == null ||
                        pDesc.isNewerThan(localLatest)) {
                        localLatest = pDesc.timestamp();
                    }
                }
                // Tests if remote remove is more recent then local latest.
                final Timestamp remoteRemove = ad.linkTombstones().get(key);
                if (remoteRemove != null) {
                    if (localLatest != null &&
                        localLatest.compareTo(remoteRemove) < 0) {
                        // remote remove is more recent
                        notifyDelegateIfNotNull(removeLinkInternal(key, remoteRemove));
                    }
                }
            }
        }

        // populate remove info if not known locally
        for (Entry<LinkKey, Timestamp> remoteRm : ad.linkTombstones().entrySet()) {
            final LinkKey key = remoteRm.getKey();
            final Timestamp remoteRemove = remoteRm.getValue();
            // relying on removeLinkInternal to ignore stale info
            notifyDelegateIfNotNull(removeLinkInternal(key, remoteRemove));
        }

        if (localOutdated) {
            // send back advertisement to speed up convergence
            try {
                unicastMessage(sender, LINK_ANTI_ENTROPY_ADVERTISEMENT,
                        createAdvertisement());
            } catch (IOException e) {
                log.debug("Failed to send back active advertisement");
            }
        }
    }
+
+ private final class InternalLinkEventListener
+ implements ClusterMessageHandler {
+ @Override
+ public void handle(ClusterMessage message) {
+
+ log.trace("Received link event from peer: {}", message.sender());
+ InternalLinkEvent event = (InternalLinkEvent) SERIALIZER.decode(message.payload());
+
+ ProviderId providerId = event.providerId();
+ Timestamped<LinkDescription> linkDescription = event.linkDescription();
+
+ try {
+ notifyDelegateIfNotNull(createOrUpdateLinkInternal(providerId, linkDescription));
+ } catch (Exception e) {
+ log.warn("Exception thrown handling link event", e);
+ }
+ }
+ }
+
+ private final class InternalLinkRemovedEventListener
+ implements ClusterMessageHandler {
+ @Override
+ public void handle(ClusterMessage message) {
+
+ log.trace("Received link removed event from peer: {}", message.sender());
+ InternalLinkRemovedEvent event = (InternalLinkRemovedEvent) SERIALIZER.decode(message.payload());
+
+ LinkKey linkKey = event.linkKey();
+ Timestamp timestamp = event.timestamp();
+
+ try {
+ notifyDelegateIfNotNull(removeLinkInternal(linkKey, timestamp));
+ } catch (Exception e) {
+ log.warn("Exception thrown handling link removed", e);
+ }
+ }
+ }
+
+ private final class InternalLinkAntiEntropyAdvertisementListener
+ implements ClusterMessageHandler {
+
+ @Override
+ public void handle(ClusterMessage message) {
+ log.trace("Received Link Anti-Entropy advertisement from peer: {}", message.sender());
+ LinkAntiEntropyAdvertisement advertisement = SERIALIZER.decode(message.payload());
+ try {
+ handleAntiEntropyAdvertisement(advertisement);
+ } catch (Exception e) {
+ log.warn("Exception thrown while handling Link advertisements", e);
+ throw e;
+ }
+ }
+ }
+
+ private final class LinkInjectedEventListener
+ implements ClusterMessageHandler {
+ @Override
+ public void handle(ClusterMessage message) {
+
+ log.trace("Received injected link event from peer: {}", message.sender());
+ LinkInjectedEvent linkInjectedEvent = SERIALIZER.decode(message.payload());
+
+ ProviderId providerId = linkInjectedEvent.providerId();
+ LinkDescription linkDescription = linkInjectedEvent.linkDescription();
+
+ final DeviceId deviceId = linkDescription.dst().deviceId();
+ if (!deviceClockService.isTimestampAvailable(deviceId)) {
+ // workaround for ONOS-1208
+ log.warn("Not ready to accept update. Dropping {}", linkDescription);
+ return;
+ }
+
+ try {
+ createOrUpdateLink(providerId, linkDescription);
+ } catch (Exception e) {
+ log.warn("Exception thrown while handling link injected event", e);
+ }
+ }
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/GossipLinkStoreMessageSubjects.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/GossipLinkStoreMessageSubjects.java
new file mode 100644
index 00000000..e0e1dda3
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/GossipLinkStoreMessageSubjects.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.link.impl;
+
+ import org.onosproject.store.cluster.messaging.MessageSubject;
+
/**
 * MessageSubjects used by GossipLinkStore peer-peer communication.
 */
public final class GossipLinkStoreMessageSubjects {

    // utility holder; never instantiated
    private GossipLinkStoreMessageSubjects() {}

    /** Subject for link creation/update gossip messages. */
    public static final MessageSubject LINK_UPDATE =
        new MessageSubject("peer-link-update");
    /** Subject for link removal gossip messages. */
    public static final MessageSubject LINK_REMOVED =
        new MessageSubject("peer-link-removed");
    // NOTE(review): "enti" looks like a typo for "anti", but this string is a
    // wire-level identifier; renaming it would break mixed-version clusters.
    /** Subject for link anti-entropy advertisements. */
    public static final MessageSubject LINK_ANTI_ENTROPY_ADVERTISEMENT =
        new MessageSubject("link-enti-entropy-advertisement");
    /** Subject for link-injection messages between peers. */
    public static final MessageSubject LINK_INJECTED =
        new MessageSubject("peer-link-injected");
}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/InternalLinkEvent.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/InternalLinkEvent.java
new file mode 100644
index 00000000..2319f274
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/InternalLinkEvent.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.link.impl;
+
+import com.google.common.base.MoreObjects;
+
+import org.onosproject.net.link.LinkDescription;
+import org.onosproject.net.provider.ProviderId;
+import org.onosproject.store.impl.Timestamped;
+
/**
 * Information published by GossipLinkStore to notify peers of a link
 * change event.
 */
public class InternalLinkEvent {

    // provider that supplied the link description
    private final ProviderId providerId;
    // timestamped description of the changed link
    private final Timestamped<LinkDescription> linkDescription;

    /**
     * Creates an InternalLinkEvent.
     *
     * @param providerId provider that supplied the description
     * @param linkDescription timestamped description of the link
     */
    protected InternalLinkEvent(
            ProviderId providerId,
            Timestamped<LinkDescription> linkDescription) {
        this.providerId = providerId;
        this.linkDescription = linkDescription;
    }

    public ProviderId providerId() {
        return providerId;
    }

    public Timestamped<LinkDescription> linkDescription() {
        return linkDescription;
    }

    @Override
    public String toString() {
        return MoreObjects.toStringHelper(getClass())
                .add("providerId", providerId)
                .add("linkDescription", linkDescription)
                .toString();
    }

    // for serializer
    protected InternalLinkEvent() {
        this.providerId = null;
        this.linkDescription = null;
    }
}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/InternalLinkRemovedEvent.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/InternalLinkRemovedEvent.java
new file mode 100644
index 00000000..9d867203
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/InternalLinkRemovedEvent.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.link.impl;
+
+import org.onosproject.net.LinkKey;
+import org.onosproject.store.Timestamp;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Information published by GossipLinkStore to notify peers of a link
+ * being removed.
+ */
+public class InternalLinkRemovedEvent {
+
+ private final LinkKey linkKey;
+ private final Timestamp timestamp;
+
+ /**
+ * Creates a InternalLinkRemovedEvent.
+ * @param linkKey identifier of the removed link.
+ * @param timestamp timestamp of when the link was removed.
+ */
+ public InternalLinkRemovedEvent(LinkKey linkKey, Timestamp timestamp) {
+ this.linkKey = linkKey;
+ this.timestamp = timestamp;
+ }
+
+ public LinkKey linkKey() {
+ return linkKey;
+ }
+
+ public Timestamp timestamp() {
+ return timestamp;
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(getClass())
+ .add("linkKey", linkKey)
+ .add("timestamp", timestamp)
+ .toString();
+ }
+
+ // for serializer
+ @SuppressWarnings("unused")
+ private InternalLinkRemovedEvent() {
+ linkKey = null;
+ timestamp = null;
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/LinkAntiEntropyAdvertisement.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/LinkAntiEntropyAdvertisement.java
new file mode 100644
index 00000000..73c1042e
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/LinkAntiEntropyAdvertisement.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.link.impl;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.util.Map;
+
+import org.onosproject.cluster.NodeId;
+import org.onosproject.net.LinkKey;
+import org.onosproject.store.Timestamp;
+
/**
 * Link AE Advertisement message.
 * Carries the sender's knowledge of per-fragment timestamps and
 * removal tombstones for anti-entropy reconciliation.
 */
public class LinkAntiEntropyAdvertisement {

    // node that produced this advertisement
    private final NodeId sender;
    // timestamp of each (link, provider) fragment known to the sender
    private final Map<LinkFragmentId, Timestamp> linkTimestamps;
    // removal timestamps for links the sender knows to be removed
    private final Map<LinkKey, Timestamp> linkTombstones;


    /**
     * Creates an advertisement.
     * NOTE(review): the maps are stored and returned without defensive
     * copies — callers share them by reference; confirm this is intended.
     *
     * @param sender node producing the advertisement
     * @param linkTimestamps fragment timestamps
     * @param linkTombstones removal timestamps
     */
    public LinkAntiEntropyAdvertisement(NodeId sender,
                Map<LinkFragmentId, Timestamp> linkTimestamps,
                Map<LinkKey, Timestamp> linkTombstones) {
        this.sender = checkNotNull(sender);
        this.linkTimestamps = checkNotNull(linkTimestamps);
        this.linkTombstones = checkNotNull(linkTombstones);
    }

    public NodeId sender() {
        return sender;
    }

    public Map<LinkFragmentId, Timestamp> linkTimestamps() {
        return linkTimestamps;
    }

    public Map<LinkKey, Timestamp> linkTombstones() {
        return linkTombstones;
    }

    // For serializer
    @SuppressWarnings("unused")
    private LinkAntiEntropyAdvertisement() {
        this.sender = null;
        this.linkTimestamps = null;
        this.linkTombstones = null;
    }
}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/LinkFragmentId.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/LinkFragmentId.java
new file mode 100644
index 00000000..af7ce4fc
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/LinkFragmentId.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.link.impl;
+
+import java.util.Objects;
+
+import org.onosproject.net.LinkKey;
+import org.onosproject.net.provider.ProviderId;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Identifier for LinkDescription from a Provider.
+ */
+public final class LinkFragmentId {
+ public final ProviderId providerId;
+ public final LinkKey linkKey;
+
+ public LinkFragmentId(LinkKey linkKey, ProviderId providerId) {
+ this.providerId = providerId;
+ this.linkKey = linkKey;
+ }
+
+ public LinkKey linkKey() {
+ return linkKey;
+ }
+
+ public ProviderId providerId() {
+ return providerId;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(providerId, linkKey);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (!(obj instanceof LinkFragmentId)) {
+ return false;
+ }
+ LinkFragmentId that = (LinkFragmentId) obj;
+ return Objects.equals(this.linkKey, that.linkKey) &&
+ Objects.equals(this.providerId, that.providerId);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(getClass())
+ .add("providerId", providerId)
+ .add("linkKey", linkKey)
+ .toString();
+ }
+
+ // for serializer
+ @SuppressWarnings("unused")
+ private LinkFragmentId() {
+ this.providerId = null;
+ this.linkKey = null;
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/LinkInjectedEvent.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/LinkInjectedEvent.java
new file mode 100644
index 00000000..356033b0
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/LinkInjectedEvent.java
@@ -0,0 +1,38 @@
+package org.onosproject.store.link.impl;
+
+import com.google.common.base.MoreObjects;
+import org.onosproject.net.link.LinkDescription;
+import org.onosproject.net.provider.ProviderId;
+
+public class LinkInjectedEvent {
+
+ ProviderId providerId;
+ LinkDescription linkDescription;
+
+ public LinkInjectedEvent(ProviderId providerId, LinkDescription linkDescription) {
+ this.providerId = providerId;
+ this.linkDescription = linkDescription;
+ }
+
+ public ProviderId providerId() {
+ return providerId;
+ }
+
+ public LinkDescription linkDescription() {
+ return linkDescription;
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(getClass())
+ .add("providerId", providerId)
+ .add("linkDescription", linkDescription)
+ .toString();
+ }
+
+ // for serializer
+ protected LinkInjectedEvent() {
+ this.providerId = null;
+ this.linkDescription = null;
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/Provided.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/Provided.java
new file mode 100644
index 00000000..b5b9e644
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/Provided.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.link.impl;
+
+import java.util.Objects;
+
+import org.onosproject.net.provider.ProviderId;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Encapsulation of a provider supplied key.
+ *
+ * @param <K> key
+ */
+public class Provided<K> {
+ private final K key;
+ private final ProviderId providerId;
+
+ public Provided(K key, ProviderId providerId) {
+ this.key = key;
+ this.providerId = providerId;
+ }
+
+ public ProviderId providerId() {
+ return providerId;
+ }
+
+ public K key() {
+ return key;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(key, providerId);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other instanceof Provided) {
+ Provided<K> that = (Provided) other;
+ return Objects.equals(key, that.key) &&
+ Objects.equals(providerId, that.providerId);
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(getClass())
+ .add("key", key)
+ .add("providerId", providerId)
+ .toString();
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/package-info.java
new file mode 100644
index 00000000..97f2ccae
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/link/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of distributed link store using p2p synchronization protocol.
+ */
+package org.onosproject.store.link.impl;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/mastership/impl/ConsistentDeviceMastershipStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/mastership/impl/ConsistentDeviceMastershipStore.java
new file mode 100644
index 00000000..c6fc6933
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/mastership/impl/ConsistentDeviceMastershipStore.java
@@ -0,0 +1,419 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.mastership.impl;
+
+import static org.onlab.util.Tools.groupedThreads;
+import static org.onlab.util.Tools.futureGetOrElse;
+import static org.onosproject.mastership.MastershipEvent.Type.BACKUPS_CHANGED;
+import static org.onosproject.mastership.MastershipEvent.Type.MASTER_CHANGED;
+import static org.slf4j.LoggerFactory.getLogger;
+import static com.google.common.base.Preconditions.checkArgument;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.KryoNamespace;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.Leadership;
+import org.onosproject.cluster.LeadershipEvent;
+import org.onosproject.cluster.LeadershipEventListener;
+import org.onosproject.cluster.LeadershipService;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.cluster.RoleInfo;
+import org.onosproject.mastership.MastershipEvent;
+import org.onosproject.mastership.MastershipStore;
+import org.onosproject.mastership.MastershipStoreDelegate;
+import org.onosproject.mastership.MastershipTerm;
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.MastershipRole;
+import org.onosproject.store.AbstractStore;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.cluster.messaging.MessageSubject;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.serializers.KryoSerializer;
+import org.onosproject.store.serializers.StoreSerializer;
+import org.slf4j.Logger;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+/**
+ * Implementation of the MastershipStore on top of Leadership Service.
+ */
+@Component(immediate = true, enabled = true)
+@Service
+public class ConsistentDeviceMastershipStore
+    extends AbstractStore<MastershipEvent, MastershipStoreDelegate>
+    implements MastershipStore {
+
+    private final Logger log = getLogger(getClass());
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected LeadershipService leadershipService;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected ClusterService clusterService;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected ClusterCommunicationService clusterCommunicator;
+
+    private NodeId localNodeId;
+    // Devices for which this node has entered the leadership contest
+    // (populated by requestRole, pruned by relinquishLocalRole).
+    // NOTE(review): read/written from SCR lifecycle and message-handler
+    // threads without visible synchronization -- confirm access is serialized.
+    private final Set<DeviceId> connectedDevices = Sets.newHashSet();
+
+    // Subject for asking a remote node to give up its (standby or master) role.
+    private static final MessageSubject ROLE_RELINQUISH_SUBJECT =
+        new MessageSubject("mastership-store-device-role-relinquish");
+    // Subject for asking the current master to step down to standby.
+    private static final MessageSubject TRANSITION_FROM_MASTER_TO_STANDBY_SUBJECT =
+        new MessageSubject("mastership-store-device-mastership-relinquish");
+
+    // Leadership topics for device mastership are of the form "device:<deviceId>";
+    // must stay in sync with createDeviceMastershipTopic below.
+    private static final Pattern DEVICE_MASTERSHIP_TOPIC_PATTERN =
+        Pattern.compile("device:(.*)");
+
+    private ExecutorService messageHandlingExecutor;
+    private ScheduledExecutorService transferExecutor;
+    private final LeadershipEventListener leadershipEventListener =
+        new InternalDeviceMastershipEventListener();
+
+    private static final String NODE_ID_NULL = "Node ID cannot be null";
+    private static final String DEVICE_ID_NULL = "Device ID cannot be null";
+    // Delay before the current master steps down after a forced handoff; see setMaster.
+    private static final int WAIT_BEFORE_MASTERSHIP_HANDOFF_MILLIS = 3000;
+
+    // Kryo serializer for cluster messages exchanged by this store.
+    public static final StoreSerializer SERIALIZER = new KryoSerializer() {
+        @Override
+        protected void setupKryoPool() {
+            serializerPool = KryoNamespace.newBuilder()
+                    .register(KryoNamespaces.API)
+                    .register(MastershipRole.class)
+                    .register(MastershipEvent.class)
+                    .register(MastershipEvent.Type.class)
+                    .build();
+        }
+    };
+
+    @Activate
+    public void activate() {
+        messageHandlingExecutor =
+                Executors.newSingleThreadExecutor(
+                        groupedThreads("onos/store/device/mastership", "message-handler"));
+        transferExecutor =
+                Executors.newSingleThreadScheduledExecutor(
+                        groupedThreads("onos/store/device/mastership", "mastership-transfer-executor"));
+        // Handle remote requests: role relinquish and master-to-standby transition.
+        clusterCommunicator.addSubscriber(ROLE_RELINQUISH_SUBJECT,
+                SERIALIZER::decode,
+                this::relinquishLocalRole,
+                SERIALIZER::encode,
+                messageHandlingExecutor);
+        clusterCommunicator.addSubscriber(TRANSITION_FROM_MASTER_TO_STANDBY_SUBJECT,
+                SERIALIZER::decode,
+                this::transitionFromMasterToStandby,
+                SERIALIZER::encode,
+                messageHandlingExecutor);
+        localNodeId = clusterService.getLocalNode().id();
+        leadershipService.addListener(leadershipEventListener);
+
+        log.info("Started");
+    }
+
+    @Deactivate
+    public void deactivate() {
+        clusterCommunicator.removeSubscriber(ROLE_RELINQUISH_SUBJECT);
+        clusterCommunicator.removeSubscriber(TRANSITION_FROM_MASTER_TO_STANDBY_SUBJECT);
+        messageHandlingExecutor.shutdown();
+        transferExecutor.shutdown();
+        leadershipService.removeListener(leadershipEventListener);
+
+        log.info("Stopped");
+    }
+
+    @Override
+    public CompletableFuture<MastershipRole> requestRole(DeviceId deviceId) {
+        checkArgument(deviceId != null, DEVICE_ID_NULL);
+
+        String leadershipTopic = createDeviceMastershipTopic(deviceId);
+        // First request for this device: enter the leadership contest for its topic.
+        if (connectedDevices.add(deviceId)) {
+            return leadershipService.runForLeadership(leadershipTopic)
+                .thenApply(leadership -> {
+                    return Objects.equal(localNodeId, leadership.leader())
+                            ? MastershipRole.MASTER : MastershipRole.STANDBY;
+                });
+        } else {
+            // Already contesting: answer from the current leadership state.
+            NodeId leader = leadershipService.getLeader(leadershipTopic);
+            if (Objects.equal(localNodeId, leader)) {
+                return CompletableFuture.completedFuture(MastershipRole.MASTER);
+            } else {
+                return CompletableFuture.completedFuture(MastershipRole.STANDBY);
+            }
+        }
+    }
+
+    @Override
+    public MastershipRole getRole(NodeId nodeId, DeviceId deviceId) {
+        checkArgument(nodeId != null, NODE_ID_NULL);
+        checkArgument(deviceId != null, DEVICE_ID_NULL);
+
+        // MASTER == topic leader; STANDBY == non-leading candidate; NONE otherwise.
+        String leadershipTopic = createDeviceMastershipTopic(deviceId);
+        NodeId leader = leadershipService.getLeader(leadershipTopic);
+        if (Objects.equal(nodeId, leader)) {
+            return MastershipRole.MASTER;
+        }
+        return leadershipService.getCandidates(leadershipTopic).contains(nodeId) ?
+                MastershipRole.STANDBY : MastershipRole.NONE;
+    }
+
+    @Override
+    public NodeId getMaster(DeviceId deviceId) {
+        checkArgument(deviceId != null, DEVICE_ID_NULL);
+
+        // The master is simply the leader of the device's mastership topic.
+        String leadershipTopic = createDeviceMastershipTopic(deviceId);
+        return leadershipService.getLeader(leadershipTopic);
+    }
+
+    @Override
+    public RoleInfo getNodes(DeviceId deviceId) {
+        checkArgument(deviceId != null, DEVICE_ID_NULL);
+
+        // Classify every cluster node by its role for this device.
+        Map<NodeId, MastershipRole> roles = Maps.newHashMap();
+        clusterService
+            .getNodes()
+            .forEach((node) -> roles.put(node.id(), getRole(node.id(), deviceId)));
+
+        NodeId master = null;
+        final List<NodeId> standbys = Lists.newLinkedList();
+
+        List<NodeId> candidates = leadershipService.getCandidates(createDeviceMastershipTopic(deviceId));
+
+        for (Map.Entry<NodeId, MastershipRole> entry : roles.entrySet()) {
+            if (entry.getValue() == MastershipRole.MASTER) {
+                master = entry.getKey();
+            } else if (entry.getValue() == MastershipRole.STANDBY) {
+                standbys.add(entry.getKey());
+            }
+        }
+
+        // Order standbys by their position in the candidate list, which reflects
+        // the leadership service's succession preference.
+        List<NodeId> sortedStandbyList = candidates.stream().filter(standbys::contains).collect(Collectors.toList());
+
+        return new RoleInfo(master, sortedStandbyList);
+    }
+
+    @Override
+    public Set<DeviceId> getDevices(NodeId nodeId) {
+        checkArgument(nodeId != null, NODE_ID_NULL);
+
+        // Devices mastered by a node == device-mastership topics it leads.
+        return leadershipService
+                .ownedTopics(nodeId)
+                .stream()
+                .filter(this::isDeviceMastershipTopic)
+                .map(this::extractDeviceIdFromTopic)
+                .collect(Collectors.toSet());
+    }
+
+    @Override
+    public CompletableFuture<MastershipEvent> setMaster(NodeId nodeId, DeviceId deviceId) {
+        checkArgument(nodeId != null, NODE_ID_NULL);
+        checkArgument(deviceId != null, DEVICE_ID_NULL);
+
+        NodeId currentMaster = getMaster(deviceId);
+        // Already master: nothing to do, no event to emit.
+        if (nodeId.equals(currentMaster)) {
+            return CompletableFuture.completedFuture(null);
+        } else {
+            String leadershipTopic = createDeviceMastershipTopic(deviceId);
+            List<NodeId> candidates = leadershipService.getCandidates(leadershipTopic);
+            if (candidates.isEmpty()) {
+                return CompletableFuture.completedFuture(null);
+            }
+            // Promote the target to top candidate, then (after a delay) have the
+            // current master step down so the target takes over.
+            if (leadershipService.makeTopCandidate(leadershipTopic, nodeId)) {
+                CompletableFuture<MastershipEvent> result = new CompletableFuture<>();
+                // There is brief wait before we step down from mastership.
+                // This is to ensure any work that happens when standby preference
+                // order changes can complete. For example: flow entries need to be backed
+                // to the new top standby (ONOS-1883)
+                // FIXME: This potentially introduces a race-condition.
+                // Right now role changes are only forced via CLI.
+                transferExecutor.schedule(() -> {
+                    result.complete(transitionFromMasterToStandby(deviceId));
+                }, WAIT_BEFORE_MASTERSHIP_HANDOFF_MILLIS, TimeUnit.MILLISECONDS);
+                return result;
+            } else {
+                log.warn("Failed to promote {} to mastership for {}", nodeId, deviceId);
+            }
+        }
+        return CompletableFuture.completedFuture(null);
+    }
+
+    @Override
+    public MastershipTerm getTermFor(DeviceId deviceId) {
+        checkArgument(deviceId != null, DEVICE_ID_NULL);
+
+        // The mastership term number is the leadership epoch for the topic.
+        String leadershipTopic = createDeviceMastershipTopic(deviceId);
+        Leadership leadership = leadershipService.getLeadership(leadershipTopic);
+        return leadership != null ? MastershipTerm.of(leadership.leader(), leadership.epoch()) : null;
+    }
+
+    @Override
+    public CompletableFuture<MastershipEvent> setStandby(NodeId nodeId, DeviceId deviceId) {
+        checkArgument(nodeId != null, NODE_ID_NULL);
+        checkArgument(deviceId != null, DEVICE_ID_NULL);
+
+        // Only meaningful when the node to demote is the current master.
+        NodeId currentMaster = getMaster(deviceId);
+        if (!nodeId.equals(currentMaster)) {
+            return CompletableFuture.completedFuture(null);
+        }
+
+        String leadershipTopic = createDeviceMastershipTopic(deviceId);
+        List<NodeId> candidates = leadershipService.getCandidates(leadershipTopic);
+
+        // Pick the first candidate other than the demoted node as the next master.
+        NodeId newMaster = candidates.stream()
+                .filter(candidate -> !Objects.equal(nodeId, candidate))
+                .findFirst()
+                .orElse(null);
+        log.info("Transitioning to role {} for {}. Next master: {}",
+                newMaster != null ? MastershipRole.STANDBY : MastershipRole.NONE, deviceId, newMaster);
+
+        if (newMaster != null) {
+            return setMaster(newMaster, deviceId);
+        }
+        // No successor available: the node fully relinquishes its role (NONE).
+        return relinquishRole(nodeId, deviceId);
+    }
+
+    @Override
+    public CompletableFuture<MastershipEvent> relinquishRole(NodeId nodeId, DeviceId deviceId) {
+        checkArgument(nodeId != null, NODE_ID_NULL);
+        checkArgument(deviceId != null, DEVICE_ID_NULL);
+
+        // Local case handled directly; otherwise forward to the owning node.
+        if (nodeId.equals(localNodeId)) {
+            return relinquishLocalRole(deviceId);
+        }
+
+        log.debug("Forwarding request to relinquish "
+                + "role for device {} to {}", deviceId, nodeId);
+        return clusterCommunicator.sendAndReceive(
+                deviceId,
+                ROLE_RELINQUISH_SUBJECT,
+                SERIALIZER::encode,
+                SERIALIZER::decode,
+                nodeId);
+    }
+
+    private CompletableFuture<MastershipEvent> relinquishLocalRole(DeviceId deviceId) {
+        checkArgument(deviceId != null, DEVICE_ID_NULL);
+
+        // Nothing to relinquish unless this node previously requested a role
+        // for the device (see requestRole).
+        if (!connectedDevices.contains(deviceId)) {
+            return CompletableFuture.completedFuture(null);
+        }
+
+        String leadershipTopic = createDeviceMastershipTopic(deviceId);
+        NodeId currentLeader = leadershipService.getLeader(leadershipTopic);
+
+        // Withdrawing as leader changes the master; withdrawing as a candidate
+        // only changes the backup set.
+        MastershipEvent.Type eventType = Objects.equal(currentLeader, localNodeId)
+                ? MastershipEvent.Type.MASTER_CHANGED
+                : MastershipEvent.Type.BACKUPS_CHANGED;
+
+        connectedDevices.remove(deviceId);
+        return leadershipService.withdraw(leadershipTopic)
+                .thenApply(v -> new MastershipEvent(eventType, deviceId, getNodes(deviceId)));
+    }
+
+    private MastershipEvent transitionFromMasterToStandby(DeviceId deviceId) {
+        checkArgument(deviceId != null, DEVICE_ID_NULL);
+
+        NodeId currentMaster = getMaster(deviceId);
+        if (currentMaster == null) {
+            return null;
+        }
+
+        // Only the master itself can step down; forward the request otherwise.
+        if (!currentMaster.equals(localNodeId)) {
+            log.info("Forwarding request to relinquish "
+                    + "mastership for device {} to {}", deviceId, currentMaster);
+            return futureGetOrElse(clusterCommunicator.sendAndReceive(
+                    deviceId,
+                    TRANSITION_FROM_MASTER_TO_STANDBY_SUBJECT,
+                    SERIALIZER::encode,
+                    SERIALIZER::decode,
+                    currentMaster), null);
+        }
+
+        // Stepping down keeps this node as a candidate (standby), unlike withdraw.
+        return leadershipService.stepdown(createDeviceMastershipTopic(deviceId))
+                ? new MastershipEvent(MastershipEvent.Type.MASTER_CHANGED, deviceId, getNodes(deviceId)) : null;
+    }
+
+    @Override
+    public void relinquishAllRole(NodeId nodeId) {
+        // Noop. LeadershipService already takes care of detecting and purging deadlocks.
+    }
+
+    /**
+     * Translates leadership events on device-mastership topics into
+     * mastership events for the store's delegate.
+     */
+    private class InternalDeviceMastershipEventListener implements LeadershipEventListener {
+        @Override
+        public void event(LeadershipEvent event) {
+            Leadership leadership = event.subject();
+            // Ignore leadership topics that are not device-mastership topics.
+            if (!isDeviceMastershipTopic(leadership.topic())) {
+                return;
+            }
+            DeviceId deviceId = extractDeviceIdFromTopic(leadership.topic());
+            switch (event.type()) {
+            case LEADER_ELECTED:
+                notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, getNodes(deviceId)));
+                break;
+            case LEADER_REELECTED:
+                // There is no concept of leader re-election in the new distributed leadership manager.
+                throw new IllegalStateException("Unexpected event type");
+            case LEADER_BOOTED:
+                notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, getNodes(deviceId)));
+                break;
+            case CANDIDATES_CHANGED:
+                notifyDelegate(new MastershipEvent(BACKUPS_CHANGED, deviceId, getNodes(deviceId)));
+                break;
+            default:
+                return;
+            }
+        }
+    }
+
+    // Builds the leadership topic for a device; format must stay in sync with
+    // DEVICE_MASTERSHIP_TOPIC_PATTERN above.
+    private String createDeviceMastershipTopic(DeviceId deviceId) {
+        return String.format("device:%s", deviceId.toString());
+    }
+
+    // Inverse of createDeviceMastershipTopic; throws if the topic does not match.
+    private DeviceId extractDeviceIdFromTopic(String topic) {
+        Matcher m = DEVICE_MASTERSHIP_TOPIC_PATTERN.matcher(topic);
+        if (m.matches()) {
+            return DeviceId.deviceId(m.group(1));
+        } else {
+            throw new IllegalArgumentException("Invalid device mastership topic: " + topic);
+        }
+    }
+
+    private boolean isDeviceMastershipTopic(String topic) {
+        Matcher m = DEVICE_MASTERSHIP_TOPIC_PATTERN.matcher(topic);
+        return m.matches();
+    }
+
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/mastership/impl/RoleValue.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/mastership/impl/RoleValue.java
new file mode 100644
index 00000000..9d3b1686
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/mastership/impl/RoleValue.java
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.mastership.impl;
+
+import static org.onosproject.net.MastershipRole.MASTER;
+import static org.onosproject.net.MastershipRole.NONE;
+import static org.onosproject.net.MastershipRole.STANDBY;
+
+import java.util.Collections;
+import java.util.EnumMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.onosproject.cluster.NodeId;
+import org.onosproject.cluster.RoleInfo;
+import org.onosproject.net.MastershipRole;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.base.MoreObjects.ToStringHelper;
+import com.google.common.collect.Lists;
+
+/**
+ * A structure that holds node mastership roles associated with a
+ * {@link org.onosproject.net.DeviceId}. This structure needs to be locked through IMap.
+ */
+final class RoleValue {
+
+    // Role -> ordered list of nodes holding that role. EnumMap keyed by
+    // MastershipRole; each role's list is kept free of duplicates by add().
+    protected final Map<MastershipRole, List<NodeId>> value = new EnumMap<>(MastershipRole.class);
+
+    /**
+     * Constructs empty RoleValue.
+     */
+    public RoleValue() {
+        value.put(MastershipRole.MASTER, new LinkedList<NodeId>());
+        value.put(MastershipRole.STANDBY, new LinkedList<NodeId>());
+        value.put(MastershipRole.NONE, new LinkedList<NodeId>());
+    }
+
+    /**
+     * Constructs copy of specified RoleValue.
+     *
+     * @param original original to create copy from
+     */
+    public RoleValue(final RoleValue original) {
+        value.put(MASTER, Lists.newLinkedList(original.value.get(MASTER)));
+        value.put(STANDBY, Lists.newLinkedList(original.value.get(STANDBY)));
+        value.put(NONE, Lists.newLinkedList(original.value.get(NONE)));
+    }
+
+    // exposing internals for serialization purpose only; the returned map is
+    // an unmodifiable view, but the contained lists are the live internals.
+    Map<MastershipRole, List<NodeId>> value() {
+        return Collections.unmodifiableMap(value);
+    }
+
+    /**
+     * Returns the (live, mutable) list of nodes holding the given role.
+     *
+     * @param type the role
+     * @return list of node IDs with that role
+     */
+    public List<NodeId> nodesOfRole(MastershipRole type) {
+        return value.get(type);
+    }
+
+    /**
+     * Returns the first node to match the MastershipRole, or if there
+     * are none, null.
+     *
+     * @param type the role
+     * @return a node ID or null
+     */
+    public NodeId get(MastershipRole type) {
+        return value.get(type).isEmpty() ? null : value.get(type).get(0);
+    }
+
+    /**
+     * Indicates whether the given node currently holds the given role.
+     *
+     * @param type the role
+     * @param nodeId the node to test
+     * @return true if the node is in the role's list
+     */
+    public boolean contains(MastershipRole type, NodeId nodeId) {
+        return value.get(type).contains(nodeId);
+    }
+
+    /**
+     * Returns the role of the given node, defaulting to NONE when the node
+     * is in neither the MASTER nor the STANDBY list.
+     *
+     * @param nodeId the node whose role to look up
+     * @return the node's mastership role
+     */
+    public MastershipRole getRole(NodeId nodeId) {
+        if (contains(MASTER, nodeId)) {
+            return MASTER;
+        }
+        if (contains(STANDBY, nodeId)) {
+            return STANDBY;
+        }
+        return NONE;
+    }
+
+    /**
+     * Associates a node to a certain role.
+     *
+     * @param type the role
+     * @param nodeId the node ID of the node to associate
+     * @return true if modified
+     */
+    public boolean add(MastershipRole type, NodeId nodeId) {
+        List<NodeId> nodes = value.get(type);
+
+        // Keep each role list duplicate-free.
+        if (!nodes.contains(nodeId)) {
+            return nodes.add(nodeId);
+        }
+        return false;
+    }
+
+    /**
+     * Removes a node from a certain role.
+     *
+     * @param type the role
+     * @param nodeId the ID of the node to remove
+     * @return true if modified
+     */
+    public boolean remove(MastershipRole type, NodeId nodeId) {
+        List<NodeId> nodes = value.get(type);
+        if (!nodes.isEmpty()) {
+            return nodes.remove(nodeId);
+        } else {
+            return false;
+        }
+    }
+
+    /**
+     * Reassigns a node from one role to another. If the node was not of the
+     * old role, it will still be assigned the new role.
+     *
+     * @param nodeId the Node ID of node changing roles
+     * @param from the old role
+     * @param to the new role
+     * @return true if modified
+     */
+    public boolean reassign(NodeId nodeId, MastershipRole from, MastershipRole to) {
+        boolean modified = remove(from, nodeId);
+        modified |= add(to, nodeId);
+        return modified;
+    }
+
+    /**
+     * Replaces a node in one role with another node. Even if there is no node to
+     * replace, the new node is associated to the role.
+     *
+     * @param from the old NodeId to replace
+     * @param to the new NodeId
+     * @param type the role associated with the old NodeId
+     * @return true if modified
+     */
+    public boolean replace(NodeId from, NodeId to, MastershipRole type) {
+        boolean modified = remove(type, from);
+        modified |= add(type, to);
+        return modified;
+    }
+
+    /**
+     * Summarizes this RoleValue as a RoleInfo. Note that master and/or backups
+     * may be empty, so the values should be checked for safety.
+     *
+     * @return the RoleInfo.
+     */
+    public RoleInfo roleInfo() {
+        return new RoleInfo(
+                get(MastershipRole.MASTER), nodesOfRole(MastershipRole.STANDBY));
+    }
+
+    @Override
+    public String toString() {
+        // Renders one entry per role, e.g. MASTER=[n1], STANDBY=[n2, n3].
+        ToStringHelper helper = MoreObjects.toStringHelper(this.getClass());
+        for (Map.Entry<MastershipRole, List<NodeId>> el : value.entrySet()) {
+            helper.add(el.getKey().toString(), el.getValue());
+        }
+        return helper.toString();
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/mastership/impl/RoleValueSerializer.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/mastership/impl/RoleValueSerializer.java
new file mode 100644
index 00000000..c81ea7f9
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/mastership/impl/RoleValueSerializer.java
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.mastership.impl;
+
+import java.util.List;
+import java.util.Map;
+
+import org.onosproject.cluster.NodeId;
+import org.onosproject.net.MastershipRole;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+
+/**
+ * Serializer for RoleValues used by {@link org.onosproject.mastership.MastershipStore}.
+ */
+public class RoleValueSerializer extends Serializer<RoleValue> {
+
+    //RoleValues are assumed to hold a Map of MastershipRoles (an enum)
+    //to a List of NodeIds.
+    //
+    // Wire format: [int roleCount] then per role
+    //   [int roleOrdinal][int nodeCount][String nodeId ...]
+    // NOTE(review): roles are encoded by enum ordinal, so reordering the
+    // MastershipRole enum breaks wire compatibility with older nodes.
+
+    /**
+     * Deserializes a RoleValue by rebuilding each role's node list via add().
+     */
+    @Override
+    public RoleValue read(Kryo kryo, Input input, Class<RoleValue> type) {
+        RoleValue rv = new RoleValue();
+        int size = input.readInt();
+        for (int i = 0; i < size; i++) {
+            MastershipRole role = MastershipRole.values()[input.readInt()];
+            int s = input.readInt();
+            for (int j = 0; j < s; j++) {
+                rv.add(role, new NodeId(input.readString()));
+            }
+        }
+        return rv;
+    }
+
+    /**
+     * Serializes a RoleValue role-by-role; node IDs are written as strings.
+     */
+    @Override
+    public void write(Kryo kryo, Output output, RoleValue type) {
+        final Map<MastershipRole, List<NodeId>> map = type.value();
+        output.writeInt(map.size());
+
+        for (Map.Entry<MastershipRole, List<NodeId>> el : map.entrySet()) {
+            output.writeInt(el.getKey().ordinal());
+
+            List<NodeId> nodes = el.getValue();
+            output.writeInt(nodes.size());
+            for (NodeId n : nodes) {
+                output.writeString(n.toString());
+            }
+        }
+    }
+
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/mastership/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/mastership/impl/package-info.java
new file mode 100644
index 00000000..40ff6f76
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/mastership/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of a distributed mastership store built on the leadership service.
+ */
+package org.onosproject.store.mastership.impl;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/newresource/impl/ConsistentResourceStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/newresource/impl/ConsistentResourceStore.java
new file mode 100644
index 00000000..648119e5
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/newresource/impl/ConsistentResourceStore.java
@@ -0,0 +1,349 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.newresource.impl;
+
+import com.google.common.annotations.Beta;
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onosproject.net.newresource.ResourceConsumer;
+import org.onosproject.net.newresource.ResourcePath;
+import org.onosproject.net.newresource.ResourceStore;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.service.ConsistentMap;
+import org.onosproject.store.service.Serializer;
+import org.onosproject.store.service.StorageService;
+import org.onosproject.store.service.TransactionContext;
+import org.onosproject.store.service.TransactionException;
+import org.onosproject.store.service.TransactionalMap;
+import org.onosproject.store.service.Versioned;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Implementation of ResourceStore using TransactionalMap.
+ */
+@Component(immediate = true, enabled = false)
+@Service
+@Beta
+public class ConsistentResourceStore implements ResourceStore {
+ private static final Logger log = LoggerFactory.getLogger(ConsistentResourceStore.class);
+
+ private static final String CONSUMER_MAP = "onos-resource-consumers";
+ private static final String CHILD_MAP = "onos-resource-children";
+ private static final Serializer SERIALIZER = Serializer.using(
+ Arrays.asList(KryoNamespaces.BASIC, KryoNamespaces.API));
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected StorageService service;
+
+ private ConsistentMap<ResourcePath, ResourceConsumer> consumerMap;
+ private ConsistentMap<ResourcePath, List<ResourcePath>> childMap;
+
+ @Activate
+ public void activate() {
+ consumerMap = service.<ResourcePath, ResourceConsumer>consistentMapBuilder()
+ .withName(CONSUMER_MAP)
+ .withSerializer(SERIALIZER)
+ .build();
+ childMap = service.<ResourcePath, List<ResourcePath>>consistentMapBuilder()
+ .withName(CHILD_MAP)
+ .withSerializer(SERIALIZER)
+ .build();
+ }
+
+ @Override
+ public Optional<ResourceConsumer> getConsumer(ResourcePath resource) {
+ checkNotNull(resource);
+
+ Versioned<ResourceConsumer> consumer = consumerMap.get(resource);
+ if (consumer == null) {
+ return Optional.empty();
+ }
+
+ return Optional.of(consumer.value());
+ }
+
+ @Override
+ public boolean register(List<ResourcePath> resources) {
+ checkNotNull(resources);
+
+ TransactionContext tx = service.transactionContextBuilder().build();
+ tx.begin();
+
+ try {
+ TransactionalMap<ResourcePath, List<ResourcePath>> childTxMap =
+ tx.getTransactionalMap(CHILD_MAP, SERIALIZER);
+
+ Map<ResourcePath, List<ResourcePath>> resourceMap = resources.stream()
+ .filter(x -> x.parent().isPresent())
+ .collect(Collectors.groupingBy(x -> x.parent().get()));
+
+ for (Map.Entry<ResourcePath, List<ResourcePath>> entry: resourceMap.entrySet()) {
+ if (!isRegistered(childTxMap, entry.getKey())) {
+ return abortTransaction(tx);
+ }
+
+ if (!appendValues(childTxMap, entry.getKey(), entry.getValue())) {
+ return abortTransaction(tx);
+ }
+ }
+
+ return commitTransaction(tx);
+ } catch (TransactionException e) {
+ log.error("Exception thrown, abort the transaction", e);
+ return abortTransaction(tx);
+ }
+ }
+
+ @Override
+ public boolean unregister(List<ResourcePath> resources) {
+ checkNotNull(resources);
+
+ TransactionContext tx = service.transactionContextBuilder().build();
+ tx.begin();
+
+ try {
+ TransactionalMap<ResourcePath, List<ResourcePath>> childTxMap =
+ tx.getTransactionalMap(CHILD_MAP, SERIALIZER);
+ TransactionalMap<ResourcePath, ResourceConsumer> consumerTxMap =
+ tx.getTransactionalMap(CONSUMER_MAP, SERIALIZER);
+
+ Map<ResourcePath, List<ResourcePath>> resourceMap = resources.stream()
+ .filter(x -> x.parent().isPresent())
+ .collect(Collectors.groupingBy(x -> x.parent().get()));
+
+ // even if one of the resources is allocated to a consumer,
+ // all unregistrations are regarded as failure
+ for (Map.Entry<ResourcePath, List<ResourcePath>> entry: resourceMap.entrySet()) {
+ if (entry.getValue().stream().anyMatch(x -> consumerTxMap.get(x) != null)) {
+ return abortTransaction(tx);
+ }
+
+ if (!removeValues(childTxMap, entry.getKey(), entry.getValue())) {
+ return abortTransaction(tx);
+ }
+ }
+
+ return commitTransaction(tx);
+ } catch (TransactionException e) {
+ log.error("Exception thrown, abort the transaction", e);
+ return abortTransaction(tx);
+ }
+ }
+
+ @Override
+ public boolean allocate(List<ResourcePath> resources, ResourceConsumer consumer) {
+ checkNotNull(resources);
+ checkNotNull(consumer);
+
+ TransactionContext tx = service.transactionContextBuilder().build();
+ tx.begin();
+
+ try {
+ TransactionalMap<ResourcePath, List<ResourcePath>> childTxMap =
+ tx.getTransactionalMap(CHILD_MAP, SERIALIZER);
+ TransactionalMap<ResourcePath, ResourceConsumer> consumerTxMap =
+ tx.getTransactionalMap(CONSUMER_MAP, SERIALIZER);
+
+ for (ResourcePath resource: resources) {
+ if (!isRegistered(childTxMap, resource)) {
+ return abortTransaction(tx);
+ }
+
+ ResourceConsumer oldValue = consumerTxMap.put(resource, consumer);
+ if (oldValue != null) {
+ return abortTransaction(tx);
+ }
+ }
+
+ return commitTransaction(tx);
+ } catch (TransactionException e) {
+ log.error("Exception thrown, abort the transaction", e);
+ return abortTransaction(tx);
+ }
+ }
+
+    // Atomically releases each resource from its positionally-paired consumer;
+    // the whole batch fails if any resource is not held by its listed consumer.
+    @Override
+    public boolean release(List<ResourcePath> resources, List<ResourceConsumer> consumers) {
+        checkNotNull(resources);
+        checkNotNull(consumers);
+        // resources and consumers are matched pairwise by position
+        checkArgument(resources.size() == consumers.size());
+
+        TransactionContext tx = service.transactionContextBuilder().build();
+        tx.begin();
+
+        try {
+            TransactionalMap<ResourcePath, ResourceConsumer> consumerTxMap =
+                tx.getTransactionalMap(CONSUMER_MAP, SERIALIZER);
+            Iterator<ResourcePath> resourceIte = resources.iterator();
+            Iterator<ResourceConsumer> consumerIte = consumers.iterator();
+
+            while (resourceIte.hasNext() && consumerIte.hasNext()) {
+                ResourcePath resource = resourceIte.next();
+                ResourceConsumer consumer = consumerIte.next();
+
+                // if this single release fails (because the resource is allocated
+                // to another consumer), the whole release fails
+                if (!consumerTxMap.remove(resource, consumer)) {
+                    return abortTransaction(tx);
+                }
+            }
+
+            return commitTransaction(tx);
+        } catch (TransactionException e) {
+            log.error("Exception thrown, abort the transaction", e);
+            return abortTransaction(tx);
+        }
+    }
+
+    // Returns every resource currently allocated to the given consumer.
+    @Override
+    public Collection<ResourcePath> getResources(ResourceConsumer consumer) {
+        checkNotNull(consumer);
+
+        // NOTE: scanning every entry may become a performance bottleneck
+        // TODO: revisit for better backend data structure
+        List<ResourcePath> allocated = new ArrayList<>();
+        for (Map.Entry<ResourcePath, Versioned<ResourceConsumer>> entry : consumerMap.entrySet()) {
+            if (entry.getValue().value().equals(consumer)) {
+                allocated.add(entry.getKey());
+            }
+        }
+        return allocated;
+    }
+
+    // Returns the allocated children of the parent whose last component has
+    // the requested type.
+    @Override
+    public <T> Collection<ResourcePath> getAllocatedResources(ResourcePath parent, Class<T> cls) {
+        checkNotNull(parent);
+        checkNotNull(cls);
+
+        Versioned<List<ResourcePath>> children = childMap.get(parent);
+        if (children == null) {
+            return Collections.emptyList();
+        }
+
+        // a child is reported only when its last component has the requested
+        // type and it is currently allocated (present in the consumer map)
+        List<ResourcePath> result = new ArrayList<>();
+        for (ResourcePath child : children.value()) {
+            if (child.lastComponent().getClass().equals(cls) && consumerMap.containsKey(child)) {
+                result.add(child);
+            }
+        }
+        return result;
+    }
+
+    /**
+     * Aborts the transaction.
+     *
+     * @param tx transaction context
+     * @return always false, so callers can write {@code return abortTransaction(tx);}
+     */
+    private boolean abortTransaction(TransactionContext tx) {
+        tx.abort();
+        return false;
+    }
+
+    /**
+     * Commits the transaction.
+     *
+     * @param tx transaction context
+     * @return always true, so callers can write {@code return commitTransaction(tx);}
+     */
+    private boolean commitTransaction(TransactionContext tx) {
+        tx.commit();
+        return true;
+    }
+
+    /**
+     * Appends the values to the existing values associated with the specified key.
+     * If the map already has all the given values, appending will not happen.
+     *
+     * @param map map holding multiple values for a key
+     * @param key key specifying values
+     * @param values values to be appended
+     * @param <K> type of the key
+     * @param <V> type of the element of the list
+     * @return true if the operation succeeds, false otherwise.
+     */
+    private <K, V> boolean appendValues(TransactionalMap<K, List<V>> map, K key, List<V> values) {
+        List<V> oldValues = map.get(key);
+        if (oldValues == null) {
+            // no previous mapping: insert atomically. putIfAbsent() returns null
+            // on success, or the concurrently-inserted value on failure.
+            // (replace() with a null expected value is not a valid way to express
+            // "insert if absent" under ConcurrentMap-style semantics.)
+            return map.putIfAbsent(key, new ArrayList<>(values)) == null;
+        }
+
+        LinkedHashSet<V> oldSet = new LinkedHashSet<>(oldValues);
+        if (oldSet.containsAll(values)) {
+            // don't write to map because all values are already stored
+            return true;
+        }
+
+        oldSet.addAll(values);
+        // version-checked replace; fails if the entry changed concurrently
+        return map.replace(key, oldValues, new ArrayList<>(oldSet));
+    }
+
+    /**
+     * Removes the values from the existing values associated with the specified key.
+     * If the map doesn't contain any of the given values, removal will not happen.
+     *
+     * @param map map holding multiple values for a key
+     * @param key key specifying values
+     * @param values values to be removed
+     * @param <K> type of the key
+     * @param <V> type of the element of the list
+     * @return true if the operation succeeds, false otherwise
+     */
+    private <K, V> boolean removeValues(TransactionalMap<K, List<V>> map, K key, List<V> values) {
+        List<V> oldValues = map.get(key);
+        if (oldValues == null) {
+            // nothing is associated with the key yet; record an explicit empty
+            // list atomically. (replace() with a null expected value is not a
+            // valid "insert if absent" under ConcurrentMap-style semantics.)
+            return map.putIfAbsent(key, new ArrayList<>()) == null;
+        }
+
+        LinkedHashSet<V> oldSet = new LinkedHashSet<>(oldValues);
+        if (values.stream().allMatch(x -> !oldSet.contains(x))) {
+            // don't write map because none of the values are stored
+            return true;
+        }
+
+        oldSet.removeAll(values);
+        // version-checked replace; fails if the entry changed concurrently
+        return map.replace(key, oldValues, new ArrayList<>(oldSet));
+    }
+
+    /**
+     * Checks if the specified resource is registered as a child of a resource in the map.
+     *
+     * @param map map storing parent - child relationship of resources
+     * @param resource resource to be checked
+     * @return true if the resource is registered, false otherwise.
+     */
+    private boolean isRegistered(TransactionalMap<ResourcePath, List<ResourcePath>> map, ResourcePath resource) {
+        // the root resource is registered by definition
+        if (resource.isRoot()) {
+            return true;
+        }
+
+        // otherwise the resource must appear in its parent's child list
+        List<ResourcePath> siblings = map.get(resource.parent().get());
+        if (siblings == null) {
+            return false;
+        }
+        return siblings.contains(resource);
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/newresource/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/newresource/impl/package-info.java
new file mode 100644
index 00000000..330d56c3
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/newresource/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of the network resource distributed store.
+ */
+package org.onosproject.store.newresource.impl; \ No newline at end of file
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/packet/impl/DistributedPacketStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/packet/impl/DistributedPacketStore.java
new file mode 100644
index 00000000..24ce2155
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/packet/impl/DistributedPacketStore.java
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.packet.impl;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.KryoNamespace;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.mastership.MastershipService;
+import org.onosproject.net.flow.TrafficSelector;
+import org.onosproject.net.packet.OutboundPacket;
+import org.onosproject.net.packet.PacketEvent;
+import org.onosproject.net.packet.PacketEvent.Type;
+import org.onosproject.net.packet.PacketRequest;
+import org.onosproject.net.packet.PacketStore;
+import org.onosproject.net.packet.PacketStoreDelegate;
+import org.onosproject.store.AbstractStore;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.cluster.messaging.MessageSubject;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.serializers.KryoSerializer;
+import org.onosproject.store.service.ConsistentMap;
+import org.onosproject.store.service.Serializer;
+import org.onosproject.store.service.StorageService;
+import org.onosproject.store.service.Versioned;
+import org.slf4j.Logger;
+
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import static org.onlab.util.Tools.groupedThreads;
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * Distributed packet store implementation allowing packets to be sent to
+ * remote instances.
+ */
+@Component(immediate = true)
+@Service
+public class DistributedPacketStore
+ extends AbstractStore<PacketEvent, PacketStoreDelegate>
+ implements PacketStore {
+
+ private final Logger log = getLogger(getClass());
+
+ // TODO: make this configurable.
+ private static final int MESSAGE_HANDLER_THREAD_POOL_SIZE = 4;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected MastershipService mastershipService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterService clusterService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterCommunicationService communicationService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected StorageService storageService;
+
+ private PacketRequestTracker tracker;
+
+ private static final MessageSubject PACKET_OUT_SUBJECT =
+ new MessageSubject("packet-out");
+
+ private static final KryoSerializer SERIALIZER = new KryoSerializer() {
+ @Override
+ protected void setupKryoPool() {
+ serializerPool = KryoNamespace.newBuilder()
+ .register(KryoNamespaces.API)
+ .nextId(KryoNamespaces.BEGIN_USER_CUSTOM_ID)
+ .build();
+ }
+ };
+
+ private ExecutorService messageHandlingExecutor;
+
+    @Activate
+    public void activate() {
+        // dedicated pool for decoding and dispatching packet-out messages
+        messageHandlingExecutor = Executors.newFixedThreadPool(
+                MESSAGE_HANDLER_THREAD_POOL_SIZE,
+                groupedThreads("onos/store/packet", "message-handlers"));
+
+        // packets unicast from other instances are emitted locally via the delegate
+        communicationService.<OutboundPacket>addSubscriber(PACKET_OUT_SUBJECT,
+                SERIALIZER::decode,
+                packet -> notifyDelegate(new PacketEvent(Type.EMIT, packet)),
+                messageHandlingExecutor);
+
+        tracker = new PacketRequestTracker();
+
+        log.info("Started");
+    }
+
+    @Deactivate
+    public void deactivate() {
+        communicationService.removeSubscriber(PACKET_OUT_SUBJECT);
+        // NOTE(review): shutdown() does not wait for in-flight handlers;
+        // consider awaitTermination if ordered teardown matters -- TODO confirm
+        messageHandlingExecutor.shutdown();
+        log.info("Stopped");
+    }
+
+ @Override
+ public void emit(OutboundPacket packet) {
+ NodeId myId = clusterService.getLocalNode().id();
+ NodeId master = mastershipService.getMasterFor(packet.sendThrough());
+
+ if (master == null) {
+ return;
+ }
+
+ if (myId.equals(master)) {
+ notifyDelegate(new PacketEvent(Type.EMIT, packet));
+ return;
+ }
+
+ communicationService.unicast(packet, PACKET_OUT_SUBJECT, SERIALIZER::encode, master)
+ .whenComplete((r, error) -> {
+ if (error != null) {
+ log.warn("Failed to send packet-out to {}", master, error);
+ }
+ });
+ }
+
+    // Records a packet request; returns false if an identical request already exists.
+    @Override
+    public boolean requestPackets(PacketRequest request) {
+        return tracker.add(request);
+    }
+
+    // Withdraws a packet request; returns false if it was not present.
+    @Override
+    public boolean cancelPackets(PacketRequest request) {
+        return tracker.remove(request);
+    }
+
+    // Returns a snapshot of all outstanding packet requests.
+    @Override
+    public Set<PacketRequest> existingRequests() {
+        return tracker.requests();
+    }
+
+    // Tracks outstanding packet requests in a cluster-wide consistent map,
+    // keyed by traffic selector. Updates use optimistic version checks.
+    private class PacketRequestTracker {
+
+        private ConsistentMap<TrafficSelector, Set<PacketRequest>> requests;
+
+        public PacketRequestTracker() {
+            requests = storageService.<TrafficSelector, Set<PacketRequest>>consistentMapBuilder()
+                    .withName("onos-packet-requests")
+                    .withPartitionsDisabled()
+                    .withSerializer(Serializer.using(KryoNamespaces.API))
+                    .build();
+        }
+
+        // Adds a request; returns false if an identical request already exists
+        // or an optimistic update loses a concurrent race.
+        public boolean add(PacketRequest request) {
+            Versioned<Set<PacketRequest>> old = requests.get(request.selector());
+            if (old != null && old.value().contains(request)) {
+                return false;
+            }
+            // FIXME: add retry logic using a random delay
+            Set<PacketRequest> newSet = new HashSet<>();
+            newSet.add(request);
+            if (old == null) {
+                // first request for this selector; fails if another node won the race
+                return requests.putIfAbsent(request.selector(), newSet) == null;
+            }
+            newSet.addAll(old.value());
+            // version-checked replace: fails if the entry changed concurrently
+            return requests.replace(request.selector(), old.version(), newSet);
+        }
+
+        // Removes a request; returns false if it was absent or a concurrent
+        // update intervened.
+        public boolean remove(PacketRequest request) {
+            Versioned<Set<PacketRequest>> old = requests.get(request.selector());
+            if (old == null || !old.value().contains(request)) {
+                return false;
+            }
+            // FIXME: add retry logic using a random delay
+            Set<PacketRequest> newSet = new HashSet<>(old.value());
+            newSet.remove(request);
+            if (newSet.isEmpty()) {
+                // last request for this selector; drop the entry entirely
+                return requests.remove(request.selector(), old.version());
+            }
+            return requests.replace(request.selector(), old.version(), newSet);
+        }
+
+        // Returns a flattened snapshot of all requests across all selectors.
+        public Set<PacketRequest> requests() {
+            ImmutableSet.Builder<PacketRequest> builder = ImmutableSet.builder();
+            requests.values().forEach(v -> builder.addAll(v.value()));
+            return builder.build();
+        }
+
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/packet/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/packet/impl/package-info.java
new file mode 100644
index 00000000..43282583
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/packet/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of distributed packet store.
+ */
+package org.onosproject.store.packet.impl;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/proxyarp/impl/DistributedProxyArpStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/proxyarp/impl/DistributedProxyArpStore.java
new file mode 100644
index 00000000..851185b5
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/proxyarp/impl/DistributedProxyArpStore.java
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.proxyarp.impl;
+
+import com.google.common.collect.Maps;
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.KryoNamespace;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.mastership.MastershipService;
+import org.onosproject.net.ConnectPoint;
+import org.onosproject.net.Host;
+import org.onosproject.net.HostId;
+import org.onosproject.net.host.HostEvent;
+import org.onosproject.net.host.HostListener;
+import org.onosproject.net.host.HostService;
+import org.onosproject.net.proxyarp.ProxyArpStore;
+import org.onosproject.net.proxyarp.ProxyArpStoreDelegate;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.cluster.messaging.MessageSubject;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.serializers.KryoSerializer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.ByteBuffer;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+
+import static org.onlab.util.BoundedThreadPool.newFixedThreadPool;
+import static org.onlab.util.Tools.groupedThreads;
+
+/**
+ * Implementation of proxy ARP distribution mechanism.
+ */
+@Component(immediate = true)
+@Service
+public class DistributedProxyArpStore implements ProxyArpStore {
+
+ private Logger log = LoggerFactory.getLogger(getClass());
+
+ private static final MessageSubject ARP_RESPONSE_MESSAGE =
+ new MessageSubject("onos-arp-response");
+
+    // Serializer for inter-node ARP response messages; extends the core API
+    // namespace with the message type and its ByteBuffer payload.
+    protected final KryoSerializer serializer = new KryoSerializer() {
+        @Override
+        protected void setupKryoPool() {
+            serializerPool = KryoNamespace.newBuilder()
+                    .register(KryoNamespaces.API)
+                    .register(ArpResponseMessage.class)
+                    .register(ByteBuffer.class)
+                    .build();
+        }
+    };
+
+ private ProxyArpStoreDelegate delegate;
+
+ private Map<HostId, ArpResponseMessage> pendingMessages = Maps.newConcurrentMap();
+
+ private ExecutorService executor =
+ newFixedThreadPool(4, groupedThreads("onos/arp", "sender-%d"));
+
+ private NodeId localNodeId;
+
+ private HostListener hostListener = new InternalHostListener();
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected MastershipService mastershipService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterService clusterService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClusterCommunicationService commService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected HostService hostService;
+
+
+    @Activate
+    protected void activate() {
+        localNodeId = clusterService.getLocalNode().id();
+        // watch host discovery so queued ARP responses can be flushed
+        hostService.addListener(hostListener);
+        commService.addSubscriber(ARP_RESPONSE_MESSAGE, serializer::decode,
+                this::processArpResponse, executor);
+        log.info("Started");
+    }
+
+    @Deactivate
+    protected void deactivate() {
+        commService.removeSubscriber(ARP_RESPONSE_MESSAGE);
+        hostService.removeListener(hostListener);
+        // NOTE(review): executor is left running on deactivate -- confirm
+        // whether it should be shut down here
+        log.info("Stopped");
+    }
+
+    /**
+     * Forwards an ARP response toward the device hosting the out port: emits
+     * it locally when this node is the master, otherwise unicasts it to the
+     * master instance.
+     */
+    @Override
+    public void forward(ConnectPoint outPort, Host subject, ByteBuffer packet) {
+        NodeId nodeId = mastershipService.getMasterFor(outPort.deviceId());
+        if (nodeId == null) {
+            // no current master for the device; drop rather than NPE below
+            log.warn("No master for {}; dropping ARP response for {}",
+                     outPort.deviceId(), subject.id());
+            return;
+        }
+        if (nodeId.equals(localNodeId)) {
+            if (delegate != null) {
+                delegate.emitResponse(outPort, packet);
+            }
+        } else {
+            log.info("Forwarding ARP response from {} to {}", subject.id(), outPort);
+            // assumes packet is array-backed; packet.array() throws otherwise -- TODO confirm
+            commService.unicast(new ArpResponseMessage(outPort, subject, packet.array()),
+                                ARP_RESPONSE_MESSAGE, serializer::encode, nodeId);
+        }
+    }
+
+    @Override
+    public void setDelegate(ProxyArpStoreDelegate delegate) {
+        // delegate is used to emit ARP responses on the local data plane
+        this.delegate = delegate;
+    }
+
+    // Processes the incoming ARP response message: queue it first, then flush
+    // immediately if the host is already known. Queuing before the host check
+    // keeps the message visible to a concurrent host event.
+    private void processArpResponse(ArpResponseMessage msg) {
+        pendingMessages.put(msg.subject.id(), msg);
+        if (hostService.getHost(msg.subject.id()) != null) {
+            checkPendingArps(msg.subject.id());
+        }
+        // FIXME: figure out pruning so stuff does not build up
+    }
+
+    // Emits a pending ARP response for the given host via the delegate, if
+    // both a queued message and a delegate are available. The message is
+    // dequeued regardless.
+    private void checkPendingArps(HostId id) {
+        ArpResponseMessage msg = pendingMessages.remove(id);
+        if (msg == null || delegate == null) {
+            return;
+        }
+        log.info("Emitting ARP response from {} to {}", id, msg.outPort);
+        delegate.emitResponse(msg.outPort, ByteBuffer.wrap(msg.packet));
+    }
+
+    // Message carrying an ARP response between cluster instances.
+    private static class ArpResponseMessage {
+        private ConnectPoint outPort;
+        private Host subject;
+        private byte[] packet;
+
+        public ArpResponseMessage(ConnectPoint outPort, Host subject, byte[] packet) {
+            this.outPort = outPort;
+            this.subject = subject;
+            this.packet = packet;
+        }
+
+        // no-arg constructor for deserialization (Kryo)
+        private ArpResponseMessage() {
+        }
+    }
+
+    // Flushes any queued ARP response when the corresponding host appears.
+    private class InternalHostListener implements HostListener {
+        @Override
+        public void event(HostEvent event) {
+            checkPendingArps(event.subject().id());
+        }
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/resource/impl/ConsistentDeviceResourceStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/resource/impl/ConsistentDeviceResourceStore.java
new file mode 100644
index 00000000..3266e96c
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/resource/impl/ConsistentDeviceResourceStore.java
@@ -0,0 +1,225 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.resource.impl;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.Port;
+import org.onosproject.net.device.DeviceService;
+import org.onosproject.net.intent.IntentId;
+import org.onosproject.net.resource.device.DeviceResourceStore;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.service.ConsistentMap;
+import org.onosproject.store.service.Serializer;
+import org.onosproject.store.service.StorageService;
+import org.onosproject.store.service.TransactionContext;
+import org.onosproject.store.service.TransactionalMap;
+import org.onosproject.store.service.Versioned;
+import org.slf4j.Logger;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static org.slf4j.LoggerFactory.getLogger;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Store that manages device resources using Copycat-backed TransactionalMaps.
+ */
+@Component(immediate = true, enabled = true)
+@Service
+public class ConsistentDeviceResourceStore implements DeviceResourceStore {
+ private final Logger log = getLogger(getClass());
+
+ private static final String PORT_ALLOCATIONS = "PortAllocations";
+ private static final String INTENT_MAPPING = "IntentMapping";
+ private static final String INTENT_ALLOCATIONS = "PortIntentAllocations";
+
+ private static final Serializer SERIALIZER = Serializer.using(KryoNamespaces.API);
+
+ private ConsistentMap<Port, IntentId> portAllocMap;
+ private ConsistentMap<IntentId, Set<Port>> intentAllocMap;
+ private ConsistentMap<IntentId, Set<IntentId>> intentMapping;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected StorageService storageService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected DeviceService deviceService;
+
+    @Activate
+    public void activate() {
+        // port -> allocating intent
+        portAllocMap = storageService.<Port, IntentId>consistentMapBuilder()
+                .withName(PORT_ALLOCATIONS)
+                .withSerializer(SERIALIZER)
+                .build();
+        // intent -> set of ports it holds
+        intentAllocMap = storageService.<IntentId, Set<Port>>consistentMapBuilder()
+                .withName(INTENT_ALLOCATIONS)
+                .withSerializer(SERIALIZER)
+                .build();
+        // intent -> dependent intents mapped to it
+        intentMapping = storageService.<IntentId, Set<IntentId>>consistentMapBuilder()
+                .withName(INTENT_MAPPING)
+                .withSerializer(SERIALIZER)
+                .build();
+        log.info("Started");
+    }
+
+    @Deactivate
+    public void deactivate() {
+        // maps are managed by the storage service; nothing to tear down here
+        log.info("Stopped");
+    }
+
+    // Transactional view of the port-allocation map within the given transaction.
+    private TransactionalMap<Port, IntentId> getPortAllocs(TransactionContext tx) {
+        return tx.getTransactionalMap(PORT_ALLOCATIONS, SERIALIZER);
+    }
+
+    // Transactional view of the intent-allocation map within the given transaction.
+    private TransactionalMap<IntentId, Set<Port>> getIntentAllocs(TransactionContext tx) {
+        return tx.getTransactionalMap(INTENT_ALLOCATIONS, SERIALIZER);
+    }
+
+    // Builds a fresh transaction context for a multi-map update.
+    private TransactionContext getTxContext() {
+        return storageService.transactionContextBuilder().build();
+    }
+
+    // Returns the ports of the given device that have no intent allocation.
+    @Override
+    public Set<Port> getFreePorts(DeviceId deviceId) {
+        checkNotNull(deviceId);
+
+        Set<Port> free = new HashSet<>();
+        for (Port candidate : deviceService.getPorts(deviceId)) {
+            // skip ports already claimed by an intent
+            if (portAllocMap.containsKey(candidate)) {
+                continue;
+            }
+            free.add(candidate);
+        }
+        return free;
+    }
+
+    /**
+     * Atomically allocates the given ports to the intent. Either all ports are
+     * allocated or none are; any conflict rolls the transaction back.
+     *
+     * @param ports ports to allocate (must be non-empty)
+     * @param intentId allocating intent
+     * @return true on success, false if any port was already taken or the
+     *         transaction failed
+     */
+    @Override
+    public boolean allocatePorts(Set<Port> ports, IntentId intentId) {
+        checkNotNull(ports);
+        checkArgument(!ports.isEmpty());
+        checkNotNull(intentId);
+
+        TransactionContext tx = getTxContext();
+        tx.begin();
+        try {
+            TransactionalMap<Port, IntentId> portAllocs = getPortAllocs(tx);
+            for (Port port : ports) {
+                if (portAllocs.putIfAbsent(port, intentId) != null) {
+                    // unchecked exception instead of raw Exception; still caught
+                    // below to trigger the rollback
+                    throw new IllegalStateException("Port already allocated " + port.toString());
+                }
+            }
+
+            TransactionalMap<IntentId, Set<Port>> intentAllocs = getIntentAllocs(tx);
+            intentAllocs.put(intentId, ports);
+            tx.commit();
+        } catch (Exception e) {
+            log.error("Exception thrown, rolling back", e);
+            tx.abort();
+            return false;
+        }
+
+        return true;
+    }
+
+    /**
+     * Returns the ports allocated to the given intent, or an empty set when
+     * the intent holds no allocation.
+     */
+    @Override
+    public Set<Port> getAllocations(IntentId intentId) {
+        // single lookup: the original containsKey branch computed
+        // Collections.emptySet() without returning it, then NPE'd on get()
+        Versioned<Set<Port>> ports = intentAllocMap.get(intentId);
+        if (ports == null) {
+            return Collections.emptySet();
+        }
+        return ports.value();
+    }
+
+    /**
+     * Returns the intent allocated to the given port, or null when the port
+     * is free.
+     */
+    @Override
+    public IntentId getAllocations(Port port) {
+        // single lookup instead of containsKey-then-get: on a distributed map
+        // the entry can vanish between the two calls and NPE
+        Versioned<IntentId> allocation = portAllocMap.get(port);
+        return allocation == null ? null : allocation.value();
+    }
+
+    // Returns the intents mapped to the given intent, or null when no mapping
+    // exists.
+    @Override
+    public Set<IntentId> getMapping(IntentId intentId) {
+        Versioned<Set<IntentId>> result = intentMapping.get(intentId);
+        if (result == null) {
+            return null;
+        }
+        return result.value();
+    }
+
+    /**
+     * Adds valIntentId to the set of intents mapped to keyIntentId.
+     *
+     * @return always true
+     */
+    @Override
+    public boolean allocateMapping(IntentId keyIntentId, IntentId valIntentId) {
+        Versioned<Set<IntentId>> versionedIntents = intentMapping.get(keyIntentId);
+
+        if (versionedIntents == null) {
+            Set<IntentId> newSet = new HashSet<>();
+            newSet.add(valIntentId);
+            intentMapping.put(keyIntentId, newSet);
+        } else {
+            // mutate a copy and write it back: the set returned by value() is a
+            // local deserialized copy, so in-place mutation is never persisted
+            Set<IntentId> newSet = new HashSet<>(versionedIntents.value());
+            newSet.add(valIntentId);
+            intentMapping.put(keyIntentId, newSet);
+        }
+
+        return true;
+    }
+
+    /**
+     * Removes the given intent from the first mapping that contains it.
+     */
+    @Override
+    public void releaseMapping(IntentId intentId) {
+        for (IntentId intent : intentMapping.keySet()) {
+            // TODO: optimize by checking for identical src & dst
+            Set<IntentId> mapping = new HashSet<>(intentMapping.get(intent).value());
+            if (mapping.remove(intentId)) {
+                // write the modified set back: mutating the deserialized copy
+                // returned by value() does not update the distributed map
+                intentMapping.put(intent, mapping);
+                return;
+            }
+        }
+    }
+
+    /**
+     * Releases all ports held by the given intent. Releasing an intent that
+     * holds no ports is treated as a successful no-op.
+     *
+     * @param intentId intent whose ports are released
+     * @return true on success, false if the transaction failed
+     */
+    @Override
+    public boolean releasePorts(IntentId intentId) {
+        checkNotNull(intentId);
+
+        TransactionContext tx = getTxContext();
+        tx.begin();
+        try {
+            TransactionalMap<IntentId, Set<Port>> intentAllocs = getIntentAllocs(tx);
+            Set<Port> ports = intentAllocs.get(intentId);
+            if (ports == null) {
+                // nothing allocated to this intent; avoid the NPE the
+                // unconditional iteration below would cause
+                tx.commit();
+                return true;
+            }
+            intentAllocs.remove(intentId);
+
+            TransactionalMap<Port, IntentId> portAllocs = getPortAllocs(tx);
+            for (Port port : ports) {
+                portAllocs.remove(port);
+            }
+            tx.commit();
+        } catch (Exception e) {
+            log.error("Exception thrown, rolling back", e);
+            tx.abort();
+            return false;
+        }
+
+        return true;
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/resource/impl/ConsistentLinkResourceStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/resource/impl/ConsistentLinkResourceStore.java
new file mode 100644
index 00000000..ce25f868
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/resource/impl/ConsistentLinkResourceStore.java
@@ -0,0 +1,503 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.resource.impl;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.onlab.util.Bandwidth;
+import org.onosproject.net.OmsPort;
+import org.onosproject.net.device.DeviceService;
+import org.slf4j.Logger;
+import org.onlab.util.PositionalParameterStringFormatter;
+import org.onosproject.net.Link;
+import org.onosproject.net.LinkKey;
+import org.onosproject.net.Port;
+import org.onosproject.net.intent.IntentId;
+import org.onosproject.net.link.LinkService;
+import org.onosproject.net.resource.link.BandwidthResource;
+import org.onosproject.net.resource.link.BandwidthResourceAllocation;
+import org.onosproject.net.resource.link.LambdaResource;
+import org.onosproject.net.resource.link.LambdaResourceAllocation;
+import org.onosproject.net.resource.link.LinkResourceAllocations;
+import org.onosproject.net.resource.link.LinkResourceEvent;
+import org.onosproject.net.resource.link.LinkResourceStore;
+import org.onosproject.net.resource.link.LinkResourceStoreDelegate;
+import org.onosproject.net.resource.link.MplsLabel;
+import org.onosproject.net.resource.link.MplsLabelResourceAllocation;
+import org.onosproject.net.resource.ResourceAllocation;
+import org.onosproject.net.resource.ResourceAllocationException;
+import org.onosproject.net.resource.ResourceType;
+import org.onosproject.store.AbstractStore;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.service.ConsistentMap;
+import org.onosproject.store.service.Serializer;
+import org.onosproject.store.service.StorageService;
+import org.onosproject.store.service.TransactionContext;
+import org.onosproject.store.service.TransactionException;
+import org.onosproject.store.service.TransactionalMap;
+import org.onosproject.store.service.Versioned;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Preconditions.checkState;
+import static org.slf4j.LoggerFactory.getLogger;
+import static org.onosproject.net.AnnotationKeys.BANDWIDTH;
+
+/**
+ * Store that manages link resources using Copycat-backed TransactionalMaps.
+ */
+@Component(immediate = true, enabled = true)
+@Service
+public class ConsistentLinkResourceStore extends
+        AbstractStore<LinkResourceEvent, LinkResourceStoreDelegate> implements
+        LinkResourceStore {
+
+    private final Logger log = getLogger(getClass());
+
+    // Capacity assumed when a link carries no usable bandwidth annotation.
+    private static final BandwidthResource DEFAULT_BANDWIDTH = new BandwidthResource(Bandwidth.mbps(1_000));
+    // Sentinel capacity used when no bandwidth capacity is reported at all.
+    private static final BandwidthResource EMPTY_BW = new BandwidthResource(Bandwidth.bps(0));
+
+    // Smallest non-reserved MPLS label
+    private static final int MIN_UNRESERVED_LABEL = 0x10;
+    // Max non-reserved MPLS label = 239
+    private static final int MAX_UNRESERVED_LABEL = 0xEF;
+
+    // table to store current allocations
+    /** LinkKey -> List<LinkResourceAllocations>. */
+    private static final String LINK_RESOURCE_ALLOCATIONS = "LinkAllocations";
+
+    /** IntentId -> LinkResourceAllocations. */
+    private static final String INTENT_ALLOCATIONS = "LinkIntentAllocations";
+
+    private static final Serializer SERIALIZER = Serializer.using(KryoNamespaces.API);
+
+    // for reading committed values.
+    private ConsistentMap<IntentId, LinkResourceAllocations> intentAllocMap;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected StorageService storageService;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected LinkService linkService;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected DeviceService deviceService;
+
+    @Activate
+    public void activate() {
+        // Non-transactional view over the intent-allocations map, used by the
+        // read-only getAllocations() variants below.
+        intentAllocMap = storageService.<IntentId, LinkResourceAllocations>consistentMapBuilder()
+                .withName(INTENT_ALLOCATIONS)
+                .withSerializer(SERIALIZER)
+                .build();
+        log.info("Started");
+    }
+
+    @Deactivate
+    public void deactivate() {
+        log.info("Stopped");
+    }
+
+    /** Returns the intent -> allocations map bound to the given transaction. */
+    private TransactionalMap<IntentId, LinkResourceAllocations> getIntentAllocs(TransactionContext tx) {
+        return tx.getTransactionalMap(INTENT_ALLOCATIONS, SERIALIZER);
+    }
+
+    /** Returns the link -> allocations map bound to the given transaction. */
+    private TransactionalMap<LinkKey, List<LinkResourceAllocations>> getLinkAllocs(TransactionContext tx) {
+        return tx.getTransactionalMap(LINK_RESOURCE_ALLOCATIONS, SERIALIZER);
+    }
+
+    /** Builds a fresh transaction context from the storage service. */
+    private TransactionContext getTxContext() {
+        return storageService.transactionContextBuilder().build();
+    }
+
+    /**
+     * Returns the total capacity of the given resource type on a link,
+     * or an empty set for unsupported types.
+     */
+    private Set<? extends ResourceAllocation> getResourceCapacity(ResourceType type, Link link) {
+        if (type == ResourceType.BANDWIDTH) {
+            return ImmutableSet.of(getBandwidthResourceCapacity(link));
+        }
+        if (type == ResourceType.LAMBDA) {
+            return getLambdaResourceCapacity(link);
+        }
+        if (type == ResourceType.MPLS_LABEL) {
+            return getMplsResourceCapacity();
+        }
+        return ImmutableSet.of();
+    }
+
+    /**
+     * Enumerates the lambda channels of the link's source port.
+     * Only OMS ports expose lambdas; any other port type yields an empty set.
+     */
+    private Set<LambdaResourceAllocation> getLambdaResourceCapacity(Link link) {
+        Set<LambdaResourceAllocation> allocations = new HashSet<>();
+        Port port = deviceService.getPort(link.src().deviceId(), link.src().port());
+        if (port instanceof OmsPort) {
+            OmsPort omsPort = (OmsPort) port;
+
+            // Assume fixed grid for now
+            for (int i = 0; i < omsPort.totalChannels(); i++) {
+                allocations.add(new LambdaResourceAllocation(LambdaResource.valueOf(i)));
+            }
+        }
+        return allocations;
+    }
+
+    /**
+     * Determines a link's bandwidth capacity from its BANDWIDTH annotation
+     * (interpreted as Mbps), falling back to DEFAULT_BANDWIDTH when the
+     * annotation is missing or unparsable.
+     */
+    private BandwidthResourceAllocation getBandwidthResourceCapacity(Link link) {
+
+        // if Link annotation exist, use them
+        // if all fails, use DEFAULT_BANDWIDTH
+        BandwidthResource bandwidth = null;
+        String strBw = link.annotations().value(BANDWIDTH);
+        if (strBw != null) {
+            try {
+                bandwidth = new BandwidthResource(Bandwidth.mbps(Double.parseDouble(strBw)));
+            } catch (NumberFormatException e) {
+                // ignore a malformed annotation; the null triggers the
+                // default fallback below
+                bandwidth = null;
+            }
+        }
+
+        if (bandwidth == null) {
+            // fall back, use fixed default
+            bandwidth = DEFAULT_BANDWIDTH;
+        }
+        return new BandwidthResourceAllocation(bandwidth);
+    }
+
+    /** Enumerates the full non-reserved MPLS label space (0x10..0xEF). */
+    private Set<MplsLabelResourceAllocation> getMplsResourceCapacity() {
+        Set<MplsLabelResourceAllocation> allocations = new HashSet<>();
+        //Ignoring reserved labels of 0 through 15
+        for (int i = MIN_UNRESERVED_LABEL; i <= MAX_UNRESERVED_LABEL; i++) {
+            allocations.add(new MplsLabelResourceAllocation(MplsLabel
+                                    .valueOf(i)));
+
+        }
+        return allocations;
+    }
+
+    /** Collects the capacity of every supported resource type for a link. */
+    private Map<ResourceType, Set<? extends ResourceAllocation>> getResourceCapacity(Link link) {
+        Map<ResourceType, Set<? extends ResourceAllocation>> caps = new HashMap<>();
+        for (ResourceType type : ResourceType.values()) {
+            Set<? extends ResourceAllocation> cap = getResourceCapacity(type, link);
+            if (cap != null) {
+                caps.put(type, cap);
+            }
+        }
+        return caps;
+    }
+
+    @Override
+    public Set<ResourceAllocation> getFreeResources(Link link) {
+        TransactionContext tx = getTxContext();
+
+        tx.begin();
+        try {
+            Map<ResourceType, Set<? extends ResourceAllocation>> freeResources = getFreeResourcesEx(tx, link);
+            Set<ResourceAllocation> allFree = new HashSet<>();
+            freeResources.values().forEach(allFree::addAll);
+            return allFree;
+        } finally {
+            // read-only access: the transaction is always aborted, never
+            // committed
+            tx.abort();
+        }
+    }
+
+    /**
+     * Computes, per resource type, what remains free on a link:
+     * capacity minus everything currently recorded in the link's
+     * allocation list. Must run inside the supplied transaction.
+     */
+    private Map<ResourceType, Set<? extends ResourceAllocation>> getFreeResourcesEx(TransactionContext tx, Link link) {
+        checkNotNull(tx);
+        checkNotNull(link);
+
+        Map<ResourceType, Set<? extends ResourceAllocation>> free = new HashMap<>();
+        final Map<ResourceType, Set<? extends ResourceAllocation>> caps = getResourceCapacity(link);
+        final Iterable<LinkResourceAllocations> allocations = getAllocations(tx, link);
+
+        for (ResourceType type : ResourceType.values()) {
+            // there should be class/category of resources
+
+            switch (type) {
+            case BANDWIDTH:
+                Set<? extends ResourceAllocation> bw = caps.get(type);
+                if (bw == null || bw.isEmpty()) {
+                    // no capacity reported: treat as zero bandwidth
+                    bw = Sets.newHashSet(new BandwidthResourceAllocation(EMPTY_BW));
+                }
+
+                BandwidthResourceAllocation cap = (BandwidthResourceAllocation) bw.iterator().next();
+                double freeBw = cap.bandwidth().toDouble();
+
+                // enumerate current allocations, subtracting resources
+                for (LinkResourceAllocations alloc : allocations) {
+                    Set<ResourceAllocation> types = alloc.getResourceAllocation(link);
+                    for (ResourceAllocation a : types) {
+                        if (a instanceof BandwidthResourceAllocation) {
+                            BandwidthResourceAllocation bwA = (BandwidthResourceAllocation) a;
+                            freeBw -= bwA.bandwidth().toDouble();
+                        }
+                    }
+                }
+
+                free.put(type, Sets.newHashSet(
+                        new BandwidthResourceAllocation(new BandwidthResource(Bandwidth.bps(freeBw)))));
+                break;
+            case LAMBDA:
+                Set<? extends ResourceAllocation> lmd = caps.get(type);
+                if (lmd == null || lmd.isEmpty()) {
+                    // nothing left
+                    break;
+                }
+                Set<LambdaResourceAllocation> freeL = new HashSet<>();
+                for (ResourceAllocation r : lmd) {
+                    if (r instanceof LambdaResourceAllocation) {
+                        freeL.add((LambdaResourceAllocation) r);
+                    }
+                }
+
+                // enumerate current allocations, removing resources
+                for (LinkResourceAllocations alloc : allocations) {
+                    Set<ResourceAllocation> types = alloc.getResourceAllocation(link);
+                    for (ResourceAllocation a : types) {
+                        if (a instanceof LambdaResourceAllocation) {
+                            freeL.remove(a);
+                        }
+                    }
+                }
+
+                free.put(type, freeL);
+                break;
+            case MPLS_LABEL:
+                Set<? extends ResourceAllocation> mpls = caps.get(type);
+                if (mpls == null || mpls.isEmpty()) {
+                    // nothing left
+                    break;
+                }
+                Set<MplsLabelResourceAllocation> freeLabel = new HashSet<>();
+                for (ResourceAllocation r : mpls) {
+                    if (r instanceof MplsLabelResourceAllocation) {
+                        freeLabel.add((MplsLabelResourceAllocation) r);
+                    }
+                }
+
+                // enumerate current allocations, removing resources
+                for (LinkResourceAllocations alloc : allocations) {
+                    Set<ResourceAllocation> types = alloc.getResourceAllocation(link);
+                    for (ResourceAllocation a : types) {
+                        if (a instanceof MplsLabelResourceAllocation) {
+                            freeLabel.remove(a);
+                        }
+                    }
+                }
+
+                free.put(type, freeLabel);
+                break;
+            default:
+                log.debug("unsupported ResourceType {}", type);
+                break;
+            }
+        }
+        return free;
+    }
+
+    @Override
+    public void allocateResources(LinkResourceAllocations allocations) {
+        checkNotNull(allocations);
+        TransactionContext tx = getTxContext();
+
+        tx.begin();
+        try {
+            // record the intent-level allocation, then the per-link shares;
+            // any failure rolls back the whole transaction and is rethrown
+            TransactionalMap<IntentId, LinkResourceAllocations> intentAllocs = getIntentAllocs(tx);
+            intentAllocs.put(allocations.intentId(), allocations);
+            allocations.links().forEach(link -> allocateLinkResource(tx, link, allocations));
+            tx.commit();
+        } catch (Exception e) {
+            log.error("Exception thrown, rolling back", e);
+            tx.abort();
+            throw e;
+        }
+    }
+
+    /**
+     * Validates that every requested resource is still available on the link
+     * and, if so, appends the allocation to the link's allocation list.
+     *
+     * @throws ResourceAllocationException if a requested bandwidth amount,
+     *         lambda, or MPLS label is not available
+     * @throws TransactionException on a detected concurrent first allocation,
+     *         signalling the caller's transaction machinery to retry
+     */
+    private void allocateLinkResource(TransactionContext tx, Link link,
+                                      LinkResourceAllocations allocations) {
+        // requested resources
+        Set<ResourceAllocation> reqs = allocations.getResourceAllocation(link);
+        Map<ResourceType, Set<? extends ResourceAllocation>> available = getFreeResourcesEx(tx, link);
+        for (ResourceAllocation req : reqs) {
+            // NOTE(review): avail may be null when getFreeResourcesEx recorded
+            // nothing for this type (e.g. no lambda/MPLS capacity) — the
+            // contains()/isEmpty() calls below would then NPE; confirm callers
+            // never request such a type in that situation.
+            Set<? extends ResourceAllocation> avail = available.get(req.type());
+            if (req instanceof BandwidthResourceAllocation) {
+                // check if allocation should be accepted
+                if (avail.isEmpty()) {
+                    // checkState always fails inside this branch; effectively
+                    // throws IllegalStateException when no bandwidth entry exists
+                    checkState(!avail.isEmpty(),
+                               "There's no Bandwidth resource on %s?",
+                               link);
+                }
+                BandwidthResourceAllocation bw = (BandwidthResourceAllocation) avail.iterator().next();
+                double bwLeft = bw.bandwidth().toDouble();
+                BandwidthResourceAllocation bwReq = ((BandwidthResourceAllocation) req);
+                bwLeft -= bwReq.bandwidth().toDouble();
+                if (bwLeft < 0) {
+                    throw new ResourceAllocationException(
+                            PositionalParameterStringFormatter.format(
+                                    "Unable to allocate bandwidth for link {} "
+                                            + " requested amount is {} current allocation is {}",
+                                    link,
+                                    bwReq.bandwidth().toDouble(),
+                                    bw));
+                }
+            } else if (req instanceof LambdaResourceAllocation) {
+                LambdaResourceAllocation lambdaAllocation = (LambdaResourceAllocation) req;
+                // check if allocation should be accepted
+                if (!avail.contains(req)) {
+                    // requested lambda was not available
+                    throw new ResourceAllocationException(
+                            PositionalParameterStringFormatter.format(
+                                    "Unable to allocate lambda for link {} lambda is {}",
+                                    link,
+                                    lambdaAllocation.lambda().toInt()));
+                }
+            } else if (req instanceof MplsLabelResourceAllocation) {
+                MplsLabelResourceAllocation mplsAllocation = (MplsLabelResourceAllocation) req;
+                if (!avail.contains(req)) {
+                    throw new ResourceAllocationException(
+                            PositionalParameterStringFormatter
+                                    .format("Unable to allocate MPLS label for link "
+                                            + "{} MPLS label is {}",
+                                            link,
+                                            mplsAllocation
+                                                    .mplsLabel()
+                                                    .toString()));
+                }
+            }
+        }
+        // all requests allocatable => add allocation
+        final LinkKey linkKey = LinkKey.linkKey(link);
+        TransactionalMap<LinkKey, List<LinkResourceAllocations>> linkAllocs = getLinkAllocs(tx);
+        List<LinkResourceAllocations> before = linkAllocs.get(linkKey);
+        if (before == null) {
+            List<LinkResourceAllocations> after = new ArrayList<>();
+            after.add(allocations);
+            before = linkAllocs.putIfAbsent(linkKey, after);
+            if (before != null) {
+                // concurrent allocation detected, retry transaction : is this needed?
+                log.warn("Concurrent Allocation, retrying");
+                throw new TransactionException();
+            }
+        } else {
+            List<LinkResourceAllocations> after = new ArrayList<>(before.size() + 1);
+            after.addAll(before);
+            after.add(allocations);
+            // optimistic replace; relies on the transaction for atomicity
+            linkAllocs.replace(linkKey, before, after);
+        }
+    }
+
+    @Override
+    public LinkResourceEvent releaseResources(LinkResourceAllocations allocations) {
+        checkNotNull(allocations);
+
+        final IntentId intentId = allocations.intentId();
+        final Collection<Link> links = allocations.links();
+        boolean success = false;
+        // retry loop: a TransactionException aborts and re-runs the whole
+        // release; any other exception aborts and propagates to the caller
+        do {
+            TransactionContext tx = getTxContext();
+            tx.begin();
+            try {
+                TransactionalMap<IntentId, LinkResourceAllocations> intentAllocs = getIntentAllocs(tx);
+                intentAllocs.remove(intentId);
+
+                TransactionalMap<LinkKey, List<LinkResourceAllocations>> linkAllocs = getLinkAllocs(tx);
+                links.forEach(link -> {
+                    final LinkKey linkId = LinkKey.linkKey(link);
+
+                    List<LinkResourceAllocations> before = linkAllocs.get(linkId);
+                    if (before == null || before.isEmpty()) {
+                        // something is wrong, but it is already freed
+                        log.warn("There was no resource left to release on {}", linkId);
+                        return;
+                    }
+                    List<LinkResourceAllocations> after = new ArrayList<>(before);
+                    after.remove(allocations);
+                    linkAllocs.replace(linkId, before, after);
+                });
+                tx.commit();
+                success = true;
+            } catch (TransactionException e) {
+                log.debug("Transaction failed, retrying", e);
+                tx.abort();
+            } catch (Exception e) {
+                log.error("Exception thrown during releaseResource {}", allocations, e);
+                tx.abort();
+                throw e;
+            }
+        } while (!success);
+
+        // Issue events to force recompilation of intents.
+        final List<LinkResourceAllocations> releasedResources = ImmutableList.of(allocations);
+        return new LinkResourceEvent(
+                LinkResourceEvent.Type.ADDITIONAL_RESOURCES_AVAILABLE,
+                releasedResources);
+
+    }
+
+    @Override
+    public LinkResourceAllocations getAllocations(IntentId intentId) {
+        checkNotNull(intentId);
+        // best-effort read of the committed map; a read failure is logged
+        // and reported as "no allocation"
+        Versioned<LinkResourceAllocations> alloc = null;
+        try {
+            alloc = intentAllocMap.get(intentId);
+        } catch (Exception e) {
+            log.warn("Could not read resource allocation information", e);
+        }
+        return alloc == null ? null : alloc.value();
+    }
+
+    @Override
+    public Iterable<LinkResourceAllocations> getAllocations(Link link) {
+        checkNotNull(link);
+        TransactionContext tx = getTxContext();
+        Iterable<LinkResourceAllocations> res = null;
+        tx.begin();
+        try {
+            res = getAllocations(tx, link);
+        } finally {
+            // read-only access: always aborted
+            tx.abort();
+        }
+        return res == null ? Collections.emptyList() : res;
+    }
+
+    @Override
+    public Iterable<LinkResourceAllocations> getAllocations() {
+        // snapshot of every committed intent-level allocation; read failures
+        // are logged and yield an empty result
+        try {
+            Set<LinkResourceAllocations> allocs =
+                    intentAllocMap.values().stream().map(Versioned::value).collect(Collectors.toSet());
+            return ImmutableSet.copyOf(allocs);
+        } catch (Exception e) {
+            log.warn("Could not read resource allocation information", e);
+        }
+        return ImmutableSet.of();
+    }
+
+    /**
+     * Returns the allocation list recorded for a link within the transaction.
+     *
+     * NOTE(review): this read path inserts an empty list via putIfAbsent when
+     * the key is missing — a write inside what callers treat as a read
+     * (several of them abort the transaction afterwards). Confirm the
+     * insertion is intentional rather than a plain get-with-default.
+     */
+    private Iterable<LinkResourceAllocations> getAllocations(TransactionContext tx, Link link) {
+        checkNotNull(tx);
+        checkNotNull(link);
+        final LinkKey key = LinkKey.linkKey(link);
+        TransactionalMap<LinkKey, List<LinkResourceAllocations>> linkAllocs = getLinkAllocs(tx);
+        List<LinkResourceAllocations> res = null;
+
+        res = linkAllocs.get(key);
+        if (res == null) {
+            res = linkAllocs.putIfAbsent(key, new ArrayList<>());
+
+            if (res == null) {
+                return Collections.emptyList();
+            } else {
+                return res;
+            }
+        }
+        return res;
+    }
+
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/resource/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/resource/impl/package-info.java
new file mode 100644
index 00000000..7c30018d
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/resource/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of the distributed link resource store.
+ */
+package org.onosproject.store.resource.impl;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/ClusterMessageSerializer.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/ClusterMessageSerializer.java
new file mode 100644
index 00000000..76bf7984
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/ClusterMessageSerializer.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.serializers.custom;
+
+import org.onosproject.cluster.NodeId;
+import org.onosproject.store.cluster.messaging.ClusterMessage;
+import org.onosproject.store.cluster.messaging.MessageSubject;
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+
+/**
+ * Kryo Serializer for {@link ClusterMessage}.
+ */
+public final class ClusterMessageSerializer extends Serializer<ClusterMessage> {
+
+    /**
+     * Creates a serializer for {@link ClusterMessage}.
+     */
+    public ClusterMessageSerializer() {
+        // does not accept null
+        super(false);
+    }
+
+    @Override
+    public void write(Kryo kryo, Output output, ClusterMessage message) {
+        // Wire format: sender, subject, payload length, payload bytes.
+        final byte[] payload = message.payload();
+        kryo.writeClassAndObject(output, message.sender());
+        kryo.writeClassAndObject(output, message.subject());
+        output.writeInt(payload.length);
+        output.writeBytes(payload);
+    }
+
+    @Override
+    public ClusterMessage read(Kryo kryo, Input input,
+                               Class<ClusterMessage> type) {
+        // Fields are consumed in the exact order write() produced them.
+        final NodeId sender = (NodeId) kryo.readClassAndObject(input);
+        final MessageSubject subject = (MessageSubject) kryo.readClassAndObject(input);
+        final byte[] payload = input.readBytes(input.readInt());
+        return new ClusterMessage(sender, subject, payload);
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/DistributedStoreSerializers.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/DistributedStoreSerializers.java
new file mode 100644
index 00000000..5465b9b4
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/DistributedStoreSerializers.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.serializers.custom;
+
+import org.onosproject.store.impl.MastershipBasedTimestamp;
+import org.onosproject.store.impl.Timestamped;
+import org.onosproject.store.service.WallClockTimestamp;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onlab.util.KryoNamespace;
+
+/**
+ * Shared KryoNamespace definitions for the distributed stores in this bundle.
+ */
+public final class DistributedStoreSerializers {
+
+
+    // First Kryo registration id available to stores building on STORE_COMMON;
+    // ids below this (a 10-slot window past BEGIN_USER_CUSTOM_ID) are taken
+    // by the registrations in STORE_COMMON itself.
+    public static final int STORE_CUSTOM_BEGIN = KryoNamespaces.BEGIN_USER_CUSTOM_ID + 10;
+
+    /**
+     * KryoNamespace which can serialize ON.lab misc classes.
+     */
+    public static final KryoNamespace STORE_COMMON = KryoNamespace.newBuilder()
+            .register(KryoNamespaces.API)
+            .nextId(KryoNamespaces.BEGIN_USER_CUSTOM_ID)
+            .register(Timestamped.class)
+            .register(new MastershipBasedTimestampSerializer(), MastershipBasedTimestamp.class)
+            .register(WallClockTimestamp.class)
+            .build();
+
+    // avoid instantiation
+    private DistributedStoreSerializers() {}
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/MastershipBasedTimestampSerializer.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/MastershipBasedTimestampSerializer.java
new file mode 100644
index 00000000..eb1b2b55
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/MastershipBasedTimestampSerializer.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.serializers.custom;
+
+import org.onosproject.store.impl.MastershipBasedTimestamp;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+
+// To be used if Timestamp ever needs to cross bundle boundary.
+/**
+ * Kryo Serializer for {@link MastershipBasedTimestamp}.
+ */
+public class MastershipBasedTimestampSerializer extends Serializer<MastershipBasedTimestamp> {
+
+    /**
+     * Creates a serializer for {@link MastershipBasedTimestamp}.
+     */
+    public MastershipBasedTimestampSerializer() {
+        // non-null, immutable
+        super(false, true);
+    }
+
+    @Override
+    public void write(Kryo kryo, Output output, MastershipBasedTimestamp object) {
+        // Wire format: term number followed by sequence number, both longs.
+        final long term = object.termNumber();
+        final long sequence = object.sequenceNumber();
+        output.writeLong(term);
+        output.writeLong(sequence);
+    }
+
+    @Override
+    public MastershipBasedTimestamp read(Kryo kryo, Input input, Class<MastershipBasedTimestamp> type) {
+        // Arguments evaluate left to right, so the term is read before the
+        // sequence — matching the order write() emitted them.
+        return new MastershipBasedTimestamp(input.readLong(), input.readLong());
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/MessageSubjectSerializer.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/MessageSubjectSerializer.java
new file mode 100644
index 00000000..7ddee1b9
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/MessageSubjectSerializer.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.serializers.custom;
+
+import org.onosproject.store.cluster.messaging.MessageSubject;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+
+/**
+ * Kryo Serializer for {@link MessageSubject}.
+ */
+public final class MessageSubjectSerializer extends Serializer<MessageSubject> {
+
+    /**
+     * Creates a serializer for {@link MessageSubject}.
+     */
+    public MessageSubjectSerializer() {
+        // non-null, immutable
+        super(false, true);
+    }
+
+
+    @Override
+    public void write(Kryo kryo, Output output, MessageSubject object) {
+        // A subject is fully described by its string value.
+        final String value = object.value();
+        output.writeString(value);
+    }
+
+    @Override
+    public MessageSubject read(Kryo kryo, Input input,
+                               Class<MessageSubject> type) {
+        final String value = input.readString();
+        return new MessageSubject(value);
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/package-info.java
new file mode 100644
index 00000000..5cd4bee6
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/serializers/custom/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Cluster messaging and distributed store serializers.
+ */
+//FIXME what is the right name for this package?
+//FIXME can this be moved to onos-core-serializers?
+package org.onosproject.store.serializers.custom;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/statistic/impl/DistributedStatisticStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/statistic/impl/DistributedStatisticStore.java
new file mode 100644
index 00000000..d5434730
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/statistic/impl/DistributedStatisticStore.java
@@ -0,0 +1,317 @@
+/*
+ * Copyright 2014-2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.statistic.impl;
+
+import com.google.common.collect.Sets;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.KryoNamespace;
+import org.onlab.util.Tools;
+import org.onosproject.cluster.ClusterService;
+import org.onosproject.cluster.NodeId;
+import org.onosproject.mastership.MastershipService;
+import org.onosproject.net.ConnectPoint;
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.PortNumber;
+import org.onosproject.net.flow.FlowEntry;
+import org.onosproject.net.flow.FlowRule;
+import org.onosproject.net.flow.instructions.Instruction;
+import org.onosproject.net.flow.instructions.Instructions;
+import org.onosproject.net.statistic.StatisticStore;
+import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.serializers.KryoSerializer;
+import org.slf4j.Logger;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.onlab.util.Tools.groupedThreads;
+import static org.onosproject.store.statistic.impl.StatisticStoreMessageSubjects.GET_CURRENT;
+import static org.onosproject.store.statistic.impl.StatisticStoreMessageSubjects.GET_PREVIOUS;
+import static org.slf4j.LoggerFactory.getLogger;
+
+
+/**
+ * Maintains flow statistics per connect point using RPC calls to collect
+ * stats from remote instances on demand; queries are routed to the master
+ * of the device that owns the connect point.
+ */
+@Component(immediate = true)
+@Service
+public class DistributedStatisticStore implements StatisticStore {
+
+    private final Logger log = getLogger(getClass());
+
+    // TODO: Make configurable.
+    private static final int MESSAGE_HANDLER_THREAD_POOL_SIZE = 4;
+
+    // How long to wait for a remote answer before falling back to an empty set.
+    private static final long STATISTIC_STORE_TIMEOUT_MILLIS = 3000;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected MastershipService mastershipService;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected ClusterCommunicationService clusterCommunicator;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected ClusterService clusterService;
+
+    // Book-keeping of outstanding stat requests per connect point.
+    private final Map<ConnectPoint, InternalStatisticRepresentation> representations =
+            new ConcurrentHashMap<>();
+
+    // Previously published statistics snapshot per connect point.
+    private final Map<ConnectPoint, Set<FlowEntry>> previous =
+            new ConcurrentHashMap<>();
+
+    // Most recently published statistics snapshot per connect point.
+    private final Map<ConnectPoint, Set<FlowEntry>> current =
+            new ConcurrentHashMap<>();
+
+    protected static final KryoSerializer SERIALIZER = new KryoSerializer() {
+        @Override
+        protected void setupKryoPool() {
+            serializerPool = KryoNamespace.newBuilder()
+                    .register(KryoNamespaces.API)
+                    .nextId(KryoNamespaces.BEGIN_USER_CUSTOM_ID)
+                    // register this store specific classes here
+                    .build();
+        }
+    }; // FIX: removed stray second ';' that followed this declaration
+
+    private ExecutorService messageHandlingExecutor;
+
+    @Activate
+    public void activate() {
+        messageHandlingExecutor = Executors.newFixedThreadPool(
+                MESSAGE_HANDLER_THREAD_POOL_SIZE,
+                groupedThreads("onos/store/statistic", "message-handlers"));
+
+        // Serve current/previous statistics to peers over cluster messaging.
+        clusterCommunicator.<ConnectPoint, Set<FlowEntry>>addSubscriber(GET_CURRENT,
+                SERIALIZER::decode,
+                this::getCurrentStatisticInternal,
+                SERIALIZER::encode,
+                messageHandlingExecutor);
+
+        clusterCommunicator.<ConnectPoint, Set<FlowEntry>>addSubscriber(GET_PREVIOUS,
+                SERIALIZER::decode,
+                this::getPreviousStatisticInternal,
+                SERIALIZER::encode,
+                messageHandlingExecutor);
+
+        log.info("Started");
+    }
+
+    @Deactivate
+    public void deactivate() {
+        clusterCommunicator.removeSubscriber(GET_PREVIOUS);
+        clusterCommunicator.removeSubscriber(GET_CURRENT);
+        messageHandlingExecutor.shutdown();
+        log.info("Stopped");
+    }
+
+    @Override
+    public void prepareForStatistics(FlowRule rule) {
+        ConnectPoint cp = buildConnectPoint(rule);
+        if (cp == null) {
+            return;
+        }
+        // computeIfAbsent is atomic on ConcurrentHashMap, replacing the
+        // previous synchronized check-then-act creation of the representation.
+        representations
+                .computeIfAbsent(cp, k -> new InternalStatisticRepresentation())
+                .prepare();
+    }
+
+    @Override
+    public synchronized void removeFromStatistics(FlowRule rule) {
+        ConnectPoint cp = buildConnectPoint(rule);
+        if (cp == null) {
+            return;
+        }
+        InternalStatisticRepresentation rep = representations.get(cp);
+        if (rep != null && rep.remove(rule)) {
+            updatePublishedStats(cp, Collections.emptySet());
+        }
+        // Scrub the rule from both published snapshots.
+        Set<FlowEntry> values = current.get(cp);
+        if (values != null) {
+            values.remove(rule);
+        }
+        values = previous.get(cp);
+        if (values != null) {
+            values.remove(rule);
+        }
+    }
+
+    @Override
+    public void addOrUpdateStatistic(FlowEntry rule) {
+        ConnectPoint cp = buildConnectPoint(rule);
+        if (cp == null) {
+            return;
+        }
+        InternalStatisticRepresentation rep = representations.get(cp);
+        // Publish only once all outstanding requests have been answered.
+        if (rep != null && rep.submit(rule)) {
+            updatePublishedStats(cp, rep.get());
+        }
+    }
+
+    // Rotates the published snapshots: current becomes previous and the
+    // freshly collected entries become current.
+    private synchronized void updatePublishedStats(ConnectPoint cp,
+                                                   Set<FlowEntry> flowEntries) {
+        Set<FlowEntry> curr = current.get(cp);
+        if (curr == null) {
+            curr = new HashSet<>();
+        }
+        previous.put(cp, curr);
+        current.put(cp, flowEntries);
+    }
+
+    @Override
+    public Set<FlowEntry> getCurrentStatistic(ConnectPoint connectPoint) {
+        final DeviceId deviceId = connectPoint.deviceId();
+        NodeId master = mastershipService.getMasterFor(deviceId);
+        if (master == null) {
+            log.warn("No master for {}", deviceId);
+            return Collections.emptySet();
+        }
+        if (master.equals(clusterService.getLocalNode().id())) {
+            return getCurrentStatisticInternal(connectPoint);
+        }
+        // Not the master: ask the master and fall back to an empty set on timeout.
+        return Tools.futureGetOrElse(clusterCommunicator.sendAndReceive(
+                        connectPoint,
+                        GET_CURRENT,
+                        SERIALIZER::encode,
+                        SERIALIZER::decode,
+                        master),
+                STATISTIC_STORE_TIMEOUT_MILLIS,
+                TimeUnit.MILLISECONDS,
+                Collections.emptySet());
+    }
+
+    // FIX: never returns null; an unknown connect point yields an empty set,
+    // matching what the remote/timeout paths of getCurrentStatistic return.
+    private synchronized Set<FlowEntry> getCurrentStatisticInternal(ConnectPoint connectPoint) {
+        return current.getOrDefault(connectPoint, Collections.emptySet());
+    }
+
+    @Override
+    public Set<FlowEntry> getPreviousStatistic(ConnectPoint connectPoint) {
+        final DeviceId deviceId = connectPoint.deviceId();
+        NodeId master = mastershipService.getMasterFor(deviceId);
+        if (master == null) {
+            log.warn("No master for {}", deviceId);
+            return Collections.emptySet();
+        }
+        if (master.equals(clusterService.getLocalNode().id())) {
+            return getPreviousStatisticInternal(connectPoint);
+        }
+        return Tools.futureGetOrElse(clusterCommunicator.sendAndReceive(
+                        connectPoint,
+                        GET_PREVIOUS,
+                        SERIALIZER::encode,
+                        SERIALIZER::decode,
+                        master),
+                STATISTIC_STORE_TIMEOUT_MILLIS,
+                TimeUnit.MILLISECONDS,
+                Collections.emptySet());
+    }
+
+    // See getCurrentStatisticInternal regarding the empty-set contract.
+    private synchronized Set<FlowEntry> getPreviousStatisticInternal(ConnectPoint connectPoint) {
+        return previous.getOrDefault(connectPoint, Collections.emptySet());
+    }
+
+    // Maps a rule to the connect point of its output port, or null when the
+    // rule has no trackable output (and thus no statistics to keep).
+    private ConnectPoint buildConnectPoint(FlowRule rule) {
+        PortNumber port = getOutput(rule);
+        if (port == null) {
+            return null;
+        }
+        return new ConnectPoint(rule.deviceId(), port);
+    }
+
+    // Returns the port of the first OUTPUT instruction, P0 for a DROP,
+    // or null if neither instruction type is present in the treatment.
+    private PortNumber getOutput(FlowRule rule) {
+        for (Instruction i : rule.treatment().allInstructions()) {
+            if (i.type() == Instruction.Type.OUTPUT) {
+                return ((Instructions.OutputInstruction) i).port();
+            }
+            if (i.type() == Instruction.Type.DROP) {
+                return PortNumber.P0;
+            }
+        }
+        return null;
+    }
+
+    // Tracks outstanding stat requests (counter) and accumulated flow
+    // entries for a single connect point.
+    private class InternalStatisticRepresentation {
+
+        private final AtomicInteger counter = new AtomicInteger(0);
+        private final Set<FlowEntry> rules = new HashSet<>();
+
+        // Registers one more expected statistic submission.
+        public void prepare() {
+            counter.incrementAndGet();
+        }
+
+        // Removes the rule; true when this was the last outstanding request.
+        public synchronized boolean remove(FlowRule rule) {
+            rules.remove(rule);
+            return counter.decrementAndGet() == 0;
+        }
+
+        // Records a fresh entry; true when the set is ready to be published.
+        public synchronized boolean submit(FlowEntry rule) {
+            // Replace any stale entry for the same rule (remove-then-add;
+            // the previous contains() check before remove() was redundant).
+            rules.remove(rule);
+            rules.add(rule);
+            if (counter.get() == 0) {
+                return true;
+            }
+            return counter.decrementAndGet() == 0;
+        }
+
+        // Snapshots the accumulated entries and re-arms the counter.
+        public synchronized Set<FlowEntry> get() {
+            counter.set(rules.size());
+            return Sets.newHashSet(rules);
+        }
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/statistic/impl/StatisticStoreMessageSubjects.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/statistic/impl/StatisticStoreMessageSubjects.java
new file mode 100644
index 00000000..cc03c302
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/statistic/impl/StatisticStoreMessageSubjects.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.statistic.impl;
+
+import org.onosproject.store.cluster.messaging.MessageSubject;
+
+/**
+ * Message subjects used for peer-to-peer communication by the
+ * distributed statistic store.
+ */
+public final class StatisticStoreMessageSubjects {
+
+    /** Request for a peer's current statistics for a connect point. */
+    public static final MessageSubject GET_CURRENT =
+            new MessageSubject("peer-return-current");
+
+    /** Request for a peer's previous statistics for a connect point. */
+    public static final MessageSubject GET_PREVIOUS =
+            new MessageSubject("peer-return-previous");
+
+    // Non-instantiable constants holder.
+    private StatisticStoreMessageSubjects() {}
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/statistic/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/statistic/impl/package-info.java
new file mode 100644
index 00000000..49436a9f
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/statistic/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of the statistic store.
+ */
+package org.onosproject.store.statistic.impl;
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/topology/impl/DistributedTopologyStore.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/topology/impl/DistributedTopologyStore.java
new file mode 100644
index 00000000..487fad9b
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/topology/impl/DistributedTopologyStore.java
@@ -0,0 +1,254 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.topology.impl;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static org.onlab.util.Tools.isNullOrEmpty;
+import static org.onosproject.net.topology.TopologyEvent.Type.TOPOLOGY_CHANGED;
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.util.KryoNamespace;
+import org.onosproject.common.DefaultTopology;
+import org.onosproject.event.Event;
+import org.onosproject.mastership.MastershipService;
+import org.onosproject.net.ConnectPoint;
+import org.onosproject.net.Device;
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.Link;
+import org.onosproject.net.Path;
+import org.onosproject.net.provider.ProviderId;
+import org.onosproject.net.topology.ClusterId;
+import org.onosproject.net.topology.DefaultGraphDescription;
+import org.onosproject.net.topology.GraphDescription;
+import org.onosproject.net.topology.LinkWeight;
+import org.onosproject.net.topology.Topology;
+import org.onosproject.net.topology.TopologyCluster;
+import org.onosproject.net.topology.TopologyEvent;
+import org.onosproject.net.topology.TopologyGraph;
+import org.onosproject.net.topology.TopologyStore;
+import org.onosproject.net.topology.TopologyStoreDelegate;
+import org.onosproject.store.AbstractStore;
+import org.onosproject.store.serializers.KryoNamespaces;
+import org.onosproject.store.service.EventuallyConsistentMap;
+import org.onosproject.store.service.EventuallyConsistentMapEvent;
+import org.onosproject.store.service.EventuallyConsistentMapListener;
+import org.onosproject.store.service.LogicalClockService;
+import org.onosproject.store.service.StorageService;
+import org.slf4j.Logger;
+
+/**
+ * Manages inventory of topology snapshots using trivial in-memory
+ * structures implementation.
+ * <p>
+ * Note: This component is not distributed per-se. It runs on every
+ * instance and feeds off of other distributed stores.
+ */
+@Component(immediate = true)
+@Service
+public class DistributedTopologyStore
+        extends AbstractStore<TopologyEvent, TopologyStoreDelegate>
+        implements TopologyStore {
+
+    private final Logger log = getLogger(getClass());
+
+    // Current topology snapshot; initialized to an empty topology so reads
+    // made before the first update still see a valid graph. Never null.
+    private volatile DefaultTopology current =
+            new DefaultTopology(ProviderId.NONE,
+                                new DefaultGraphDescription(0L, System.currentTimeMillis(),
+                                                            Collections.<Device>emptyList(),
+                                                            Collections.<Link>emptyList()));
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected StorageService storageService;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected LogicalClockService clockService;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected MastershipService mastershipService;
+
+    // Cluster root to broadcast points bindings to allow convergence to
+    // a shared broadcast tree; node that is the master of the cluster root
+    // is the primary.
+    private EventuallyConsistentMap<DeviceId, Set<ConnectPoint>> broadcastPoints;
+
+    private final EventuallyConsistentMapListener<DeviceId, Set<ConnectPoint>> listener =
+            new InternalBroadcastPointListener();
+
+    @Activate
+    public void activate() {
+        // Renamed from 'hostSerializer': this serializer is for the
+        // broadcast-tree map, not host data.
+        KryoNamespace.Builder serializer = KryoNamespace.newBuilder()
+                .register(KryoNamespaces.API);
+
+        broadcastPoints = storageService.<DeviceId, Set<ConnectPoint>>eventuallyConsistentMapBuilder()
+                .withName("onos-broadcast-trees")
+                .withSerializer(serializer)
+                .withTimestampProvider((k, v) -> clockService.getTimestamp())
+                .build();
+        broadcastPoints.addListener(listener);
+        log.info("Started");
+    }
+
+    @Deactivate
+    public void deactivate() {
+        broadcastPoints.removeListener(listener);
+        broadcastPoints.destroy();
+        log.info("Stopped");
+    }
+
+    @Override
+    public Topology currentTopology() {
+        return current;
+    }
+
+    @Override
+    public boolean isLatest(Topology topology) {
+        // Topology is current only if it is the same instance as our current
+        // topology; identity comparison is intentional.
+        return topology == current;
+    }
+
+    @Override
+    public TopologyGraph getGraph(Topology topology) {
+        return defaultTopology(topology).getGraph();
+    }
+
+    @Override
+    public Set<TopologyCluster> getClusters(Topology topology) {
+        return defaultTopology(topology).getClusters();
+    }
+
+    @Override
+    public TopologyCluster getCluster(Topology topology, ClusterId clusterId) {
+        return defaultTopology(topology).getCluster(clusterId);
+    }
+
+    @Override
+    public Set<DeviceId> getClusterDevices(Topology topology, TopologyCluster cluster) {
+        return defaultTopology(topology).getClusterDevices(cluster);
+    }
+
+    @Override
+    public Set<Link> getClusterLinks(Topology topology, TopologyCluster cluster) {
+        return defaultTopology(topology).getClusterLinks(cluster);
+    }
+
+    @Override
+    public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst) {
+        return defaultTopology(topology).getPaths(src, dst);
+    }
+
+    @Override
+    public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst,
+                              LinkWeight weight) {
+        return defaultTopology(topology).getPaths(src, dst, weight);
+    }
+
+    @Override
+    public boolean isInfrastructure(Topology topology, ConnectPoint connectPoint) {
+        return defaultTopology(topology).isInfrastructure(connectPoint);
+    }
+
+    @Override
+    public boolean isBroadcastPoint(Topology topology, ConnectPoint connectPoint) {
+        return defaultTopology(topology).isBroadcastPoint(connectPoint);
+    }
+
+    // Broadcast-point predicate evaluated against the current topology and
+    // the shared broadcast-tree map.
+    private boolean isBroadcastPoint(ConnectPoint connectPoint) {
+        // Any non-infrastructure, i.e. edge points are assumed to be OK.
+        if (!current.isInfrastructure(connectPoint)) {
+            return true;
+        }
+
+        // Find the cluster to which the device belongs.
+        TopologyCluster cluster = current.getCluster(connectPoint.deviceId());
+        checkArgument(cluster != null, "No cluster found for device %s", connectPoint.deviceId());
+
+        // If the broadcast set is null or empty, or if the point explicitly
+        // belongs to it, return true;
+        Set<ConnectPoint> points = broadcastPoints.get(cluster.root().deviceId());
+        return isNullOrEmpty(points) || points.contains(connectPoint);
+    }
+
+    @Override
+    public TopologyEvent updateTopology(ProviderId providerId,
+                                        GraphDescription graphDescription,
+                                        List<Event> reasons) {
+        // Quick rejection of stale descriptions before doing any real work.
+        // ('current != null' check dropped: the field is initialized inline
+        // and only ever assigned a fresh topology, so it is never null.)
+        if (graphDescription.timestamp() < current.time()) {
+            return null;
+        }
+
+        // Have the default topology construct self from the description data.
+        DefaultTopology newTopology =
+                new DefaultTopology(providerId, graphDescription, this::isBroadcastPoint);
+        updateBroadcastPoints(newTopology);
+
+        // Promote the new topology to current and return a ready-to-send event.
+        synchronized (this) {
+            // FIX: re-check under the lock; a newer topology may have been
+            // promoted between the early check above and now, and without
+            // this re-check an older snapshot could overwrite a newer one.
+            if (graphDescription.timestamp() < current.time()) {
+                return null;
+            }
+            current = newTopology;
+            return new TopologyEvent(TOPOLOGY_CHANGED, current, reasons);
+        }
+    }
+
+    private void updateBroadcastPoints(DefaultTopology topology) {
+        // Remove any broadcast trees rooted by devices for which we are master.
+        Set<DeviceId> toRemove = broadcastPoints.keySet().stream()
+                .filter(mastershipService::isLocalMaster)
+                .collect(Collectors.toSet());
+
+        // Update the broadcast trees rooted by devices for which we are master.
+        topology.getClusters().forEach(c -> {
+            toRemove.remove(c.root().deviceId());
+            if (mastershipService.isLocalMaster(c.root().deviceId())) {
+                broadcastPoints.put(c.root().deviceId(),
+                                    topology.broadcastPoints(c.id()));
+            }
+        });
+
+        toRemove.forEach(broadcastPoints::remove);
+    }
+
+    // Validates the specified topology and returns it as a default
+    private DefaultTopology defaultTopology(Topology topology) {
+        checkArgument(topology instanceof DefaultTopology,
+                      "Topology class %s not supported", topology.getClass());
+        return (DefaultTopology) topology;
+    }
+
+    // Logs incoming broadcast-tree updates from peers.
+    private class InternalBroadcastPointListener
+            implements EventuallyConsistentMapListener<DeviceId, Set<ConnectPoint>> {
+        @Override
+        public void event(EventuallyConsistentMapEvent<DeviceId, Set<ConnectPoint>> event) {
+            if (event.type() == EventuallyConsistentMapEvent.Type.PUT) {
+                if (!event.value().isEmpty()) {
+                    log.info("Cluster rooted at {} has {} broadcast-points; #{}",
+                             event.key(), event.value().size(), event.value().hashCode());
+                }
+            }
+        }
+    }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/topology/impl/PathKey.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/topology/impl/PathKey.java
new file mode 100644
index 00000000..f1c2bdc5
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/topology/impl/PathKey.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.onosproject.store.topology.impl;
+
+import org.onosproject.net.DeviceId;
+
+import java.util.Objects;
+
+/**
+ * Key for filing pre-computed paths between source and destination devices.
+ */
+class PathKey {
+ private final DeviceId src;
+ private final DeviceId dst;
+
+ /**
+ * Creates a path key from the given source/dest pair.
+ * @param src source device
+ * @param dst destination device
+ */
+ PathKey(DeviceId src, DeviceId dst) {
+ this.src = src;
+ this.dst = dst;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(src, dst);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj instanceof PathKey) {
+ final PathKey other = (PathKey) obj;
+ return Objects.equals(this.src, other.src) && Objects.equals(this.dst, other.dst);
+ }
+ return false;
+ }
+}
diff --git a/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/topology/impl/package-info.java b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/topology/impl/package-info.java
new file mode 100644
index 00000000..d1590793
--- /dev/null
+++ b/framework/src/onos/core/store/dist/src/main/java/org/onosproject/store/topology/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Implementation of distributed topology store using p2p synchronization protocol.
+ */
+package org.onosproject.store.topology.impl;