Committed by Gerrit Code Review
In preparation for dynamic clustering support:
- Added Cluster metadata service and metadata store interfaces.
- Added a static cluster metadata store implementation that is backed by a local file.
- Consolidated the existing cluster.json and tablets.json metadata files into a single cluster.json file that holds all the cluster-related metadata.
- Removed the dependency on the ONOS_NIC env variable.

Change-Id: Ia0a8bb69740caecdcdde71a9408be37c56ae2504
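For illustration, the consolidated cluster.json might look roughly as follows. This is only a sketch: the node ids and addresses are hypothetical, and the exact field names follow the Jackson serializers added in StaticClusterMetadataStore later in this change.

{
  "name": "default",
  "nodes": [
    { "id": "10.0.1.1", "ip": "10.0.1.1", "port": 9876 },
    { "id": "10.0.1.2", "ip": "10.0.1.2", "port": 9876 },
    { "id": "10.0.1.3", "ip": "10.0.1.3", "port": 9876 }
  ],
  "partitions": [
    { "name": "p1", "members": [ "10.0.1.1", "10.0.1.2", "10.0.1.3" ] }
  ]
}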
Showing 22 changed files with 802 additions and 568 deletions
... | @@ -30,9 +30,8 @@ public interface ClusterAdminService { | ... | @@ -30,9 +30,8 @@ public interface ClusterAdminService { |
30 | * instance. | 30 | * instance. |
31 | * | 31 | * |
32 | * @param nodes set of nodes that form the cluster | 32 | * @param nodes set of nodes that form the cluster |
33 | - * @param ipPrefix IP address prefix, e.g. 10.0.1.* | ||
34 | */ | 33 | */ |
35 | - void formCluster(Set<ControllerNode> nodes, String ipPrefix); | 34 | + void formCluster(Set<ControllerNode> nodes); |
36 | 35 | ||
37 | /** | 36 | /** |
38 | * Adds a new controller node to the cluster. | 37 | * Adds a new controller node to the cluster. | ... | ... |
1 | +/* | ||
2 | + * Copyright 2015 Open Networking Laboratory | ||
3 | + * | ||
4 | + * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | + * you may not use this file except in compliance with the License. | ||
6 | + * You may obtain a copy of the License at | ||
7 | + * | ||
8 | + * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | + * | ||
10 | + * Unless required by applicable law or agreed to in writing, software | ||
11 | + * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | + * See the License for the specific language governing permissions and | ||
14 | + * limitations under the License. | ||
15 | + */ | ||
16 | +package org.onosproject.cluster; | ||
17 | + | ||
18 | +import java.util.Collection; | ||
19 | +import java.util.Set; | ||
20 | +import java.util.stream.Collectors; | ||
21 | + | ||
22 | +import static com.google.common.base.Preconditions.checkNotNull; | ||
23 | +import static com.google.common.base.Verify.verifyNotNull; | ||
24 | +import static com.google.common.base.Verify.verify; | ||
25 | + | ||
26 | +import com.google.common.base.MoreObjects; | ||
27 | +import com.google.common.collect.Collections2; | ||
28 | +import com.google.common.collect.ImmutableSet; | ||
29 | + | ||
30 | +/** | ||
31 | + * Cluster metadata. | ||
32 | + * <p> | ||
33 | + * Metadata specifies the attributes that define an ONOS cluster and comprises the collection | ||
34 | + * of {@link org.onosproject.cluster.ControllerNode nodes} and the collection of data | ||
35 | + * {@link org.onosproject.cluster.Partition partitions}. | ||
36 | + */ | ||
37 | +public final class ClusterMetadata { | ||
38 | + | ||
39 | + private String name; | ||
40 | + private Set<ControllerNode> nodes; | ||
41 | + private Set<Partition> partitions; | ||
42 | + | ||
43 | + /** | ||
44 | + * Returns a new cluster metadata builder. | ||
45 | + * @return The cluster metadata builder. | ||
46 | + */ | ||
47 | + public static Builder builder() { | ||
48 | + return new Builder(); | ||
49 | + } | ||
50 | + | ||
51 | + /** | ||
52 | + * Returns the name of the cluster. | ||
53 | + * | ||
54 | + * @return cluster name | ||
55 | + */ | ||
56 | + public String getName() { | ||
57 | + return this.name; | ||
58 | + } | ||
59 | + | ||
60 | + /** | ||
61 | + * Returns the collection of {@link org.onosproject.cluster.ControllerNode nodes} that make up the cluster. | ||
62 | + * @return cluster nodes | ||
63 | + */ | ||
64 | + public Collection<ControllerNode> getNodes() { | ||
65 | + return this.nodes; | ||
66 | + } | ||
67 | + | ||
68 | + /** | ||
69 | + * Returns the collection of data {@link org.onosproject.cluster.Partition partitions} that make up the cluster. | ||
70 | + * @return collection of partitions. | ||
71 | + */ | ||
72 | + public Collection<Partition> getPartitions() { | ||
73 | + return this.partitions; | ||
74 | + } | ||
75 | + | ||
76 | + @Override | ||
77 | + public String toString() { | ||
78 | + return MoreObjects.toStringHelper(ClusterMetadata.class) | ||
79 | + .add("name", name) | ||
80 | + .add("nodes", nodes) | ||
81 | + .add("partitions", partitions) | ||
82 | + .toString(); | ||
83 | + } | ||
84 | + | ||
85 | + /** | ||
86 | + * Builder for a {@link ClusterMetadata} instance. | ||
87 | + */ | ||
88 | + public static class Builder { | ||
89 | + | ||
90 | + private final ClusterMetadata metadata; | ||
91 | + | ||
92 | + public Builder() { | ||
93 | + metadata = new ClusterMetadata(); | ||
94 | + } | ||
95 | + | ||
96 | + /** | ||
97 | + * Sets the cluster name, returning the cluster metadata builder for method chaining. | ||
98 | + * @param name cluster name | ||
99 | + * @return this cluster metadata builder | ||
100 | + */ | ||
101 | + public Builder withName(String name) { | ||
102 | + metadata.name = checkNotNull(name); | ||
103 | + return this; | ||
104 | + } | ||
105 | + | ||
106 | + /** | ||
107 | + * Sets the collection of cluster nodes, returning the cluster metadata builder for method chaining. | ||
108 | + * @param controllerNodes collection of cluster nodes | ||
109 | + * @return this cluster metadata builder | ||
110 | + */ | ||
111 | + public Builder withControllerNodes(Collection<ControllerNode> controllerNodes) { | ||
112 | + metadata.nodes = ImmutableSet.copyOf(checkNotNull(controllerNodes)); | ||
113 | + return this; | ||
114 | + } | ||
115 | + | ||
116 | + /** | ||
117 | + * Sets the collection of data partitions, returning the cluster metadata builder for method chaining. | ||
118 | + * @param partitions collection of partitions | ||
119 | + * @return this cluster metadata builder | ||
120 | + */ | ||
121 | + public Builder withPartitions(Collection<Partition> partitions) { | ||
122 | + metadata.partitions = ImmutableSet.copyOf(checkNotNull(partitions)); | ||
123 | + return this; | ||
124 | + } | ||
125 | + | ||
126 | + /** | ||
127 | + * Builds the cluster metadata. | ||
128 | + * @return cluster metadata | ||
129 | + * @throws com.google.common.base.VerifyException if the metadata is misconfigured | ||
130 | + */ | ||
131 | + public ClusterMetadata build() { | ||
132 | + verifyMetadata(); | ||
133 | + return metadata; | ||
134 | + } | ||
135 | + | ||
136 | + /** | ||
137 | + * Validates the constructed metadata for semantic correctness. | ||
138 | + * @throws VerifyException if the metadata is misconfigured. | ||
139 | + */ | ||
140 | + private void verifyMetadata() { | ||
141 | + verifyNotNull(metadata.getName(), "Cluster name must be specified"); | ||
142 | + verifyNotNull(metadata.getNodes(), "Cluster nodes must be specified"); | ||
143 | + verifyNotNull(metadata.getPartitions(), "Cluster partitions must be specified"); | ||
144 | + verify(!metadata.getNodes().isEmpty(), "Cluster nodes must not be empty"); | ||
145 | + verify(!metadata.getPartitions().isEmpty(), "Cluster partitions must not be empty"); | ||
146 | + | ||
147 | + // verify that partitions are constituted from valid cluster nodes. | ||
148 | + boolean validPartitions = Collections2.transform(metadata.getNodes(), ControllerNode::id) | ||
149 | + .containsAll(metadata.getPartitions() | ||
150 | + .stream() | ||
151 | + .flatMap(r -> r.getMembers().stream()) | ||
152 | + .collect(Collectors.toSet())); | ||
153 | + verify(validPartitions, "Partition locations must be valid cluster nodes"); | ||
154 | + } | ||
155 | + } | ||
156 | +} |
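A minimal usage sketch of the builder defined above; the node ids and addresses are hypothetical, and the constructors used here are the same ones that appear elsewhere in this change:

ControllerNode n1 = new DefaultControllerNode(new NodeId("n1"), IpAddress.valueOf("10.0.1.1"), 9876);
ControllerNode n2 = new DefaultControllerNode(new NodeId("n2"), IpAddress.valueOf("10.0.1.2"), 9876);
ControllerNode n3 = new DefaultControllerNode(new NodeId("n3"), IpAddress.valueOf("10.0.1.3"), 9876);

ClusterMetadata metadata = ClusterMetadata.builder()
        .withName("default")
        .withControllerNodes(ImmutableSet.of(n1, n2, n3))
        .withPartitions(ImmutableSet.of(
                new Partition("p1", ImmutableSet.of(n1.id(), n2.id(), n3.id()))))
        .build();   // build() verifies the name, the nodes and the partition membership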
... | @@ -13,46 +13,44 @@ | ... | @@ -13,46 +13,44 @@ |
13 | * See the License for the specific language governing permissions and | 13 | * See the License for the specific language governing permissions and |
14 | * limitations under the License. | 14 | * limitations under the License. |
15 | */ | 15 | */ |
16 | -package org.onosproject.store.cluster.impl; | 16 | +package org.onosproject.cluster; |
17 | 17 | ||
18 | -import java.util.Set; | 18 | +import org.onosproject.event.AbstractEvent; |
19 | - | ||
20 | -import com.google.common.collect.ImmutableSet; | ||
21 | 19 | ||
22 | /** | 20 | /** |
23 | - * Cluster definition. | 21 | + * Describes a cluster metadata event. |
24 | */ | 22 | */ |
25 | -public class ClusterDefinition { | 23 | +public class ClusterMetadataEvent extends AbstractEvent<ClusterMetadataEvent.Type, ClusterMetadata> { |
26 | - | ||
27 | - private Set<NodeInfo> nodes; | ||
28 | - private String ipPrefix; | ||
29 | 24 | ||
30 | /** | 25 | /** |
31 | - * Creates a new cluster definition. | 26 | + * Type of cluster metadata events. |
32 | - * @param nodes cluster nodes information | ||
33 | - * @param ipPrefix ip prefix common to all cluster nodes | ||
34 | - * @return cluster definition | ||
35 | */ | 27 | */ |
36 | - public static ClusterDefinition from(Set<NodeInfo> nodes, String ipPrefix) { | 28 | + public enum Type { |
37 | - ClusterDefinition definition = new ClusterDefinition(); | 29 | + /** |
38 | - definition.ipPrefix = ipPrefix; | 30 | + * Signifies that the cluster metadata has changed. |
39 | - definition.nodes = ImmutableSet.copyOf(nodes); | 31 | + */ |
40 | - return definition; | 32 | + METADATA_CHANGED, |
41 | } | 33 | } |
42 | 34 | ||
43 | /** | 35 | /** |
44 | - * Returns set of cluster nodes info. | 36 | + * Creates an event of a given type and for the specified metadata and the |
45 | - * @return cluster nodes info | 37 | + * current time. |
38 | + * | ||
39 | + * @param type cluster metadata event type | ||
40 | + * @param metadata cluster metadata subject | ||
46 | */ | 41 | */ |
47 | - public Set<NodeInfo> getNodes() { | 42 | + public ClusterMetadataEvent(Type type, ClusterMetadata metadata) { |
48 | - return ImmutableSet.copyOf(nodes); | 43 | + super(type, metadata); |
49 | } | 44 | } |
50 | 45 | ||
51 | /** | 46 | /** |
52 | - * Returns ipPrefix in dotted decimal notion. | 47 | + * Creates an event of a given type and for the specified metadata and time. |
53 | - * @return ip prefix | 48 | + * |
49 | + * @param type cluster metadata event type | ||
50 | + * @param metadata cluster metadata subject | ||
51 | + * @param time occurrence time | ||
54 | */ | 52 | */ |
55 | - public String getIpPrefix() { | 53 | + public ClusterMetadataEvent(Type type, ClusterMetadata metadata, long time) { |
56 | - return ipPrefix; | 54 | + super(type, metadata, time); |
57 | } | 55 | } |
58 | -} | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
56 | +} | ... | ... |
1 | +/* | ||
2 | + * Copyright 2015 Open Networking Laboratory | ||
3 | + * | ||
4 | + * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | + * you may not use this file except in compliance with the License. | ||
6 | + * You may obtain a copy of the License at | ||
7 | + * | ||
8 | + * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | + * | ||
10 | + * Unless required by applicable law or agreed to in writing, software | ||
11 | + * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | + * See the License for the specific language governing permissions and | ||
14 | + * limitations under the License. | ||
15 | + */ | ||
16 | +package org.onosproject.cluster; | ||
17 | + | ||
18 | +import org.onosproject.event.EventListener; | ||
19 | + | ||
20 | +/** | ||
21 | + * Entity capable of receiving cluster metadata related events. | ||
22 | + */ | ||
23 | +public interface ClusterMetadataEventListener extends EventListener<ClusterMetadataEvent> { | ||
24 | +} |
... | @@ -15,33 +15,26 @@ | ... | @@ -15,33 +15,26 @@ |
15 | */ | 15 | */ |
16 | package org.onosproject.cluster; | 16 | package org.onosproject.cluster; |
17 | 17 | ||
18 | -import java.util.Set; | ||
19 | - | ||
20 | /** | 18 | /** |
21 | - * Service for obtaining the static definition of a controller cluster. | 19 | + * Service for obtaining metadata information about the cluster. |
22 | */ | 20 | */ |
23 | -public interface ClusterDefinitionService { | 21 | +public interface ClusterMetadataService { |
24 | 22 | ||
25 | /** | 23 | /** |
26 | - * Returns the local controller node. | 24 | + * Returns the current cluster metadata. |
27 | - * @return local controller node | 25 | + * @return cluster metadata |
28 | */ | 26 | */ |
29 | - ControllerNode localNode(); | 27 | + ClusterMetadata getClusterMetadata(); |
30 | 28 | ||
31 | /** | 29 | /** |
32 | - * Returns the set of seed nodes that should be used for discovering other members | 30 | + * Updates the cluster metadata. |
33 | - * of the cluster. | 31 | + * @param metadata new metadata |
34 | - * @return set of seed controller nodes | ||
35 | */ | 32 | */ |
36 | - Set<ControllerNode> seedNodes(); | 33 | + void setClusterMetadata(ClusterMetadata metadata); |
37 | 34 | ||
38 | /** | 35 | /** |
39 | - * Forms cluster configuration based on the specified set of node | 36 | + * Returns the local controller node representing this instance. |
40 | - * information. Assumes subsequent restart for the new configuration to | 37 | + * @return local controller node |
41 | - * take hold. | ||
42 | - * | ||
43 | - * @param nodes set of nodes that form the cluster | ||
44 | - * @param ipPrefix IP address prefix, e.g. 10.0.1.* | ||
45 | */ | 38 | */ |
46 | - void formCluster(Set<ControllerNode> nodes, String ipPrefix); | ||
47 | -} | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
39 | + ControllerNode getLocalNode(); | ||
40 | +} | ... | ... |
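As a rough sketch, a component would consume this service much like ClusterManager and DistributedClusterStore do later in this change (the surrounding component and log message are hypothetical):

@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
protected ClusterMetadataService clusterMetadataService;

@Activate
public void activate() {
    // Resolve the local identity and the current cluster view from the metadata service.
    ControllerNode local = clusterMetadataService.getLocalNode();
    ClusterMetadata metadata = clusterMetadataService.getClusterMetadata();
    log.info("Local node {} is part of cluster {} with {} nodes",
             local.id(), metadata.getName(), metadata.getNodes().size());
}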
1 | +/* | ||
2 | + * Copyright 2015 Open Networking Laboratory | ||
3 | + * | ||
4 | + * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | + * you may not use this file except in compliance with the License. | ||
6 | + * You may obtain a copy of the License at | ||
7 | + * | ||
8 | + * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | + * | ||
10 | + * Unless required by applicable law or agreed to in writing, software | ||
11 | + * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | + * See the License for the specific language governing permissions and | ||
14 | + * limitations under the License. | ||
15 | + */ | ||
16 | +package org.onosproject.cluster; | ||
17 | + | ||
18 | +import java.util.Collection; | ||
19 | + | ||
20 | +import org.onosproject.store.Store; | ||
21 | +import org.onosproject.store.service.Versioned; | ||
22 | + | ||
23 | +/** | ||
24 | + * Manages persistence of cluster metadata; not intended for direct use. | ||
25 | + */ | ||
26 | +public interface ClusterMetadataStore extends Store<ClusterMetadataEvent, ClusterMetadataStoreDelegate> { | ||
27 | + | ||
28 | + /** | ||
29 | + * Returns the cluster metadata. | ||
30 | + * <p> | ||
31 | + * The returned metadata is versioned to aid in determining whether one metadata instance is more recent than another. | ||
32 | + * @return cluster metadata | ||
33 | + */ | ||
34 | + Versioned<ClusterMetadata> getClusterMetadata(); | ||
35 | + | ||
36 | + /** | ||
37 | + * Updates the cluster metadata. | ||
38 | + * @param metadata new metadata value | ||
39 | + */ | ||
40 | + void setClusterMetadata(ClusterMetadata metadata); | ||
41 | + | ||
42 | + // TODO: The below methods should move to a separate store interface that is responsible for | ||
43 | + // tracking cluster partition operational state. | ||
44 | + | ||
45 | + /** | ||
46 | + * Sets a controller node as an active member of a partition. | ||
47 | + * <p> | ||
48 | + * Active members are those replicas that are up to speed with the rest of the system and are | ||
49 | + * usually capable of participating in the replica state management activities in accordance with | ||
50 | + * the data consistency and replication protocol in use. | ||
51 | + * @param partitionId partition identifier | ||
52 | + * @param nodeId id of controller node | ||
53 | + */ | ||
54 | + void setActiveReplica(String partitionId, NodeId nodeId); | ||
55 | + | ||
56 | + /** | ||
57 | + * Removes a controller node as an active member for a partition. | ||
58 | + * <p> | ||
59 | + * Active members are those replicas that are up to speed with the rest of the system and are | ||
60 | + * usually capable of participating in the replica state management activities in accordance with | ||
61 | + * the data consistency and replication protocol in use. | ||
62 | + * @param partitionId partition identifier | ||
63 | + * @param nodeId id of controller node | ||
64 | + */ | ||
65 | + void unsetActiveReplica(String partitionId, NodeId nodeId); | ||
66 | + | ||
67 | + /** | ||
68 | + * Returns the collection of controller nodes that are the active replicas for a partition. | ||
69 | + * <p> | ||
70 | + * Active members are those replicas that are up to speed with the rest of the system and are | ||
71 | + * usually capable of participating in the replica state management activities in accordance with | ||
72 | + * the data consistency and replication protocol in use. | ||
73 | + * @param partitionId partition identifier | ||
74 | + * @return identifiers of controller nodes that are the active replicas | ||
75 | + */ | ||
76 | + Collection<NodeId> getActiveReplicas(String partitionId); | ||
77 | +} | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
1 | +/* | ||
2 | + * Copyright 2015 Open Networking Laboratory | ||
3 | + * | ||
4 | + * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | + * you may not use this file except in compliance with the License. | ||
6 | + * You may obtain a copy of the License at | ||
7 | + * | ||
8 | + * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | + * | ||
10 | + * Unless required by applicable law or agreed to in writing, software | ||
11 | + * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | + * See the License for the specific language governing permissions and | ||
14 | + * limitations under the License. | ||
15 | + */ | ||
16 | +package org.onosproject.cluster; | ||
17 | + | ||
18 | +import org.onosproject.store.StoreDelegate; | ||
19 | + | ||
20 | +/** | ||
21 | + * Cluster metadata store delegate abstraction. | ||
22 | + */ | ||
23 | +public interface ClusterMetadataStoreDelegate extends StoreDelegate<ClusterMetadataEvent> { | ||
24 | +} | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
1 | +/* | ||
2 | + * Copyright 2015 Open Networking Laboratory | ||
3 | + * | ||
4 | + * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | + * you may not use this file except in compliance with the License. | ||
6 | + * You may obtain a copy of the License at | ||
7 | + * | ||
8 | + * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | + * | ||
10 | + * Unless required by applicable law or agreed to in writing, software | ||
11 | + * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | + * See the License for the specific language governing permissions and | ||
14 | + * limitations under the License. | ||
15 | + */ | ||
16 | +package org.onosproject.cluster; | ||
17 | + | ||
18 | +import java.util.Collection; | ||
19 | +import java.util.Set; | ||
20 | + | ||
21 | +import com.google.common.collect.ImmutableSet; | ||
22 | + | ||
23 | +import static com.google.common.base.Preconditions.checkNotNull; | ||
24 | + | ||
25 | +/** | ||
26 | + * A data partition. | ||
27 | + * <p> | ||
28 | + * Partition represents a slice of the data space and is made up of a collection | ||
29 | + * of {@link org.onosproject.cluster.ControllerNode nodes} | ||
30 | + * that all maintain copies of this data. | ||
31 | + */ | ||
32 | +public class Partition { | ||
33 | + | ||
34 | + private final String name; | ||
35 | + private final Set<NodeId> members; | ||
36 | + | ||
37 | + private Partition() { | ||
38 | + name = null; | ||
39 | + members = null; | ||
40 | + } | ||
41 | + | ||
42 | + public Partition(String name, Collection<NodeId> members) { | ||
43 | + this.name = checkNotNull(name); | ||
44 | + this.members = ImmutableSet.copyOf(checkNotNull(members)); | ||
45 | + } | ||
46 | + | ||
47 | + /** | ||
48 | + * Returns the partition name. | ||
49 | + * <p> | ||
50 | + * Each partition is identified by a unique name. | ||
51 | + * @return partition name | ||
52 | + */ | ||
53 | + public String getName() { | ||
54 | + return this.name; | ||
55 | + } | ||
56 | + | ||
57 | + /** | ||
58 | + * Returns the collection of controller node identifiers that make up this partition. | ||
59 | + * @return collection of controller node identifiers | ||
60 | + */ | ||
61 | + public Collection<NodeId> getMembers() { | ||
62 | + return this.members; | ||
63 | + } | ||
64 | +} | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
... | @@ -25,17 +25,26 @@ import org.apache.karaf.system.SystemService; | ... | @@ -25,17 +25,26 @@ import org.apache.karaf.system.SystemService; |
25 | import org.joda.time.DateTime; | 25 | import org.joda.time.DateTime; |
26 | import org.onlab.packet.IpAddress; | 26 | import org.onlab.packet.IpAddress; |
27 | import org.onosproject.cluster.ClusterAdminService; | 27 | import org.onosproject.cluster.ClusterAdminService; |
28 | -import org.onosproject.cluster.ClusterDefinitionService; | ||
29 | import org.onosproject.cluster.ClusterEvent; | 28 | import org.onosproject.cluster.ClusterEvent; |
30 | import org.onosproject.cluster.ClusterEventListener; | 29 | import org.onosproject.cluster.ClusterEventListener; |
30 | +import org.onosproject.cluster.ClusterMetadata; | ||
31 | +import org.onosproject.cluster.ClusterMetadataService; | ||
31 | import org.onosproject.cluster.ClusterService; | 32 | import org.onosproject.cluster.ClusterService; |
32 | import org.onosproject.cluster.ClusterStore; | 33 | import org.onosproject.cluster.ClusterStore; |
33 | import org.onosproject.cluster.ClusterStoreDelegate; | 34 | import org.onosproject.cluster.ClusterStoreDelegate; |
34 | import org.onosproject.cluster.ControllerNode; | 35 | import org.onosproject.cluster.ControllerNode; |
35 | import org.onosproject.cluster.NodeId; | 36 | import org.onosproject.cluster.NodeId; |
37 | +import org.onosproject.cluster.Partition; | ||
36 | import org.onosproject.event.AbstractListenerManager; | 38 | import org.onosproject.event.AbstractListenerManager; |
37 | import org.slf4j.Logger; | 39 | import org.slf4j.Logger; |
38 | 40 | ||
41 | +import com.google.common.collect.Lists; | ||
42 | + | ||
43 | +import java.util.ArrayList; | ||
44 | +import java.util.Collection; | ||
45 | +import java.util.Collections; | ||
46 | +import java.util.HashSet; | ||
47 | +import java.util.List; | ||
39 | import java.util.Set; | 48 | import java.util.Set; |
40 | 49 | ||
41 | import static com.google.common.base.Preconditions.checkArgument; | 50 | import static com.google.common.base.Preconditions.checkArgument; |
... | @@ -44,8 +53,6 @@ import static org.onosproject.security.AppGuard.checkPermission; | ... | @@ -44,8 +53,6 @@ import static org.onosproject.security.AppGuard.checkPermission; |
44 | import static org.slf4j.LoggerFactory.getLogger; | 53 | import static org.slf4j.LoggerFactory.getLogger; |
45 | import static org.onosproject.security.AppPermission.Type.*; | 54 | import static org.onosproject.security.AppPermission.Type.*; |
46 | 55 | ||
47 | - | ||
48 | - | ||
49 | /** | 56 | /** |
50 | * Implementation of the cluster service. | 57 | * Implementation of the cluster service. |
51 | */ | 58 | */ |
... | @@ -61,7 +68,7 @@ public class ClusterManager | ... | @@ -61,7 +68,7 @@ public class ClusterManager |
61 | private ClusterStoreDelegate delegate = new InternalStoreDelegate(); | 68 | private ClusterStoreDelegate delegate = new InternalStoreDelegate(); |
62 | 69 | ||
63 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | 70 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) |
64 | - protected ClusterDefinitionService clusterDefinitionService; | 71 | + protected ClusterMetadataService clusterMetadataService; |
65 | 72 | ||
66 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | 73 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) |
67 | protected ClusterStore store; | 74 | protected ClusterStore store; |
... | @@ -73,8 +80,9 @@ public class ClusterManager | ... | @@ -73,8 +80,9 @@ public class ClusterManager |
73 | public void activate() { | 80 | public void activate() { |
74 | store.setDelegate(delegate); | 81 | store.setDelegate(delegate); |
75 | eventDispatcher.addSink(ClusterEvent.class, listenerRegistry); | 82 | eventDispatcher.addSink(ClusterEvent.class, listenerRegistry); |
76 | - clusterDefinitionService.seedNodes() | 83 | + clusterMetadataService.getClusterMetadata() |
77 | - .forEach(node -> store.addNode(node.id(), node.ip(), node.tcpPort())); | 84 | + .getNodes() |
85 | + .forEach(node -> store.addNode(node.id(), node.ip(), node.tcpPort())); | ||
78 | log.info("Started"); | 86 | log.info("Started"); |
79 | } | 87 | } |
80 | 88 | ||
... | @@ -119,11 +127,16 @@ public class ClusterManager | ... | @@ -119,11 +127,16 @@ public class ClusterManager |
119 | } | 127 | } |
120 | 128 | ||
121 | @Override | 129 | @Override |
122 | - public void formCluster(Set<ControllerNode> nodes, String ipPrefix) { | 130 | + public void formCluster(Set<ControllerNode> nodes) { |
123 | checkNotNull(nodes, "Nodes cannot be null"); | 131 | checkNotNull(nodes, "Nodes cannot be null"); |
124 | checkArgument(!nodes.isEmpty(), "Nodes cannot be empty"); | 132 | checkArgument(!nodes.isEmpty(), "Nodes cannot be empty"); |
125 | - checkNotNull(ipPrefix, "IP prefix cannot be null"); | 133 | + |
126 | - clusterDefinitionService.formCluster(nodes, ipPrefix); | 134 | + ClusterMetadata metadata = ClusterMetadata.builder() |
135 | + .withName("default") | ||
136 | + .withControllerNodes(nodes) | ||
137 | + .withPartitions(buildDefaultPartitions(nodes)) | ||
138 | + .build(); | ||
139 | + clusterMetadataService.setClusterMetadata(metadata); | ||
127 | try { | 140 | try { |
128 | log.warn("Shutting down container for cluster reconfiguration!"); | 141 | log.warn("Shutting down container for cluster reconfiguration!"); |
129 | systemService.reboot("now", SystemService.Swipe.NONE); | 142 | systemService.reboot("now", SystemService.Swipe.NONE); |
... | @@ -153,4 +166,21 @@ public class ClusterManager | ... | @@ -153,4 +166,21 @@ public class ClusterManager |
153 | post(event); | 166 | post(event); |
154 | } | 167 | } |
155 | } | 168 | } |
169 | + | ||
170 | + private static Collection<Partition> buildDefaultPartitions(Collection<ControllerNode> nodes) { | ||
171 | + List<ControllerNode> sorted = new ArrayList<>(nodes); | ||
172 | + Collections.sort(sorted, (o1, o2) -> o1.id().toString().compareTo(o2.id().toString())); | ||
173 | + Collection<Partition> partitions = Lists.newArrayList(); | ||
174 | + | ||
175 | + int length = nodes.size(); | ||
176 | + int count = 3; | ||
177 | + for (int i = 0; i < length; i++) { | ||
178 | + Set<NodeId> set = new HashSet<>(count); | ||
179 | + for (int j = 0; j < count; j++) { | ||
180 | + set.add(sorted.get((i + j) % length).id()); | ||
181 | + } | ||
182 | + partitions.add(new Partition("p" + (i + 1), set)); | ||
183 | + } | ||
184 | + return partitions; | ||
185 | + } | ||
156 | } | 186 | } | ... | ... |
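To illustrate buildDefaultPartitions above: for a hypothetical five-node cluster with ids n1..n5 (sorted lexicographically), each node anchors one partition that is replicated on the next two nodes in the ring, giving:

p1 = {n1, n2, n3}
p2 = {n2, n3, n4}
p3 = {n3, n4, n5}
p4 = {n4, n5, n1}
p5 = {n5, n1, n2}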
1 | +package org.onosproject.cluster.impl; | ||
2 | + | ||
3 | +import static com.google.common.base.Preconditions.checkNotNull; | ||
4 | +import static org.slf4j.LoggerFactory.getLogger; | ||
5 | + | ||
6 | +import java.net.InetAddress; | ||
7 | +import java.net.NetworkInterface; | ||
8 | +import java.net.SocketException; | ||
9 | +import java.util.Collection; | ||
10 | +import java.util.Enumeration; | ||
11 | + | ||
12 | +import org.apache.felix.scr.annotations.Activate; | ||
13 | +import org.apache.felix.scr.annotations.Component; | ||
14 | +import org.apache.felix.scr.annotations.Deactivate; | ||
15 | +import org.apache.felix.scr.annotations.Reference; | ||
16 | +import org.apache.felix.scr.annotations.ReferenceCardinality; | ||
17 | +import org.apache.felix.scr.annotations.Service; | ||
18 | +import org.onlab.packet.IpAddress; | ||
19 | +import org.onosproject.cluster.ClusterMetadata; | ||
20 | +import org.onosproject.cluster.ClusterMetadataEvent; | ||
21 | +import org.onosproject.cluster.ClusterMetadataEventListener; | ||
22 | +import org.onosproject.cluster.ClusterMetadataService; | ||
23 | +import org.onosproject.cluster.ClusterMetadataStore; | ||
24 | +import org.onosproject.cluster.ClusterMetadataStoreDelegate; | ||
25 | +import org.onosproject.cluster.ControllerNode; | ||
26 | +import org.onosproject.event.AbstractListenerManager; | ||
27 | +import org.onosproject.store.service.Versioned; | ||
28 | +import org.slf4j.Logger; | ||
29 | + | ||
30 | +/** | ||
31 | + * Implementation of ClusterMetadataService. | ||
32 | + */ | ||
33 | +@Component(immediate = true) | ||
34 | +@Service | ||
35 | +public class ClusterMetadataManager | ||
36 | + extends AbstractListenerManager<ClusterMetadataEvent, ClusterMetadataEventListener> | ||
37 | + implements ClusterMetadataService { | ||
38 | + | ||
39 | + private ControllerNode localNode; | ||
40 | + private final Logger log = getLogger(getClass()); | ||
41 | + | ||
42 | + private ClusterMetadataStoreDelegate delegate = new InternalStoreDelegate(); | ||
43 | + | ||
44 | + @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
45 | + protected ClusterMetadataStore store; | ||
46 | + | ||
47 | + @Activate | ||
48 | + public void activate() { | ||
49 | + store.setDelegate(delegate); | ||
50 | + eventDispatcher.addSink(ClusterMetadataEvent.class, listenerRegistry); | ||
51 | + establishSelfIdentity(); | ||
52 | + log.info("Started"); | ||
53 | + } | ||
54 | + | ||
55 | + @Deactivate | ||
56 | + public void deactivate() { | ||
57 | + store.unsetDelegate(delegate); | ||
58 | + eventDispatcher.removeSink(ClusterMetadataEvent.class); | ||
59 | + log.info("Stopped"); | ||
60 | + } | ||
61 | + | ||
62 | + @Override | ||
63 | + public ClusterMetadata getClusterMetadata() { | ||
64 | + return Versioned.valueOrElse(store.getClusterMetadata(), null); | ||
65 | + } | ||
66 | + | ||
67 | + @Override | ||
68 | + public ControllerNode getLocalNode() { | ||
69 | + return localNode; | ||
70 | + } | ||
71 | + | ||
72 | + @Override | ||
73 | + public void setClusterMetadata(ClusterMetadata metadata) { | ||
74 | + checkNotNull(metadata, "Cluster metadata cannot be null"); | ||
75 | + store.setClusterMetadata(metadata); | ||
76 | + } | ||
77 | + | ||
78 | + // Store delegate to re-post events emitted from the store. | ||
79 | + private class InternalStoreDelegate implements ClusterMetadataStoreDelegate { | ||
80 | + @Override | ||
81 | + public void notify(ClusterMetadataEvent event) { | ||
82 | + post(event); | ||
83 | + } | ||
84 | + } | ||
85 | + | ||
86 | + private IpAddress findLocalIp(Collection<ControllerNode> controllerNodes) throws SocketException { | ||
87 | + Enumeration<NetworkInterface> interfaces = | ||
88 | + NetworkInterface.getNetworkInterfaces(); | ||
89 | + while (interfaces.hasMoreElements()) { | ||
90 | + NetworkInterface iface = interfaces.nextElement(); | ||
91 | + Enumeration<InetAddress> inetAddresses = iface.getInetAddresses(); | ||
92 | + while (inetAddresses.hasMoreElements()) { | ||
93 | + IpAddress ip = IpAddress.valueOf(inetAddresses.nextElement()); | ||
94 | + if (controllerNodes.stream() | ||
95 | + .map(ControllerNode::ip) | ||
96 | + .anyMatch(nodeIp -> ip.equals(nodeIp))) { | ||
97 | + return ip; | ||
98 | + } | ||
99 | + } | ||
100 | + } | ||
101 | + throw new IllegalStateException("Unable to determine local ip"); | ||
102 | + } | ||
103 | + | ||
104 | + private void establishSelfIdentity() { | ||
105 | + try { | ||
106 | + IpAddress ip = findLocalIp(getClusterMetadata().getNodes()); | ||
107 | + localNode = getClusterMetadata().getNodes() | ||
108 | + .stream() | ||
109 | + .filter(node -> node.ip().equals(ip)) | ||
110 | + .findFirst() | ||
111 | + .get(); | ||
112 | + } catch (SocketException e) { | ||
113 | + throw new IllegalStateException("Cannot determine local IP", e); | ||
114 | + } | ||
115 | + } | ||
116 | +} | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
1 | -/* | ||
2 | - * Copyright 2015 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.cluster.impl; | ||
17 | - | ||
18 | -import com.google.common.collect.ImmutableSet; | ||
19 | -import com.google.common.collect.Sets; | ||
20 | -import org.apache.felix.scr.annotations.Activate; | ||
21 | -import org.apache.felix.scr.annotations.Component; | ||
22 | -import org.apache.felix.scr.annotations.Deactivate; | ||
23 | -import org.apache.felix.scr.annotations.Service; | ||
24 | -import org.onlab.packet.IpAddress; | ||
25 | -import org.onosproject.cluster.ClusterDefinitionService; | ||
26 | -import org.onosproject.cluster.ControllerNode; | ||
27 | -import org.onosproject.cluster.DefaultControllerNode; | ||
28 | -import org.onosproject.cluster.NodeId; | ||
29 | -import org.onosproject.store.consistent.impl.DatabaseDefinition; | ||
30 | -import org.onosproject.store.consistent.impl.DatabaseDefinitionStore; | ||
31 | -import org.slf4j.Logger; | ||
32 | - | ||
33 | -import java.io.File; | ||
34 | -import java.io.IOException; | ||
35 | -import java.net.InetAddress; | ||
36 | -import java.net.NetworkInterface; | ||
37 | -import java.net.SocketException; | ||
38 | -import java.util.Enumeration; | ||
39 | -import java.util.Set; | ||
40 | -import java.util.stream.Collectors; | ||
41 | - | ||
42 | -import static java.net.NetworkInterface.getNetworkInterfaces; | ||
43 | -import static java.util.Collections.list; | ||
44 | -import static org.onosproject.cluster.DefaultControllerNode.DEFAULT_PORT; | ||
45 | -import static org.onosproject.store.consistent.impl.DatabaseManager.PARTITION_DEFINITION_FILE; | ||
46 | -import static org.slf4j.LoggerFactory.getLogger; | ||
47 | - | ||
48 | -/** | ||
49 | - * Implementation of ClusterDefinitionService. | ||
50 | - */ | ||
51 | -@Component(immediate = true) | ||
52 | -@Service | ||
53 | -public class ClusterDefinitionManager implements ClusterDefinitionService { | ||
54 | - | ||
55 | - public static final String CLUSTER_DEFINITION_FILE = "../config/cluster.json"; | ||
56 | - private static final String ONOS_NIC = "ONOS_NIC"; | ||
57 | - private static final Logger log = getLogger(ClusterDefinitionManager.class); | ||
58 | - private ControllerNode localNode; | ||
59 | - private Set<ControllerNode> seedNodes; | ||
60 | - | ||
61 | - @Activate | ||
62 | - public void activate() { | ||
63 | - File clusterDefinitionFile = new File(CLUSTER_DEFINITION_FILE); | ||
64 | - ClusterDefinitionStore clusterDefinitionStore = | ||
65 | - new ClusterDefinitionStore(clusterDefinitionFile.getPath()); | ||
66 | - | ||
67 | - if (!clusterDefinitionFile.exists()) { | ||
68 | - createDefaultClusterDefinition(clusterDefinitionStore); | ||
69 | - } | ||
70 | - | ||
71 | - try { | ||
72 | - ClusterDefinition clusterDefinition = clusterDefinitionStore.read(); | ||
73 | - establishSelfIdentity(clusterDefinition); | ||
74 | - seedNodes = ImmutableSet | ||
75 | - .copyOf(clusterDefinition.getNodes()) | ||
76 | - .stream() | ||
77 | - .filter(n -> !localNode.id().equals(new NodeId(n.getId()))) | ||
78 | - .map(n -> new DefaultControllerNode(new NodeId(n.getId()), | ||
79 | - IpAddress.valueOf(n.getIp()), | ||
80 | - n.getTcpPort())) | ||
81 | - .collect(Collectors.toSet()); | ||
82 | - } catch (IOException e) { | ||
83 | - throw new IllegalStateException("Failed to read cluster definition.", e); | ||
84 | - } | ||
85 | - | ||
86 | - log.info("Started"); | ||
87 | - } | ||
88 | - | ||
89 | - @Deactivate | ||
90 | - public void deactivate() { | ||
91 | - log.info("Stopped"); | ||
92 | - } | ||
93 | - | ||
94 | - @Override | ||
95 | - public ControllerNode localNode() { | ||
96 | - return localNode; | ||
97 | - } | ||
98 | - | ||
99 | - @Override | ||
100 | - public Set<ControllerNode> seedNodes() { | ||
101 | - return seedNodes; | ||
102 | - } | ||
103 | - | ||
104 | - @Override | ||
105 | - public void formCluster(Set<ControllerNode> nodes, String ipPrefix) { | ||
106 | - try { | ||
107 | - Set<NodeInfo> infos = Sets.newHashSet(); | ||
108 | - nodes.forEach(n -> infos.add(NodeInfo.from(n.id().toString(), | ||
109 | - n.ip().toString(), | ||
110 | - n.tcpPort()))); | ||
111 | - | ||
112 | - ClusterDefinition cdef = ClusterDefinition.from(infos, ipPrefix); | ||
113 | - new ClusterDefinitionStore(CLUSTER_DEFINITION_FILE).write(cdef); | ||
114 | - | ||
115 | - DatabaseDefinition ddef = DatabaseDefinition.from(infos); | ||
116 | - new DatabaseDefinitionStore(PARTITION_DEFINITION_FILE).write(ddef); | ||
117 | - } catch (IOException e) { | ||
118 | - log.error("Unable to form cluster", e); | ||
119 | - } | ||
120 | - } | ||
121 | - | ||
122 | - private IpAddress findLocalIp(ClusterDefinition clusterDefinition) throws SocketException { | ||
123 | - Enumeration<NetworkInterface> interfaces = | ||
124 | - NetworkInterface.getNetworkInterfaces(); | ||
125 | - while (interfaces.hasMoreElements()) { | ||
126 | - NetworkInterface iface = interfaces.nextElement(); | ||
127 | - Enumeration<InetAddress> inetAddresses = iface.getInetAddresses(); | ||
128 | - while (inetAddresses.hasMoreElements()) { | ||
129 | - IpAddress ip = IpAddress.valueOf(inetAddresses.nextElement()); | ||
130 | - if (clusterDefinition.getNodes().stream() | ||
131 | - .map(NodeInfo::getIp) | ||
132 | - .map(IpAddress::valueOf) | ||
133 | - .anyMatch(nodeIp -> ip.equals(nodeIp))) { | ||
134 | - return ip; | ||
135 | - } | ||
136 | - } | ||
137 | - } | ||
138 | - throw new IllegalStateException("Unable to determine local ip"); | ||
139 | - } | ||
140 | - | ||
141 | - private void establishSelfIdentity(ClusterDefinition clusterDefinition) { | ||
142 | - try { | ||
143 | - IpAddress ip = findLocalIp(clusterDefinition); | ||
144 | - localNode = new DefaultControllerNode(new NodeId(ip.toString()), ip); | ||
145 | - } catch (SocketException e) { | ||
146 | - throw new IllegalStateException("Cannot determine local IP", e); | ||
147 | - } | ||
148 | - } | ||
149 | - | ||
150 | - private void createDefaultClusterDefinition(ClusterDefinitionStore store) { | ||
151 | - // Assumes IPv4 is returned. | ||
152 | - String ip = getSiteLocalAddress(); | ||
153 | - String ipPrefix = ip.replaceFirst("\\.[0-9]*$", ".*"); | ||
154 | - NodeInfo node = NodeInfo.from(ip, ip, DEFAULT_PORT); | ||
155 | - try { | ||
156 | - store.write(ClusterDefinition.from(ImmutableSet.of(node), ipPrefix)); | ||
157 | - } catch (IOException e) { | ||
158 | - log.warn("Unable to write default cluster definition", e); | ||
159 | - } | ||
160 | - } | ||
161 | - | ||
162 | - /** | ||
163 | - * Returns the address that matches the IP prefix given in ONOS_NIC | ||
164 | - * environment variable if one was specified, or the first site local | ||
165 | - * address if one can be found or the loopback address otherwise. | ||
166 | - * | ||
167 | - * @return site-local address in string form | ||
168 | - */ | ||
169 | - public static String getSiteLocalAddress() { | ||
170 | - try { | ||
171 | - String ipPrefix = System.getenv(ONOS_NIC); | ||
172 | - for (NetworkInterface nif : list(getNetworkInterfaces())) { | ||
173 | - for (InetAddress address : list(nif.getInetAddresses())) { | ||
174 | - IpAddress ip = IpAddress.valueOf(address); | ||
175 | - if (ipPrefix == null && address.isSiteLocalAddress() || | ||
176 | - ipPrefix != null && matchInterface(ip.toString(), ipPrefix)) { | ||
177 | - return ip.toString(); | ||
178 | - } | ||
179 | - } | ||
180 | - } | ||
181 | - } catch (SocketException e) { | ||
182 | - log.error("Unable to get network interfaces", e); | ||
183 | - } | ||
184 | - | ||
185 | - return IpAddress.valueOf(InetAddress.getLoopbackAddress()).toString(); | ||
186 | - } | ||
187 | - | ||
188 | - // Indicates whether the specified interface address matches the given prefix. | ||
189 | - // FIXME: Add a facility to IpPrefix to make this more robust | ||
190 | - private static boolean matchInterface(String ip, String ipPrefix) { | ||
191 | - String s = ipPrefix.replaceAll("\\.\\*", ""); | ||
192 | - return ip.startsWith(s); | ||
193 | - } | ||
194 | -} |
core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinitionStore.java deleted 100644 → 0
1 | -/* | ||
2 | - * Copyright 2014-2015 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.cluster.impl; | ||
17 | - | ||
18 | -import static com.google.common.base.Preconditions.checkNotNull; | ||
19 | - | ||
20 | -import com.fasterxml.jackson.databind.ObjectMapper; | ||
21 | -import com.google.common.io.Files; | ||
22 | - | ||
23 | -import java.io.File; | ||
24 | -import java.io.IOException; | ||
25 | - | ||
26 | -/** | ||
27 | - * Allows for reading and writing cluster definition as a JSON file. | ||
28 | - */ | ||
29 | -public class ClusterDefinitionStore { | ||
30 | - | ||
31 | - private final File file; | ||
32 | - | ||
33 | - /** | ||
34 | - * Creates a reader/writer of the cluster definition file. | ||
35 | - * @param filePath location of the definition file | ||
36 | - */ | ||
37 | - public ClusterDefinitionStore(String filePath) { | ||
38 | - file = new File(filePath); | ||
39 | - } | ||
40 | - | ||
41 | - /** | ||
42 | - * Returns the cluster definition. | ||
43 | - * @return cluster definition | ||
44 | - * @throws IOException when I/O exception of some sort has occurred | ||
45 | - */ | ||
46 | - public ClusterDefinition read() throws IOException { | ||
47 | - ObjectMapper mapper = new ObjectMapper(); | ||
48 | - return mapper.readValue(file, ClusterDefinition.class); | ||
49 | - } | ||
50 | - | ||
51 | - /** | ||
52 | - * Writes the specified cluster definition to file. | ||
53 | - * @param definition cluster definition | ||
54 | - * @throws IOException when I/O exception of some sort has occurred | ||
55 | - */ | ||
56 | - public void write(ClusterDefinition definition) throws IOException { | ||
57 | - checkNotNull(definition); | ||
58 | - // write back to file | ||
59 | - Files.createParentDirs(file); | ||
60 | - ObjectMapper mapper = new ObjectMapper(); | ||
61 | - mapper.writeValue(file, definition); | ||
62 | - } | ||
63 | -} |
... | @@ -27,8 +27,8 @@ import org.apache.felix.scr.annotations.Service; | ... | @@ -27,8 +27,8 @@ import org.apache.felix.scr.annotations.Service; |
27 | import org.joda.time.DateTime; | 27 | import org.joda.time.DateTime; |
28 | import org.onlab.packet.IpAddress; | 28 | import org.onlab.packet.IpAddress; |
29 | import org.onlab.util.KryoNamespace; | 29 | import org.onlab.util.KryoNamespace; |
30 | -import org.onosproject.cluster.ClusterDefinitionService; | ||
31 | import org.onosproject.cluster.ClusterEvent; | 30 | import org.onosproject.cluster.ClusterEvent; |
31 | +import org.onosproject.cluster.ClusterMetadataService; | ||
32 | import org.onosproject.cluster.ClusterStore; | 32 | import org.onosproject.cluster.ClusterStore; |
33 | import org.onosproject.cluster.ClusterStoreDelegate; | 33 | import org.onosproject.cluster.ClusterStoreDelegate; |
34 | import org.onosproject.cluster.ControllerNode; | 34 | import org.onosproject.cluster.ControllerNode; |
... | @@ -99,14 +99,14 @@ public class DistributedClusterStore | ... | @@ -99,14 +99,14 @@ public class DistributedClusterStore |
99 | private ControllerNode localNode; | 99 | private ControllerNode localNode; |
100 | 100 | ||
101 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | 101 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) |
102 | - protected ClusterDefinitionService clusterDefinitionService; | 102 | + protected ClusterMetadataService clusterMetadataService; |
103 | 103 | ||
104 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | 104 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) |
105 | protected MessagingService messagingService; | 105 | protected MessagingService messagingService; |
106 | 106 | ||
107 | @Activate | 107 | @Activate |
108 | public void activate() { | 108 | public void activate() { |
109 | - localNode = clusterDefinitionService.localNode(); | 109 | + localNode = clusterMetadataService.getLocalNode(); |
110 | 110 | ||
111 | messagingService.registerHandler(HEARTBEAT_MESSAGE, | 111 | messagingService.registerHandler(HEARTBEAT_MESSAGE, |
112 | new HeartbeatMessageHandler(), heartBeatMessageHandler); | 112 | new HeartbeatMessageHandler(), heartBeatMessageHandler); |
... | @@ -116,9 +116,6 @@ public class DistributedClusterStore | ... | @@ -116,9 +116,6 @@ public class DistributedClusterStore |
116 | heartBeatSender.scheduleWithFixedDelay(this::heartbeat, 0, | 116 | heartBeatSender.scheduleWithFixedDelay(this::heartbeat, 0, |
117 | HEARTBEAT_INTERVAL_MS, TimeUnit.MILLISECONDS); | 117 | HEARTBEAT_INTERVAL_MS, TimeUnit.MILLISECONDS); |
118 | 118 | ||
119 | - addNode(localNode); | ||
120 | - updateState(localNode.id(), State.ACTIVE); | ||
121 | - | ||
122 | log.info("Started"); | 119 | log.info("Started"); |
123 | } | 120 | } |
124 | 121 | ||
... | @@ -188,7 +185,7 @@ public class DistributedClusterStore | ... | @@ -188,7 +185,7 @@ public class DistributedClusterStore |
188 | 185 | ||
189 | private void addNode(ControllerNode node) { | 186 | private void addNode(ControllerNode node) { |
190 | allNodes.put(node.id(), node); | 187 | allNodes.put(node.id(), node); |
191 | - updateState(node.id(), State.INACTIVE); | 188 | + updateState(node.id(), node.equals(localNode) ? State.ACTIVE : State.INACTIVE); |
192 | notifyDelegate(new ClusterEvent(ClusterEvent.Type.INSTANCE_ADDED, node)); | 189 | notifyDelegate(new ClusterEvent(ClusterEvent.Type.INSTANCE_ADDED, node)); |
193 | } | 190 | } |
194 | 191 | ... | ... |
core/store/dist/src/main/java/org/onosproject/store/cluster/impl/StaticClusterMetadataStore.java 0 → 100644
1 | +package org.onosproject.store.cluster.impl; | ||
2 | + | ||
3 | +import static com.google.common.base.Preconditions.checkNotNull; | ||
4 | +import static java.net.NetworkInterface.getNetworkInterfaces; | ||
5 | +import static org.slf4j.LoggerFactory.getLogger; | ||
6 | + | ||
7 | +import java.io.File; | ||
8 | +import java.io.IOException; | ||
9 | +import java.net.InetAddress; | ||
10 | +import java.net.NetworkInterface; | ||
11 | +import java.util.Arrays; | ||
12 | +import java.util.Collection; | ||
13 | +import java.util.Collections; | ||
14 | +import java.util.concurrent.atomic.AtomicReference; | ||
15 | +import java.util.function.Function; | ||
16 | + | ||
17 | +import org.apache.felix.scr.annotations.Activate; | ||
18 | +import org.apache.felix.scr.annotations.Component; | ||
19 | +import org.apache.felix.scr.annotations.Deactivate; | ||
20 | +import org.apache.felix.scr.annotations.Service; | ||
21 | +import org.onlab.packet.IpAddress; | ||
22 | +import org.onosproject.cluster.ClusterMetadata; | ||
23 | +import org.onosproject.cluster.ClusterMetadataEvent; | ||
24 | +import org.onosproject.cluster.ClusterMetadataStore; | ||
25 | +import org.onosproject.cluster.ClusterMetadataStoreDelegate; | ||
26 | +import org.onosproject.cluster.ControllerNode; | ||
27 | +import org.onosproject.cluster.DefaultControllerNode; | ||
28 | +import org.onosproject.cluster.NodeId; | ||
29 | +import org.onosproject.cluster.Partition; | ||
30 | +import org.onosproject.store.AbstractStore; | ||
31 | +import org.onosproject.store.service.Versioned; | ||
32 | +import org.slf4j.Logger; | ||
33 | + | ||
34 | +import com.fasterxml.jackson.core.JsonGenerator; | ||
35 | +import com.fasterxml.jackson.core.JsonParser; | ||
36 | +import com.fasterxml.jackson.core.JsonProcessingException; | ||
37 | +import com.fasterxml.jackson.databind.DeserializationContext; | ||
38 | +import com.fasterxml.jackson.databind.JsonDeserializer; | ||
39 | +import com.fasterxml.jackson.databind.JsonNode; | ||
40 | +import com.fasterxml.jackson.databind.JsonSerializer; | ||
41 | +import com.fasterxml.jackson.databind.ObjectMapper; | ||
42 | +import com.fasterxml.jackson.databind.SerializerProvider; | ||
43 | +import com.fasterxml.jackson.databind.module.SimpleModule; | ||
44 | +import com.google.common.base.Throwables; | ||
45 | +import com.google.common.collect.Lists; | ||
46 | +import com.google.common.io.Files; | ||
47 | + | ||
48 | +/** | ||
49 | + * ClusterMetadataStore backed by a local file. | ||
50 | + */ | ||
51 | +@Component(immediate = true, enabled = true) | ||
52 | +@Service | ||
53 | +public class StaticClusterMetadataStore | ||
54 | + extends AbstractStore<ClusterMetadataEvent, ClusterMetadataStoreDelegate> | ||
55 | + implements ClusterMetadataStore { | ||
56 | + | ||
57 | + private final Logger log = getLogger(getClass()); | ||
58 | + private static final String CLUSTER_METADATA_FILE = "../config/cluster.json"; | ||
59 | + private static final int DEFAULT_ONOS_PORT = 9876; | ||
60 | + private final File metadataFile = new File(CLUSTER_METADATA_FILE); | ||
61 | + private AtomicReference<ClusterMetadata> metadata = new AtomicReference<>(); | ||
62 | + private ObjectMapper mapper; | ||
63 | + private long version; | ||
64 | + | ||
65 | + @Activate | ||
66 | + public void activate() { | ||
67 | + mapper = new ObjectMapper(); | ||
68 | + SimpleModule module = new SimpleModule(); | ||
69 | + module.addSerializer(NodeId.class, new NodeIdSerializer()); | ||
70 | + module.addDeserializer(NodeId.class, new NodeIdDeserializer()); | ||
71 | + module.addSerializer(ControllerNode.class, new ControllerNodeSerializer()); | ||
72 | + module.addDeserializer(ControllerNode.class, new ControllerNodeDeserializer()); | ||
73 | + mapper.registerModule(module); | ||
74 | + File metadataFile = new File(CLUSTER_METADATA_FILE); | ||
75 | + if (metadataFile.exists()) { | ||
76 | + try { | ||
77 | + metadata.set(mapper.readValue(metadataFile, ClusterMetadata.class)); | ||
78 | + version = metadataFile.lastModified(); | ||
79 | + } catch (IOException e) { | ||
80 | + Throwables.propagate(e); | ||
81 | + } | ||
82 | + } else { | ||
83 | + String localIp = getSiteLocalAddress(); | ||
84 | + ControllerNode localNode = | ||
85 | + new DefaultControllerNode(new NodeId(localIp), IpAddress.valueOf(localIp), DEFAULT_ONOS_PORT); | ||
86 | + metadata.set(ClusterMetadata.builder() | ||
87 | + .withName("default") | ||
88 | + .withControllerNodes(Arrays.asList(localNode)) | ||
89 | + .withPartitions(Lists.newArrayList(new Partition("p1", Lists.newArrayList(localNode.id())))) | ||
90 | + .build()); | ||
91 | + version = System.currentTimeMillis(); | ||
92 | + } | ||
93 | + log.info("Started"); | ||
94 | + } | ||
95 | + | ||
96 | + @Deactivate | ||
97 | + public void deactivate() { | ||
98 | + log.info("Stopped"); | ||
99 | + } | ||
100 | + | ||
101 | + @Override | ||
102 | + public void setDelegate(ClusterMetadataStoreDelegate delegate) { | ||
103 | + checkNotNull(delegate, "Delegate cannot be null"); | ||
104 | + this.delegate = delegate; | ||
105 | + } | ||
106 | + | ||
107 | + @Override | ||
108 | + public void unsetDelegate(ClusterMetadataStoreDelegate delegate) { | ||
109 | + this.delegate = null; | ||
110 | + } | ||
111 | + | ||
112 | + @Override | ||
113 | + public boolean hasDelegate() { | ||
114 | + return this.delegate != null; | ||
115 | + } | ||
116 | + | ||
117 | + @Override | ||
118 | + public Versioned<ClusterMetadata> getClusterMetadata() { | ||
119 | + return new Versioned<>(metadata.get(), version); | ||
120 | + } | ||
121 | + | ||
122 | + @Override | ||
123 | + public void setClusterMetadata(ClusterMetadata metadata) { | ||
124 | + checkNotNull(metadata); | ||
125 | + try { | ||
126 | + Files.createParentDirs(metadataFile); | ||
127 | + mapper.writeValue(metadataFile, metadata); | ||
128 | + this.metadata.set(metadata); | ||
129 | + } catch (IOException e) { | ||
130 | + Throwables.propagate(e); | ||
131 | + } | ||
132 | + } | ||
133 | + | ||
134 | + @Override | ||
135 | + public void setActiveReplica(String partitionId, NodeId nodeId) { | ||
136 | + throw new UnsupportedOperationException(); | ||
137 | + } | ||
138 | + | ||
139 | + @Override | ||
140 | + public void unsetActiveReplica(String partitionId, NodeId nodeId) { | ||
141 | + throw new UnsupportedOperationException(); | ||
142 | + } | ||
143 | + | ||
144 | + @Override | ||
145 | + public Collection<NodeId> getActiveReplicas(String partitionId) { | ||
146 | + return metadata.get().getPartitions() | ||
147 | + .stream() | ||
148 | + .filter(r -> r.getName().equals(partitionId)) | ||
149 | + .findFirst() | ||
150 | + .map(r -> r.getMembers()) | ||
151 | + .orElse(null); | ||
152 | + } | ||
153 | + | ||
154 | + private static class ControllerNodeSerializer extends JsonSerializer<ControllerNode> { | ||
155 | + @Override | ||
156 | + public void serialize(ControllerNode node, JsonGenerator jgen, SerializerProvider provider) | ||
157 | + throws IOException, JsonProcessingException { | ||
158 | + jgen.writeStartObject(); | ||
159 | + jgen.writeStringField("id", node.id().toString()); | ||
160 | + jgen.writeStringField("ip", node.ip().toString()); | ||
161 | + jgen.writeNumberField("port", node.tcpPort()); | ||
162 | + jgen.writeEndObject(); | ||
163 | + } | ||
164 | + } | ||
165 | + | ||
166 | + private static class ControllerNodeDeserializer extends JsonDeserializer<ControllerNode> { | ||
167 | + @Override | ||
168 | + public ControllerNode deserialize(JsonParser jp, DeserializationContext ctxt) | ||
169 | + throws IOException, JsonProcessingException { | ||
170 | + JsonNode node = jp.getCodec().readTree(jp); | ||
171 | + NodeId nodeId = new NodeId(node.get("id").textValue()); | ||
172 | + IpAddress ip = IpAddress.valueOf(node.get("ip").textValue()); | ||
173 | + int port = node.get("port").asInt(); | ||
174 | + return new DefaultControllerNode(nodeId, ip, port); | ||
175 | + } | ||
176 | + } | ||
177 | + | ||
178 | + private static class NodeIdSerializer extends JsonSerializer<NodeId> { | ||
179 | + @Override | ||
180 | + public void serialize(NodeId nodeId, JsonGenerator jgen, SerializerProvider provider) | ||
181 | + throws IOException, JsonProcessingException { | ||
182 | + jgen.writeString(nodeId.toString()); | ||
183 | + } | ||
184 | + } | ||
185 | + | ||
186 | + private class NodeIdDeserializer extends JsonDeserializer<NodeId> { | ||
187 | + @Override | ||
188 | + public NodeId deserialize(JsonParser jp, DeserializationContext ctxt) | ||
189 | + throws IOException, JsonProcessingException { | ||
190 | + JsonNode node = jp.getCodec().readTree(jp); | ||
191 | + return new NodeId(node.asText()); | ||
192 | + } | ||
193 | + } | ||
194 | + | ||
195 | + | ||
196 | + private static String getSiteLocalAddress() { | ||
197 | + Function<NetworkInterface, IpAddress> ipLookup = nif -> { | ||
198 | + for (InetAddress address : Collections.list(nif.getInetAddresses())) { | ||
199 | + if (address.isSiteLocalAddress()) { | ||
200 | + return IpAddress.valueOf(address); | ||
201 | + } | ||
202 | + } | ||
203 | + return null; | ||
204 | + }; | ||
205 | + try { | ||
206 | + IpAddress ip = ipLookup.apply(NetworkInterface.getByName("eth0")); | ||
207 | + if (ip != null) { | ||
208 | + return ip.toString(); | ||
209 | + } | ||
210 | + for (NetworkInterface nif : Collections.list(getNetworkInterfaces())) { | ||
211 | + ip = ipLookup.apply(nif); | ||
212 | + if (ip != null) { | ||
213 | + return ip.toString(); | ||
214 | + } | ||
215 | + } | ||
216 | + } catch (Exception e) { | ||
217 | + throw new IllegalStateException("Unable to get network interfaces", e); | ||
218 | + } | ||
219 | + return IpAddress.valueOf(InetAddress.getLoopbackAddress()).toString(); | ||
220 | + } | ||
221 | +} | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
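Note on the new static store: metadata is written back to the file with Jackson, and the local address is guessed by preferring a site-local address on eth0, then any site-local address on another interface, and finally loopback. A minimal standalone sketch of that fallback, using only java.net (the store itself wraps the result in onlab's IpAddress, and the class/file names here are illustrative only):

import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.Collections;

public class SiteLocalAddressDemo {

    // Standalone version of the fallback above: site-local address on eth0 first,
    // then any site-local address, else loopback. Unlike the store, this guards
    // against eth0 being absent instead of relying on the catch block.
    static String siteLocalAddress() throws SocketException {
        NetworkInterface eth0 = NetworkInterface.getByName("eth0");
        if (eth0 != null) {
            for (InetAddress address : Collections.list(eth0.getInetAddresses())) {
                if (address.isSiteLocalAddress()) {
                    return address.getHostAddress();
                }
            }
        }
        for (NetworkInterface nif : Collections.list(NetworkInterface.getNetworkInterfaces())) {
            for (InetAddress address : Collections.list(nif.getInetAddresses())) {
                if (address.isSiteLocalAddress()) {
                    return address.getHostAddress();
                }
            }
        }
        return InetAddress.getLoopbackAddress().getHostAddress();
    }

    public static void main(String[] args) throws SocketException {
        System.out.println(siteLocalAddress());
    }
}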
... | @@ -22,7 +22,7 @@ import org.apache.felix.scr.annotations.Reference; | ... | @@ -22,7 +22,7 @@ import org.apache.felix.scr.annotations.Reference; |
22 | import org.apache.felix.scr.annotations.ReferenceCardinality; | 22 | import org.apache.felix.scr.annotations.ReferenceCardinality; |
23 | import org.apache.felix.scr.annotations.Service; | 23 | import org.apache.felix.scr.annotations.Service; |
24 | import org.onlab.nio.service.IOLoopMessaging; | 24 | import org.onlab.nio.service.IOLoopMessaging; |
25 | -import org.onosproject.cluster.ClusterDefinitionService; | 25 | +import org.onosproject.cluster.ClusterMetadataService; |
26 | import org.onosproject.cluster.ControllerNode; | 26 | import org.onosproject.cluster.ControllerNode; |
27 | import org.onosproject.store.cluster.messaging.Endpoint; | 27 | import org.onosproject.store.cluster.messaging.Endpoint; |
28 | import org.slf4j.Logger; | 28 | import org.slf4j.Logger; |
... | @@ -38,11 +38,11 @@ public class IOLoopMessagingManager extends IOLoopMessaging { | ... | @@ -38,11 +38,11 @@ public class IOLoopMessagingManager extends IOLoopMessaging { |
38 | private final Logger log = LoggerFactory.getLogger(getClass()); | 38 | private final Logger log = LoggerFactory.getLogger(getClass()); |
39 | 39 | ||
40 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | 40 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) |
41 | - protected ClusterDefinitionService clusterDefinitionService; | 41 | + protected ClusterMetadataService clusterMetadataService; |
42 | 42 | ||
43 | @Activate | 43 | @Activate |
44 | public void activate() throws Exception { | 44 | public void activate() throws Exception { |
45 | - ControllerNode localNode = clusterDefinitionService.localNode(); | 45 | + ControllerNode localNode = clusterMetadataService.getLocalNode(); |
46 | super.start(new Endpoint(localNode.ip(), localNode.tcpPort())); | 46 | super.start(new Endpoint(localNode.ip(), localNode.tcpPort())); |
47 | log.info("Started"); | 47 | log.info("Started"); |
48 | } | 48 | } | ... | ... |
... | @@ -16,6 +16,7 @@ | ... | @@ -16,6 +16,7 @@ |
16 | package org.onosproject.store.cluster.messaging.impl; | 16 | package org.onosproject.store.cluster.messaging.impl; |
17 | 17 | ||
18 | import com.google.common.base.Strings; | 18 | import com.google.common.base.Strings; |
19 | + | ||
19 | import org.apache.felix.scr.annotations.Activate; | 20 | import org.apache.felix.scr.annotations.Activate; |
20 | import org.apache.felix.scr.annotations.Component; | 21 | import org.apache.felix.scr.annotations.Component; |
21 | import org.apache.felix.scr.annotations.Deactivate; | 22 | import org.apache.felix.scr.annotations.Deactivate; |
... | @@ -23,7 +24,7 @@ import org.apache.felix.scr.annotations.Reference; | ... | @@ -23,7 +24,7 @@ import org.apache.felix.scr.annotations.Reference; |
23 | import org.apache.felix.scr.annotations.ReferenceCardinality; | 24 | import org.apache.felix.scr.annotations.ReferenceCardinality; |
24 | import org.apache.felix.scr.annotations.Service; | 25 | import org.apache.felix.scr.annotations.Service; |
25 | import org.onlab.netty.NettyMessaging; | 26 | import org.onlab.netty.NettyMessaging; |
26 | -import org.onosproject.cluster.ClusterDefinitionService; | 27 | +import org.onosproject.cluster.ClusterMetadataService; |
27 | import org.onosproject.cluster.ControllerNode; | 28 | import org.onosproject.cluster.ControllerNode; |
28 | import org.onosproject.store.cluster.messaging.Endpoint; | 29 | import org.onosproject.store.cluster.messaging.Endpoint; |
29 | import org.slf4j.Logger; | 30 | import org.slf4j.Logger; |
... | @@ -41,11 +42,11 @@ public class NettyMessagingManager extends NettyMessaging { | ... | @@ -41,11 +42,11 @@ public class NettyMessagingManager extends NettyMessaging { |
41 | private static final short MIN_KS_LENGTH = 6; | 42 | private static final short MIN_KS_LENGTH = 6; |
42 | 43 | ||
43 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | 44 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) |
44 | - protected ClusterDefinitionService clusterDefinitionService; | 45 | + protected ClusterMetadataService clusterMetadataService; |
45 | 46 | ||
46 | @Activate | 47 | @Activate |
47 | public void activate() throws Exception { | 48 | public void activate() throws Exception { |
48 | - ControllerNode localNode = clusterDefinitionService.localNode(); | 49 | + ControllerNode localNode = clusterMetadataService.getLocalNode(); |
49 | getTLSParameters(); | 50 | getTLSParameters(); |
50 | super.start(new Endpoint(localNode.ip(), localNode.tcpPort())); | 51 | super.start(new Endpoint(localNode.ip(), localNode.tcpPort())); |
51 | log.info("Started"); | 52 | log.info("Started"); | ... | ... |
core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseDefinition.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2015 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.consistent.impl; | ||
17 | - | ||
18 | -import com.google.common.collect.ImmutableMap; | ||
19 | -import com.google.common.collect.ImmutableSet; | ||
20 | -import com.google.common.collect.Maps; | ||
21 | -import org.onosproject.store.cluster.impl.NodeInfo; | ||
22 | - | ||
23 | -import java.util.ArrayList; | ||
24 | -import java.util.Collections; | ||
25 | -import java.util.HashSet; | ||
26 | -import java.util.List; | ||
27 | -import java.util.Map; | ||
28 | -import java.util.Set; | ||
29 | - | ||
30 | -import static com.google.common.base.Preconditions.checkNotNull; | ||
31 | - | ||
32 | -/** | ||
33 | - * Partitioned database configuration. | ||
34 | - */ | ||
35 | -public class DatabaseDefinition { | ||
36 | - private Map<String, Set<NodeInfo>> partitions; | ||
37 | - private Set<NodeInfo> nodes; | ||
38 | - | ||
39 | - /** | ||
40 | - * Creates a new DatabaseDefinition. | ||
41 | - * | ||
42 | - * @param partitions partition map | ||
43 | - * @param nodes set of nodes | ||
44 | - * @return database definition | ||
45 | - */ | ||
46 | - public static DatabaseDefinition from(Map<String, Set<NodeInfo>> partitions, | ||
47 | - Set<NodeInfo> nodes) { | ||
48 | - checkNotNull(partitions); | ||
49 | - checkNotNull(nodes); | ||
50 | - DatabaseDefinition definition = new DatabaseDefinition(); | ||
51 | - definition.partitions = ImmutableMap.copyOf(partitions); | ||
52 | - definition.nodes = ImmutableSet.copyOf(nodes); | ||
53 | - return definition; | ||
54 | - } | ||
55 | - | ||
56 | - /** | ||
57 | - * Creates a new DatabaseDefinition using default partitions. | ||
58 | - * | ||
59 | - * @param nodes set of nodes | ||
60 | - * @return database definition | ||
61 | - */ | ||
62 | - public static DatabaseDefinition from(Set<NodeInfo> nodes) { | ||
63 | - return from(generateDefaultPartitions(nodes), nodes); | ||
64 | - } | ||
65 | - | ||
66 | - /** | ||
67 | - * Returns the map of database partitions. | ||
68 | - * | ||
69 | - * @return db partition map | ||
70 | - */ | ||
71 | - public Map<String, Set<NodeInfo>> getPartitions() { | ||
72 | - return partitions; | ||
73 | - } | ||
74 | - | ||
75 | - /** | ||
76 | - * Returns the set of nodes. | ||
77 | - * | ||
78 | - * @return nodes | ||
79 | - */ | ||
80 | - public Set<NodeInfo> getNodes() { | ||
81 | - return nodes; | ||
82 | - } | ||
83 | - | ||
84 | - | ||
85 | - /** | ||
86 | - * Generates set of default partitions using permutations of the nodes. | ||
87 | - * | ||
88 | - * @param nodes information about cluster nodes | ||
89 | - * @return default partition map | ||
90 | - */ | ||
91 | - private static Map<String, Set<NodeInfo>> generateDefaultPartitions(Set<NodeInfo> nodes) { | ||
92 | - List<NodeInfo> sorted = new ArrayList<>(nodes); | ||
93 | - Collections.sort(sorted, (o1, o2) -> o1.getId().compareTo(o2.getId())); | ||
94 | - Map<String, Set<NodeInfo>> partitions = Maps.newHashMap(); | ||
95 | - | ||
96 | - int length = nodes.size(); | ||
97 | - int count = 3; | ||
98 | - for (int i = 0; i < length; i++) { | ||
99 | - Set<NodeInfo> set = new HashSet<>(count); | ||
100 | - for (int j = 0; j < count; j++) { | ||
101 | - set.add(sorted.get((i + j) % length)); | ||
102 | - } | ||
103 | - partitions.put("p" + (i + 1), set); | ||
104 | - } | ||
105 | - return partitions; | ||
106 | - } | ||
107 | - | ||
108 | -} | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
1 | -/* | ||
2 | - * Copyright 2015 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | - | ||
17 | -package org.onosproject.store.consistent.impl; | ||
18 | - | ||
19 | -import static com.google.common.base.Preconditions.checkNotNull; | ||
20 | -import java.io.File; | ||
21 | -import java.io.IOException; | ||
22 | -import com.fasterxml.jackson.databind.ObjectMapper; | ||
23 | -import com.google.common.io.Files; | ||
24 | - | ||
25 | -/** | ||
26 | - * Allows for reading and writing partitioned database definition as a JSON file. | ||
27 | - */ | ||
28 | -public class DatabaseDefinitionStore { | ||
29 | - | ||
30 | - private final File file; | ||
31 | - | ||
32 | - /** | ||
33 | - * Creates a reader/writer of the database definition file. | ||
34 | - * | ||
35 | - * @param filePath location of the definition file | ||
36 | - */ | ||
37 | - public DatabaseDefinitionStore(String filePath) { | ||
38 | - file = new File(checkNotNull(filePath)); | ||
39 | - } | ||
40 | - | ||
41 | - /** | ||
42 | - * Creates a reader/writer of the database definition file. | ||
43 | - * | ||
44 | - * @param filePath location of the definition file | ||
45 | - */ | ||
46 | - public DatabaseDefinitionStore(File filePath) { | ||
47 | - file = checkNotNull(filePath); | ||
48 | - } | ||
49 | - | ||
50 | - /** | ||
51 | - * Returns the database definition. | ||
52 | - * | ||
53 | - * @return database definition | ||
54 | - * @throws IOException when I/O exception of some sort has occurred. | ||
55 | - */ | ||
56 | - public DatabaseDefinition read() throws IOException { | ||
57 | - ObjectMapper mapper = new ObjectMapper(); | ||
58 | - return mapper.readValue(file, DatabaseDefinition.class); | ||
59 | - } | ||
60 | - | ||
61 | - /** | ||
62 | - * Writes the specified database definition to file. | ||
63 | - * | ||
64 | - * @param definition database definition | ||
65 | - * @throws IOException when I/O exception of some sort has occurred. | ||
66 | - */ | ||
67 | - public void write(DatabaseDefinition definition) throws IOException { | ||
68 | - checkNotNull(definition); | ||
69 | - // write back to file | ||
70 | - Files.createParentDirs(file); | ||
71 | - ObjectMapper mapper = new ObjectMapper(); | ||
72 | - mapper.writeValue(file, definition); | ||
73 | - } | ||
74 | -} |
... | @@ -18,7 +18,6 @@ package org.onosproject.store.consistent.impl; | ... | @@ -18,7 +18,6 @@ package org.onosproject.store.consistent.impl; |
18 | 18 | ||
19 | import com.google.common.collect.ArrayListMultimap; | 19 | import com.google.common.collect.ArrayListMultimap; |
20 | import com.google.common.collect.ImmutableList; | 20 | import com.google.common.collect.ImmutableList; |
21 | -import com.google.common.collect.ImmutableSet; | ||
22 | import com.google.common.collect.Lists; | 21 | import com.google.common.collect.Lists; |
23 | import com.google.common.collect.Maps; | 22 | import com.google.common.collect.Maps; |
24 | import com.google.common.collect.Multimap; | 23 | import com.google.common.collect.Multimap; |
... | @@ -50,12 +49,12 @@ import org.apache.felix.scr.annotations.Service; | ... | @@ -50,12 +49,12 @@ import org.apache.felix.scr.annotations.Service; |
50 | import org.onosproject.app.ApplicationEvent; | 49 | import org.onosproject.app.ApplicationEvent; |
51 | import org.onosproject.app.ApplicationListener; | 50 | import org.onosproject.app.ApplicationListener; |
52 | import org.onosproject.app.ApplicationService; | 51 | import org.onosproject.app.ApplicationService; |
52 | +import org.onosproject.cluster.ClusterMetadataService; | ||
53 | import org.onosproject.cluster.ClusterService; | 53 | import org.onosproject.cluster.ClusterService; |
54 | +import org.onosproject.cluster.ControllerNode; | ||
54 | import org.onosproject.cluster.NodeId; | 55 | import org.onosproject.cluster.NodeId; |
55 | import org.onosproject.core.ApplicationId; | 56 | import org.onosproject.core.ApplicationId; |
56 | import org.onosproject.core.IdGenerator; | 57 | import org.onosproject.core.IdGenerator; |
57 | -import org.onosproject.store.cluster.impl.ClusterDefinitionManager; | ||
58 | -import org.onosproject.store.cluster.impl.NodeInfo; | ||
59 | import org.onosproject.store.cluster.messaging.ClusterCommunicationService; | 58 | import org.onosproject.store.cluster.messaging.ClusterCommunicationService; |
60 | import org.onosproject.store.ecmap.EventuallyConsistentMapBuilderImpl; | 59 | import org.onosproject.store.ecmap.EventuallyConsistentMapBuilderImpl; |
61 | import org.onosproject.store.service.AtomicCounterBuilder; | 60 | import org.onosproject.store.service.AtomicCounterBuilder; |
... | @@ -73,8 +72,6 @@ import org.onosproject.store.service.Transaction; | ... | @@ -73,8 +72,6 @@ import org.onosproject.store.service.Transaction; |
73 | import org.onosproject.store.service.TransactionContextBuilder; | 72 | import org.onosproject.store.service.TransactionContextBuilder; |
74 | import org.slf4j.Logger; | 73 | import org.slf4j.Logger; |
75 | 74 | ||
76 | -import java.io.File; | ||
77 | -import java.io.IOException; | ||
78 | import java.util.Collection; | 75 | import java.util.Collection; |
79 | import java.util.List; | 76 | import java.util.List; |
80 | import java.util.Map; | 77 | import java.util.Map; |
... | @@ -99,8 +96,6 @@ public class DatabaseManager implements StorageService, StorageAdminService { | ... | @@ -99,8 +96,6 @@ public class DatabaseManager implements StorageService, StorageAdminService { |
99 | 96 | ||
100 | private final Logger log = getLogger(getClass()); | 97 | private final Logger log = getLogger(getClass()); |
101 | 98 | ||
102 | - public static final int COPYCAT_TCP_PORT = 9876; | ||
103 | - public static final String PARTITION_DEFINITION_FILE = "../config/tablets.json"; | ||
104 | public static final String BASE_PARTITION_NAME = "p0"; | 99 | public static final String BASE_PARTITION_NAME = "p0"; |
105 | 100 | ||
106 | private static final int RAFT_ELECTION_TIMEOUT_MILLIS = 3000; | 101 | private static final int RAFT_ELECTION_TIMEOUT_MILLIS = 3000; |
... | @@ -122,6 +117,9 @@ public class DatabaseManager implements StorageService, StorageAdminService { | ... | @@ -122,6 +117,9 @@ public class DatabaseManager implements StorageService, StorageAdminService { |
122 | Multimaps.synchronizedMultimap(ArrayListMultimap.create()); | 117 | Multimaps.synchronizedMultimap(ArrayListMultimap.create()); |
123 | 118 | ||
124 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | 119 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) |
120 | + protected ClusterMetadataService clusterMetadataService; | ||
121 | + | ||
122 | + @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
125 | protected ClusterService clusterService; | 123 | protected ClusterService clusterService; |
126 | 124 | ||
127 | @Reference(cardinality = ReferenceCardinality.OPTIONAL_UNARY, policy = ReferencePolicy.DYNAMIC) | 125 | @Reference(cardinality = ReferenceCardinality.OPTIONAL_UNARY, policy = ReferencePolicy.DYNAMIC) |
... | @@ -130,8 +128,9 @@ public class DatabaseManager implements StorageService, StorageAdminService { | ... | @@ -130,8 +128,9 @@ public class DatabaseManager implements StorageService, StorageAdminService { |
130 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | 128 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) |
131 | protected ClusterCommunicationService clusterCommunicator; | 129 | protected ClusterCommunicationService clusterCommunicator; |
132 | 130 | ||
133 | - protected String nodeToUri(NodeInfo node) { | 131 | + protected String nodeIdToUri(NodeId nodeId) { |
134 | - return String.format("onos://%s:%d", node.getIp(), node.getTcpPort()); | 132 | + ControllerNode node = clusterService.getNode(nodeId); |
133 | + return String.format("onos://%s:%d", node.ip(), node.tcpPort()); | ||
135 | } | 134 | } |
136 | 135 | ||
137 | protected void bindApplicationService(ApplicationService service) { | 136 | protected void bindApplicationService(ApplicationService service) { |
... | @@ -147,30 +146,22 @@ public class DatabaseManager implements StorageService, StorageAdminService { | ... | @@ -147,30 +146,22 @@ public class DatabaseManager implements StorageService, StorageAdminService { |
147 | @Activate | 146 | @Activate |
148 | public void activate() { | 147 | public void activate() { |
149 | localNodeId = clusterService.getLocalNode().id(); | 148 | localNodeId = clusterService.getLocalNode().id(); |
150 | - // load database configuration | ||
151 | - File databaseDefFile = new File(PARTITION_DEFINITION_FILE); | ||
152 | - log.info("Loading database definition: {}", databaseDefFile.getAbsolutePath()); | ||
153 | 149 | ||
154 | - Map<String, Set<NodeInfo>> partitionMap; | 150 | + Map<String, Set<NodeId>> partitionMap = Maps.newHashMap(); |
155 | - try { | 151 | + clusterMetadataService.getClusterMetadata().getPartitions().forEach(p -> { |
156 | - DatabaseDefinitionStore databaseDefStore = new DatabaseDefinitionStore(databaseDefFile); | 152 | + partitionMap.put(p.getName(), Sets.newHashSet(p.getMembers())); |
157 | - if (!databaseDefFile.exists()) { | 153 | + }); |
158 | - createDefaultDatabaseDefinition(databaseDefStore); | 154 | + |
159 | - } | ||
160 | - partitionMap = databaseDefStore.read().getPartitions(); | ||
161 | - } catch (IOException e) { | ||
162 | - throw new IllegalStateException("Failed to load database config", e); | ||
163 | - } | ||
164 | 155 | ||
165 | String[] activeNodeUris = partitionMap.values() | 156 | String[] activeNodeUris = partitionMap.values() |
166 | .stream() | 157 | .stream() |
167 | .reduce((s1, s2) -> Sets.union(s1, s2)) | 158 | .reduce((s1, s2) -> Sets.union(s1, s2)) |
168 | .get() | 159 | .get() |
169 | .stream() | 160 | .stream() |
170 | - .map(this::nodeToUri) | 161 | + .map(this::nodeIdToUri) |
171 | .toArray(String[]::new); | 162 | .toArray(String[]::new); |
172 | 163 | ||
173 | - String localNodeUri = nodeToUri(NodeInfo.of(clusterService.getLocalNode())); | 164 | + String localNodeUri = nodeIdToUri(clusterMetadataService.getLocalNode().id()); |
174 | Protocol protocol = new CopycatCommunicationProtocol(clusterService, clusterCommunicator); | 165 | Protocol protocol = new CopycatCommunicationProtocol(clusterService, clusterCommunicator); |
175 | 166 | ||
176 | ClusterConfig clusterConfig = new ClusterConfig() | 167 | ClusterConfig clusterConfig = new ClusterConfig() |
... | @@ -198,7 +189,7 @@ public class DatabaseManager implements StorageService, StorageAdminService { | ... | @@ -198,7 +189,7 @@ public class DatabaseManager implements StorageService, StorageAdminService { |
198 | List<Database> partitions = partitionMap.entrySet() | 189 | List<Database> partitions = partitionMap.entrySet() |
199 | .stream() | 190 | .stream() |
200 | .map(entry -> { | 191 | .map(entry -> { |
201 | - String[] replicas = entry.getValue().stream().map(this::nodeToUri).toArray(String[]::new); | 192 | + String[] replicas = entry.getValue().stream().map(this::nodeIdToUri).toArray(String[]::new); |
202 | return newDatabaseConfig(entry.getKey(), newPersistentLog(), replicas); | 193 | return newDatabaseConfig(entry.getKey(), newPersistentLog(), replicas); |
203 | }) | 194 | }) |
204 | .map(config -> { | 195 | .map(config -> { |
... | @@ -229,17 +220,6 @@ public class DatabaseManager implements StorageService, StorageAdminService { | ... | @@ -229,17 +220,6 @@ public class DatabaseManager implements StorageService, StorageAdminService { |
229 | log.info("Started"); | 220 | log.info("Started"); |
230 | } | 221 | } |
231 | 222 | ||
232 | - private void createDefaultDatabaseDefinition(DatabaseDefinitionStore store) { | ||
233 | - // Assumes IPv4 is returned. | ||
234 | - String ip = ClusterDefinitionManager.getSiteLocalAddress(); | ||
235 | - NodeInfo node = NodeInfo.from(ip, ip, COPYCAT_TCP_PORT); | ||
236 | - try { | ||
237 | - store.write(DatabaseDefinition.from(ImmutableSet.of(node))); | ||
238 | - } catch (IOException e) { | ||
239 | - log.warn("Unable to write default cluster definition", e); | ||
240 | - } | ||
241 | - } | ||
242 | - | ||
243 | @Deactivate | 223 | @Deactivate |
244 | public void deactivate() { | 224 | public void deactivate() { |
245 | CompletableFuture.allOf(inMemoryDatabase.close(), partitionedDatabase.close()) | 225 | CompletableFuture.allOf(inMemoryDatabase.close(), partitionedDatabase.close()) | ... | ... |
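With the tablets.json loading removed, the partition layout now comes straight from cluster metadata, and each member id is resolved through the cluster service into a Copycat replica URI of the form onos://ip:port. A rough self-contained sketch of that mapping, with a hypothetical id-to-endpoint table standing in for ClusterService.getNode():

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ReplicaUriDemo {

    // Hypothetical stand-in for ClusterService.getNode(id): node id -> "ip:tcpPort".
    static final Map<String, String> NODE_ENDPOINTS = new LinkedHashMap<>();
    static {
        NODE_ENDPOINTS.put("10.0.1.1", "10.0.1.1:9876");
        NODE_ENDPOINTS.put("10.0.1.2", "10.0.1.2:9876");
    }

    // Mirrors nodeIdToUri(): replica URIs take the form "onos://<ip>:<port>".
    static String nodeIdToUri(String nodeId) {
        String[] parts = NODE_ENDPOINTS.get(nodeId).split(":");
        return String.format("onos://%s:%s", parts[0], parts[1]);
    }

    public static void main(String[] args) {
        // Partition map as it would be derived from ClusterMetadata.getPartitions().
        Map<String, List<String>> partitionMap = new LinkedHashMap<>();
        partitionMap.put("p1", Arrays.asList("10.0.1.1", "10.0.1.2"));
        partitionMap.put("p2", Arrays.asList("10.0.1.2", "10.0.1.1"));

        partitionMap.forEach((name, members) -> {
            String[] replicas = members.stream()
                    .map(ReplicaUriDemo::nodeIdToUri)
                    .toArray(String[]::new);
            System.out.println(name + " -> " + Arrays.toString(replicas));
        });
        // p1 -> [onos://10.0.1.1:9876, onos://10.0.1.2:9876]
        // p2 -> [onos://10.0.1.2:9876, onos://10.0.1.1:9876]
    }
}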
... | @@ -37,17 +37,6 @@ export ONOS_BOOT_FEATURES="${ONOS_BOOT_FEATURES:-webconsole,onos-api,onos-core,o | ... | @@ -37,17 +37,6 @@ export ONOS_BOOT_FEATURES="${ONOS_BOOT_FEATURES:-webconsole,onos-api,onos-core,o |
37 | # ONOS builtin apps and providers ignited by default | 37 | # ONOS builtin apps and providers ignited by default |
38 | export ONOS_APPS="${ONOS_APPS:-drivers,openflow}" | 38 | export ONOS_APPS="${ONOS_APPS:-drivers,openflow}" |
39 | 39 | ||
40 | -# Generate a cluster.json from the ON* environment variables | ||
41 | -CDEF_FILE=/tmp/${remote}.cluster.json | ||
42 | -echo "{ \"ipPrefix\": \"$ONOS_NIC\"," > $CDEF_FILE | ||
43 | -echo " \"nodes\":[" >> $CDEF_FILE | ||
44 | -for node in $(env | sort | egrep "OC[2-9]+" | cut -d= -f2); do | ||
45 | - echo " { \"id\": \"$node\", \"ip\": \"$node\", \"tcpPort\": 9876 }," >> $CDEF_FILE | ||
46 | -done | ||
47 | -echo " { \"id\": \"$OC1\", \"ip\": \"$OC1\", \"tcpPort\": 9876 }" >> $CDEF_FILE | ||
48 | -echo "]}" >> $CDEF_FILE | ||
49 | -scp -q $CDEF_FILE $remote:$ONOS_INSTALL_DIR/config/cluster.json | ||
50 | - | ||
51 | ssh $remote " | 40 | ssh $remote " |
52 | echo \"onos.ip = \$(sudo ifconfig | grep $ONOS_NIC | cut -d: -f2 | cut -d\\ -f1)\" \ | 41 | echo \"onos.ip = \$(sudo ifconfig | grep $ONOS_NIC | cut -d: -f2 | cut -d\\ -f1)\" \ |
53 | >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/system.properties | 42 | >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/system.properties |
... | @@ -66,10 +55,10 @@ ssh $remote " | ... | @@ -66,10 +55,10 @@ ssh $remote " |
66 | done | 55 | done |
67 | " | 56 | " |
68 | 57 | ||
69 | -# Generate a default tablets.json from the ON* environment variables | 58 | +# Generate a default cluster.json from the ON* environment variables |
70 | -TDEF_FILE=/tmp/${remote}.tablets.json | 59 | +CDEF_FILE=/tmp/${remote}.cluster.json |
71 | -onos-gen-partitions $TDEF_FILE | 60 | +onos-gen-partitions $CDEF_FILE |
72 | -scp -q $TDEF_FILE $remote:$ONOS_INSTALL_DIR/config/tablets.json | 61 | +scp -q $CDEF_FILE $remote:$ONOS_INSTALL_DIR/config/cluster.json |
73 | 62 | ||
74 | # Copy tools/package/config/ to remote | 63 | # Copy tools/package/config/ to remote |
75 | scp -qr ${ONOS_ROOT}/tools/package/config/ $remote:$ONOS_INSTALL_DIR/ | 64 | scp -qr ${ONOS_ROOT}/tools/package/config/ $remote:$ONOS_INSTALL_DIR/ | ... | ... |
... | @@ -23,22 +23,27 @@ def get_OC_vars(): | ... | @@ -23,22 +23,27 @@ def get_OC_vars(): |
23 | return sorted(vars, key=alphanum_key) | 23 | return sorted(vars, key=alphanum_key) |
24 | 24 | ||
25 | def get_nodes(vars, port=9876): | 25 | def get_nodes(vars, port=9876): |
26 | - node = lambda k: { 'id': k, 'ip': k, 'tcpPort': port } | 26 | + node = lambda k: { 'id': k, 'ip': k, 'port': port } |
27 | return [ node(environ[v]) for v in vars ] | 27 | return [ node(environ[v]) for v in vars ] |
28 | 28 | ||
29 | def generate_permutations(nodes, k): | 29 | def generate_permutations(nodes, k): |
30 | l = deque(nodes) | 30 | l = deque(nodes) |
31 | - perms = {} | 31 | + perms = [] |
32 | for i in range(1, len(nodes)+1): | 32 | for i in range(1, len(nodes)+1): |
33 | - perms['p%d' % i] = list(l)[:k] | 33 | + part = { |
34 | + 'name': 'p%d' % i, | ||
35 | + 'members': list(l)[:k] | ||
36 | + } | ||
37 | + perms.append(part) | ||
34 | l.rotate(-1) | 38 | l.rotate(-1) |
35 | - return OrderedDict(sorted(perms.iteritems(), key=lambda (k, v): alphanum_key(k))) | 39 | + return perms |
36 | 40 | ||
37 | if __name__ == '__main__': | 41 | if __name__ == '__main__': |
38 | vars = get_OC_vars() | 42 | vars = get_OC_vars() |
39 | nodes = get_nodes(vars) | 43 | nodes = get_nodes(vars) |
40 | - partitions = generate_permutations(nodes, 3) | 44 | + partitions = generate_permutations([v.get('id') for v in nodes], 3) |
41 | - data = { | 45 | + data = { |
46 | + 'name': 'default', | ||
42 | 'nodes': nodes, | 47 | 'nodes': nodes, |
43 | 'partitions': partitions | 48 | 'partitions': partitions |
44 | } | 49 | } | ... | ... |
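The rewritten generator emits a list of named partitions rather than a name-keyed dict, but keeps the same ring scheme the deleted DatabaseDefinition used: partition p(i+1) holds the k node ids starting at position i, wrapping around. A small Java sketch of that scheme, with hypothetical node IPs in place of the OC* environment variables:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class RingPartitionsDemo {

    // Partition p(i+1) holds the k consecutive node ids starting at index i,
    // wrapping around the sorted node list.
    static List<String> describeRingPartitions(List<String> nodeIds, int k) {
        List<String> partitions = new ArrayList<>();
        int n = nodeIds.size();
        for (int i = 0; i < n; i++) {
            List<String> members = new ArrayList<>();
            for (int j = 0; j < k; j++) {
                members.add(nodeIds.get((i + j) % n));
            }
            partitions.add("p" + (i + 1) + " -> " + members);
        }
        return partitions;
    }

    public static void main(String[] args) {
        List<String> nodes = Arrays.asList("10.0.1.1", "10.0.1.2", "10.0.1.3");
        describeRingPartitions(nodes, 3).forEach(System.out::println);
        // p1 -> [10.0.1.1, 10.0.1.2, 10.0.1.3]
        // p2 -> [10.0.1.2, 10.0.1.3, 10.0.1.1]
        // p3 -> [10.0.1.3, 10.0.1.1, 10.0.1.2]
    }
}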
... | @@ -84,10 +84,9 @@ public class ClusterWebResource extends AbstractWebResource { | ... | @@ -84,10 +84,9 @@ public class ClusterWebResource extends AbstractWebResource { |
84 | public Response formCluster(InputStream config) throws IOException { | 84 | public Response formCluster(InputStream config) throws IOException { |
85 | JsonCodec<ControllerNode> codec = codec(ControllerNode.class); | 85 | JsonCodec<ControllerNode> codec = codec(ControllerNode.class); |
86 | ObjectNode root = (ObjectNode) mapper().readTree(config); | 86 | ObjectNode root = (ObjectNode) mapper().readTree(config); |
87 | - String ipPrefix = root.path("ipPrefix").asText(); | ||
88 | 87 | ||
89 | List<ControllerNode> nodes = codec.decode((ArrayNode) root.path("nodes"), this); | 88 | List<ControllerNode> nodes = codec.decode((ArrayNode) root.path("nodes"), this); |
90 | - get(ClusterAdminService.class).formCluster(new HashSet<>(nodes), ipPrefix); | 89 | + get(ClusterAdminService.class).formCluster(new HashSet<>(nodes)); |
91 | 90 | ||
92 | return Response.ok().build(); | 91 | return Response.ok().build(); |
93 | } | 92 | } | ... | ... |