Committed by Ray Milkey
Added ability to form a cluster via REST API.
Change-Id: Ib71f6b4caed1b1c4b9db78596ee35bf5cab05184
Showing 19 changed files with 366 additions and 78 deletions
| ... | @@ -18,6 +18,7 @@ package org.onosproject.cli; | ... | @@ -18,6 +18,7 @@ package org.onosproject.cli; |
| 18 | import org.apache.karaf.shell.commands.Argument; | 18 | import org.apache.karaf.shell.commands.Argument; |
| 19 | import org.apache.karaf.shell.commands.Command; | 19 | import org.apache.karaf.shell.commands.Command; |
| 20 | import org.onosproject.cluster.ClusterAdminService; | 20 | import org.onosproject.cluster.ClusterAdminService; |
| 21 | +import org.onosproject.cluster.DefaultControllerNode; | ||
| 21 | import org.onosproject.cluster.NodeId; | 22 | import org.onosproject.cluster.NodeId; |
| 22 | import org.onlab.packet.IpAddress; | 23 | import org.onlab.packet.IpAddress; |
| 23 | 24 | ||
| ... | @@ -38,7 +39,7 @@ public class NodeAddCommand extends AbstractShellCommand { | ... | @@ -38,7 +39,7 @@ public class NodeAddCommand extends AbstractShellCommand { |
| 38 | 39 | ||
| 39 | @Argument(index = 2, name = "tcpPort", description = "Node TCP listen port", | 40 | @Argument(index = 2, name = "tcpPort", description = "Node TCP listen port", |
| 40 | required = false, multiValued = false) | 41 | required = false, multiValued = false) |
| 41 | - int tcpPort = 9876; | 42 | + int tcpPort = DefaultControllerNode.DEFAULT_PORT; |
| 42 | 43 | ||
| 43 | @Override | 44 | @Override |
| 44 | protected void execute() { | 45 | protected void execute() { | ... | ... |
| ... | @@ -17,12 +17,24 @@ package org.onosproject.cluster; | ... | @@ -17,12 +17,24 @@ package org.onosproject.cluster; |
| 17 | 17 | ||
| 18 | import org.onlab.packet.IpAddress; | 18 | import org.onlab.packet.IpAddress; |
| 19 | 19 | ||
| 20 | +import java.util.Set; | ||
| 21 | + | ||
| 20 | /** | 22 | /** |
| 21 | * Service for administering the cluster node membership. | 23 | * Service for administering the cluster node membership. |
| 22 | */ | 24 | */ |
| 23 | public interface ClusterAdminService { | 25 | public interface ClusterAdminService { |
| 24 | 26 | ||
| 25 | /** | 27 | /** |
| 28 | + * Forms cluster configuration based on the specified set of node | ||
| 29 | + * information. This method resets and restarts the controller | ||
| 30 | + * instance. | ||
| 31 | + * | ||
| 32 | + * @param nodes set of nodes that form the cluster | ||
| 33 | + * @param ipPrefix IP address prefix, e.g. 10.0.1.* | ||
| 34 | + */ | ||
| 35 | + void formCluster(Set<ControllerNode> nodes, String ipPrefix); | ||
| 36 | + | ||
| 37 | + /** | ||
| 26 | * Adds a new controller node to the cluster. | 38 | * Adds a new controller node to the cluster. |
| 27 | * | 39 | * |
| 28 | * @param nodeId controller node identifier | 40 | * @param nodeId controller node identifier | ... | ... |
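The new ClusterAdminService.formCluster API above writes a fresh cluster definition and then restarts the controller instance. A minimal caller sketch, assuming an illustrative three-node layout on 10.0.1.x addresses and the default TCP port (none of these values come from this change):

    import com.google.common.collect.ImmutableSet;
    import org.onlab.packet.IpAddress;
    import org.onosproject.cluster.ClusterAdminService;
    import org.onosproject.cluster.ControllerNode;
    import org.onosproject.cluster.DefaultControllerNode;
    import org.onosproject.cluster.NodeId;

    public class FormClusterExample {
        void form(ClusterAdminService admin) {
            // Hypothetical node set; IDs default to the IP addresses here.
            ImmutableSet<ControllerNode> nodes = ImmutableSet.of(
                    node("10.0.1.1"), node("10.0.1.2"), node("10.0.1.3"));
            // Persists the new cluster definition, then reboots the container.
            admin.formCluster(nodes, "10.0.1.*");
        }

        private static ControllerNode node(String ip) {
            return new DefaultControllerNode(new NodeId(ip),
                                             IpAddress.valueOf(ip),
                                             DefaultControllerNode.DEFAULT_PORT);
        }
    }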
| ... | @@ -15,12 +15,12 @@ | ... | @@ -15,12 +15,12 @@ |
| 15 | */ | 15 | */ |
| 16 | package org.onosproject.cluster; | 16 | package org.onosproject.cluster; |
| 17 | 17 | ||
| 18 | -import java.util.Set; | ||
| 19 | - | ||
| 20 | import org.joda.time.DateTime; | 18 | import org.joda.time.DateTime; |
| 21 | import org.onlab.packet.IpAddress; | 19 | import org.onlab.packet.IpAddress; |
| 22 | import org.onosproject.store.Store; | 20 | import org.onosproject.store.Store; |
| 23 | 21 | ||
| 22 | +import java.util.Set; | ||
| 23 | + | ||
| 24 | /** | 24 | /** |
| 25 | * Manages inventory of controller cluster nodes; not intended for direct use. | 25 | * Manages inventory of controller cluster nodes; not intended for direct use. |
| 26 | */ | 26 | */ |
| ... | @@ -65,6 +65,16 @@ public interface ClusterStore extends Store<ClusterEvent, ClusterStoreDelegate> | ... | @@ -65,6 +65,16 @@ public interface ClusterStore extends Store<ClusterEvent, ClusterStoreDelegate> |
| 65 | DateTime getLastUpdated(NodeId nodeId); | 65 | DateTime getLastUpdated(NodeId nodeId); |
| 66 | 66 | ||
| 67 | /** | 67 | /** |
| 68 | + * Forms cluster configuration based on the specified set of node | ||
| 69 | + * information. Assumes subsequent restart for the new configuration to | ||
| 70 | + * take hold. | ||
| 71 | + * | ||
| 72 | + * @param nodes set of nodes that form the cluster | ||
| 73 | + * @param ipPrefix IP address prefix, e.g. 10.0.1.* | ||
| 74 | + */ | ||
| 75 | + void formCluster(Set<ControllerNode> nodes, String ipPrefix); | ||
| 76 | + | ||
| 77 | + /** | ||
| 68 | * Adds a new controller node to the cluster. | 78 | * Adds a new controller node to the cluster. |
| 69 | * | 79 | * |
| 70 | * @param nodeId controller node identifier | 80 | * @param nodeId controller node identifier | ... | ... |
| ... | @@ -26,7 +26,7 @@ import static com.google.common.base.MoreObjects.toStringHelper; | ... | @@ -26,7 +26,7 @@ import static com.google.common.base.MoreObjects.toStringHelper; |
| 26 | */ | 26 | */ |
| 27 | public class DefaultControllerNode implements ControllerNode { | 27 | public class DefaultControllerNode implements ControllerNode { |
| 28 | 28 | ||
| 29 | - private static final int DEFAULT_PORT = 9876; | 29 | + public static final int DEFAULT_PORT = 9876; |
| 30 | 30 | ||
| 31 | private final NodeId id; | 31 | private final NodeId id; |
| 32 | private final IpAddress ip; | 32 | private final IpAddress ip; | ... | ... |
| ... | @@ -15,15 +15,13 @@ | ... | @@ -15,15 +15,13 @@ |
| 15 | */ | 15 | */ |
| 16 | package org.onosproject.codec.impl; | 16 | package org.onosproject.codec.impl; |
| 17 | 17 | ||
| 18 | -import java.util.Map; | 18 | +import com.google.common.collect.ImmutableSet; |
| 19 | -import java.util.Set; | ||
| 20 | -import java.util.concurrent.ConcurrentHashMap; | ||
| 21 | - | ||
| 22 | import org.apache.felix.scr.annotations.Activate; | 19 | import org.apache.felix.scr.annotations.Activate; |
| 23 | import org.apache.felix.scr.annotations.Component; | 20 | import org.apache.felix.scr.annotations.Component; |
| 24 | import org.apache.felix.scr.annotations.Deactivate; | 21 | import org.apache.felix.scr.annotations.Deactivate; |
| 25 | import org.apache.felix.scr.annotations.Service; | 22 | import org.apache.felix.scr.annotations.Service; |
| 26 | import org.onlab.packet.Ethernet; | 23 | import org.onlab.packet.Ethernet; |
| 24 | +import org.onosproject.cluster.ControllerNode; | ||
| 27 | import org.onosproject.codec.CodecService; | 25 | import org.onosproject.codec.CodecService; |
| 28 | import org.onosproject.codec.JsonCodec; | 26 | import org.onosproject.codec.JsonCodec; |
| 29 | import org.onosproject.core.Application; | 27 | import org.onosproject.core.Application; |
| ... | @@ -50,7 +48,9 @@ import org.onosproject.net.topology.TopologyCluster; | ... | @@ -50,7 +48,9 @@ import org.onosproject.net.topology.TopologyCluster; |
| 50 | import org.slf4j.Logger; | 48 | import org.slf4j.Logger; |
| 51 | import org.slf4j.LoggerFactory; | 49 | import org.slf4j.LoggerFactory; |
| 52 | 50 | ||
| 53 | -import com.google.common.collect.ImmutableSet; | 51 | +import java.util.Map; |
| 52 | +import java.util.Set; | ||
| 53 | +import java.util.concurrent.ConcurrentHashMap; | ||
| 54 | 54 | ||
| 55 | /** | 55 | /** |
| 56 | * Implementation of the JSON codec brokering service. | 56 | * Implementation of the JSON codec brokering service. |
| ... | @@ -67,6 +67,7 @@ public class CodecManager implements CodecService { | ... | @@ -67,6 +67,7 @@ public class CodecManager implements CodecService { |
| 67 | public void activate() { | 67 | public void activate() { |
| 68 | codecs.clear(); | 68 | codecs.clear(); |
| 69 | registerCodec(Application.class, new ApplicationCodec()); | 69 | registerCodec(Application.class, new ApplicationCodec()); |
| 70 | + registerCodec(ControllerNode.class, new ControllerNodeCodec()); | ||
| 70 | registerCodec(Annotations.class, new AnnotationsCodec()); | 71 | registerCodec(Annotations.class, new AnnotationsCodec()); |
| 71 | registerCodec(Device.class, new DeviceCodec()); | 72 | registerCodec(Device.class, new DeviceCodec()); |
| 72 | registerCodec(Port.class, new PortCodec()); | 73 | registerCodec(Port.class, new PortCodec()); | ... | ... |
| 1 | +/* | ||
| 2 | + * Copyright 2015 Open Networking Laboratory | ||
| 3 | + * | ||
| 4 | + * Licensed under the Apache License, Version 2.0 (the "License"); | ||
| 5 | + * you may not use this file except in compliance with the License. | ||
| 6 | + * You may obtain a copy of the License at | ||
| 7 | + * | ||
| 8 | + * http://www.apache.org/licenses/LICENSE-2.0 | ||
| 9 | + * | ||
| 10 | + * Unless required by applicable law or agreed to in writing, software | ||
| 11 | + * distributed under the License is distributed on an "AS IS" BASIS, | ||
| 12 | + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| 13 | + * See the License for the specific language governing permissions and | ||
| 14 | + * limitations under the License. | ||
| 15 | + */ | ||
| 16 | +package org.onosproject.codec.impl; | ||
| 17 | + | ||
| 18 | +import com.fasterxml.jackson.databind.node.ObjectNode; | ||
| 19 | +import org.onlab.packet.IpAddress; | ||
| 20 | +import org.onosproject.cluster.ClusterService; | ||
| 21 | +import org.onosproject.cluster.ControllerNode; | ||
| 22 | +import org.onosproject.cluster.DefaultControllerNode; | ||
| 23 | +import org.onosproject.cluster.NodeId; | ||
| 24 | +import org.onosproject.codec.CodecContext; | ||
| 25 | +import org.onosproject.codec.JsonCodec; | ||
| 26 | + | ||
| 27 | +import static com.google.common.base.Preconditions.checkNotNull; | ||
| 28 | +import static org.onosproject.cluster.DefaultControllerNode.DEFAULT_PORT; | ||
| 29 | + | ||
| 30 | +/** | ||
| 31 | + * Controller node JSON codec. | ||
| 32 | + */ | ||
| 33 | +public final class ControllerNodeCodec extends JsonCodec<ControllerNode> { | ||
| 34 | + | ||
| 35 | + @Override | ||
| 36 | + public ObjectNode encode(ControllerNode node, CodecContext context) { | ||
| 37 | + checkNotNull(node, "Controller node cannot be null"); | ||
| 38 | + ClusterService service = context.get(ClusterService.class); | ||
| 39 | + return context.mapper().createObjectNode() | ||
| 40 | + .put("id", node.id().toString()) | ||
| 41 | + .put("ip", node.ip().toString()) | ||
| 42 | + .put("tcpPort", node.tcpPort()) | ||
| 43 | + .put("status", service.getState(node.id()).toString()); | ||
| 44 | + } | ||
| 45 | + | ||
| 46 | + | ||
| 47 | + @Override | ||
| 48 | + public ControllerNode decode(ObjectNode json, CodecContext context) { | ||
| 49 | + checkNotNull(json, "JSON cannot be null"); | ||
| 50 | + String ip = json.path("ip").asText(); | ||
| 51 | + return new DefaultControllerNode(new NodeId(json.path("id").asText(ip)), | ||
| 52 | + IpAddress.valueOf(ip), | ||
| 53 | + json.path("tcpPort").asInt(DEFAULT_PORT)); | ||
| 54 | + } | ||
| 55 | + | ||
| 56 | + | ||
| 57 | +} |
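For reference, a node encoded by this codec comes out roughly as the following JSON (values illustrative; "status" is the node's ControllerNode.State, e.g. ACTIVE):

    { "id": "10.0.1.1", "ip": "10.0.1.1", "tcpPort": 9876, "status": "ACTIVE" }

decode() accepts the same shape and is forgiving: a missing "id" falls back to the "ip" value and a missing "tcpPort" to DEFAULT_PORT (9876), which is what lets the REST payload further below list nodes by IP address alone.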
| ... | @@ -82,6 +82,11 @@ | ... | @@ -82,6 +82,11 @@ |
| 82 | <groupId>org.apache.karaf.features</groupId> | 82 | <groupId>org.apache.karaf.features</groupId> |
| 83 | <artifactId>org.apache.karaf.features.core</artifactId> | 83 | <artifactId>org.apache.karaf.features.core</artifactId> |
| 84 | </dependency> | 84 | </dependency> |
| 85 | + | ||
| 86 | + <dependency> | ||
| 87 | + <groupId>org.apache.karaf.system</groupId> | ||
| 88 | + <artifactId>org.apache.karaf.system.core</artifactId> | ||
| 89 | + </dependency> | ||
| 85 | </dependencies> | 90 | </dependencies> |
| 86 | 91 | ||
| 87 | <build> | 92 | <build> | ... | ... |
| ... | @@ -15,18 +15,13 @@ | ... | @@ -15,18 +15,13 @@ |
| 15 | */ | 15 | */ |
| 16 | package org.onosproject.cluster.impl; | 16 | package org.onosproject.cluster.impl; |
| 17 | 17 | ||
| 18 | -import static com.google.common.base.Preconditions.checkArgument; | ||
| 19 | -import static com.google.common.base.Preconditions.checkNotNull; | ||
| 20 | -import static org.slf4j.LoggerFactory.getLogger; | ||
| 21 | - | ||
| 22 | -import java.util.Set; | ||
| 23 | - | ||
| 24 | import org.apache.felix.scr.annotations.Activate; | 18 | import org.apache.felix.scr.annotations.Activate; |
| 25 | import org.apache.felix.scr.annotations.Component; | 19 | import org.apache.felix.scr.annotations.Component; |
| 26 | import org.apache.felix.scr.annotations.Deactivate; | 20 | import org.apache.felix.scr.annotations.Deactivate; |
| 27 | import org.apache.felix.scr.annotations.Reference; | 21 | import org.apache.felix.scr.annotations.Reference; |
| 28 | import org.apache.felix.scr.annotations.ReferenceCardinality; | 22 | import org.apache.felix.scr.annotations.ReferenceCardinality; |
| 29 | import org.apache.felix.scr.annotations.Service; | 23 | import org.apache.felix.scr.annotations.Service; |
| 24 | +import org.apache.karaf.system.SystemService; | ||
| 30 | import org.joda.time.DateTime; | 25 | import org.joda.time.DateTime; |
| 31 | import org.onlab.packet.IpAddress; | 26 | import org.onlab.packet.IpAddress; |
| 32 | import org.onosproject.cluster.ClusterAdminService; | 27 | import org.onosproject.cluster.ClusterAdminService; |
| ... | @@ -41,6 +36,12 @@ import org.onosproject.event.AbstractListenerRegistry; | ... | @@ -41,6 +36,12 @@ import org.onosproject.event.AbstractListenerRegistry; |
| 41 | import org.onosproject.event.EventDeliveryService; | 36 | import org.onosproject.event.EventDeliveryService; |
| 42 | import org.slf4j.Logger; | 37 | import org.slf4j.Logger; |
| 43 | 38 | ||
| 39 | +import java.util.Set; | ||
| 40 | + | ||
| 41 | +import static com.google.common.base.Preconditions.checkArgument; | ||
| 42 | +import static com.google.common.base.Preconditions.checkNotNull; | ||
| 43 | +import static org.slf4j.LoggerFactory.getLogger; | ||
| 44 | + | ||
| 44 | /** | 45 | /** |
| 45 | * Implementation of the cluster service. | 46 | * Implementation of the cluster service. |
| 46 | */ | 47 | */ |
| ... | @@ -62,6 +63,9 @@ public class ClusterManager implements ClusterService, ClusterAdminService { | ... | @@ -62,6 +63,9 @@ public class ClusterManager implements ClusterService, ClusterAdminService { |
| 62 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | 63 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) |
| 63 | protected EventDeliveryService eventDispatcher; | 64 | protected EventDeliveryService eventDispatcher; |
| 64 | 65 | ||
| 66 | + @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
| 67 | + protected SystemService systemService; | ||
| 68 | + | ||
| 65 | @Activate | 69 | @Activate |
| 66 | public void activate() { | 70 | public void activate() { |
| 67 | store.setDelegate(delegate); | 71 | store.setDelegate(delegate); |
| ... | @@ -105,6 +109,20 @@ public class ClusterManager implements ClusterService, ClusterAdminService { | ... | @@ -105,6 +109,20 @@ public class ClusterManager implements ClusterService, ClusterAdminService { |
| 105 | } | 109 | } |
| 106 | 110 | ||
| 107 | @Override | 111 | @Override |
| 112 | + public void formCluster(Set<ControllerNode> nodes, String ipPrefix) { | ||
| 113 | + checkNotNull(nodes, "Nodes cannot be null"); | ||
| 114 | + checkArgument(!nodes.isEmpty(), "Nodes cannot be empty"); | ||
| 115 | + checkNotNull(ipPrefix, "IP prefix cannot be null"); | ||
| 116 | + store.formCluster(nodes, ipPrefix); | ||
| 117 | + try { | ||
| 118 | + log.warn("Shutting down container for cluster reconfiguration!"); | ||
| 119 | + systemService.reboot("now", SystemService.Swipe.NONE); | ||
| 120 | + } catch (Exception e) { | ||
| 121 | + log.error("Unable to reboot container", e); | ||
| 122 | + } | ||
| 123 | + } | ||
| 124 | + | ||
| 125 | + @Override | ||
| 108 | public ControllerNode addNode(NodeId nodeId, IpAddress ip, int tcpPort) { | 126 | public ControllerNode addNode(NodeId nodeId, IpAddress ip, int tcpPort) { |
| 109 | checkNotNull(nodeId, INSTANCE_ID_NULL); | 127 | checkNotNull(nodeId, INSTANCE_ID_NULL); |
| 110 | checkNotNull(ip, "IP address cannot be null"); | 128 | checkNotNull(ip, "IP address cannot be null"); | ... | ... |
| ... | @@ -18,6 +18,8 @@ package org.onosproject.store.cluster.impl; | ... | @@ -18,6 +18,8 @@ package org.onosproject.store.cluster.impl; |
| 18 | import static com.google.common.base.Preconditions.checkNotNull; | 18 | import static com.google.common.base.Preconditions.checkNotNull; |
| 19 | 19 | ||
| 20 | import com.fasterxml.jackson.databind.ObjectMapper; | 20 | import com.fasterxml.jackson.databind.ObjectMapper; |
| 21 | +import com.google.common.io.Files; | ||
| 22 | + | ||
| 21 | import java.io.File; | 23 | import java.io.File; |
| 22 | import java.io.IOException; | 24 | import java.io.IOException; |
| 23 | 25 | ||
| ... | @@ -43,8 +45,7 @@ public class ClusterDefinitionStore { | ... | @@ -43,8 +45,7 @@ public class ClusterDefinitionStore { |
| 43 | */ | 45 | */ |
| 44 | public ClusterDefinition read() throws IOException { | 46 | public ClusterDefinition read() throws IOException { |
| 45 | ObjectMapper mapper = new ObjectMapper(); | 47 | ObjectMapper mapper = new ObjectMapper(); |
| 46 | - ClusterDefinition definition = mapper.readValue(file, ClusterDefinition.class); | 48 | + return mapper.readValue(file, ClusterDefinition.class); |
| 47 | - return definition; | ||
| 48 | } | 49 | } |
| 49 | 50 | ||
| 50 | /** | 51 | /** |
| ... | @@ -55,7 +56,8 @@ public class ClusterDefinitionStore { | ... | @@ -55,7 +56,8 @@ public class ClusterDefinitionStore { |
| 55 | public void write(ClusterDefinition definition) throws IOException { | 56 | public void write(ClusterDefinition definition) throws IOException { |
| 56 | checkNotNull(definition); | 57 | checkNotNull(definition); |
| 57 | // write back to file | 58 | // write back to file |
| 58 | - final ObjectMapper mapper = new ObjectMapper(); | 59 | + Files.createParentDirs(file); |
| 60 | + ObjectMapper mapper = new ObjectMapper(); | ||
| 59 | mapper.writeValue(file, definition); | 61 | mapper.writeValue(file, definition); |
| 60 | } | 62 | } |
| 61 | } | 63 | } |
| ... | \ No newline at end of file | ... | \ No newline at end of file | ... | ... |
| ... | @@ -17,6 +17,7 @@ package org.onosproject.store.cluster.impl; | ... | @@ -17,6 +17,7 @@ package org.onosproject.store.cluster.impl; |
| 17 | 17 | ||
| 18 | import com.google.common.collect.ImmutableSet; | 18 | import com.google.common.collect.ImmutableSet; |
| 19 | import com.google.common.collect.Maps; | 19 | import com.google.common.collect.Maps; |
| 20 | +import com.google.common.collect.Sets; | ||
| 20 | import com.hazelcast.util.AddressUtil; | 21 | import com.hazelcast.util.AddressUtil; |
| 21 | import org.apache.felix.scr.annotations.Activate; | 22 | import org.apache.felix.scr.annotations.Activate; |
| 22 | import org.apache.felix.scr.annotations.Component; | 23 | import org.apache.felix.scr.annotations.Component; |
| ... | @@ -37,6 +38,8 @@ import org.onosproject.cluster.ControllerNode.State; | ... | @@ -37,6 +38,8 @@ import org.onosproject.cluster.ControllerNode.State; |
| 37 | import org.onosproject.cluster.DefaultControllerNode; | 38 | import org.onosproject.cluster.DefaultControllerNode; |
| 38 | import org.onosproject.cluster.NodeId; | 39 | import org.onosproject.cluster.NodeId; |
| 39 | import org.onosproject.store.AbstractStore; | 40 | import org.onosproject.store.AbstractStore; |
| 41 | +import org.onosproject.store.consistent.impl.DatabaseDefinition; | ||
| 42 | +import org.onosproject.store.consistent.impl.DatabaseDefinitionStore; | ||
| 40 | import org.onosproject.store.serializers.KryoNamespaces; | 43 | import org.onosproject.store.serializers.KryoNamespaces; |
| 41 | import org.onosproject.store.serializers.KryoSerializer; | 44 | import org.onosproject.store.serializers.KryoSerializer; |
| 42 | import org.slf4j.Logger; | 45 | import org.slf4j.Logger; |
| ... | @@ -55,11 +58,13 @@ import java.util.concurrent.ScheduledExecutorService; | ... | @@ -55,11 +58,13 @@ import java.util.concurrent.ScheduledExecutorService; |
| 55 | import java.util.concurrent.TimeUnit; | 58 | import java.util.concurrent.TimeUnit; |
| 56 | import java.util.stream.Collectors; | 59 | import java.util.stream.Collectors; |
| 57 | 60 | ||
| 58 | -import static com.google.common.base.Preconditions.checkArgument; | ||
| 59 | import static com.google.common.base.Preconditions.checkNotNull; | 61 | import static com.google.common.base.Preconditions.checkNotNull; |
| 62 | +import static com.hazelcast.util.AddressUtil.matchInterface; | ||
| 60 | import static java.net.NetworkInterface.getNetworkInterfaces; | 63 | import static java.net.NetworkInterface.getNetworkInterfaces; |
| 61 | import static java.util.Collections.list; | 64 | import static java.util.Collections.list; |
| 62 | import static org.onlab.util.Tools.groupedThreads; | 65 | import static org.onlab.util.Tools.groupedThreads; |
| 66 | +import static org.onosproject.cluster.DefaultControllerNode.DEFAULT_PORT; | ||
| 67 | +import static org.onosproject.store.consistent.impl.DatabaseManager.PARTITION_DEFINITION_FILE; | ||
| 63 | import static org.slf4j.LoggerFactory.getLogger; | 68 | import static org.slf4j.LoggerFactory.getLogger; |
| 64 | 69 | ||
| 65 | @Component(immediate = true) | 70 | @Component(immediate = true) |
| ... | @@ -74,17 +79,14 @@ public class DistributedClusterStore | ... | @@ -74,17 +79,14 @@ public class DistributedClusterStore |
| 74 | 79 | ||
| 75 | private static final Logger log = getLogger(DistributedClusterStore.class); | 80 | private static final Logger log = getLogger(DistributedClusterStore.class); |
| 76 | 81 | ||
| 82 | + public static final String CLUSTER_DEFINITION_FILE = "../config/cluster.json"; | ||
| 83 | + public static final String HEARTBEAT_MESSAGE = "onos-cluster-heartbeat"; | ||
| 84 | + | ||
| 77 | // TODO: make these configurable. | 85 | // TODO: make these configurable. |
| 78 | private static final int HEARTBEAT_FD_PORT = 2419; | 86 | private static final int HEARTBEAT_FD_PORT = 2419; |
| 79 | private static final int HEARTBEAT_INTERVAL_MS = 100; | 87 | private static final int HEARTBEAT_INTERVAL_MS = 100; |
| 80 | private static final int PHI_FAILURE_THRESHOLD = 10; | 88 | private static final int PHI_FAILURE_THRESHOLD = 10; |
| 81 | 89 | ||
| 82 | - private static final String CONFIG_DIR = "../config"; | ||
| 83 | - private static final String CLUSTER_DEFINITION_FILE = "cluster.json"; | ||
| 84 | - private static final String HEARTBEAT_MESSAGE = "onos-cluster-heartbeat"; | ||
| 85 | - | ||
| 86 | - public static final int DEFAULT_PORT = 9876; | ||
| 87 | - | ||
| 88 | private static final KryoSerializer SERIALIZER = new KryoSerializer() { | 90 | private static final KryoSerializer SERIALIZER = new KryoSerializer() { |
| 89 | @Override | 91 | @Override |
| 90 | protected void setupKryoPool() { | 92 | protected void setupKryoPool() { |
| ... | @@ -97,6 +99,8 @@ public class DistributedClusterStore | ... | @@ -97,6 +99,8 @@ public class DistributedClusterStore |
| 97 | }; | 99 | }; |
| 98 | 100 | ||
| 99 | private static final String INSTANCE_ID_NULL = "Instance ID cannot be null"; | 101 | private static final String INSTANCE_ID_NULL = "Instance ID cannot be null"; |
| 102 | + private static final byte SITE_LOCAL_BYTE = (byte) 0xC0; | ||
| 103 | + private static final String ONOS_NIC = "ONOS_NIC"; | ||
| 100 | 104 | ||
| 101 | private ClusterDefinition clusterDefinition; | 105 | private ClusterDefinition clusterDefinition; |
| 102 | 106 | ||
| ... | @@ -116,7 +120,7 @@ public class DistributedClusterStore | ... | @@ -116,7 +120,7 @@ public class DistributedClusterStore |
| 116 | 120 | ||
| 117 | @Activate | 121 | @Activate |
| 118 | public void activate() { | 122 | public void activate() { |
| 119 | - File clusterDefinitionFile = new File(CONFIG_DIR, CLUSTER_DEFINITION_FILE); | 123 | + File clusterDefinitionFile = new File(CLUSTER_DEFINITION_FILE); |
| 120 | ClusterDefinitionStore clusterDefinitionStore = | 124 | ClusterDefinitionStore clusterDefinitionStore = |
| 121 | new ClusterDefinitionStore(clusterDefinitionFile.getPath()); | 125 | new ClusterDefinitionStore(clusterDefinitionFile.getPath()); |
| 122 | 126 | ||
| ... | @@ -129,13 +133,12 @@ public class DistributedClusterStore | ... | @@ -129,13 +133,12 @@ public class DistributedClusterStore |
| 129 | seedNodes = ImmutableSet | 133 | seedNodes = ImmutableSet |
| 130 | .copyOf(clusterDefinition.getNodes()) | 134 | .copyOf(clusterDefinition.getNodes()) |
| 131 | .stream() | 135 | .stream() |
| 132 | - .map(nodeInfo -> new DefaultControllerNode(new NodeId(nodeInfo.getId()), | 136 | + .map(n -> new DefaultControllerNode(new NodeId(n.getId()), |
| 133 | - IpAddress.valueOf(nodeInfo.getIp()), | 137 | + IpAddress.valueOf(n.getIp()), |
| 134 | - nodeInfo.getTcpPort())) | 138 | + n.getTcpPort())) |
| 135 | .collect(Collectors.toSet()); | 139 | .collect(Collectors.toSet()); |
| 136 | } catch (IOException e) { | 140 | } catch (IOException e) { |
| 137 | - throw new IllegalStateException( | 141 | + throw new IllegalStateException("Failed to read cluster definition.", e); |
| 138 | - "Failed to read cluster definition.", e); | ||
| 139 | } | 142 | } |
| 140 | 143 | ||
| 141 | seedNodes.forEach(node -> { | 144 | seedNodes.forEach(node -> { |
| ... | @@ -179,26 +182,30 @@ public class DistributedClusterStore | ... | @@ -179,26 +182,30 @@ public class DistributedClusterStore |
| 179 | } | 182 | } |
| 180 | 183 | ||
| 181 | /** | 184 | /** |
| 182 | - * Returns the site local address if one can be found, loopback otherwise. | 185 | + * Returns the address that matches the IP prefix given in ONOS_NIC |
| 186 | + * environment variable if one was specified, or the first site local | ||
| 187 | + * address if one can be found or the loopback address otherwise. | ||
| 183 | * | 188 | * |
| 184 | * @return site-local address in string form | 189 | * @return site-local address in string form |
| 185 | */ | 190 | */ |
| 186 | public static String getSiteLocalAddress() { | 191 | public static String getSiteLocalAddress() { |
| 187 | try { | 192 | try { |
| 193 | + String ipPrefix = System.getenv(ONOS_NIC); | ||
| 188 | for (NetworkInterface nif : list(getNetworkInterfaces())) { | 194 | for (NetworkInterface nif : list(getNetworkInterfaces())) { |
| 189 | for (InetAddress address : list(nif.getInetAddresses())) { | 195 | for (InetAddress address : list(nif.getInetAddresses())) { |
| 190 | - if (address.getAddress()[0] == (byte) 0xC0) { | 196 | + IpAddress ip = IpAddress.valueOf(address); |
| 191 | - return address.toString().substring(1); | 197 | + if (ipPrefix == null && address.isSiteLocalAddress() || |
| 198 | + ipPrefix != null && matchInterface(ip.toString(), ipPrefix)) { | ||
| 199 | + return ip.toString(); | ||
| 192 | } | 200 | } |
| 193 | } | 201 | } |
| 194 | } | 202 | } |
| 195 | - return InetAddress.getLoopbackAddress().toString().substring(1); | ||
| 196 | 203 | ||
| 197 | } catch (SocketException e) { | 204 | } catch (SocketException e) { |
| 198 | log.error("Unable to get network interfaces", e); | 205 | log.error("Unable to get network interfaces", e); |
| 199 | } | 206 | } |
| 200 | 207 | ||
| 201 | - return null; | 208 | + return IpAddress.valueOf(InetAddress.getLoopbackAddress()).toString(); |
| 202 | } | 209 | } |
| 203 | 210 | ||
| 204 | @Deactivate | 211 | @Deactivate |
| ... | @@ -255,9 +262,6 @@ public class DistributedClusterStore | ... | @@ -255,9 +262,6 @@ public class DistributedClusterStore |
| 255 | 262 | ||
| 256 | @Override | 263 | @Override |
| 257 | public ControllerNode addNode(NodeId nodeId, IpAddress ip, int tcpPort) { | 264 | public ControllerNode addNode(NodeId nodeId, IpAddress ip, int tcpPort) { |
| 258 | - checkNotNull(nodeId, INSTANCE_ID_NULL); | ||
| 259 | - checkNotNull(ip, "IP address must not be null"); | ||
| 260 | - checkArgument(tcpPort > 5000, "Tcp port must be greater than 5000"); | ||
| 261 | ControllerNode node = new DefaultControllerNode(nodeId, ip, tcpPort); | 265 | ControllerNode node = new DefaultControllerNode(nodeId, ip, tcpPort); |
| 262 | allNodes.put(node.id(), node); | 266 | allNodes.put(node.id(), node); |
| 263 | updateState(nodeId, State.INACTIVE); | 267 | updateState(nodeId, State.INACTIVE); |
| ... | @@ -275,6 +279,24 @@ public class DistributedClusterStore | ... | @@ -275,6 +279,24 @@ public class DistributedClusterStore |
| 275 | } | 279 | } |
| 276 | } | 280 | } |
| 277 | 281 | ||
| 282 | + @Override | ||
| 283 | + public void formCluster(Set<ControllerNode> nodes, String ipPrefix) { | ||
| 284 | + try { | ||
| 285 | + Set<NodeInfo> infos = Sets.newHashSet(); | ||
| 286 | + nodes.forEach(n -> infos.add(NodeInfo.from(n.id().toString(), | ||
| 287 | + n.ip().toString(), | ||
| 288 | + n.tcpPort()))); | ||
| 289 | + | ||
| 290 | + ClusterDefinition cdef = ClusterDefinition.from(infos, ipPrefix); | ||
| 291 | + new ClusterDefinitionStore(CLUSTER_DEFINITION_FILE).write(cdef); | ||
| 292 | + | ||
| 293 | + DatabaseDefinition ddef = DatabaseDefinition.from(infos); | ||
| 294 | + new DatabaseDefinitionStore(PARTITION_DEFINITION_FILE).write(ddef); | ||
| 295 | + } catch (IOException e) { | ||
| 296 | + log.error("Unable to form cluster", e); | ||
| 297 | + } | ||
| 298 | + } | ||
| 299 | + | ||
| 278 | private void updateState(NodeId nodeId, State newState) { | 300 | private void updateState(NodeId nodeId, State newState) { |
| 279 | nodeStates.put(nodeId, newState); | 301 | nodeStates.put(nodeId, newState); |
| 280 | nodeStateLastUpdatedTimes.put(nodeId, DateTime.now()); | 302 | nodeStateLastUpdatedTimes.put(nodeId, DateTime.now()); |
| ... | @@ -387,4 +409,5 @@ public class DistributedClusterStore | ... | @@ -387,4 +409,5 @@ public class DistributedClusterStore |
| 387 | public DateTime getLastUpdated(NodeId nodeId) { | 409 | public DateTime getLastUpdated(NodeId nodeId) { |
| 388 | return nodeStateLastUpdatedTimes.get(nodeId); | 410 | return nodeStateLastUpdatedTimes.get(nodeId); |
| 389 | } | 411 | } |
| 412 | + | ||
| 390 | } | 413 | } | ... | ... |
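The store-level formCluster above rewrites two definition files relative to the Karaf runtime directory: ../config/cluster.json (cluster membership plus the IP prefix) and ../config/tablets.json (database partitions generated from the same nodes). As a sketch, the resulting cluster.json should look roughly like the following; the field names are inferred from the NodeInfo and ClusterDefinition accessors and are not spelled out in this diff:

    { "nodes": [ { "id": "10.0.1.1", "ip": "10.0.1.1", "tcpPort": 9876 },
                 { "id": "10.0.1.2", "ip": "10.0.1.2", "tcpPort": 9876 } ],
      "ipPrefix": "10.0.1.*" }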
| ... | @@ -24,12 +24,12 @@ import com.hazelcast.core.Member; | ... | @@ -24,12 +24,12 @@ import com.hazelcast.core.Member; |
| 24 | import com.hazelcast.core.MemberAttributeEvent; | 24 | import com.hazelcast.core.MemberAttributeEvent; |
| 25 | import com.hazelcast.core.MembershipEvent; | 25 | import com.hazelcast.core.MembershipEvent; |
| 26 | import com.hazelcast.core.MembershipListener; | 26 | import com.hazelcast.core.MembershipListener; |
| 27 | - | ||
| 28 | import org.apache.felix.scr.annotations.Activate; | 27 | import org.apache.felix.scr.annotations.Activate; |
| 29 | import org.apache.felix.scr.annotations.Component; | 28 | import org.apache.felix.scr.annotations.Component; |
| 30 | import org.apache.felix.scr.annotations.Deactivate; | 29 | import org.apache.felix.scr.annotations.Deactivate; |
| 31 | import org.apache.felix.scr.annotations.Service; | 30 | import org.apache.felix.scr.annotations.Service; |
| 32 | import org.joda.time.DateTime; | 31 | import org.joda.time.DateTime; |
| 32 | +import org.onlab.packet.IpAddress; | ||
| 33 | import org.onosproject.cluster.ClusterEvent; | 33 | import org.onosproject.cluster.ClusterEvent; |
| 34 | import org.onosproject.cluster.ClusterStore; | 34 | import org.onosproject.cluster.ClusterStore; |
| 35 | import org.onosproject.cluster.ClusterStoreDelegate; | 35 | import org.onosproject.cluster.ClusterStoreDelegate; |
| ... | @@ -39,7 +39,6 @@ import org.onosproject.cluster.NodeId; | ... | @@ -39,7 +39,6 @@ import org.onosproject.cluster.NodeId; |
| 39 | import org.onosproject.store.hz.AbsentInvalidatingLoadingCache; | 39 | import org.onosproject.store.hz.AbsentInvalidatingLoadingCache; |
| 40 | import org.onosproject.store.hz.AbstractHazelcastStore; | 40 | import org.onosproject.store.hz.AbstractHazelcastStore; |
| 41 | import org.onosproject.store.hz.OptionalCacheLoader; | 41 | import org.onosproject.store.hz.OptionalCacheLoader; |
| 42 | -import org.onlab.packet.IpAddress; | ||
| 43 | 42 | ||
| 44 | import java.util.Map; | 43 | import java.util.Map; |
| 45 | import java.util.Set; | 44 | import java.util.Set; |
| ... | @@ -131,6 +130,11 @@ public class HazelcastClusterStore | ... | @@ -131,6 +130,11 @@ public class HazelcastClusterStore |
| 131 | } | 130 | } |
| 132 | 131 | ||
| 133 | @Override | 132 | @Override |
| 133 | + public void formCluster(Set<ControllerNode> nodes, String ipPrefix) { | ||
| 134 | + throw new UnsupportedOperationException("formCluster not implemented"); | ||
| 135 | + } | ||
| 136 | + | ||
| 137 | + @Override | ||
| 134 | public ControllerNode addNode(NodeId nodeId, IpAddress ip, int tcpPort) { | 138 | public ControllerNode addNode(NodeId nodeId, IpAddress ip, int tcpPort) { |
| 135 | return addNode(new DefaultControllerNode(nodeId, ip, tcpPort)); | 139 | return addNode(new DefaultControllerNode(nodeId, ip, tcpPort)); |
| 136 | } | 140 | } | ... | ... |
| ... | @@ -15,16 +15,20 @@ | ... | @@ -15,16 +15,20 @@ |
| 15 | */ | 15 | */ |
| 16 | package org.onosproject.store.consistent.impl; | 16 | package org.onosproject.store.consistent.impl; |
| 17 | 17 | ||
| 18 | +import com.google.common.collect.ImmutableMap; | ||
| 19 | +import com.google.common.collect.ImmutableSet; | ||
| 20 | +import com.google.common.collect.Maps; | ||
| 21 | +import org.onosproject.store.cluster.impl.NodeInfo; | ||
| 22 | + | ||
| 23 | +import java.util.ArrayList; | ||
| 24 | +import java.util.Collections; | ||
| 25 | +import java.util.HashSet; | ||
| 26 | +import java.util.List; | ||
| 18 | import java.util.Map; | 27 | import java.util.Map; |
| 19 | import java.util.Set; | 28 | import java.util.Set; |
| 20 | 29 | ||
| 21 | -import org.onosproject.store.cluster.impl.NodeInfo; | ||
| 22 | - | ||
| 23 | import static com.google.common.base.Preconditions.checkNotNull; | 30 | import static com.google.common.base.Preconditions.checkNotNull; |
| 24 | 31 | ||
| 25 | -import com.google.common.collect.ImmutableMap; | ||
| 26 | -import com.google.common.collect.ImmutableSet; | ||
| 27 | - | ||
| 28 | /** | 32 | /** |
| 29 | * Partitioned database configuration. | 33 | * Partitioned database configuration. |
| 30 | */ | 34 | */ |
| ... | @@ -34,11 +38,13 @@ public class DatabaseDefinition { | ... | @@ -34,11 +38,13 @@ public class DatabaseDefinition { |
| 34 | 38 | ||
| 35 | /** | 39 | /** |
| 36 | * Creates a new DatabaseDefinition. | 40 | * Creates a new DatabaseDefinition. |
| 41 | + * | ||
| 37 | * @param partitions partition map | 42 | * @param partitions partition map |
| 38 | - * @param nodes set of nodes | 43 | + * @param nodes set of nodes |
| 39 | * @return database definition | 44 | * @return database definition |
| 40 | */ | 45 | */ |
| 41 | - public static DatabaseDefinition from(Map<String, Set<NodeInfo>> partitions, Set<NodeInfo> nodes) { | 46 | + public static DatabaseDefinition from(Map<String, Set<NodeInfo>> partitions, |
| 47 | + Set<NodeInfo> nodes) { | ||
| 42 | checkNotNull(partitions); | 48 | checkNotNull(partitions); |
| 43 | checkNotNull(nodes); | 49 | checkNotNull(nodes); |
| 44 | DatabaseDefinition definition = new DatabaseDefinition(); | 50 | DatabaseDefinition definition = new DatabaseDefinition(); |
| ... | @@ -48,7 +54,18 @@ public class DatabaseDefinition { | ... | @@ -48,7 +54,18 @@ public class DatabaseDefinition { |
| 48 | } | 54 | } |
| 49 | 55 | ||
| 50 | /** | 56 | /** |
| 57 | + * Creates a new DatabaseDefinition using default partitions. | ||
| 58 | + * | ||
| 59 | + * @param nodes set of nodes | ||
| 60 | + * @return database definition | ||
| 61 | + */ | ||
| 62 | + public static DatabaseDefinition from(Set<NodeInfo> nodes) { | ||
| 63 | + return from(generateDefaultPartitions(nodes), nodes); | ||
| 64 | + } | ||
| 65 | + | ||
| 66 | + /** | ||
| 51 | * Returns the map of database partitions. | 67 | * Returns the map of database partitions. |
| 68 | + * | ||
| 52 | * @return db partition map | 69 | * @return db partition map |
| 53 | */ | 70 | */ |
| 54 | public Map<String, Set<NodeInfo>> getPartitions() { | 71 | public Map<String, Set<NodeInfo>> getPartitions() { |
| ... | @@ -57,9 +74,35 @@ public class DatabaseDefinition { | ... | @@ -57,9 +74,35 @@ public class DatabaseDefinition { |
| 57 | 74 | ||
| 58 | /** | 75 | /** |
| 59 | * Returns the set of nodes. | 76 | * Returns the set of nodes. |
| 77 | + * | ||
| 60 | * @return nodes | 78 | * @return nodes |
| 61 | */ | 79 | */ |
| 62 | public Set<NodeInfo> getNodes() { | 80 | public Set<NodeInfo> getNodes() { |
| 63 | return nodes; | 81 | return nodes; |
| 64 | } | 82 | } |
| 83 | + | ||
| 84 | + | ||
| 85 | + /** | ||
| 86 | + * Generates set of default partitions using permutations of the nodes. | ||
| 87 | + * | ||
| 88 | + * @param nodes information about cluster nodes | ||
| 89 | + * @return default partition map | ||
| 90 | + */ | ||
| 91 | + private static Map<String, Set<NodeInfo>> generateDefaultPartitions(Set<NodeInfo> nodes) { | ||
| 92 | + List<NodeInfo> sorted = new ArrayList<>(nodes); | ||
| 93 | + Collections.sort(sorted, (o1, o2) -> o1.getId().compareTo(o2.getId())); | ||
| 94 | + Map<String, Set<NodeInfo>> partitions = Maps.newHashMap(); | ||
| 95 | + | ||
| 96 | + int length = nodes.size(); | ||
| 97 | + int count = 3; | ||
| 98 | + for (int i = 0; i < length; i++) { | ||
| 99 | + Set<NodeInfo> set = new HashSet<>(count); | ||
| 100 | + for (int j = 0; j < count; j++) { | ||
| 101 | + set.add(sorted.get((i + j) % length)); | ||
| 102 | + } | ||
| 103 | + partitions.put("p" + (i + 1), set); | ||
| 104 | + } | ||
| 105 | + return partitions; | ||
| 106 | + } | ||
| 107 | + | ||
| 65 | } | 108 | } |
| ... | \ No newline at end of file | ... | \ No newline at end of file | ... | ... |
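To make the partition scheme concrete: generateDefaultPartitions sorts the nodes by ID and assigns each of the N partitions to a window of three consecutive nodes, wrapping around. For four nodes sorted as [n1, n2, n3, n4] it produces:

    p1 = {n1, n2, n3}
    p2 = {n2, n3, n4}
    p3 = {n3, n4, n1}
    p4 = {n4, n1, n2}

The replication factor of 3 is hardcoded as count; with fewer than three nodes the modulo wraps back onto the same node and the set collapses the duplicates, so a single-node cluster simply yields p1 = {n1}.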
| ... | @@ -20,13 +20,14 @@ import static com.google.common.base.Preconditions.checkNotNull; | ... | @@ -20,13 +20,14 @@ import static com.google.common.base.Preconditions.checkNotNull; |
| 20 | import java.io.File; | 20 | import java.io.File; |
| 21 | import java.io.IOException; | 21 | import java.io.IOException; |
| 22 | import com.fasterxml.jackson.databind.ObjectMapper; | 22 | import com.fasterxml.jackson.databind.ObjectMapper; |
| 23 | +import com.google.common.io.Files; | ||
| 23 | 24 | ||
| 24 | /** | 25 | /** |
| 25 | * Allows for reading and writing partitioned database definition as a JSON file. | 26 | * Allows for reading and writing partitioned database definition as a JSON file. |
| 26 | */ | 27 | */ |
| 27 | public class DatabaseDefinitionStore { | 28 | public class DatabaseDefinitionStore { |
| 28 | 29 | ||
| 29 | - private final File definitionfile; | 30 | + private final File file; |
| 30 | 31 | ||
| 31 | /** | 32 | /** |
| 32 | * Creates a reader/writer of the database definition file. | 33 | * Creates a reader/writer of the database definition file. |
| ... | @@ -34,7 +35,7 @@ public class DatabaseDefinitionStore { | ... | @@ -34,7 +35,7 @@ public class DatabaseDefinitionStore { |
| 34 | * @param filePath location of the definition file | 35 | * @param filePath location of the definition file |
| 35 | */ | 36 | */ |
| 36 | public DatabaseDefinitionStore(String filePath) { | 37 | public DatabaseDefinitionStore(String filePath) { |
| 37 | - definitionfile = new File(checkNotNull(filePath)); | 38 | + file = new File(checkNotNull(filePath)); |
| 38 | } | 39 | } |
| 39 | 40 | ||
| 40 | /** | 41 | /** |
| ... | @@ -43,7 +44,7 @@ public class DatabaseDefinitionStore { | ... | @@ -43,7 +44,7 @@ public class DatabaseDefinitionStore { |
| 43 | * @param filePath location of the definition file | 44 | * @param filePath location of the definition file |
| 44 | */ | 45 | */ |
| 45 | public DatabaseDefinitionStore(File filePath) { | 46 | public DatabaseDefinitionStore(File filePath) { |
| 46 | - definitionfile = checkNotNull(filePath); | 47 | + file = checkNotNull(filePath); |
| 47 | } | 48 | } |
| 48 | 49 | ||
| 49 | /** | 50 | /** |
| ... | @@ -54,8 +55,7 @@ public class DatabaseDefinitionStore { | ... | @@ -54,8 +55,7 @@ public class DatabaseDefinitionStore { |
| 54 | */ | 55 | */ |
| 55 | public DatabaseDefinition read() throws IOException { | 56 | public DatabaseDefinition read() throws IOException { |
| 56 | ObjectMapper mapper = new ObjectMapper(); | 57 | ObjectMapper mapper = new ObjectMapper(); |
| 57 | - DatabaseDefinition definition = mapper.readValue(definitionfile, DatabaseDefinition.class); | 58 | + return mapper.readValue(file, DatabaseDefinition.class); |
| 58 | - return definition; | ||
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | /** | 61 | /** |
| ... | @@ -67,7 +67,8 @@ public class DatabaseDefinitionStore { | ... | @@ -67,7 +67,8 @@ public class DatabaseDefinitionStore { |
| 67 | public void write(DatabaseDefinition definition) throws IOException { | 67 | public void write(DatabaseDefinition definition) throws IOException { |
| 68 | checkNotNull(definition); | 68 | checkNotNull(definition); |
| 69 | // write back to file | 69 | // write back to file |
| 70 | - final ObjectMapper mapper = new ObjectMapper(); | 70 | + Files.createParentDirs(file); |
| 71 | - mapper.writeValue(definitionfile, definition); | 71 | + ObjectMapper mapper = new ObjectMapper(); |
| 72 | + mapper.writeValue(file, definition); | ||
| 72 | } | 73 | } |
| 73 | } | 74 | } | ... | ... |
| ... | @@ -16,11 +16,9 @@ | ... | @@ -16,11 +16,9 @@ |
| 16 | 16 | ||
| 17 | package org.onosproject.store.consistent.impl; | 17 | package org.onosproject.store.consistent.impl; |
| 18 | 18 | ||
| 19 | -import com.google.common.collect.ImmutableMap; | ||
| 20 | import com.google.common.collect.ImmutableSet; | 19 | import com.google.common.collect.ImmutableSet; |
| 21 | import com.google.common.collect.Lists; | 20 | import com.google.common.collect.Lists; |
| 22 | import com.google.common.collect.Sets; | 21 | import com.google.common.collect.Sets; |
| 23 | - | ||
| 24 | import net.kuujo.copycat.CopycatConfig; | 22 | import net.kuujo.copycat.CopycatConfig; |
| 25 | import net.kuujo.copycat.cluster.ClusterConfig; | 23 | import net.kuujo.copycat.cluster.ClusterConfig; |
| 26 | import net.kuujo.copycat.cluster.Member; | 24 | import net.kuujo.copycat.cluster.Member; |
| ... | @@ -34,7 +32,6 @@ import net.kuujo.copycat.netty.NettyTcpProtocol; | ... | @@ -34,7 +32,6 @@ import net.kuujo.copycat.netty.NettyTcpProtocol; |
| 34 | import net.kuujo.copycat.protocol.Consistency; | 32 | import net.kuujo.copycat.protocol.Consistency; |
| 35 | import net.kuujo.copycat.protocol.Protocol; | 33 | import net.kuujo.copycat.protocol.Protocol; |
| 36 | import net.kuujo.copycat.util.concurrent.NamedThreadFactory; | 34 | import net.kuujo.copycat.util.concurrent.NamedThreadFactory; |
| 37 | - | ||
| 38 | import org.apache.felix.scr.annotations.Activate; | 35 | import org.apache.felix.scr.annotations.Activate; |
| 39 | import org.apache.felix.scr.annotations.Component; | 36 | import org.apache.felix.scr.annotations.Component; |
| 40 | import org.apache.felix.scr.annotations.Deactivate; | 37 | import org.apache.felix.scr.annotations.Deactivate; |
| ... | @@ -75,17 +72,19 @@ import static org.slf4j.LoggerFactory.getLogger; | ... | @@ -75,17 +72,19 @@ import static org.slf4j.LoggerFactory.getLogger; |
| 75 | public class DatabaseManager implements StorageService, StorageAdminService { | 72 | public class DatabaseManager implements StorageService, StorageAdminService { |
| 76 | 73 | ||
| 77 | private final Logger log = getLogger(getClass()); | 74 | private final Logger log = getLogger(getClass()); |
| 78 | - private ClusterCoordinator coordinator; | 75 | + |
| 79 | - private PartitionedDatabase partitionedDatabase; | ||
| 80 | - private Database inMemoryDatabase; | ||
| 81 | public static final int COPYCAT_TCP_PORT = 7238; // 7238 = RAFT | 76 | public static final int COPYCAT_TCP_PORT = 7238; // 7238 = RAFT |
| 82 | - private static final String CONFIG_DIR = "../config"; | 77 | + public static final String PARTITION_DEFINITION_FILE = "../config/tablets.json"; |
| 83 | - private static final String PARTITION_DEFINITION_FILE = "tablets.json"; | ||
| 84 | - private static final int DATABASE_STARTUP_TIMEOUT_SEC = 60; | ||
| 85 | public static final String BASE_PARTITION_NAME = "p0"; | 78 | public static final String BASE_PARTITION_NAME = "p0"; |
| 79 | + | ||
| 80 | + private static final int DATABASE_STARTUP_TIMEOUT_SEC = 60; | ||
| 86 | private static final int RAFT_ELECTION_TIMEOUT = 3000; | 81 | private static final int RAFT_ELECTION_TIMEOUT = 3000; |
| 87 | private static final int RAFT_HEARTBEAT_TIMEOUT = 1500; | 82 | private static final int RAFT_HEARTBEAT_TIMEOUT = 1500; |
| 88 | 83 | ||
| 84 | + private ClusterCoordinator coordinator; | ||
| 85 | + private PartitionedDatabase partitionedDatabase; | ||
| 86 | + private Database inMemoryDatabase; | ||
| 87 | + | ||
| 89 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | 88 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) |
| 90 | protected ClusterService clusterService; | 89 | protected ClusterService clusterService; |
| 91 | 90 | ||
| ... | @@ -98,15 +97,14 @@ public class DatabaseManager implements StorageService, StorageAdminService { | ... | @@ -98,15 +97,14 @@ public class DatabaseManager implements StorageService, StorageAdminService { |
| 98 | 97 | ||
| 99 | @Activate | 98 | @Activate |
| 100 | public void activate() { | 99 | public void activate() { |
| 101 | - | ||
| 102 | // load database configuration | 100 | // load database configuration |
| 103 | - File file = new File(CONFIG_DIR, PARTITION_DEFINITION_FILE); | 101 | + File databaseDefFile = new File(PARTITION_DEFINITION_FILE); |
| 104 | - log.info("Loading database definition: {}", file.getAbsolutePath()); | 102 | + log.info("Loading database definition: {}", databaseDefFile.getAbsolutePath()); |
| 105 | 103 | ||
| 106 | Map<String, Set<NodeInfo>> partitionMap; | 104 | Map<String, Set<NodeInfo>> partitionMap; |
| 107 | try { | 105 | try { |
| 108 | - DatabaseDefinitionStore databaseDefStore = new DatabaseDefinitionStore(file); | 106 | + DatabaseDefinitionStore databaseDefStore = new DatabaseDefinitionStore(databaseDefFile); |
| 109 | - if (!file.exists()) { | 107 | + if (!databaseDefFile.exists()) { |
| 110 | createDefaultDatabaseDefinition(databaseDefStore); | 108 | createDefaultDatabaseDefinition(databaseDefStore); |
| 111 | } | 109 | } |
| 112 | partitionMap = databaseDefStore.read().getPartitions(); | 110 | partitionMap = databaseDefStore.read().getPartitions(); |
| ... | @@ -189,10 +187,9 @@ public class DatabaseManager implements StorageService, StorageAdminService { | ... | @@ -189,10 +187,9 @@ public class DatabaseManager implements StorageService, StorageAdminService { |
| 189 | private void createDefaultDatabaseDefinition(DatabaseDefinitionStore store) { | 187 | private void createDefaultDatabaseDefinition(DatabaseDefinitionStore store) { |
| 190 | // Assumes IPv4 is returned. | 188 | // Assumes IPv4 is returned. |
| 191 | String ip = DistributedClusterStore.getSiteLocalAddress(); | 189 | String ip = DistributedClusterStore.getSiteLocalAddress(); |
| 192 | - NodeInfo node = NodeInfo.from(ip, ip, DistributedClusterStore.DEFAULT_PORT); | 190 | + NodeInfo node = NodeInfo.from(ip, ip, COPYCAT_TCP_PORT); |
| 193 | try { | 191 | try { |
| 194 | - store.write(DatabaseDefinition.from(ImmutableMap.of("p1", ImmutableSet.of(node)), | 192 | + store.write(DatabaseDefinition.from(ImmutableSet.of(node))); |
| 195 | - ImmutableSet.of(node))); | ||
| 196 | } catch (IOException e) { | 193 | } catch (IOException e) { |
| 197 | log.warn("Unable to write default cluster definition", e); | 194 | log.warn("Unable to write default cluster definition", e); |
| 198 | } | 195 | } | ... | ... |
| ... | @@ -15,10 +15,7 @@ | ... | @@ -15,10 +15,7 @@ |
| 15 | */ | 15 | */ |
| 16 | package org.onosproject.store.trivial.impl; | 16 | package org.onosproject.store.trivial.impl; |
| 17 | 17 | ||
| 18 | -import static org.slf4j.LoggerFactory.getLogger; | 18 | +import com.google.common.collect.ImmutableSet; |
| 19 | - | ||
| 20 | -import java.util.Set; | ||
| 21 | - | ||
| 22 | import org.apache.felix.scr.annotations.Activate; | 19 | import org.apache.felix.scr.annotations.Activate; |
| 23 | import org.apache.felix.scr.annotations.Component; | 20 | import org.apache.felix.scr.annotations.Component; |
| 24 | import org.apache.felix.scr.annotations.Deactivate; | 21 | import org.apache.felix.scr.annotations.Deactivate; |
| ... | @@ -36,7 +33,9 @@ import org.onosproject.net.intent.PartitionService; | ... | @@ -36,7 +33,9 @@ import org.onosproject.net.intent.PartitionService; |
| 36 | import org.onosproject.store.AbstractStore; | 33 | import org.onosproject.store.AbstractStore; |
| 37 | import org.slf4j.Logger; | 34 | import org.slf4j.Logger; |
| 38 | 35 | ||
| 39 | -import com.google.common.collect.ImmutableSet; | 36 | +import java.util.Set; |
| 37 | + | ||
| 38 | +import static org.slf4j.LoggerFactory.getLogger; | ||
| 40 | 39 | ||
| 41 | /** | 40 | /** |
| 42 | * Manages inventory of infrastructure devices using trivial in-memory | 41 | * Manages inventory of infrastructure devices using trivial in-memory |
| ... | @@ -94,6 +93,11 @@ public class SimpleClusterStore | ... | @@ -94,6 +93,11 @@ public class SimpleClusterStore |
| 94 | } | 93 | } |
| 95 | 94 | ||
| 96 | @Override | 95 | @Override |
| 96 | + public void formCluster(Set<ControllerNode> nodes, String ipPrefix) { | ||
| 97 | + | ||
| 98 | + } | ||
| 99 | + | ||
| 100 | + @Override | ||
| 97 | public ControllerNode addNode(NodeId nodeId, IpAddress ip, int tcpPort) { | 101 | public ControllerNode addNode(NodeId nodeId, IpAddress ip, int tcpPort) { |
| 98 | return null; | 102 | return null; |
| 99 | } | 103 | } | ... | ... |
| ... | @@ -258,7 +258,12 @@ | ... | @@ -258,7 +258,12 @@ |
| 258 | <version>${karaf.version}</version> | 258 | <version>${karaf.version}</version> |
| 259 | <scope>provided</scope> | 259 | <scope>provided</scope> |
| 260 | </dependency> | 260 | </dependency> |
| 261 | - | 261 | + <dependency> |
| 262 | + <groupId>org.apache.karaf.system</groupId> | ||
| 263 | + <artifactId>org.apache.karaf.system.core</artifactId> | ||
| 264 | + <version>${karaf.version}</version> | ||
| 265 | + <scope>provided</scope> | ||
| 266 | + </dependency> | ||
| 262 | <dependency> | 267 | <dependency> |
| 263 | <groupId>org.apache.karaf.shell</groupId> | 268 | <groupId>org.apache.karaf.shell</groupId> |
| 264 | <artifactId>org.apache.karaf.shell.console</artifactId> | 269 | <artifactId>org.apache.karaf.shell.console</artifactId> | ... | ... |
tools/test/bin/onos-form-cluster (new file, mode 100755)
| 1 | +#!/bin/bash | ||
| 2 | +# ----------------------------------------------------------------------------- | ||
| 3 | +# Forms an ONOS cluster using the REST API. | ||
| 4 | +# ----------------------------------------------------------------------------- | ||
| 5 | + | ||
| 6 | +[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1 | ||
| 7 | +. $ONOS_ROOT/tools/build/envDefaults | ||
| 8 | + | ||
| 9 | +ip=${1:-$OCI} | ||
| 10 | + | ||
| 11 | +if [ "$ip" = "cell" ]; then | ||
| 12 | + ip=$OC1 | ||
| 13 | + nodes=$(env | grep "OC[0-9]*=" | grep -v "OC1=" | cut -d= -f2) | ||
| 14 | +else | ||
| 15 | + shift | ||
| 16 | + nodes=$* | ||
| 17 | +fi | ||
| 18 | + | ||
| 19 | +ipPrefix=${ip%.*} | ||
| 20 | + | ||
| 21 | +aux=/tmp/${ipPrefix}.cluster.json | ||
| 22 | +trap "rm -f $aux" EXIT | ||
| 23 | + | ||
| 24 | +echo "{ \"nodes\": [ { \"ip\": \"$ip\" }" > $aux | ||
| 25 | +for node in $nodes; do | ||
| 26 | + echo ", { \"ip\": \"$node\" }" >> $aux | ||
| 27 | +done | ||
| 28 | +echo "], \"ipPrefix\": \"$ipPrefix.*\" }" >> $aux | ||
| 29 | + | ||
| 30 | +for node in $ip $nodes; do | ||
| 31 | + echo "Forming cluster on $node..." | ||
| 32 | + curl -X POST http://$node:8181/onos/v1/cluster/configuration -d @$aux | ||
| 33 | +done | ||
| ... | \ No newline at end of file | ... | \ No newline at end of file |
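Usage of the new script, as implied by its argument handling (addresses illustrative):

    # Form a cluster from the current cell definition (OC1 first, then OC2, OC3, ...):
    onos-form-cluster cell

    # Or name the nodes explicitly; the first address also seeds the IP prefix:
    onos-form-cluster 10.0.1.1 10.0.1.2 10.0.1.3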
| 1 | +/* | ||
| 2 | + * Copyright 2015 Open Networking Laboratory | ||
| 3 | + * | ||
| 4 | + * Licensed under the Apache License, Version 2.0 (the "License"); | ||
| 5 | + * you may not use this file except in compliance with the License. | ||
| 6 | + * You may obtain a copy of the License at | ||
| 7 | + * | ||
| 8 | + * http://www.apache.org/licenses/LICENSE-2.0 | ||
| 9 | + * | ||
| 10 | + * Unless required by applicable law or agreed to in writing, software | ||
| 11 | + * distributed under the License is distributed on an "AS IS" BASIS, | ||
| 12 | + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| 13 | + * See the License for the specific language governing permissions and | ||
| 14 | + * limitations under the License. | ||
| 15 | + */ | ||
| 16 | +package org.onosproject.rest; | ||
| 17 | + | ||
| 18 | +import com.fasterxml.jackson.databind.node.ArrayNode; | ||
| 19 | +import com.fasterxml.jackson.databind.node.ObjectNode; | ||
| 20 | +import org.onosproject.cluster.ClusterAdminService; | ||
| 21 | +import org.onosproject.cluster.ClusterService; | ||
| 22 | +import org.onosproject.cluster.ControllerNode; | ||
| 23 | +import org.onosproject.cluster.NodeId; | ||
| 24 | +import org.onosproject.codec.JsonCodec; | ||
| 25 | + | ||
| 26 | +import javax.ws.rs.GET; | ||
| 27 | +import javax.ws.rs.POST; | ||
| 28 | +import javax.ws.rs.Path; | ||
| 29 | +import javax.ws.rs.PathParam; | ||
| 30 | +import javax.ws.rs.core.Response; | ||
| 31 | +import java.io.IOException; | ||
| 32 | +import java.io.InputStream; | ||
| 33 | +import java.util.HashSet; | ||
| 34 | +import java.util.List; | ||
| 35 | + | ||
| 36 | +/** | ||
| 37 | + * REST resource for interacting with the ONOS cluster subsystem. | ||
| 38 | + */ | ||
| 39 | +@Path("cluster") | ||
| 40 | +public class ClusterWebResource extends AbstractWebResource { | ||
| 41 | + | ||
| 42 | + public static final String NODE_NOT_FOUND = "Node is not found"; | ||
| 43 | + | ||
| 44 | + @GET | ||
| 45 | + public Response getClusterNodes() { | ||
| 46 | + Iterable<ControllerNode> nodes = get(ClusterService.class).getNodes(); | ||
| 47 | + return ok(encodeArray(ControllerNode.class, "nodes", nodes)).build(); | ||
| 48 | + } | ||
| 49 | + | ||
| 50 | + @GET | ||
| 51 | + @Path("{id}") | ||
| 52 | + public Response getClusterNode(@PathParam("id") String id) { | ||
| 53 | + ControllerNode node = nullIsNotFound(get(ClusterService.class).getNode(new NodeId(id)), | ||
| 54 | + NODE_NOT_FOUND); | ||
| 55 | + return ok(codec(ControllerNode.class).encode(node, this)).build(); | ||
| 56 | + } | ||
| 57 | + | ||
| 58 | + @POST | ||
| 59 | + @Path("configuration") | ||
| 60 | + public Response formCluster(InputStream config) throws IOException { | ||
| 61 | + JsonCodec<ControllerNode> codec = codec(ControllerNode.class); | ||
| 62 | + ObjectNode root = (ObjectNode) mapper().readTree(config); | ||
| 63 | + String ipPrefix = root.path("ipPrefix").asText(); | ||
| 64 | + | ||
| 65 | + List<ControllerNode> nodes = codec.decode((ArrayNode) root.path("nodes"), this); | ||
| 66 | + get(ClusterAdminService.class).formCluster(new HashSet<>(nodes), ipPrefix); | ||
| 67 | + | ||
| 68 | + return Response.ok().build(); | ||
| 69 | + } | ||
| 70 | + | ||
| 71 | +} |
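End to end, the new endpoint takes the same payload the onos-form-cluster script builds. A hedged example against one controller (addresses illustrative); in practice each listed node gets the POST so all of them rewrite their definitions and restart:

    curl -X POST http://10.0.1.1:8181/onos/v1/cluster/configuration \
         -d '{ "nodes": [ { "ip": "10.0.1.1" }, { "ip": "10.0.1.2" },
                          { "ip": "10.0.1.3" } ], "ipPrefix": "10.0.1.*" }'

Because ControllerNodeCodec defaults a missing id to the ip and a missing tcpPort to 9876, listing bare IPs is sufficient.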
| ... | @@ -62,6 +62,7 @@ | ... | @@ -62,6 +62,7 @@ |
| 62 | org.onosproject.rest.JsonBodyWriter, | 62 | org.onosproject.rest.JsonBodyWriter, |
| 63 | 63 | ||
| 64 | org.onosproject.rest.ApplicationsWebResource, | 64 | org.onosproject.rest.ApplicationsWebResource, |
| 65 | + org.onosproject.rest.ClusterWebResource, | ||
| 65 | org.onosproject.rest.DevicesWebResource, | 66 | org.onosproject.rest.DevicesWebResource, |
| 66 | org.onosproject.rest.LinksWebResource, | 67 | org.onosproject.rest.LinksWebResource, |
| 67 | org.onosproject.rest.HostsWebResource, | 68 | org.onosproject.rest.HostsWebResource, | ... | ... |