Merge remote-tracking branch 'origin/master'
Conflicts:
	apps/foo/pom.xml
	apps/foo/src/main/java/org/onlab/onos/ccc/DistributedClusterStore.java
	cli/src/main/java/org/onlab/onos/cli/NodeAddCommand.java
	cli/src/main/resources/OSGI-INF/blueprint/shell-config.xml
	tools/test/bin/onos-config
	utils/nio/src/main/java/org/onlab/nio/IOLoop.java
Showing 78 changed files with 2173 additions and 743 deletions
apps/config/pom.xml
0 → 100644
1 | +<?xml version="1.0" encoding="UTF-8"?> | ||
2 | +<project xmlns="http://maven.apache.org/POM/4.0.0" | ||
3 | + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" | ||
4 | + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> | ||
5 | + <modelVersion>4.0.0</modelVersion> | ||
6 | + | ||
7 | + <parent> | ||
8 | + <groupId>org.onlab.onos</groupId> | ||
9 | + <artifactId>onos-apps</artifactId> | ||
10 | + <version>1.0.0-SNAPSHOT</version> | ||
11 | + <relativePath>../pom.xml</relativePath> | ||
12 | + </parent> | ||
13 | + | ||
14 | + <artifactId>onos-app-config</artifactId> | ||
15 | + <packaging>bundle</packaging> | ||
16 | + | ||
17 | + <description>ONOS simple network configuration reader</description> | ||
18 | + | ||
19 | + <dependencies> | ||
20 | + <dependency> | ||
21 | + <groupId>org.codehaus.jackson</groupId> | ||
22 | + <artifactId>jackson-core-asl</artifactId> | ||
23 | + </dependency> | ||
24 | + <dependency> | ||
25 | + <groupId>org.codehaus.jackson</groupId> | ||
26 | + <artifactId>jackson-mapper-asl</artifactId> | ||
27 | + </dependency> | ||
28 | + <dependency> | ||
29 | + <groupId>com.fasterxml.jackson.core</groupId> | ||
30 | + <artifactId>jackson-annotations</artifactId> | ||
31 | + <version>2.4.2</version> | ||
32 | + <scope>provided</scope> | ||
33 | + </dependency> | ||
34 | + </dependencies> | ||
35 | + | ||
36 | +</project> |
1 | +package org.onlab.onos.config; | ||
2 | + | ||
3 | +import java.util.Collections; | ||
4 | +import java.util.List; | ||
5 | + | ||
6 | +import org.codehaus.jackson.annotate.JsonProperty; | ||
7 | + | ||
8 | +/** | ||
9 | + * Object to store address configuration read from a JSON file. | ||
10 | + */ | ||
11 | +public class AddressConfiguration { | ||
12 | + | ||
13 | + private List<AddressEntry> addresses; | ||
14 | + | ||
15 | + /** | ||
16 | + * Gets a list of addresses in the system. | ||
17 | + * | ||
18 | + * @return the list of addresses | ||
19 | + */ | ||
20 | + public List<AddressEntry> getAddresses() { | ||
21 | + return Collections.unmodifiableList(addresses); | ||
22 | + } | ||
23 | + | ||
24 | + /** | ||
25 | + * Sets a list of addresses in the system. | ||
26 | + * | ||
27 | + * @param addresses the list of addresses | ||
28 | + */ | ||
29 | + @JsonProperty("addresses") | ||
30 | + public void setAddresses(List<AddressEntry> addresses) { | ||
31 | + this.addresses = addresses; | ||
32 | + } | ||
33 | + | ||
34 | +} |
1 | +package org.onlab.onos.config; | ||
2 | + | ||
3 | +import java.util.List; | ||
4 | + | ||
5 | +import org.codehaus.jackson.annotate.JsonProperty; | ||
6 | +import org.onlab.packet.IpPrefix; | ||
7 | +import org.onlab.packet.MacAddress; | ||
8 | + | ||
9 | +/** | ||
10 | + * Represents a set of addresses bound to a port. | ||
11 | + */ | ||
12 | +public class AddressEntry { | ||
13 | + private String dpid; | ||
14 | + private short portNumber; | ||
15 | + private List<IpPrefix> ipAddresses; | ||
16 | + private MacAddress macAddress; | ||
17 | + | ||
18 | + public String getDpid() { | ||
19 | + return dpid; | ||
20 | + } | ||
21 | + | ||
22 | + @JsonProperty("dpid") | ||
23 | + public void setDpid(String strDpid) { | ||
24 | + this.dpid = strDpid; | ||
25 | + } | ||
26 | + | ||
27 | + public short getPortNumber() { | ||
28 | + return portNumber; | ||
29 | + } | ||
30 | + | ||
31 | + @JsonProperty("port") | ||
32 | + public void setPortNumber(short portNumber) { | ||
33 | + this.portNumber = portNumber; | ||
34 | + } | ||
35 | + | ||
36 | + public List<IpPrefix> getIpAddresses() { | ||
37 | + return ipAddresses; | ||
38 | + } | ||
39 | + | ||
40 | + @JsonProperty("ips") | ||
41 | + public void setIpAddresses(List<IpPrefix> ipAddresses) { | ||
42 | + this.ipAddresses = ipAddresses; | ||
43 | + } | ||
44 | + | ||
45 | + public MacAddress getMacAddress() { | ||
46 | + return macAddress; | ||
47 | + } | ||
48 | + | ||
49 | + @JsonProperty("mac") | ||
50 | + public void setMacAddress(MacAddress macAddress) { | ||
51 | + this.macAddress = macAddress; | ||
52 | + } | ||
53 | +} |
1 | +package org.onlab.onos.config; | ||
2 | + | ||
3 | +import static org.slf4j.LoggerFactory.getLogger; | ||
4 | + | ||
5 | +import java.io.File; | ||
6 | +import java.io.FileNotFoundException; | ||
7 | +import java.io.IOException; | ||
8 | + | ||
9 | +import org.apache.felix.scr.annotations.Activate; | ||
10 | +import org.apache.felix.scr.annotations.Component; | ||
11 | +import org.apache.felix.scr.annotations.Deactivate; | ||
12 | +import org.apache.felix.scr.annotations.Reference; | ||
13 | +import org.apache.felix.scr.annotations.ReferenceCardinality; | ||
14 | +import org.codehaus.jackson.map.ObjectMapper; | ||
15 | +import org.onlab.onos.net.ConnectPoint; | ||
16 | +import org.onlab.onos.net.DeviceId; | ||
17 | +import org.onlab.onos.net.PortNumber; | ||
18 | +import org.onlab.onos.net.host.HostAdminService; | ||
19 | +import org.onlab.onos.net.host.PortAddresses; | ||
20 | +import org.slf4j.Logger; | ||
21 | + | ||
22 | +import com.google.common.collect.Sets; | ||
23 | + | ||
24 | +/** | ||
25 | + * Simple configuration module to read in supplementary network configuration | ||
26 | + * from a file. | ||
27 | + */ | ||
28 | +@Component(immediate = true) | ||
29 | +public class NetworkConfigReader { | ||
30 | + | ||
31 | + private final Logger log = getLogger(getClass()); | ||
32 | + | ||
33 | + private static final String DEFAULT_CONFIG_FILE = "config/addresses.json"; | ||
34 | + private String configFileName = DEFAULT_CONFIG_FILE; | ||
35 | + | ||
36 | + @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
37 | + protected HostAdminService hostAdminService; | ||
38 | + | ||
39 | + @Activate | ||
40 | + protected void activate() { | ||
41 | + log.info("Started network config reader"); | ||
42 | + | ||
43 | + log.info("Config file set to {}", configFileName); | ||
44 | + | ||
45 | + AddressConfiguration config = readNetworkConfig(); | ||
46 | + | ||
47 | + if (config != null) { | ||
48 | + for (AddressEntry entry : config.getAddresses()) { | ||
49 | + | ||
50 | + ConnectPoint cp = new ConnectPoint( | ||
51 | + DeviceId.deviceId(dpidToUri(entry.getDpid())), | ||
52 | + PortNumber.portNumber(entry.getPortNumber())); | ||
53 | + | ||
54 | + PortAddresses addresses = new PortAddresses(cp, | ||
55 | + Sets.newHashSet(entry.getIpAddresses()), | ||
56 | + entry.getMacAddress()); | ||
57 | + | ||
58 | + hostAdminService.bindAddressesToPort(addresses); | ||
59 | + } | ||
60 | + } | ||
61 | + } | ||
62 | + | ||
63 | + @Deactivate | ||
64 | + protected void deactivate() { | ||
65 | + log.info("Stopped"); | ||
66 | + } | ||
67 | + | ||
68 | + private AddressConfiguration readNetworkConfig() { | ||
69 | + File configFile = new File(configFileName); | ||
70 | + | ||
71 | + ObjectMapper mapper = new ObjectMapper(); | ||
72 | + | ||
73 | + try { | ||
74 | + AddressConfiguration config = | ||
75 | + mapper.readValue(configFile, AddressConfiguration.class); | ||
76 | + | ||
77 | + return config; | ||
78 | + } catch (FileNotFoundException e) { | ||
79 | + log.warn("Configuration file not found: {}", configFileName); | ||
80 | + } catch (IOException e) { | ||
81 | + log.error("Unable to read config from file:", e); | ||
82 | + } | ||
83 | + | ||
84 | + return null; | ||
85 | + } | ||
86 | + | ||
87 | + private static String dpidToUri(String dpid) { | ||
88 | + return "of:" + dpid.replace(":", ""); | ||
89 | + } | ||
90 | +} |
apps/config/src/main/resources/config.json
0 → 100644
1 | +{ | ||
2 | + "interfaces" : [ | ||
3 | + { | ||
4 | + "dpid" : "00:00:00:00:00:00:01", | ||
5 | + "port" : "1", | ||
6 | + "ips" : ["192.168.10.101/24"], | ||
7 | + "mac" : "00:00:00:11:22:33" | ||
8 | + }, | ||
9 | + { | ||
10 | + "dpid" : "00:00:00:00:00:00:02", | ||
11 | + "port" : "1", | ||
12 | + "ips" : ["192.168.20.101/24", "192.168.30.101/24"] | ||
13 | + }, | ||
14 | + { | ||
15 | + "dpid" : "00:00:00:00:00:00:03", | ||
16 | + "port" : "1", | ||
17 | + "ips" : ["10.1.0.1/16"], | ||
18 | + "mac" : "00:00:00:00:00:01" | ||
19 | + } | ||
20 | + ] | ||
21 | +} |
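Note that NetworkConfigReader above binds the top-level "addresses" property (via the @JsonProperty annotation on AddressConfiguration), whereas this sample file uses an "interfaces" key. The following is a hedged, standalone sketch (not part of this commit) of the same Jackson binding path the component uses; the class name and file path are illustrative only.

package org.onlab.onos.config;

import java.io.File;

import org.codehaus.jackson.map.ObjectMapper;

/**
 * Illustrative sketch only: exercises the same binding that
 * NetworkConfigReader.readNetworkConfig() performs.
 */
public final class AddressConfigCheck {
    public static void main(String[] args) throws Exception {
        // Hypothetical location; the component itself defaults to config/addresses.json.
        File configFile = new File(args.length > 0 ? args[0] : "config/addresses.json");
        ObjectMapper mapper = new ObjectMapper();
        AddressConfiguration config =
                mapper.readValue(configFile, AddressConfiguration.class);
        for (AddressEntry entry : config.getAddresses()) {
            System.out.println(entry.getDpid() + "/" + entry.getPortNumber()
                    + " -> " + entry.getIpAddresses() + " " + entry.getMacAddress());
        }
    }
}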
@@ -28,20 +28,6 @@
28 | <version>${project.version}</version> | 28 | <version>${project.version}</version> |
29 | </dependency> | 29 | </dependency> |
30 | <dependency> | 30 | <dependency> |
31 | - <groupId>org.livetribe.slp</groupId> | ||
32 | - <artifactId>livetribe-slp</artifactId> | ||
33 | - </dependency> | ||
34 | - | ||
35 | - <dependency> | ||
36 | - <groupId>com.fasterxml.jackson.core</groupId> | ||
37 | - <artifactId>jackson-databind</artifactId> | ||
38 | - </dependency> | ||
39 | - <dependency> | ||
40 | - <groupId>com.fasterxml.jackson.core</groupId> | ||
41 | - <artifactId>jackson-annotations</artifactId> | ||
42 | - </dependency> | ||
43 | - | ||
44 | - <dependency> | ||
45 | <groupId>org.apache.karaf.shell</groupId> | 31 | <groupId>org.apache.karaf.shell</groupId> |
46 | <artifactId>org.apache.karaf.shell.console</artifactId> | 32 | <artifactId>org.apache.karaf.shell.console</artifactId> |
47 | </dependency> | 33 | </dependency> | ... | ... |
1 | -package org.onlab.onos.ccc; | ||
2 | - | ||
3 | -import com.google.common.collect.ImmutableSet; | ||
4 | -import org.apache.felix.scr.annotations.Activate; | ||
5 | -import org.apache.felix.scr.annotations.Component; | ||
6 | -import org.apache.felix.scr.annotations.Deactivate; | ||
7 | -import org.apache.felix.scr.annotations.Service; | ||
8 | -import org.onlab.nio.AcceptorLoop; | ||
9 | -import org.onlab.nio.IOLoop; | ||
10 | -import org.onlab.nio.MessageStream; | ||
11 | -import org.onlab.onos.cluster.ClusterEvent; | ||
12 | -import org.onlab.onos.cluster.ClusterStore; | ||
13 | -import org.onlab.onos.cluster.ClusterStoreDelegate; | ||
14 | -import org.onlab.onos.cluster.ControllerNode; | ||
15 | -import org.onlab.onos.cluster.DefaultControllerNode; | ||
16 | -import org.onlab.onos.cluster.NodeId; | ||
17 | -import org.onlab.onos.store.AbstractStore; | ||
18 | -import org.onlab.packet.IpPrefix; | ||
19 | -import org.slf4j.Logger; | ||
20 | -import org.slf4j.LoggerFactory; | ||
21 | - | ||
22 | -import java.io.IOException; | ||
23 | -import java.net.InetSocketAddress; | ||
24 | -import java.net.Socket; | ||
25 | -import java.net.SocketAddress; | ||
26 | -import java.nio.channels.ByteChannel; | ||
27 | -import java.nio.channels.ServerSocketChannel; | ||
28 | -import java.nio.channels.SocketChannel; | ||
29 | -import java.util.ArrayList; | ||
30 | -import java.util.List; | ||
31 | -import java.util.Map; | ||
32 | -import java.util.Set; | ||
33 | -import java.util.Timer; | ||
34 | -import java.util.TimerTask; | ||
35 | -import java.util.concurrent.ConcurrentHashMap; | ||
36 | -import java.util.concurrent.ExecutorService; | ||
37 | -import java.util.concurrent.Executors; | ||
38 | - | ||
39 | -import static java.net.InetAddress.getByAddress; | ||
40 | -import static org.onlab.onos.cluster.ControllerNode.State; | ||
41 | -import static org.onlab.packet.IpPrefix.valueOf; | ||
42 | -import static org.onlab.util.Tools.namedThreads; | ||
43 | - | ||
44 | -/** | ||
45 | - * Distributed implementation of the cluster nodes store. | ||
46 | - */ | ||
47 | -@Component(immediate = true) | ||
48 | -@Service | ||
49 | -public class DistributedClusterStore | ||
50 | - extends AbstractStore<ClusterEvent, ClusterStoreDelegate> | ||
51 | - implements ClusterStore { | ||
52 | - | ||
53 | - private final Logger log = LoggerFactory.getLogger(getClass()); | ||
54 | - | ||
55 | - private static final long CONNECTION_CUSTODIAN_DELAY = 100L; | ||
56 | - private static final long CONNECTION_CUSTODIAN_FREQUENCY = 5000; | ||
57 | - | ||
58 | - private static final long SELECT_TIMEOUT = 50; | ||
59 | - private static final int WORKERS = 3; | ||
60 | - private static final int INITIATORS = 2; | ||
61 | - private static final int COMM_BUFFER_SIZE = 16 * 1024; | ||
62 | - private static final int COMM_IDLE_TIME = 500; | ||
63 | - | ||
64 | - private static final boolean SO_NO_DELAY = false; | ||
65 | - private static final int SO_SEND_BUFFER_SIZE = 128 * 1024; | ||
66 | - private static final int SO_RCV_BUFFER_SIZE = 128 * 1024; | ||
67 | - | ||
68 | - private DefaultControllerNode self; | ||
69 | - private final Map<NodeId, DefaultControllerNode> nodes = new ConcurrentHashMap<>(); | ||
70 | - private final Map<NodeId, State> states = new ConcurrentHashMap<>(); | ||
71 | - private final Map<NodeId, TLVMessageStream> streams = new ConcurrentHashMap<>(); | ||
72 | - private final Map<SocketChannel, DefaultControllerNode> nodesByChannel = new ConcurrentHashMap<>(); | ||
73 | - | ||
74 | - private final ExecutorService listenExecutor = | ||
75 | - Executors.newSingleThreadExecutor(namedThreads("onos-comm-listen")); | ||
76 | - private final ExecutorService commExecutors = | ||
77 | - Executors.newFixedThreadPool(WORKERS, namedThreads("onos-comm-cluster")); | ||
78 | - private final ExecutorService heartbeatExecutor = | ||
79 | - Executors.newSingleThreadExecutor(namedThreads("onos-comm-heartbeat")); | ||
80 | - private final ExecutorService initiatorExecutors = | ||
81 | - Executors.newFixedThreadPool(INITIATORS, namedThreads("onos-comm-initiator")); | ||
82 | - | ||
83 | - private final Timer timer = new Timer(); | ||
84 | - private final TimerTask connectionCustodian = new ConnectionCustodian(); | ||
85 | - | ||
86 | - private ListenLoop listenLoop; | ||
87 | - private List<CommLoop> commLoops = new ArrayList<>(WORKERS); | ||
88 | - | ||
89 | - @Activate | ||
90 | - public void activate() { | ||
91 | - establishIdentity(); | ||
92 | - startCommunications(); | ||
93 | - startListening(); | ||
94 | - startInitiating(); | ||
95 | - log.info("Started"); | ||
96 | - } | ||
97 | - | ||
98 | - @Deactivate | ||
99 | - public void deactivate() { | ||
100 | - listenLoop.shutdown(); | ||
101 | - for (CommLoop loop : commLoops) { | ||
102 | - loop.shutdown(); | ||
103 | - } | ||
104 | - log.info("Stopped"); | ||
105 | - } | ||
106 | - | ||
107 | - | ||
108 | - // Establishes the controller's own identity. | ||
109 | - private void establishIdentity() { | ||
110 | - IpPrefix ip = valueOf(System.getProperty("onos.ip", "127.0.1.1")); | ||
111 | - self = new DefaultControllerNode(new NodeId(ip.toString()), ip); | ||
112 | - nodes.put(self.id(), self); | ||
113 | - } | ||
114 | - | ||
115 | - // Kicks off the IO loops. | ||
116 | - private void startCommunications() { | ||
117 | - for (int i = 0; i < WORKERS; i++) { | ||
118 | - try { | ||
119 | - CommLoop loop = new CommLoop(); | ||
120 | - commLoops.add(loop); | ||
121 | - commExecutors.execute(loop); | ||
122 | - } catch (IOException e) { | ||
123 | - log.warn("Unable to start comm IO loop", e); | ||
124 | - } | ||
125 | - } | ||
126 | - } | ||
127 | - | ||
128 | - // Starts listening for connections from peer cluster members. | ||
129 | - private void startListening() { | ||
130 | - try { | ||
131 | - listenLoop = new ListenLoop(self.ip(), self.tcpPort()); | ||
132 | - listenExecutor.execute(listenLoop); | ||
133 | - } catch (IOException e) { | ||
134 | - log.error("Unable to listen for cluster connections", e); | ||
135 | - } | ||
136 | - } | ||
137 | - | ||
138 | - /** | ||
139 | - * Initiates open connection request and registers the pending socket | ||
140 | - * channel with the given IO loop. | ||
141 | - * | ||
142 | - * @param loop loop with which the channel should be registered | ||
143 | - * @throws java.io.IOException if the socket could not be open or connected | ||
144 | - */ | ||
145 | - private void openConnection(DefaultControllerNode node, CommLoop loop) throws IOException { | ||
146 | - SocketAddress sa = new InetSocketAddress(getByAddress(node.ip().toOctets()), node.tcpPort()); | ||
147 | - SocketChannel ch = SocketChannel.open(); | ||
148 | - nodesByChannel.put(ch, node); | ||
149 | - ch.configureBlocking(false); | ||
150 | - loop.connectStream(ch); | ||
151 | - ch.connect(sa); | ||
152 | - } | ||
153 | - | ||
154 | - | ||
155 | - // Attempts to connect to any nodes that do not have an associated connection. | ||
156 | - private void startInitiating() { | ||
157 | - timer.schedule(connectionCustodian, CONNECTION_CUSTODIAN_DELAY, CONNECTION_CUSTODIAN_FREQUENCY); | ||
158 | - } | ||
159 | - | ||
160 | - @Override | ||
161 | - public ControllerNode getLocalNode() { | ||
162 | - return self; | ||
163 | - } | ||
164 | - | ||
165 | - @Override | ||
166 | - public Set<ControllerNode> getNodes() { | ||
167 | - ImmutableSet.Builder<ControllerNode> builder = ImmutableSet.builder(); | ||
168 | - return builder.addAll(nodes.values()).build(); | ||
169 | - } | ||
170 | - | ||
171 | - @Override | ||
172 | - public ControllerNode getNode(NodeId nodeId) { | ||
173 | - return nodes.get(nodeId); | ||
174 | - } | ||
175 | - | ||
176 | - @Override | ||
177 | - public State getState(NodeId nodeId) { | ||
178 | - State state = states.get(nodeId); | ||
179 | - return state == null ? State.INACTIVE : state; | ||
180 | - } | ||
181 | - | ||
182 | - @Override | ||
183 | - public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) { | ||
184 | - DefaultControllerNode node = new DefaultControllerNode(nodeId, ip, tcpPort); | ||
185 | - nodes.put(nodeId, node); | ||
186 | - return node; | ||
187 | - } | ||
188 | - | ||
189 | - @Override | ||
190 | - public void removeNode(NodeId nodeId) { | ||
191 | - nodes.remove(nodeId); | ||
192 | - } | ||
193 | - | ||
194 | - // Listens and accepts inbound connections from other cluster nodes. | ||
195 | - private class ListenLoop extends AcceptorLoop { | ||
196 | - ListenLoop(IpPrefix ip, int tcpPort) throws IOException { | ||
197 | - super(SELECT_TIMEOUT, new InetSocketAddress(getByAddress(ip.toOctets()), tcpPort)); | ||
198 | - } | ||
199 | - | ||
200 | - @Override | ||
201 | - protected void acceptConnection(ServerSocketChannel channel) throws IOException { | ||
202 | - SocketChannel sc = channel.accept(); | ||
203 | - sc.configureBlocking(false); | ||
204 | - | ||
205 | - Socket so = sc.socket(); | ||
206 | - so.setTcpNoDelay(SO_NO_DELAY); | ||
207 | - so.setReceiveBufferSize(SO_RCV_BUFFER_SIZE); | ||
208 | - so.setSendBufferSize(SO_SEND_BUFFER_SIZE); | ||
209 | - | ||
210 | - findLeastUtilizedLoop().acceptStream(sc); | ||
211 | - log.info("Connected client"); | ||
212 | - } | ||
213 | - } | ||
214 | - | ||
215 | - private class CommLoop extends IOLoop<TLVMessage, TLVMessageStream> { | ||
216 | - CommLoop() throws IOException { | ||
217 | - super(SELECT_TIMEOUT); | ||
218 | - } | ||
219 | - | ||
220 | - @Override | ||
221 | - protected TLVMessageStream createStream(ByteChannel byteChannel) { | ||
222 | - return new TLVMessageStream(this, byteChannel, COMM_BUFFER_SIZE, COMM_IDLE_TIME); | ||
223 | - } | ||
224 | - | ||
225 | - @Override | ||
226 | - protected void processMessages(List<TLVMessage> messages, MessageStream<TLVMessage> stream) { | ||
227 | - | ||
228 | - } | ||
229 | - | ||
230 | - @Override | ||
231 | - public TLVMessageStream acceptStream(SocketChannel channel) { | ||
232 | - TLVMessageStream stream = super.acceptStream(channel); | ||
233 | - try { | ||
234 | - InetSocketAddress sa = (InetSocketAddress) channel.getRemoteAddress(); | ||
235 | - log.info("Accepted a new connection from {}", IpPrefix.valueOf(sa.getAddress().getAddress())); | ||
236 | - } catch (IOException e) { | ||
237 | - log.warn("Unable to accept connection from an unknown end-point", e); | ||
238 | - } | ||
239 | - return stream; | ||
240 | - } | ||
241 | - | ||
242 | - @Override | ||
243 | - public TLVMessageStream connectStream(SocketChannel channel) { | ||
244 | - TLVMessageStream stream = super.connectStream(channel); | ||
245 | - DefaultControllerNode node = nodesByChannel.get(channel); | ||
246 | - if (node != null) { | ||
247 | - log.info("Opened connection to {}", node.id()); | ||
248 | - streams.put(node.id(), stream); | ||
249 | - } | ||
250 | - return stream; | ||
251 | - } | ||
252 | - } | ||
253 | - | ||
254 | - | ||
255 | - // Sweeps through all controller nodes and attempts to open connection to | ||
256 | - // those that presently do not have one. | ||
257 | - private class ConnectionCustodian extends TimerTask { | ||
258 | - @Override | ||
259 | - public void run() { | ||
260 | - for (DefaultControllerNode node : nodes.values()) { | ||
261 | - if (node != self && !streams.containsKey(node.id())) { | ||
262 | - try { | ||
263 | - openConnection(node, findLeastUtilizedLoop()); | ||
264 | - } catch (IOException e) { | ||
265 | - log.warn("Unable to connect", e); | ||
266 | - } | ||
267 | - } | ||
268 | - } | ||
269 | - } | ||
270 | - } | ||
271 | - | ||
272 | - // Finds the least utilities IO loop. | ||
273 | - private CommLoop findLeastUtilizedLoop() { | ||
274 | - CommLoop leastUtilized = null; | ||
275 | - int minCount = Integer.MAX_VALUE; | ||
276 | - for (CommLoop loop : commLoops) { | ||
277 | - int count = loop.streamCount(); | ||
278 | - if (count == 0) { | ||
279 | - return loop; | ||
280 | - } | ||
281 | - | ||
282 | - if (count < minCount) { | ||
283 | - leastUtilized = loop; | ||
284 | - minCount = count; | ||
285 | - } | ||
286 | - } | ||
287 | - return leastUtilized; | ||
288 | - } | ||
289 | -} |
1 | -package org.onlab.onos.ccc; | ||
2 | - | ||
3 | -import org.onlab.nio.AbstractMessage; | ||
4 | - | ||
5 | -import java.util.Objects; | ||
6 | - | ||
7 | -import static com.google.common.base.MoreObjects.toStringHelper; | ||
8 | - | ||
9 | -/** | ||
10 | - * Base message for cluster-wide communications using TLVs. | ||
11 | - */ | ||
12 | -public class TLVMessage extends AbstractMessage { | ||
13 | - | ||
14 | - private final int type; | ||
15 | - private final Object data; | ||
16 | - | ||
17 | - /** | ||
18 | - * Creates an immutable TLV message. | ||
19 | - * | ||
20 | - * @param type message type | ||
21 | - * @param length message length | ||
22 | - * @param data message data | ||
23 | - */ | ||
24 | - public TLVMessage(int type, int length, Object data) { | ||
25 | - this.length = length; | ||
26 | - this.type = type; | ||
27 | - this.data = data; | ||
28 | - } | ||
29 | - | ||
30 | - /** | ||
31 | - * Returns the message type indicator. | ||
32 | - * | ||
33 | - * @return message type | ||
34 | - */ | ||
35 | - public int type() { | ||
36 | - return type; | ||
37 | - } | ||
38 | - | ||
39 | - /** | ||
40 | - * Returns the data object. | ||
41 | - * | ||
42 | - * @return message data | ||
43 | - */ | ||
44 | - public Object data() { | ||
45 | - return data; | ||
46 | - } | ||
47 | - | ||
48 | - @Override | ||
49 | - public int hashCode() { | ||
50 | - return Objects.hash(type, data); | ||
51 | - } | ||
52 | - | ||
53 | - @Override | ||
54 | - public boolean equals(Object obj) { | ||
55 | - if (this == obj) { | ||
56 | - return true; | ||
57 | - } | ||
58 | - if (obj == null || getClass() != obj.getClass()) { | ||
59 | - return false; | ||
60 | - } | ||
61 | - final TLVMessage other = (TLVMessage) obj; | ||
62 | - return Objects.equals(this.type, other.type) && | ||
63 | - Objects.equals(this.data, other.data); | ||
64 | - } | ||
65 | - | ||
66 | - @Override | ||
67 | - public String toString() { | ||
68 | - return toStringHelper(this).add("type", type).add("length", length).toString(); | ||
69 | - } | ||
70 | - | ||
71 | -} |
1 | -package org.onlab.onos.ccc; | ||
2 | - | ||
3 | -import org.onlab.nio.IOLoop; | ||
4 | -import org.onlab.nio.MessageStream; | ||
5 | - | ||
6 | -import java.nio.ByteBuffer; | ||
7 | -import java.nio.channels.ByteChannel; | ||
8 | - | ||
9 | -import static com.google.common.base.Preconditions.checkState; | ||
10 | - | ||
11 | -/** | ||
12 | - * Stream for transferring TLV messages between cluster members. | ||
13 | - */ | ||
14 | -public class TLVMessageStream extends MessageStream<TLVMessage> { | ||
15 | - | ||
16 | - private static final long MARKER = 0xfeedcafecafefeedL; | ||
17 | - | ||
18 | - /** | ||
19 | - * Creates a message stream associated with the specified IO loop and | ||
20 | - * backed by the given byte channel. | ||
21 | - * | ||
22 | - * @param loop IO loop | ||
23 | - * @param byteChannel backing byte channel | ||
24 | - * @param bufferSize size of the backing byte buffers | ||
25 | - * @param maxIdleMillis maximum number of millis the stream can be idle | ||
26 | - */ | ||
27 | - protected TLVMessageStream(IOLoop<TLVMessage, ?> loop, ByteChannel byteChannel, | ||
28 | - int bufferSize, int maxIdleMillis) { | ||
29 | - super(loop, byteChannel, bufferSize, maxIdleMillis); | ||
30 | - } | ||
31 | - | ||
32 | - @Override | ||
33 | - protected TLVMessage read(ByteBuffer buffer) { | ||
34 | - long marker = buffer.getLong(); | ||
35 | - checkState(marker == MARKER, "Incorrect message marker"); | ||
36 | - | ||
37 | - int type = buffer.getInt(); | ||
38 | - int length = buffer.getInt(); | ||
39 | - | ||
40 | - // TODO: add deserialization hook here | ||
41 | - | ||
42 | - return new TLVMessage(type, length, null); | ||
43 | - } | ||
44 | - | ||
45 | - @Override | ||
46 | - protected void write(TLVMessage message, ByteBuffer buffer) { | ||
47 | - buffer.putLong(MARKER); | ||
48 | - buffer.putInt(message.type()); | ||
49 | - buffer.putInt(message.length()); | ||
50 | - | ||
51 | - // TODO: add serialization hook here | ||
52 | - } | ||
53 | -} |
@@ -233,7 +233,7 @@ public class IOLoopTestClient {
233 | } | 233 | } |
234 | 234 | ||
235 | @Override | 235 | @Override |
236 | - protected void connect(SelectionKey key) { | 236 | + protected void connect(SelectionKey key) throws IOException { |
237 | super.connect(key); | 237 | super.connect(key); |
238 | TestMessageStream b = (TestMessageStream) key.attachment(); | 238 | TestMessageStream b = (TestMessageStream) key.attachment(); |
239 | Worker w = ((CustomIOLoop) b.loop()).worker; | 239 | Worker w = ((CustomIOLoop) b.loop()).worker; | ... | ... |
@@ -26,7 +26,9 @@ import org.onlab.onos.net.packet.InboundPacket;
26 | import org.onlab.onos.net.packet.PacketContext; | 26 | import org.onlab.onos.net.packet.PacketContext; |
27 | import org.onlab.onos.net.packet.PacketProcessor; | 27 | import org.onlab.onos.net.packet.PacketProcessor; |
28 | import org.onlab.onos.net.packet.PacketService; | 28 | import org.onlab.onos.net.packet.PacketService; |
29 | +import org.onlab.onos.net.proxyarp.ProxyArpService; | ||
29 | import org.onlab.onos.net.topology.TopologyService; | 30 | import org.onlab.onos.net.topology.TopologyService; |
31 | +import org.onlab.packet.ARP; | ||
30 | import org.onlab.packet.Ethernet; | 32 | import org.onlab.packet.Ethernet; |
31 | import org.slf4j.Logger; | 33 | import org.slf4j.Logger; |
32 | 34 | ||
@@ -50,6 +52,9 @@ public class ReactiveForwarding {
50 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | 52 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) |
51 | protected FlowRuleService flowRuleService; | 53 | protected FlowRuleService flowRuleService; |
52 | 54 | ||
55 | + @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
56 | + protected ProxyArpService proxyArpService; | ||
57 | + | ||
53 | private ReactivePacketProcessor processor = new ReactivePacketProcessor(); | 58 | private ReactivePacketProcessor processor = new ReactivePacketProcessor(); |
54 | 59 | ||
55 | private ApplicationId appId; | 60 | private ApplicationId appId; |
@@ -85,6 +90,16 @@ public class ReactiveForwarding {
85 | 90 | ||
86 | InboundPacket pkt = context.inPacket(); | 91 | InboundPacket pkt = context.inPacket(); |
87 | Ethernet ethPkt = pkt.parsed(); | 92 | Ethernet ethPkt = pkt.parsed(); |
93 | + if (ethPkt.getEtherType() == Ethernet.TYPE_ARP) { | ||
94 | + ARP arp = (ARP) ethPkt.getPayload(); | ||
95 | + if (arp.getOpCode() == ARP.OP_REPLY) { | ||
96 | + proxyArpService.forward(ethPkt); | ||
97 | + } else if (arp.getOpCode() == ARP.OP_REQUEST) { | ||
98 | + proxyArpService.reply(ethPkt); | ||
99 | + } | ||
100 | + context.block(); | ||
101 | + return; | ||
102 | + } | ||
88 | HostId id = HostId.hostId(ethPkt.getDestinationMAC()); | 103 | HostId id = HostId.hostId(ethPkt.getDestinationMAC()); |
89 | 104 | ||
90 | // Do we know who this is for? If not, flood and bail. | 105 | // Do we know who this is for? If not, flood and bail. | ... | ... |
@@ -7,10 +7,10 @@ import org.onlab.onos.cluster.NodeId;
7 | import org.onlab.packet.IpPrefix; | 7 | import org.onlab.packet.IpPrefix; |
8 | 8 | ||
9 | /** | 9 | /** |
10 | - * Lists all controller cluster nodes. | 10 | + * Adds a new controller cluster node. |
11 | */ | 11 | */ |
12 | @Command(scope = "onos", name = "add-node", | 12 | @Command(scope = "onos", name = "add-node", |
13 | - description = "Lists all controller cluster nodes") | 13 | + description = "Adds a new controller cluster node") |
14 | public class NodeAddCommand extends AbstractShellCommand { | 14 | public class NodeAddCommand extends AbstractShellCommand { |
15 | 15 | ||
16 | @Argument(index = 0, name = "nodeId", description = "Node ID", | 16 | @Argument(index = 0, name = "nodeId", description = "Node ID", |
@@ -21,7 +21,7 @@ public class NodeAddCommand extends AbstractShellCommand {
21 | required = true, multiValued = false) | 21 | required = true, multiValued = false) |
22 | String ip = null; | 22 | String ip = null; |
23 | 23 | ||
24 | - @Argument(index = 2, name = "tcpPort", description = "TCP port", | 24 | + @Argument(index = 2, name = "tcpPort", description = "Node TCP listen port", |
25 | required = false, multiValued = false) | 25 | required = false, multiValued = false) |
26 | int tcpPort = 9876; | 26 | int tcpPort = 9876; |
27 | 27 | ... | ... |
1 | +package org.onlab.onos.cli; | ||
2 | + | ||
3 | +import org.apache.karaf.shell.commands.Argument; | ||
4 | +import org.apache.karaf.shell.commands.Command; | ||
5 | +import org.onlab.onos.cluster.ClusterAdminService; | ||
6 | +import org.onlab.onos.cluster.NodeId; | ||
7 | + | ||
8 | +/** | ||
9 | + * Removes a controller cluster node. | ||
10 | + */ | ||
11 | +@Command(scope = "onos", name = "remove-node", | ||
12 | + description = "Removes a controller cluster node") | ||
13 | +public class NodeRemoveCommand extends AbstractShellCommand { | ||
14 | + | ||
15 | + @Argument(index = 0, name = "nodeId", description = "Node ID", | ||
16 | + required = true, multiValued = false) | ||
17 | + String nodeId = null; | ||
18 | + | ||
19 | + @Override | ||
20 | + protected void execute() { | ||
21 | + ClusterAdminService service = get(ClusterAdminService.class); | ||
22 | + service.removeNode(new NodeId(nodeId)); | ||
23 | + } | ||
24 | + | ||
25 | +} |
@@ -8,6 +8,9 @@
8 | <action class="org.onlab.onos.cli.NodeAddCommand"/> | 8 | <action class="org.onlab.onos.cli.NodeAddCommand"/> |
9 | </command> | 9 | </command> |
10 | <command> | 10 | <command> |
11 | + <action class="org.onlab.onos.cli.NodeRemoveCommand"/> | ||
12 | + </command> | ||
13 | + <command> | ||
11 | <action class="org.onlab.onos.cli.MastersListCommand"/> | 14 | <action class="org.onlab.onos.cli.MastersListCommand"/> |
12 | <completers> | 15 | <completers> |
13 | <ref component-id="clusterIdCompleter"/> | 16 | <ref component-id="clusterIdCompleter"/> | ... | ... |
@@ -9,6 +9,8 @@ import org.onlab.onos.net.ConnectPoint;
9 | import org.onlab.packet.IpPrefix; | 9 | import org.onlab.packet.IpPrefix; |
10 | import org.onlab.packet.MacAddress; | 10 | import org.onlab.packet.MacAddress; |
11 | 11 | ||
12 | +import com.google.common.base.MoreObjects; | ||
13 | + | ||
12 | /** | 14 | /** |
13 | * Represents address information bound to a port. | 15 | * Represents address information bound to a port. |
14 | */ | 16 | */ |
@@ -83,4 +85,13 @@ public class PortAddresses {
83 | public int hashCode() { | 85 | public int hashCode() { |
84 | return Objects.hash(connectPoint, ipAddresses, macAddress); | 86 | return Objects.hash(connectPoint, ipAddresses, macAddress); |
85 | } | 87 | } |
88 | + | ||
89 | + @Override | ||
90 | + public String toString() { | ||
91 | + return MoreObjects.toStringHelper(getClass()) | ||
92 | + .add("connect-point", connectPoint) | ||
93 | + .add("ip-addresses", ipAddresses) | ||
94 | + .add("mac-address", macAddress) | ||
95 | + .toString(); | ||
96 | + } | ||
86 | } | 97 | } | ... | ... |
1 | package org.onlab.onos.net.link; | 1 | package org.onlab.onos.net.link; |
2 | 2 | ||
3 | +import java.util.Set; | ||
4 | + | ||
3 | import org.onlab.onos.net.ConnectPoint; | 5 | import org.onlab.onos.net.ConnectPoint; |
4 | import org.onlab.onos.net.DeviceId; | 6 | import org.onlab.onos.net.DeviceId; |
5 | import org.onlab.onos.net.Link; | 7 | import org.onlab.onos.net.Link; |
6 | 8 | ||
7 | -import java.util.Set; | ||
8 | - | ||
9 | /** | 9 | /** |
10 | * Service for interacting with the inventory of infrastructure links. | 10 | * Service for interacting with the inventory of infrastructure links. |
11 | */ | 11 | */ | ... | ... |
@@ -21,9 +21,16 @@ public interface ProxyArpService {
21 | * Sends a reply for a given request. If the host is not known then the arp | 21 | * Sends a reply for a given request. If the host is not known then the arp |
22 | * will be flooded at all edge ports. | 22 | * will be flooded at all edge ports. |
23 | * | 23 | * |
24 | - * @param request | 24 | + * @param eth |
25 | * an arp request | 25 | * an arp request |
26 | */ | 26 | */ |
27 | - void reply(Ethernet request); | 27 | + void reply(Ethernet eth); |
28 | + | ||
29 | + /** | ||
30 | + * Forwards an ARP request to its destination. Floods at the edge the ARP request if the | ||
31 | + * destination is not known. | ||
32 | + * @param eth an ethernet frame containing an ARP request. | ||
33 | + */ | ||
34 | + void forward(Ethernet eth); | ||
28 | 35 | ||
29 | } | 36 | } | ... | ... |
1 | package org.onlab.onos.net.link; | 1 | package org.onlab.onos.net.link; |
2 | 2 | ||
3 | +import java.util.Set; | ||
4 | + | ||
3 | import org.onlab.onos.net.ConnectPoint; | 5 | import org.onlab.onos.net.ConnectPoint; |
4 | import org.onlab.onos.net.DeviceId; | 6 | import org.onlab.onos.net.DeviceId; |
5 | import org.onlab.onos.net.Link; | 7 | import org.onlab.onos.net.Link; |
6 | 8 | ||
7 | -import java.util.Set; | ||
8 | - | ||
9 | /** | 9 | /** |
10 | * Test adapter for link service. | 10 | * Test adapter for link service. |
11 | */ | 11 | */ |
@@ -63,4 +63,5 @@ public class LinkServiceAdapter implements LinkService {
63 | public void removeListener(LinkListener listener) { | 63 | public void removeListener(LinkListener listener) { |
64 | } | 64 | } |
65 | 65 | ||
66 | + | ||
66 | } | 67 | } | ... | ... |
@@ -53,7 +53,7 @@ public class LinkManager
53 | protected final AbstractListenerRegistry<LinkEvent, LinkListener> | 53 | protected final AbstractListenerRegistry<LinkEvent, LinkListener> |
54 | listenerRegistry = new AbstractListenerRegistry<>(); | 54 | listenerRegistry = new AbstractListenerRegistry<>(); |
55 | 55 | ||
56 | - private LinkStoreDelegate delegate = new InternalStoreDelegate(); | 56 | + private final LinkStoreDelegate delegate = new InternalStoreDelegate(); |
57 | 57 | ||
58 | private final DeviceListener deviceListener = new InternalDeviceListener(); | 58 | private final DeviceListener deviceListener = new InternalDeviceListener(); |
59 | 59 | ... | ... |
1 | -package org.onlab.onos.proxyarp.impl; | ||
2 | - | ||
3 | -import static com.google.common.base.Preconditions.checkArgument; | ||
4 | -import static com.google.common.base.Preconditions.checkNotNull; | ||
5 | - | ||
6 | -import java.nio.ByteBuffer; | ||
7 | -import java.util.Set; | ||
8 | - | ||
9 | -import org.apache.felix.scr.annotations.Reference; | ||
10 | -import org.apache.felix.scr.annotations.ReferenceCardinality; | ||
11 | -import org.onlab.onos.net.Host; | ||
12 | -import org.onlab.onos.net.flow.DefaultTrafficTreatment; | ||
13 | -import org.onlab.onos.net.flow.TrafficTreatment; | ||
14 | -import org.onlab.onos.net.host.HostService; | ||
15 | -import org.onlab.onos.net.packet.DefaultOutboundPacket; | ||
16 | -import org.onlab.onos.net.packet.PacketService; | ||
17 | -import org.onlab.onos.net.proxyarp.ProxyArpService; | ||
18 | -import org.onlab.onos.net.topology.TopologyService; | ||
19 | -import org.onlab.packet.ARP; | ||
20 | -import org.onlab.packet.Ethernet; | ||
21 | -import org.onlab.packet.IpPrefix; | ||
22 | -import org.onlab.packet.VlanId; | ||
23 | - | ||
24 | -public class ProxyArpManager implements ProxyArpService { | ||
25 | - | ||
26 | - private static final String MAC_ADDR_NULL = "Mac address cannot be null."; | ||
27 | - private static final String REQUEST_NULL = "Arp request cannot be null."; | ||
28 | - private static final String REQUEST_NOT_ARP = "Ethernet frame does not contain ARP request."; | ||
29 | - private static final String NOT_ARP_REQUEST = "ARP is not a request."; | ||
30 | - | ||
31 | - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
32 | - protected HostService hostService; | ||
33 | - | ||
34 | - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
35 | - protected PacketService packetService; | ||
36 | - | ||
37 | - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
38 | - protected TopologyService topologyService; | ||
39 | - | ||
40 | - @Override | ||
41 | - public boolean known(IpPrefix addr) { | ||
42 | - checkNotNull(MAC_ADDR_NULL, addr); | ||
43 | - Set<Host> hosts = hostService.getHostsByIp(addr); | ||
44 | - return !hosts.isEmpty(); | ||
45 | - } | ||
46 | - | ||
47 | - @Override | ||
48 | - public void reply(Ethernet request) { | ||
49 | - checkNotNull(REQUEST_NULL, request); | ||
50 | - checkArgument(request.getEtherType() == Ethernet.TYPE_ARP, | ||
51 | - REQUEST_NOT_ARP); | ||
52 | - ARP arp = (ARP) request.getPayload(); | ||
53 | - checkArgument(arp.getOpCode() == ARP.OP_REQUEST, NOT_ARP_REQUEST); | ||
54 | - | ||
55 | - VlanId vlan = VlanId.vlanId(request.getVlanID()); | ||
56 | - Set<Host> hosts = hostService.getHostsByIp(IpPrefix.valueOf(arp | ||
57 | - .getTargetProtocolAddress())); | ||
58 | - | ||
59 | - Host h = null; | ||
60 | - for (Host host : hosts) { | ||
61 | - if (host.vlan().equals(vlan)) { | ||
62 | - h = host; | ||
63 | - break; | ||
64 | - } | ||
65 | - } | ||
66 | - | ||
67 | - if (h == null) { | ||
68 | - flood(request); | ||
69 | - return; | ||
70 | - } | ||
71 | - | ||
72 | - Ethernet arpReply = buildArpReply(h, request); | ||
73 | - // TODO: check send status with host service. | ||
74 | - TrafficTreatment.Builder builder = new DefaultTrafficTreatment.Builder(); | ||
75 | - builder.setOutput(h.location().port()); | ||
76 | - packetService.emit(new DefaultOutboundPacket(h.location().deviceId(), | ||
77 | - builder.build(), ByteBuffer.wrap(arpReply.serialize()))); | ||
78 | - } | ||
79 | - | ||
80 | - private void flood(Ethernet request) { | ||
81 | - // TODO: flood on all edge ports. | ||
82 | - } | ||
83 | - | ||
84 | - private Ethernet buildArpReply(Host h, Ethernet request) { | ||
85 | - Ethernet eth = new Ethernet(); | ||
86 | - eth.setDestinationMACAddress(request.getSourceMACAddress()); | ||
87 | - eth.setSourceMACAddress(h.mac().getAddress()); | ||
88 | - eth.setEtherType(Ethernet.TYPE_ARP); | ||
89 | - ARP arp = new ARP(); | ||
90 | - arp.setOpCode(ARP.OP_REPLY); | ||
91 | - arp.setSenderHardwareAddress(h.mac().getAddress()); | ||
92 | - arp.setTargetHardwareAddress(request.getSourceMACAddress()); | ||
93 | - | ||
94 | - arp.setTargetProtocolAddress(((ARP) request.getPayload()) | ||
95 | - .getSenderProtocolAddress()); | ||
96 | - arp.setSenderProtocolAddress(h.ipAddresses().iterator().next().toInt()); | ||
97 | - eth.setPayload(arp); | ||
98 | - return eth; | ||
99 | - } | ||
100 | -} |
@@ -33,8 +33,11 @@ import org.onlab.onos.net.device.PortDescription;
33 | import org.onlab.onos.net.provider.AbstractProvider; | 33 | import org.onlab.onos.net.provider.AbstractProvider; |
34 | import org.onlab.onos.net.provider.ProviderId; | 34 | import org.onlab.onos.net.provider.ProviderId; |
35 | import org.onlab.onos.store.common.StoreManager; | 35 | import org.onlab.onos.store.common.StoreManager; |
36 | +import org.onlab.onos.store.common.StoreService; | ||
36 | import org.onlab.onos.store.common.TestStoreManager; | 37 | import org.onlab.onos.store.common.TestStoreManager; |
37 | import org.onlab.onos.store.device.impl.DistributedDeviceStore; | 38 | import org.onlab.onos.store.device.impl.DistributedDeviceStore; |
39 | +import org.onlab.onos.store.serializers.KryoSerializationManager; | ||
40 | +import org.onlab.onos.store.serializers.KryoSerializationService; | ||
38 | import org.onlab.packet.IpPrefix; | 41 | import org.onlab.packet.IpPrefix; |
39 | 42 | ||
40 | import java.util.ArrayList; | 43 | import java.util.ArrayList; |
@@ -92,6 +95,7 @@ public class DistributedDeviceManagerTest {
92 | private DistributedDeviceStore dstore; | 95 | private DistributedDeviceStore dstore; |
93 | private TestMastershipManager masterManager; | 96 | private TestMastershipManager masterManager; |
94 | private EventDeliveryService eventService; | 97 | private EventDeliveryService eventService; |
98 | + private KryoSerializationManager serializationMgr; | ||
95 | 99 | ||
96 | @Before | 100 | @Before |
97 | public void setUp() { | 101 | public void setUp() { |
@@ -107,7 +111,10 @@ public class DistributedDeviceManagerTest {
107 | storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config)); | 111 | storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config)); |
108 | storeManager.activate(); | 112 | storeManager.activate(); |
109 | 113 | ||
110 | - dstore = new TestDistributedDeviceStore(); | 114 | + serializationMgr = new KryoSerializationManager(); |
115 | + serializationMgr.activate(); | ||
116 | + | ||
117 | + dstore = new TestDistributedDeviceStore(storeManager, serializationMgr); | ||
111 | dstore.activate(); | 118 | dstore.activate(); |
112 | 119 | ||
113 | mgr.store = dstore; | 120 | mgr.store = dstore; |
@@ -133,6 +140,7 @@ public class DistributedDeviceManagerTest {
133 | mgr.deactivate(); | 140 | mgr.deactivate(); |
134 | 141 | ||
135 | dstore.deactivate(); | 142 | dstore.deactivate(); |
143 | + serializationMgr.deactivate(); | ||
136 | storeManager.deactivate(); | 144 | storeManager.deactivate(); |
137 | } | 145 | } |
138 | 146 | ||
@@ -163,7 +171,7 @@ public class DistributedDeviceManagerTest {
163 | public void deviceDisconnected() { | 171 | public void deviceDisconnected() { |
164 | connectDevice(DID1, SW1); | 172 | connectDevice(DID1, SW1); |
165 | connectDevice(DID2, SW1); | 173 | connectDevice(DID2, SW1); |
166 | - validateEvents(DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED); | 174 | + validateEvents(DEVICE_ADDED, DEVICE_ADDED); |
167 | assertTrue("device should be available", service.isAvailable(DID1)); | 175 | assertTrue("device should be available", service.isAvailable(DID1)); |
168 | 176 | ||
169 | // Disconnect | 177 | // Disconnect |
@@ -182,10 +190,10 @@ public class DistributedDeviceManagerTest {
182 | @Test | 190 | @Test |
183 | public void deviceUpdated() { | 191 | public void deviceUpdated() { |
184 | connectDevice(DID1, SW1); | 192 | connectDevice(DID1, SW1); |
185 | - validateEvents(DEVICE_ADDED, DEVICE_ADDED); | 193 | + validateEvents(DEVICE_ADDED); |
186 | 194 | ||
187 | connectDevice(DID1, SW2); | 195 | connectDevice(DID1, SW2); |
188 | - validateEvents(DEVICE_UPDATED, DEVICE_UPDATED); | 196 | + validateEvents(DEVICE_UPDATED); |
189 | } | 197 | } |
190 | 198 | ||
191 | @Test | 199 | @Test |
@@ -202,7 +210,7 @@ public class DistributedDeviceManagerTest {
202 | pds.add(new DefaultPortDescription(P2, true)); | 210 | pds.add(new DefaultPortDescription(P2, true)); |
203 | pds.add(new DefaultPortDescription(P3, true)); | 211 | pds.add(new DefaultPortDescription(P3, true)); |
204 | providerService.updatePorts(DID1, pds); | 212 | providerService.updatePorts(DID1, pds); |
205 | - validateEvents(DEVICE_ADDED, DEVICE_ADDED, PORT_ADDED, PORT_ADDED, PORT_ADDED); | 213 | + validateEvents(DEVICE_ADDED, PORT_ADDED, PORT_ADDED, PORT_ADDED); |
206 | pds.clear(); | 214 | pds.clear(); |
207 | 215 | ||
208 | pds.add(new DefaultPortDescription(P1, false)); | 216 | pds.add(new DefaultPortDescription(P1, false)); |
@@ -218,7 +226,7 @@ public class DistributedDeviceManagerTest {
218 | pds.add(new DefaultPortDescription(P1, true)); | 226 | pds.add(new DefaultPortDescription(P1, true)); |
219 | pds.add(new DefaultPortDescription(P2, true)); | 227 | pds.add(new DefaultPortDescription(P2, true)); |
220 | providerService.updatePorts(DID1, pds); | 228 | providerService.updatePorts(DID1, pds); |
221 | - validateEvents(DEVICE_ADDED, DEVICE_ADDED, PORT_ADDED, PORT_ADDED); | 229 | + validateEvents(DEVICE_ADDED, PORT_ADDED, PORT_ADDED); |
222 | 230 | ||
223 | providerService.portStatusChanged(DID1, new DefaultPortDescription(P1, false)); | 231 | providerService.portStatusChanged(DID1, new DefaultPortDescription(P1, false)); |
224 | validateEvents(PORT_UPDATED); | 232 | validateEvents(PORT_UPDATED); |
@@ -233,7 +241,7 @@ public class DistributedDeviceManagerTest {
233 | pds.add(new DefaultPortDescription(P1, true)); | 241 | pds.add(new DefaultPortDescription(P1, true)); |
234 | pds.add(new DefaultPortDescription(P2, true)); | 242 | pds.add(new DefaultPortDescription(P2, true)); |
235 | providerService.updatePorts(DID1, pds); | 243 | providerService.updatePorts(DID1, pds); |
236 | - validateEvents(DEVICE_ADDED, DEVICE_ADDED, PORT_ADDED, PORT_ADDED); | 244 | + validateEvents(DEVICE_ADDED, PORT_ADDED, PORT_ADDED); |
237 | assertEquals("wrong port count", 2, service.getPorts(DID1).size()); | 245 | assertEquals("wrong port count", 2, service.getPorts(DID1).size()); |
238 | 246 | ||
239 | Port port = service.getPort(DID1, P1); | 247 | Port port = service.getPort(DID1, P1); |
@@ -247,7 +255,7 @@ public class DistributedDeviceManagerTest {
247 | connectDevice(DID2, SW2); | 255 | connectDevice(DID2, SW2); |
248 | assertEquals("incorrect device count", 2, service.getDeviceCount()); | 256 | assertEquals("incorrect device count", 2, service.getDeviceCount()); |
249 | admin.removeDevice(DID1); | 257 | admin.removeDevice(DID1); |
250 | - validateEvents(DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED, DEVICE_REMOVED, DEVICE_REMOVED); | 258 | + validateEvents(DEVICE_ADDED, DEVICE_ADDED, DEVICE_REMOVED); |
251 | assertNull("device should not be found", service.getDevice(DID1)); | 259 | assertNull("device should not be found", service.getDevice(DID1)); |
252 | assertNotNull("device should be found", service.getDevice(DID2)); | 260 | assertNotNull("device should be found", service.getDevice(DID2)); |
253 | assertEquals("incorrect device count", 1, service.getDeviceCount()); | 261 | assertEquals("incorrect device count", 1, service.getDeviceCount()); |
@@ -298,8 +306,10 @@ public class DistributedDeviceManagerTest {
298 | 306 | ||
299 | private class TestDistributedDeviceStore extends DistributedDeviceStore { | 307 | private class TestDistributedDeviceStore extends DistributedDeviceStore { |
300 | 308 | ||
301 | - public TestDistributedDeviceStore() { | 309 | + public TestDistributedDeviceStore(StoreService storeService, |
302 | - this.storeService = storeManager; | 310 | + KryoSerializationService kryoSerializationService) { |
311 | + this.storeService = storeService; | ||
312 | + this.kryoSerializationService = kryoSerializationService; | ||
303 | } | 313 | } |
304 | } | 314 | } |
305 | 315 | ... | ... |
@@ -26,6 +26,23 @@
26 | <artifactId>onos-core-serializers</artifactId> | 26 | <artifactId>onos-core-serializers</artifactId> |
27 | <version>${project.version}</version> | 27 | <version>${project.version}</version> |
28 | </dependency> | 28 | </dependency> |
29 | + | ||
30 | + | ||
31 | + <dependency> | ||
32 | + <groupId>org.onlab.onos</groupId> | ||
33 | + <artifactId>onlab-nio</artifactId> | ||
34 | + <version>${project.version}</version> | ||
35 | + </dependency> | ||
36 | + | ||
37 | + <dependency> | ||
38 | + <groupId>com.fasterxml.jackson.core</groupId> | ||
39 | + <artifactId>jackson-databind</artifactId> | ||
40 | + </dependency> | ||
41 | + <dependency> | ||
42 | + <groupId>com.fasterxml.jackson.core</groupId> | ||
43 | + <artifactId>jackson-annotations</artifactId> | ||
44 | + </dependency> | ||
45 | + | ||
29 | <dependency> | 46 | <dependency> |
30 | <groupId>org.apache.felix</groupId> | 47 | <groupId>org.apache.felix</groupId> |
31 | <artifactId>org.apache.felix.scr.annotations</artifactId> | 48 | <artifactId>org.apache.felix.scr.annotations</artifactId> | ... | ... |
core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/ClusterConnectionListener.java
0 → 100644
1 | +package org.onlab.onos.store.cluster.impl; | ||
2 | + | ||
3 | +import org.onlab.nio.AcceptorLoop; | ||
4 | +import org.onlab.packet.IpPrefix; | ||
5 | + | ||
6 | +import java.io.IOException; | ||
7 | +import java.net.InetSocketAddress; | ||
8 | +import java.net.Socket; | ||
9 | +import java.nio.channels.ServerSocketChannel; | ||
10 | +import java.nio.channels.SocketChannel; | ||
11 | + | ||
12 | +import static java.net.InetAddress.getByAddress; | ||
13 | + | ||
14 | +/** | ||
15 | + * Listens to inbound connection requests and accepts them. | ||
16 | + */ | ||
17 | +public class ClusterConnectionListener extends AcceptorLoop { | ||
18 | + | ||
19 | + private static final long SELECT_TIMEOUT = 50; | ||
20 | + private static final int COMM_BUFFER_SIZE = 32 * 1024; | ||
21 | + | ||
22 | + private static final boolean SO_NO_DELAY = false; | ||
23 | + private static final int SO_SEND_BUFFER_SIZE = COMM_BUFFER_SIZE; | ||
24 | + private static final int SO_RCV_BUFFER_SIZE = COMM_BUFFER_SIZE; | ||
25 | + | ||
26 | + private final WorkerFinder workerFinder; | ||
27 | + | ||
28 | + ClusterConnectionListener(IpPrefix ip, int tcpPort, | ||
29 | + WorkerFinder workerFinder) throws IOException { | ||
30 | + super(SELECT_TIMEOUT, new InetSocketAddress(getByAddress(ip.toOctets()), tcpPort)); | ||
31 | + this.workerFinder = workerFinder; | ||
32 | + } | ||
33 | + | ||
34 | + @Override | ||
35 | + protected void acceptConnection(ServerSocketChannel channel) throws IOException { | ||
36 | + SocketChannel sc = channel.accept(); | ||
37 | + sc.configureBlocking(false); | ||
38 | + | ||
39 | + Socket so = sc.socket(); | ||
40 | + so.setTcpNoDelay(SO_NO_DELAY); | ||
41 | + so.setReceiveBufferSize(SO_RCV_BUFFER_SIZE); | ||
42 | + so.setSendBufferSize(SO_SEND_BUFFER_SIZE); | ||
43 | + | ||
44 | + workerFinder.findWorker().acceptStream(sc); | ||
45 | + } | ||
46 | + | ||
47 | +} |
core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/ClusterDefinitionStore.java
0 → 100644
1 | +package org.onlab.onos.store.cluster.impl; | ||
2 | + | ||
3 | +import com.fasterxml.jackson.core.JsonEncoding; | ||
4 | +import com.fasterxml.jackson.core.JsonFactory; | ||
5 | +import com.fasterxml.jackson.databind.JsonNode; | ||
6 | +import com.fasterxml.jackson.databind.ObjectMapper; | ||
7 | +import com.fasterxml.jackson.databind.node.ArrayNode; | ||
8 | +import com.fasterxml.jackson.databind.node.ObjectNode; | ||
9 | +import org.onlab.onos.cluster.DefaultControllerNode; | ||
10 | +import org.onlab.onos.cluster.NodeId; | ||
11 | +import org.onlab.packet.IpPrefix; | ||
12 | + | ||
13 | +import java.io.File; | ||
14 | +import java.io.IOException; | ||
15 | +import java.util.HashSet; | ||
16 | +import java.util.Iterator; | ||
17 | +import java.util.Set; | ||
18 | + | ||
19 | +/** | ||
20 | + * Allows for reading and writing cluster definition as a JSON file. | ||
21 | + */ | ||
22 | +public class ClusterDefinitionStore { | ||
23 | + | ||
24 | + private final File file; | ||
25 | + | ||
26 | + /** | ||
27 | + * Creates a reader/writer of the cluster definition file. | ||
28 | + * | ||
29 | + * @param filePath location of the definition file | ||
30 | + */ | ||
31 | + public ClusterDefinitionStore(String filePath) { | ||
32 | + file = new File(filePath); | ||
33 | + } | ||
34 | + | ||
35 | + /** | ||
36 | + * Returns set of the controller nodes, including self. | ||
37 | + * | ||
38 | + * @return set of controller nodes | ||
39 | + */ | ||
40 | + public Set<DefaultControllerNode> read() throws IOException { | ||
41 | + Set<DefaultControllerNode> nodes = new HashSet<>(); | ||
42 | + ObjectMapper mapper = new ObjectMapper(); | ||
43 | + ObjectNode clusterNodeDef = (ObjectNode) mapper.readTree(file); | ||
44 | + Iterator<JsonNode> it = ((ArrayNode) clusterNodeDef.get("nodes")).elements(); | ||
45 | + while (it.hasNext()) { | ||
46 | + ObjectNode nodeDef = (ObjectNode) it.next(); | ||
47 | + nodes.add(new DefaultControllerNode(new NodeId(nodeDef.get("id").asText()), | ||
48 | + IpPrefix.valueOf(nodeDef.get("ip").asText()), | ||
49 | + nodeDef.get("tcpPort").asInt(9876))); | ||
50 | + } | ||
51 | + return nodes; | ||
52 | + } | ||
53 | + | ||
54 | + /** | ||
55 | + * Writes the given set of the controller nodes. | ||
56 | + * | ||
57 | + * @param nodes set of controller nodes | ||
58 | + */ | ||
59 | + public void write(Set<DefaultControllerNode> nodes) throws IOException { | ||
60 | + ObjectMapper mapper = new ObjectMapper(); | ||
61 | + ObjectNode clusterNodeDef = mapper.createObjectNode(); | ||
62 | + ArrayNode nodeDefs = mapper.createArrayNode(); | ||
63 | + clusterNodeDef.set("nodes", nodeDefs); | ||
64 | + for (DefaultControllerNode node : nodes) { | ||
65 | + ObjectNode nodeDef = mapper.createObjectNode(); | ||
66 | + nodeDef.put("id", node.id().toString()) | ||
67 | + .put("ip", node.ip().toString()) | ||
68 | + .put("tcpPort", node.tcpPort()); | ||
69 | + nodeDefs.add(nodeDef); | ||
70 | + } | ||
71 | + mapper.writeTree(new JsonFactory().createGenerator(file, JsonEncoding.UTF8), | ||
72 | + clusterNodeDef); | ||
73 | + } | ||
74 | + | ||
75 | +} |
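Based on the read() and write() methods above, the definition file carries a top-level "nodes" array whose elements hold "id", "ip" and "tcpPort" (defaulting to 9876 on read). The following round-trip sketch is illustrative only and not part of this commit; the file path, node IDs and addresses are placeholders.

package org.onlab.onos.store.cluster.impl;

import java.util.Set;

import org.onlab.onos.cluster.DefaultControllerNode;
import org.onlab.onos.cluster.NodeId;
import org.onlab.packet.IpPrefix;

import com.google.common.collect.ImmutableSet;

/**
 * Illustrative sketch only: writes a two-node cluster definition and reads it back.
 */
public final class ClusterDefinitionExample {
    public static void main(String[] args) throws Exception {
        // Placeholder path; real deployments point this at their cluster definition file.
        ClusterDefinitionStore store = new ClusterDefinitionStore("/tmp/cluster.json");

        Set<DefaultControllerNode> nodes = ImmutableSet.of(
                new DefaultControllerNode(new NodeId("10.0.0.1"),
                                          IpPrefix.valueOf("10.0.0.1"), 9876),
                new DefaultControllerNode(new NodeId("10.0.0.2"),
                                          IpPrefix.valueOf("10.0.0.2"), 9876));
        store.write(nodes);

        for (DefaultControllerNode node : store.read()) {
            System.out.println(node.id() + " -> " + node.ip() + ":" + node.tcpPort());
        }
    }
}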
1 | +package org.onlab.onos.store.cluster.impl; | ||
2 | + | ||
3 | +import org.onlab.nio.IOLoop; | ||
4 | +import org.onlab.nio.MessageStream; | ||
5 | +import org.onlab.onos.cluster.DefaultControllerNode; | ||
6 | +import org.onlab.onos.store.cluster.messaging.ClusterMessage; | ||
7 | +import org.onlab.onos.store.cluster.messaging.ClusterMessageStream; | ||
8 | +import org.onlab.onos.store.cluster.messaging.SerializationService; | ||
9 | +import org.slf4j.Logger; | ||
10 | +import org.slf4j.LoggerFactory; | ||
11 | + | ||
12 | +import java.io.IOException; | ||
13 | +import java.net.InetSocketAddress; | ||
14 | +import java.nio.channels.ByteChannel; | ||
15 | +import java.nio.channels.SelectionKey; | ||
16 | +import java.nio.channels.SocketChannel; | ||
17 | +import java.util.List; | ||
18 | +import java.util.Objects; | ||
19 | + | ||
20 | +import static org.onlab.packet.IpPrefix.valueOf; | ||
21 | + | ||
22 | +/** | ||
23 | + * Performs the IO operations related to cluster-wide communications. | ||
24 | + */ | ||
25 | +public class ClusterIOWorker extends | ||
26 | + IOLoop<ClusterMessage, ClusterMessageStream> { | ||
27 | + | ||
28 | + private final Logger log = LoggerFactory.getLogger(getClass()); | ||
29 | + | ||
30 | + private static final long SELECT_TIMEOUT = 50; | ||
31 | + | ||
32 | + private final ConnectionManager connectionManager; | ||
33 | + private final CommunicationsDelegate commsDelegate; | ||
34 | + private final SerializationService serializationService; | ||
35 | + private final ClusterMessage helloMessage; | ||
36 | + | ||
37 | + /** | ||
38 | + * Creates a new cluster IO worker. | ||
39 | + * | ||
40 | + * @param connectionManager parent connection manager | ||
41 | + * @param commsDelegate communications delegate for dispatching | ||
42 | + * @param serializationService serialization service for encode/decode | ||
43 | + * @param helloMessage hello message for greeting peers | ||
44 | + * @throws IOException if errors occur during IO loop ignition | ||
45 | + */ | ||
46 | + ClusterIOWorker(ConnectionManager connectionManager, | ||
47 | + CommunicationsDelegate commsDelegate, | ||
48 | + SerializationService serializationService, | ||
49 | + ClusterMessage helloMessage) throws IOException { | ||
50 | + super(SELECT_TIMEOUT); | ||
51 | + this.connectionManager = connectionManager; | ||
52 | + this.commsDelegate = commsDelegate; | ||
53 | + this.serializationService = serializationService; | ||
54 | + this.helloMessage = helloMessage; | ||
55 | + } | ||
56 | + | ||
57 | + @Override | ||
58 | + protected ClusterMessageStream createStream(ByteChannel byteChannel) { | ||
59 | + return new ClusterMessageStream(serializationService, this, byteChannel); | ||
60 | + } | ||
61 | + | ||
62 | + @Override | ||
63 | + protected void processMessages(List<ClusterMessage> messages, MessageStream<ClusterMessage> stream) { | ||
64 | + for (ClusterMessage message : messages) { | ||
65 | + commsDelegate.dispatch(message); | ||
66 | + } | ||
67 | + } | ||
68 | + | ||
69 | + @Override | ||
70 | + public ClusterMessageStream acceptStream(SocketChannel channel) { | ||
71 | + ClusterMessageStream stream = super.acceptStream(channel); | ||
72 | + try { | ||
73 | + InetSocketAddress sa = (InetSocketAddress) channel.getRemoteAddress(); | ||
74 | + log.info("Accepted connection from node {}", valueOf(sa.getAddress().getAddress())); | ||
75 | + stream.write(helloMessage); | ||
76 | + | ||
77 | + } catch (IOException e) { | ||
78 | + log.warn("Unable to accept connection from an unknown end-point", e); | ||
79 | + } | ||
80 | + return stream; | ||
81 | + } | ||
82 | + | ||
83 | + @Override | ||
84 | + protected void connect(SelectionKey key) throws IOException { | ||
85 | + try { | ||
86 | + super.connect(key); | ||
87 | + ClusterMessageStream stream = (ClusterMessageStream) key.attachment(); | ||
88 | + stream.write(helloMessage); | ||
89 | + | ||
90 | + } catch (IOException e) { | ||
91 | + if (!Objects.equals(e.getMessage(), "Connection refused")) { | ||
92 | + throw e; | ||
93 | + } | ||
94 | + } | ||
95 | + } | ||
96 | + | ||
97 | + @Override | ||
98 | + protected void removeStream(MessageStream<ClusterMessage> stream) { | ||
99 | + DefaultControllerNode node = ((ClusterMessageStream) stream).node(); | ||
100 | + if (node != null) { | ||
101 | + log.info("Closed connection to node {}", node.id()); | ||
102 | + connectionManager.removeNodeStream(node); | ||
103 | + } | ||
104 | + super.removeStream(stream); | ||
105 | + } | ||
106 | + | ||
107 | +} |
core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/ClusterNodesDelegate.java
0 → 100644
1 | +package org.onlab.onos.store.cluster.impl; | ||
2 | + | ||
3 | +import org.onlab.onos.cluster.DefaultControllerNode; | ||
4 | + | ||
5 | +/** | ||
6 | + * Simple back interface through which connection manager can interact with | ||
7 | + * the cluster store. | ||
8 | + */ | ||
9 | +public interface ClusterNodesDelegate { | ||
10 | + | ||
11 | + /** | ||
12 | + * Notifies about a new cluster node being detected. | ||
13 | + * | ||
14 | + * @param node newly detected cluster node | ||
15 | + */ | ||
16 | + void nodeDetected(DefaultControllerNode node); | ||
17 | + | ||
18 | + /** | ||
20 | + * Notifies about a cluster node going offline. | ||
20 | + * | ||
21 | + * @param node cluster node that vanished | ||
22 | + */ | ||
23 | + void nodeVanished(DefaultControllerNode node); | ||
24 | + | ||
25 | +} |
core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/CommunicationsDelegate.java
0 → 100644
1 | +package org.onlab.onos.store.cluster.impl; | ||
2 | + | ||
3 | +import org.onlab.onos.store.cluster.messaging.ClusterMessage; | ||
4 | + | ||
5 | +/** | ||
6 | + * Simple back interface for interacting with the communications service. | ||
7 | + */ | ||
8 | +public interface CommunicationsDelegate { | ||
9 | + | ||
10 | + /** | ||
11 | + * Dispatches the specified message to all registered subscribers. | ||
12 | + * | ||
13 | + * @param message message to be dispatched | ||
14 | + */ | ||
15 | + void dispatch(ClusterMessage message); | ||
16 | + | ||
17 | + /** | ||
18 | + * Sets the sender. | ||
19 | + * | ||
20 | + * @param messageSender message sender | ||
21 | + */ | ||
22 | + void setSender(MessageSender messageSender); | ||
23 | + | ||
24 | +} |
1 | +package org.onlab.onos.store.cluster.impl; | ||
2 | + | ||
3 | +import org.onlab.onos.cluster.DefaultControllerNode; | ||
4 | +import org.onlab.onos.cluster.NodeId; | ||
5 | +import org.onlab.onos.store.cluster.messaging.ClusterMessage; | ||
6 | +import org.onlab.onos.store.cluster.messaging.ClusterMessageStream; | ||
7 | +import org.onlab.onos.store.cluster.messaging.HelloMessage; | ||
8 | +import org.onlab.onos.store.cluster.messaging.SerializationService; | ||
9 | +import org.slf4j.Logger; | ||
10 | +import org.slf4j.LoggerFactory; | ||
11 | + | ||
12 | +import java.io.IOException; | ||
13 | +import java.net.InetSocketAddress; | ||
14 | +import java.net.SocketAddress; | ||
15 | +import java.nio.channels.SocketChannel; | ||
16 | +import java.util.ArrayList; | ||
17 | +import java.util.HashSet; | ||
18 | +import java.util.List; | ||
19 | +import java.util.Map; | ||
20 | +import java.util.Set; | ||
21 | +import java.util.Timer; | ||
22 | +import java.util.TimerTask; | ||
23 | +import java.util.concurrent.ConcurrentHashMap; | ||
24 | +import java.util.concurrent.ExecutorService; | ||
25 | +import java.util.concurrent.Executors; | ||
26 | + | ||
27 | +import static java.net.InetAddress.getByAddress; | ||
28 | +import static org.onlab.util.Tools.namedThreads; | ||
29 | + | ||
30 | +/** | ||
31 | + * Manages connections to other controller cluster nodes. | ||
32 | + */ | ||
33 | +public class ConnectionManager implements MessageSender { | ||
34 | + | ||
35 | + private final Logger log = LoggerFactory.getLogger(getClass()); | ||
36 | + | ||
37 | + private static final long CONNECTION_CUSTODIAN_DELAY = 1000L; | ||
38 | + private static final long CONNECTION_CUSTODIAN_FREQUENCY = 5000; | ||
39 | + | ||
40 | + private static final long START_TIMEOUT = 1000; | ||
41 | + private static final int WORKERS = 3; | ||
42 | + | ||
43 | + private ClusterConnectionListener connectionListener; | ||
44 | + private List<ClusterIOWorker> workers = new ArrayList<>(WORKERS); | ||
45 | + | ||
46 | + private final DefaultControllerNode localNode; | ||
47 | + private final ClusterNodesDelegate nodesDelegate; | ||
48 | + private final CommunicationsDelegate commsDelegate; | ||
49 | + private final SerializationService serializationService; | ||
50 | + | ||
51 | + // Nodes to be monitored to make sure they have a connection. | ||
52 | + private final Set<DefaultControllerNode> nodes = new HashSet<>(); | ||
53 | + | ||
54 | + // Means to track message streams to other nodes. | ||
55 | + private final Map<NodeId, ClusterMessageStream> streams = new ConcurrentHashMap<>(); | ||
56 | + | ||
57 | + // Executor pools for listening and managing connections to other nodes. | ||
58 | + private final ExecutorService listenExecutor = | ||
59 | + Executors.newSingleThreadExecutor(namedThreads("onos-comm-listen")); | ||
60 | + private final ExecutorService commExecutors = | ||
61 | + Executors.newFixedThreadPool(WORKERS, namedThreads("onos-comm-cluster")); | ||
62 | + private final ExecutorService heartbeatExecutor = | ||
63 | + Executors.newSingleThreadExecutor(namedThreads("onos-comm-heartbeat")); | ||
64 | + | ||
65 | + private final Timer timer = new Timer("onos-comm-initiator"); | ||
66 | + private final TimerTask connectionCustodian = new ConnectionCustodian(); | ||
67 | + | ||
68 | + private final WorkerFinder workerFinder = new LeastUtilitiedWorkerFinder(); | ||
69 | + | ||
70 | + | ||
71 | + /** | ||
72 | + * Creates a new connection manager. | ||
73 | + */ | ||
74 | + ConnectionManager(DefaultControllerNode localNode, | ||
75 | + ClusterNodesDelegate nodesDelegate, | ||
76 | + CommunicationsDelegate commsDelegate, | ||
77 | + SerializationService serializationService) { | ||
78 | + this.localNode = localNode; | ||
79 | + this.nodesDelegate = nodesDelegate; | ||
80 | + this.commsDelegate = commsDelegate; | ||
81 | + this.serializationService = serializationService; | ||
82 | + | ||
83 | + commsDelegate.setSender(this); | ||
84 | + startCommunications(); | ||
85 | + startListening(); | ||
86 | + startInitiating(); | ||
87 | + log.info("Started"); | ||
88 | + } | ||
89 | + | ||
90 | + /** | ||
91 | + * Shuts down the connection manager. | ||
92 | + */ | ||
93 | + void shutdown() { | ||
94 | + connectionListener.shutdown(); | ||
95 | + for (ClusterIOWorker worker : workers) { | ||
96 | + worker.shutdown(); | ||
97 | + } | ||
98 | + log.info("Stopped"); | ||
99 | + } | ||
100 | + | ||
101 | + /** | ||
102 | + * Adds the node to the list of monitored nodes. | ||
103 | + * | ||
104 | + * @param node node to be added | ||
105 | + */ | ||
106 | + void addNode(DefaultControllerNode node) { | ||
107 | + nodes.add(node); | ||
108 | + } | ||
109 | + | ||
110 | + /** | ||
111 | + * Removes the node from the list of monitored nodes. | ||
112 | + * | ||
113 | + * @param node node to be removed | ||
114 | + */ | ||
115 | + void removeNode(DefaultControllerNode node) { | ||
116 | + nodes.remove(node); | ||
117 | + ClusterMessageStream stream = streams.remove(node.id()); | ||
118 | + if (stream != null) { | ||
119 | + stream.close(); | ||
120 | + } | ||
121 | + } | ||
122 | + | ||
123 | + /** | ||
124 | + * Removes the stream associated with the specified node. | ||
125 | + * | ||
126 | + * @param node node whose stream to remove | ||
127 | + */ | ||
128 | + void removeNodeStream(DefaultControllerNode node) { | ||
129 | + nodesDelegate.nodeVanished(node); | ||
130 | + streams.remove(node.id()); | ||
131 | + } | ||
132 | + | ||
133 | + @Override | ||
134 | + public boolean send(NodeId nodeId, ClusterMessage message) { | ||
135 | + ClusterMessageStream stream = streams.get(nodeId); | ||
136 | + if (stream != null) { | ||
137 | + try { | ||
138 | + stream.write(message); | ||
139 | + return true; | ||
140 | + } catch (IOException e) { | ||
141 | + log.warn("Unable to send a message about {} to node {}", | ||
142 | + message.subject(), nodeId); | ||
143 | + } | ||
144 | + } | ||
145 | + return false; | ||
146 | + } | ||
147 | + | ||
148 | + /** | ||
149 | + * Kicks off the IO loops and waits for them to start up. | ||
150 | + */ | ||
151 | + private void startCommunications() { | ||
152 | + HelloMessage hello = new HelloMessage(localNode.id(), localNode.ip(), | ||
153 | + localNode.tcpPort()); | ||
154 | + for (int i = 0; i < WORKERS; i++) { | ||
155 | + try { | ||
156 | + ClusterIOWorker worker = | ||
157 | + new ClusterIOWorker(this, commsDelegate, | ||
158 | + serializationService, hello); | ||
159 | + workers.add(worker); | ||
160 | + commExecutors.execute(worker); | ||
161 | + } catch (IOException e) { | ||
162 | + log.warn("Unable to start communication worker", e); | ||
163 | + } | ||
164 | + } | ||
165 | + | ||
166 | + // Wait for the IO loops to start | ||
167 | + for (ClusterIOWorker loop : workers) { | ||
168 | + if (!loop.awaitStart(START_TIMEOUT)) { | ||
169 | + log.warn("Comm loop did not start on-time; moving on..."); | ||
170 | + } | ||
171 | + } | ||
172 | + } | ||
173 | + | ||
174 | + /** | ||
175 | + * Starts listening for connections from peer cluster members. | ||
176 | + */ | ||
177 | + private void startListening() { | ||
178 | + try { | ||
179 | + connectionListener = | ||
180 | + new ClusterConnectionListener(localNode.ip(), localNode.tcpPort(), | ||
181 | + workerFinder); | ||
182 | + listenExecutor.execute(connectionListener); | ||
183 | + if (!connectionListener.awaitStart(START_TIMEOUT)) { | ||
184 | + log.warn("Listener did not start on-time; moving on..."); | ||
185 | + } | ||
186 | + } catch (IOException e) { | ||
187 | + log.error("Unable to listen for cluster connections", e); | ||
188 | + } | ||
189 | + } | ||
190 | + | ||
191 | + /** | ||
192 | + * Initiates an open connection request and registers the pending socket | ||
193 | + * channel with the given IO loop. | ||
194 | + * | ||
195 | + * @param loop loop with which the channel should be registered | ||
196 | + * @throws java.io.IOException if the socket could not be opened or connected | ||
197 | + */ | ||
198 | + private void initiateConnection(DefaultControllerNode node, | ||
199 | + ClusterIOWorker loop) throws IOException { | ||
200 | + SocketAddress sa = new InetSocketAddress(getByAddress(node.ip().toOctets()), node.tcpPort()); | ||
201 | + SocketChannel ch = SocketChannel.open(); | ||
202 | + ch.configureBlocking(false); | ||
203 | + ch.connect(sa); | ||
204 | + loop.connectStream(ch); | ||
205 | + } | ||
206 | + | ||
207 | + | ||
208 | + /** | ||
209 | + * Attempts to connect to any nodes that do not have an associated connection. | ||
210 | + */ | ||
211 | + private void startInitiating() { | ||
212 | + timer.schedule(connectionCustodian, CONNECTION_CUSTODIAN_DELAY, | ||
213 | + CONNECTION_CUSTODIAN_FREQUENCY); | ||
214 | + } | ||
215 | + | ||
216 | + // Sweeps through all controller nodes and attempts to open connection to | ||
217 | + // those that presently do not have one. | ||
218 | + private class ConnectionCustodian extends TimerTask { | ||
219 | + @Override | ||
220 | + public void run() { | ||
221 | + for (DefaultControllerNode node : nodes) { | ||
222 | + if (node != localNode && !streams.containsKey(node.id())) { | ||
223 | + try { | ||
224 | + initiateConnection(node, workerFinder.findWorker()); | ||
225 | + } catch (IOException e) { | ||
226 | + log.debug("Unable to connect", e); | ||
227 | + } | ||
228 | + } | ||
229 | + } | ||
230 | + } | ||
231 | + } | ||
232 | + | ||
233 | + // Finds the least utilized IO loop. | ||
234 | + private class LeastUtilitiedWorkerFinder implements WorkerFinder { | ||
235 | + | ||
236 | + @Override | ||
237 | + public ClusterIOWorker findWorker() { | ||
238 | + ClusterIOWorker leastUtilized = null; | ||
239 | + int minCount = Integer.MAX_VALUE; | ||
240 | + for (ClusterIOWorker worker : workers) { | ||
241 | + int count = worker.streamCount(); | ||
242 | + if (count == 0) { | ||
243 | + return worker; | ||
244 | + } | ||
245 | + | ||
246 | + if (count < minCount) { | ||
247 | + leastUtilized = worker; | ||
248 | + minCount = count; | ||
249 | + } | ||
250 | + } | ||
251 | + return leastUtilized; | ||
252 | + } | ||
253 | + } | ||
254 | + | ||
255 | +} |
core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java
0 → 100644
1 | +package org.onlab.onos.store.cluster.impl; | ||
2 | + | ||
3 | +import com.google.common.collect.ImmutableSet; | ||
4 | +import org.apache.felix.scr.annotations.Activate; | ||
5 | +import org.apache.felix.scr.annotations.Component; | ||
6 | +import org.apache.felix.scr.annotations.Deactivate; | ||
7 | +import org.apache.felix.scr.annotations.Reference; | ||
8 | +import org.apache.felix.scr.annotations.ReferenceCardinality; | ||
9 | +import org.apache.felix.scr.annotations.Service; | ||
10 | +import org.onlab.onos.cluster.ClusterEvent; | ||
11 | +import org.onlab.onos.cluster.ClusterStore; | ||
12 | +import org.onlab.onos.cluster.ClusterStoreDelegate; | ||
13 | +import org.onlab.onos.cluster.ControllerNode; | ||
14 | +import org.onlab.onos.cluster.DefaultControllerNode; | ||
15 | +import org.onlab.onos.cluster.NodeId; | ||
16 | +import org.onlab.onos.store.AbstractStore; | ||
17 | +import org.onlab.onos.store.cluster.messaging.SerializationService; | ||
18 | +import org.onlab.packet.IpPrefix; | ||
19 | +import org.slf4j.Logger; | ||
20 | +import org.slf4j.LoggerFactory; | ||
21 | + | ||
22 | +import java.io.IOException; | ||
23 | +import java.util.Map; | ||
24 | +import java.util.Set; | ||
25 | +import java.util.concurrent.ConcurrentHashMap; | ||
26 | + | ||
27 | +import static org.onlab.onos.cluster.ControllerNode.State; | ||
28 | +import static org.onlab.packet.IpPrefix.valueOf; | ||
29 | + | ||
30 | +/** | ||
31 | + * Distributed implementation of the cluster nodes store. | ||
32 | + */ | ||
33 | +@Component(immediate = true) | ||
34 | +@Service | ||
35 | +public class DistributedClusterStore | ||
36 | + extends AbstractStore<ClusterEvent, ClusterStoreDelegate> | ||
37 | + implements ClusterStore { | ||
38 | + | ||
39 | + private final Logger log = LoggerFactory.getLogger(getClass()); | ||
40 | + | ||
41 | + private DefaultControllerNode localNode; | ||
42 | + private final Map<NodeId, DefaultControllerNode> nodes = new ConcurrentHashMap<>(); | ||
43 | + private final Map<NodeId, State> states = new ConcurrentHashMap<>(); | ||
44 | + | ||
45 | + @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
46 | + private CommunicationsDelegate commsDelegate; | ||
47 | + | ||
48 | + @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
49 | + private SerializationService serializationService; | ||
50 | + | ||
51 | + private final ClusterNodesDelegate nodesDelegate = new InnerNodesDelegate(); | ||
52 | + private ConnectionManager connectionManager; | ||
53 | + | ||
54 | + @Activate | ||
55 | + public void activate() { | ||
56 | + loadClusterDefinition(); | ||
57 | + establishSelfIdentity(); | ||
58 | + connectionManager = new ConnectionManager(localNode, nodesDelegate, | ||
59 | + commsDelegate, serializationService); | ||
60 | + log.info("Started"); | ||
61 | + } | ||
62 | + | ||
63 | + @Deactivate | ||
64 | + public void deactivate() { | ||
65 | + log.info("Stopped"); | ||
66 | + } | ||
67 | + | ||
68 | + /** | ||
69 | + * Loads the cluster definition file. | ||
70 | + */ | ||
71 | + private void loadClusterDefinition() { | ||
72 | + ClusterDefinitionStore cds = new ClusterDefinitionStore("../config/cluster.json"); | ||
73 | + try { | ||
74 | + Set<DefaultControllerNode> storedNodes = cds.read(); | ||
75 | + for (DefaultControllerNode node : storedNodes) { | ||
76 | + nodes.put(node.id(), node); | ||
77 | + } | ||
78 | + } catch (IOException e) { | ||
79 | + log.error("Unable to read cluster definitions", e); | ||
80 | + } | ||
81 | + } | ||
82 | + | ||
83 | + /** | ||
84 | + * Determines who the local controller node is. | ||
85 | + */ | ||
86 | + private void establishSelfIdentity() { | ||
87 | + // Establishes the controller's own identity. | ||
88 | + IpPrefix ip = valueOf(System.getProperty("onos.ip", "127.0.1.1")); | ||
89 | + localNode = nodes.get(new NodeId(ip.toString())); | ||
90 | + | ||
91 | + // As a fall-back, let's make sure we at least know who we are. | ||
92 | + if (localNode == null) { | ||
93 | + localNode = new DefaultControllerNode(new NodeId(ip.toString()), ip); | ||
94 | + nodes.put(localNode.id(), localNode); | ||
95 | + states.put(localNode.id(), State.ACTIVE); | ||
96 | + } | ||
97 | + } | ||
98 | + | ||
99 | + @Override | ||
100 | + public ControllerNode getLocalNode() { | ||
101 | + return localNode; | ||
102 | + } | ||
103 | + | ||
104 | + @Override | ||
105 | + public Set<ControllerNode> getNodes() { | ||
106 | + ImmutableSet.Builder<ControllerNode> builder = ImmutableSet.builder(); | ||
107 | + return builder.addAll(nodes.values()).build(); | ||
108 | + } | ||
109 | + | ||
110 | + @Override | ||
111 | + public ControllerNode getNode(NodeId nodeId) { | ||
112 | + return nodes.get(nodeId); | ||
113 | + } | ||
114 | + | ||
115 | + @Override | ||
116 | + public State getState(NodeId nodeId) { | ||
117 | + State state = states.get(nodeId); | ||
118 | + return state == null ? State.INACTIVE : state; | ||
119 | + } | ||
120 | + | ||
121 | + @Override | ||
122 | + public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) { | ||
123 | + DefaultControllerNode node = new DefaultControllerNode(nodeId, ip, tcpPort); | ||
124 | + nodes.put(nodeId, node); | ||
125 | + connectionManager.addNode(node); | ||
126 | + return node; | ||
127 | + } | ||
128 | + | ||
129 | + @Override | ||
130 | + public void removeNode(NodeId nodeId) { | ||
131 | + DefaultControllerNode node = nodes.remove(nodeId); | ||
132 | + if (node != null) { | ||
133 | + connectionManager.removeNode(node); | ||
134 | + } | ||
135 | + } | ||
136 | + | ||
137 | + // Entity to handle callbacks from the connection manager. | ||
138 | + private class InnerNodesDelegate implements ClusterNodesDelegate { | ||
139 | + @Override | ||
140 | + public void nodeDetected(DefaultControllerNode node) { | ||
141 | + nodes.put(node.id(), node); | ||
142 | + states.put(node.id(), State.ACTIVE); | ||
143 | + } | ||
144 | + | ||
145 | + @Override | ||
146 | + public void nodeVanished(DefaultControllerNode node) { | ||
147 | + states.put(node.id(), State.INACTIVE); | ||
148 | + } | ||
149 | + } | ||
150 | +} |
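A note on how the local identity above is resolved: establishSelfIdentity() reads the onos.ip system property (defaulting to 127.0.1.1) and looks up a node whose id equals that address, so the property should match one of the id fields in the cluster definition. A hedged example of supplying it to the JVM (the exact launcher and flag plumbing depend on the deployment and are not shown in this change):

    java -Donos.ip=192.168.56.101 ...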
1 | +package org.onlab.onos.store.cluster.impl; | ||
2 | + | ||
3 | +import org.onlab.onos.cluster.NodeId; | ||
4 | +import org.onlab.onos.store.cluster.messaging.ClusterMessage; | ||
5 | + | ||
6 | +/** | ||
7 | + * Abstraction of a mechanism for sending messages to specific cluster nodes. | ||
8 | + */ | ||
9 | +public interface MessageSender { | ||
10 | + | ||
11 | + /** | ||
12 | + * Sends the specified message to the given cluster node. | ||
13 | + * | ||
14 | + * @param nodeId node identifier | ||
15 | + * @param message mesage to send | ||
16 | + * @return true if the message was sent sucessfully; false if there is | ||
17 | + * no stream or if there was an error | ||
18 | + */ | ||
19 | + boolean send(NodeId nodeId, ClusterMessage message); | ||
20 | + | ||
21 | +} |
1 | +package org.onlab.onos.store.cluster.messaging; | ||
2 | + | ||
3 | +import org.onlab.onos.cluster.NodeId; | ||
4 | + | ||
5 | +import java.util.Set; | ||
6 | + | ||
7 | +/** | ||
8 | + * Service for assisting communications between controller cluster nodes. | ||
9 | + */ | ||
10 | +public interface ClusterCommunicationService { | ||
11 | + | ||
12 | + /** | ||
13 | + * Sends a message to the specified controller node. | ||
14 | + * | ||
15 | + * @param message message to send | ||
16 | + * @param toNodeId node identifier | ||
17 | + * @return true if the message was sent successfully; false if there is | ||
18 | + * no stream or if there was an error | ||
19 | + */ | ||
20 | + boolean send(ClusterMessage message, NodeId toNodeId); | ||
21 | + | ||
22 | + /** | ||
23 | + * Adds a new subscriber for the specified message subject. | ||
24 | + * | ||
25 | + * @param subject message subject | ||
26 | + * @param subscriber message subscriber | ||
27 | + */ | ||
28 | + void addSubscriber(MessageSubject subject, MessageSubscriber subscriber); | ||
29 | + | ||
30 | + /** | ||
31 | + * Removes the specified subscriber from the given message subject. | ||
32 | + * | ||
33 | + * @param subject message subject | ||
34 | + * @param subscriber message subscriber | ||
35 | + */ | ||
36 | + void removeSubscriber(MessageSubject subject, MessageSubscriber subscriber); | ||
37 | + | ||
38 | + /** | ||
39 | + * Returns the set of subscribers for the specified message subject. | ||
40 | + * | ||
41 | + * @param subject message subject | ||
42 | + * @return set of message subscribers | ||
43 | + */ | ||
44 | + Set<MessageSubscriber> getSubscribers(MessageSubject subject); | ||
45 | + | ||
46 | +} |
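A minimal sketch of how a store might use this service, assuming a ClusterCommunicationService reference named comms has already been obtained (for example via injection); MessageSubject.HELLO is the only subject visible in this change, so it is used purely for illustration:

    // Sketch only: 'comms' is an assumed, already-injected ClusterCommunicationService.
    MessageSubscriber subscriber = new MessageSubscriber() {
        @Override
        public void receive(ClusterMessage message) {
            // Inspect the received message; a real subscriber would act on its payload.
            System.out.println("Got cluster message with subject " + message.subject());
        }
    };
    comms.addSubscriber(MessageSubject.HELLO, subscriber);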
1 | +package org.onlab.onos.store.cluster.messaging; | ||
2 | + | ||
3 | +import org.onlab.nio.AbstractMessage; | ||
4 | + | ||
5 | +import static com.google.common.base.MoreObjects.toStringHelper; | ||
6 | + | ||
7 | +/** | ||
8 | + * Base message for cluster-wide communications. | ||
9 | + */ | ||
10 | +public abstract class ClusterMessage extends AbstractMessage { | ||
11 | + | ||
12 | + private final MessageSubject subject; | ||
13 | + | ||
14 | + /** | ||
15 | + * Creates a cluster message. | ||
16 | + * | ||
17 | + * @param subject message subject | ||
18 | + */ | ||
19 | + protected ClusterMessage(MessageSubject subject) { | ||
20 | + this.subject = subject; | ||
21 | + } | ||
22 | + | ||
23 | + /** | ||
24 | + * Returns the message subject indicator. | ||
25 | + * | ||
26 | + * @return message subject | ||
27 | + */ | ||
28 | + public MessageSubject subject() { | ||
29 | + return subject; | ||
30 | + } | ||
31 | + | ||
32 | + @Override | ||
33 | + public String toString() { | ||
34 | + return toStringHelper(this).add("subject", subject).add("length", length).toString(); | ||
35 | + } | ||
36 | + | ||
37 | +} |
core/store/dist/src/main/java/org/onlab/onos/store/cluster/messaging/ClusterMessageStream.java
0 → 100644
1 | +package org.onlab.onos.store.cluster.messaging; | ||
2 | + | ||
3 | +import org.onlab.nio.IOLoop; | ||
4 | +import org.onlab.nio.MessageStream; | ||
5 | +import org.onlab.onos.cluster.DefaultControllerNode; | ||
6 | + | ||
7 | +import java.nio.ByteBuffer; | ||
8 | +import java.nio.channels.ByteChannel; | ||
9 | + | ||
10 | +import static com.google.common.base.Preconditions.checkState; | ||
11 | + | ||
12 | +/** | ||
13 | + * Stream for transferring messages between two cluster members. | ||
14 | + */ | ||
15 | +public class ClusterMessageStream extends MessageStream<ClusterMessage> { | ||
16 | + | ||
17 | + private static final int COMM_BUFFER_SIZE = 32 * 1024; | ||
18 | + private static final int COMM_IDLE_TIME = 500; | ||
19 | + | ||
20 | + private DefaultControllerNode node; | ||
21 | + private SerializationService serializationService; | ||
22 | + | ||
23 | + /** | ||
24 | + * Creates a message stream associated with the specified IO loop and | ||
25 | + * backed by the given byte channel. | ||
26 | + * | ||
27 | + * @param serializationService service for encoding/decoding messages | ||
28 | + * @param loop IO loop | ||
29 | + * @param byteChannel backing byte channel | ||
30 | + */ | ||
31 | + public ClusterMessageStream(SerializationService serializationService, | ||
32 | + IOLoop<ClusterMessage, ?> loop, | ||
33 | + ByteChannel byteChannel) { | ||
34 | + super(loop, byteChannel, COMM_BUFFER_SIZE, COMM_IDLE_TIME); | ||
35 | + this.serializationService = serializationService; | ||
36 | + } | ||
37 | + | ||
38 | + /** | ||
39 | + * Returns the node with which this stream is associated. | ||
40 | + * | ||
41 | + * @return controller node | ||
42 | + */ | ||
43 | + public DefaultControllerNode node() { | ||
44 | + return node; | ||
45 | + } | ||
46 | + | ||
47 | + /** | ||
48 | + * Sets the node with which this stream is affiliated. | ||
49 | + * | ||
50 | + * @param node controller node | ||
51 | + */ | ||
52 | + public void setNode(DefaultControllerNode node) { | ||
53 | + checkState(this.node == null, "Stream is already bound to a node"); | ||
54 | + this.node = node; | ||
55 | + } | ||
56 | + | ||
57 | + @Override | ||
58 | + protected ClusterMessage read(ByteBuffer buffer) { | ||
59 | + return serializationService.decode(buffer); | ||
60 | + } | ||
61 | + | ||
62 | + @Override | ||
63 | + protected void write(ClusterMessage message, ByteBuffer buffer) { | ||
64 | + serializationService.encode(message, buffer); | ||
65 | + } | ||
66 | + | ||
67 | +} |
1 | +package org.onlab.onos.store.cluster.messaging; | ||
2 | + | ||
3 | +import org.onlab.onos.cluster.NodeId; | ||
4 | + | ||
5 | +/** | ||
6 | + * Echo heart-beat message that nodes send to each other. | ||
7 | + */ | ||
8 | +public class EchoMessage extends ClusterMessage { | ||
9 | + | ||
10 | + private NodeId nodeId; | ||
11 | + | ||
12 | + // For serialization | ||
13 | + private EchoMessage() { | ||
14 | + super(MessageSubject.HELLO); | ||
15 | + nodeId = null; | ||
16 | + } | ||
17 | + | ||
18 | + /** | ||
19 | + * Creates a new heart-beat echo message. | ||
20 | + * | ||
21 | + * @param nodeId sending node identification | ||
22 | + */ | ||
23 | + public EchoMessage(NodeId nodeId) { | ||
24 | + super(MessageSubject.HELLO); | ||
25 | + this.nodeId = nodeId; | ||
26 | + } | ||
27 | + | ||
28 | + /** | ||
29 | + * Returns the sending node identifier. | ||
30 | + * | ||
31 | + * @return node identifier | ||
32 | + */ | ||
33 | + public NodeId nodeId() { | ||
34 | + return nodeId; | ||
35 | + } | ||
36 | + | ||
37 | +} |
1 | +package org.onlab.onos.store.cluster.messaging; | ||
2 | + | ||
3 | +import org.onlab.onos.cluster.NodeId; | ||
4 | +import org.onlab.packet.IpPrefix; | ||
5 | + | ||
6 | +/** | ||
7 | + * Hello message that nodes use to greet each other. | ||
8 | + */ | ||
9 | +public class HelloMessage extends ClusterMessage { | ||
10 | + | ||
11 | + private NodeId nodeId; | ||
12 | + private IpPrefix ipAddress; | ||
13 | + private int tcpPort; | ||
14 | + | ||
15 | + // For serialization | ||
16 | + private HelloMessage() { | ||
17 | + super(MessageSubject.HELLO); | ||
18 | + nodeId = null; | ||
19 | + ipAddress = null; | ||
20 | + tcpPort = 0; | ||
21 | + } | ||
22 | + | ||
23 | + /** | ||
24 | + * Creates a new hello message for the specified end-point data. | ||
25 | + * | ||
26 | + * @param nodeId sending node identification | ||
27 | + * @param ipAddress sending node IP address | ||
28 | + * @param tcpPort sending node TCP port | ||
29 | + */ | ||
30 | + public HelloMessage(NodeId nodeId, IpPrefix ipAddress, int tcpPort) { | ||
31 | + super(MessageSubject.HELLO); | ||
32 | + this.nodeId = nodeId; | ||
33 | + this.ipAddress = ipAddress; | ||
34 | + this.tcpPort = tcpPort; | ||
35 | + } | ||
36 | + | ||
37 | + /** | ||
38 | + * Returns the sending node identifier. | ||
39 | + * | ||
40 | + * @return node identifier | ||
41 | + */ | ||
42 | + public NodeId nodeId() { | ||
43 | + return nodeId; | ||
44 | + } | ||
45 | + | ||
46 | + /** | ||
47 | + * Returns the sending node IP address. | ||
48 | + * | ||
49 | + * @return node IP address | ||
50 | + */ | ||
51 | + public IpPrefix ipAddress() { | ||
52 | + return ipAddress; | ||
53 | + } | ||
54 | + | ||
55 | + /** | ||
56 | + * Returns the sending node TCP listen port. | ||
57 | + * | ||
58 | + * @return TCP listen port | ||
59 | + */ | ||
60 | + public int tcpPort() { | ||
61 | + return tcpPort; | ||
62 | + } | ||
63 | +} |
core/store/dist/src/main/java/org/onlab/onos/store/cluster/messaging/MessageSubscriber.java
0 → 100644
1 | +package org.onlab.onos.store.cluster.messaging; | ||
2 | + | ||
3 | +/** | ||
4 | + * Represents a message consumer. | ||
5 | + */ | ||
6 | +public interface MessageSubscriber { | ||
7 | + | ||
8 | + /** | ||
9 | + * Receives the specified cluster message. | ||
10 | + * | ||
11 | + * @param message message to be received | ||
12 | + */ | ||
13 | + void receive(ClusterMessage message); | ||
14 | + | ||
15 | +} |
core/store/dist/src/main/java/org/onlab/onos/store/cluster/messaging/SerializationService.java
0 → 100644
1 | +package org.onlab.onos.store.cluster.messaging; | ||
2 | + | ||
3 | +import java.nio.ByteBuffer; | ||
4 | + | ||
5 | +/** | ||
6 | + * Service for serializing/deserializing intra-cluster messages. | ||
7 | + */ | ||
8 | +public interface SerializationService { | ||
9 | + | ||
10 | + /** | ||
11 | + * Decodes the specified byte buffer to obtain a message within. | ||
12 | + * | ||
13 | + * @param buffer byte buffer with message(s) | ||
14 | + * @return parsed message | ||
15 | + */ | ||
16 | + ClusterMessage decode(ByteBuffer buffer); | ||
17 | + | ||
18 | + /** | ||
19 | + * Encodes the specified message into the given byte buffer. | ||
20 | + * | ||
21 | + * @param message message to be encoded | ||
22 | + * @param buffer byte buffer to receive the message data | ||
23 | + */ | ||
24 | + void encode(ClusterMessage message, ByteBuffer buffer); | ||
25 | + | ||
26 | +} |
1 | +package org.onlab.onos.store.cluster.messaging.impl; | ||
2 | + | ||
3 | +import com.google.common.collect.HashMultimap; | ||
4 | +import com.google.common.collect.ImmutableSet; | ||
5 | +import com.google.common.collect.Multimap; | ||
6 | +import org.apache.felix.scr.annotations.Component; | ||
7 | +import org.apache.felix.scr.annotations.Service; | ||
8 | +import org.onlab.onos.cluster.NodeId; | ||
9 | +import org.onlab.onos.store.cluster.impl.CommunicationsDelegate; | ||
10 | +import org.onlab.onos.store.cluster.impl.MessageSender; | ||
11 | +import org.onlab.onos.store.cluster.messaging.ClusterCommunicationService; | ||
12 | +import org.onlab.onos.store.cluster.messaging.ClusterMessage; | ||
13 | +import org.onlab.onos.store.cluster.messaging.MessageSubject; | ||
14 | +import org.onlab.onos.store.cluster.messaging.MessageSubscriber; | ||
15 | + | ||
16 | +import java.util.Set; | ||
17 | + | ||
18 | +/** | ||
19 | + * Implements the cluster communication services for use by other stores. | ||
20 | + */ | ||
21 | +@Component(immediate = true) | ||
22 | +@Service | ||
23 | +public class ClusterCommunicationManager | ||
24 | + implements ClusterCommunicationService, CommunicationsDelegate { | ||
25 | + | ||
26 | + // TODO: use something different that won't require synchronization | ||
27 | + private Multimap<MessageSubject, MessageSubscriber> subscribers = HashMultimap.create(); | ||
28 | + private MessageSender messageSender; | ||
29 | + | ||
30 | + @Override | ||
31 | + public boolean send(ClusterMessage message, NodeId toNodeId) { | ||
32 | + return messageSender.send(toNodeId, message); | ||
33 | + } | ||
34 | + | ||
35 | + @Override | ||
36 | + public synchronized void addSubscriber(MessageSubject subject, MessageSubscriber subscriber) { | ||
37 | + subscribers.put(subject, subscriber); | ||
38 | + } | ||
39 | + | ||
40 | + @Override | ||
41 | + public synchronized void removeSubscriber(MessageSubject subject, MessageSubscriber subscriber) { | ||
42 | + subscribers.remove(subject, subscriber); | ||
43 | + } | ||
44 | + | ||
45 | + @Override | ||
46 | + public Set<MessageSubscriber> getSubscribers(MessageSubject subject) { | ||
47 | + return ImmutableSet.copyOf(subscribers.get(subject)); | ||
48 | + } | ||
49 | + | ||
50 | + @Override | ||
51 | + public void dispatch(ClusterMessage message) { | ||
52 | + Set<MessageSubscriber> set = getSubscribers(message.subject()); | ||
53 | + if (set != null) { | ||
54 | + for (MessageSubscriber subscriber : set) { | ||
55 | + subscriber.receive(message); | ||
56 | + } | ||
57 | + } | ||
58 | + } | ||
59 | + | ||
60 | + @Override | ||
61 | + public void setSender(MessageSender messageSender) { | ||
62 | + this.messageSender = messageSender; | ||
63 | + } | ||
64 | +} |
core/store/dist/src/main/java/org/onlab/onos/store/cluster/messaging/impl/MessageSerializer.java
0 → 100644
1 | +package org.onlab.onos.store.cluster.messaging.impl; | ||
2 | + | ||
3 | +import org.onlab.onos.store.cluster.messaging.ClusterMessage; | ||
4 | +import org.onlab.onos.store.cluster.messaging.MessageSubject; | ||
5 | +import org.onlab.onos.store.cluster.messaging.SerializationService; | ||
6 | + | ||
7 | +import java.nio.ByteBuffer; | ||
8 | + | ||
9 | +import static com.google.common.base.Preconditions.checkState; | ||
10 | + | ||
11 | +/** | ||
12 | + * Serializer for encoding and decoding messages sent between cluster members. | ||
13 | + */ | ||
14 | +public class MessageSerializer implements SerializationService { | ||
15 | + | ||
16 | + private static final int METADATA_LENGTH = 16; // 8 + 4 + 4 | ||
17 | + private static final int LENGTH_OFFSET = 12; | ||
18 | + | ||
19 | + private static final long MARKER = 0xfeedcafebeaddeadL; | ||
20 | + | ||
21 | + @Override | ||
22 | + public ClusterMessage decode(ByteBuffer buffer) { | ||
23 | + try { | ||
24 | + // Do we have enough bytes to read the header? If not, bail. | ||
25 | + if (buffer.remaining() < METADATA_LENGTH) { | ||
26 | + return null; | ||
27 | + } | ||
28 | + | ||
29 | + // Peek at the length and if we have enough to read the entire message | ||
30 | + // go ahead, otherwise bail. | ||
31 | + int length = buffer.getInt(buffer.position() + LENGTH_OFFSET); | ||
32 | + if (buffer.remaining() < length) { | ||
33 | + return null; | ||
34 | + } | ||
35 | + | ||
36 | + // At this point, we have enough data to read a complete message. | ||
37 | + long marker = buffer.getLong(); | ||
38 | + checkState(marker == MARKER, "Incorrect message marker"); | ||
39 | + | ||
40 | + int subjectOrdinal = buffer.getInt(); | ||
41 | + MessageSubject subject = MessageSubject.values()[subjectOrdinal]; | ||
42 | + length = buffer.getInt(); | ||
43 | + | ||
44 | + // TODO: sanity checking for length | ||
45 | + byte[] data = new byte[length - METADATA_LENGTH]; | ||
46 | + buffer.get(data); | ||
47 | + | ||
48 | + // TODO: add deserialization hook here; for now this hack | ||
49 | + return null; // actually deserialize | ||
50 | + | ||
51 | + } catch (Exception e) { | ||
52 | + // TODO: recover from exceptions by forwarding stream to next marker | ||
53 | + e.printStackTrace(); | ||
54 | + } | ||
55 | + return null; | ||
56 | + } | ||
57 | + | ||
58 | + @Override | ||
59 | + public void encode(ClusterMessage message, ByteBuffer buffer) { | ||
60 | + try { | ||
61 | + int i = 0; | ||
62 | + // Type based lookup for proper encoder | ||
63 | + } catch (Exception e) { | ||
64 | + // TODO: recover from exceptions by forwarding stream to next marker | ||
65 | + e.printStackTrace(); | ||
66 | + } | ||
67 | + } | ||
68 | + | ||
69 | +} |
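From the decode() logic above, the implied wire format for each message is a fixed 16-byte header followed by the payload; the payload encoding itself is still a TODO in this change, so the layout below reflects only what the header reads guarantee:

    offset  size        field
    0       8 bytes     marker, must equal 0xfeedcafebeaddead
    8       4 bytes     message subject ordinal
    12      4 bytes     total message length, header included
    16      length-16   serialized payload (serialization hook still pending)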
... | @@ -86,46 +86,48 @@ public class OnosDistributedDeviceStore | ... | @@ -86,46 +86,48 @@ public class OnosDistributedDeviceStore |
86 | 86 | ||
87 | @Override | 87 | @Override |
88 | public Iterable<Device> getDevices() { | 88 | public Iterable<Device> getDevices() { |
89 | - // TODO builder v.s. copyOf. Guava semms to be using copyOf? | ||
90 | - // FIXME: synchronize. | ||
91 | Builder<Device> builder = ImmutableSet.builder(); | 89 | Builder<Device> builder = ImmutableSet.builder(); |
92 | - for (VersionedValue<? extends Device> device : devices.values()) { | 90 | + synchronized (this) { |
91 | + for (VersionedValue<Device> device : devices.values()) { | ||
93 | builder.add(device.entity()); | 92 | builder.add(device.entity()); |
94 | } | 93 | } |
95 | return builder.build(); | 94 | return builder.build(); |
96 | } | 95 | } |
96 | + } | ||
97 | 97 | ||
98 | @Override | 98 | @Override |
99 | public Device getDevice(DeviceId deviceId) { | 99 | public Device getDevice(DeviceId deviceId) { |
100 | - return devices.get(deviceId).entity(); | 100 | + VersionedValue<Device> device = devices.get(deviceId); |
101 | + checkArgument(device != null, DEVICE_NOT_FOUND, deviceId); | ||
102 | + return device.entity(); | ||
101 | } | 103 | } |
102 | 104 | ||
103 | @Override | 105 | @Override |
104 | public DeviceEvent createOrUpdateDevice(ProviderId providerId, DeviceId deviceId, | 106 | public DeviceEvent createOrUpdateDevice(ProviderId providerId, DeviceId deviceId, |
105 | DeviceDescription deviceDescription) { | 107 | DeviceDescription deviceDescription) { |
106 | - Timestamp now = clockService.getTimestamp(deviceId); | 108 | + Timestamp newTimestamp = clockService.getTimestamp(deviceId); |
107 | VersionedValue<Device> device = devices.get(deviceId); | 109 | VersionedValue<Device> device = devices.get(deviceId); |
108 | 110 | ||
109 | if (device == null) { | 111 | if (device == null) { |
110 | - return createDevice(providerId, deviceId, deviceDescription, now); | 112 | + return createDevice(providerId, deviceId, deviceDescription, newTimestamp); |
111 | } | 113 | } |
112 | 114 | ||
113 | - checkState(now.compareTo(device.timestamp()) > 0, | 115 | + checkState(newTimestamp.compareTo(device.timestamp()) > 0, |
114 | "Existing device has a timestamp in the future!"); | 116 | "Existing device has a timestamp in the future!"); |
115 | 117 | ||
116 | - return updateDevice(providerId, device.entity(), deviceDescription, now); | 118 | + return updateDevice(providerId, device.entity(), deviceDescription, newTimestamp); |
117 | } | 119 | } |
118 | 120 | ||
119 | // Creates the device and returns the appropriate event if necessary. | 121 | // Creates the device and returns the appropriate event if necessary. |
120 | private DeviceEvent createDevice(ProviderId providerId, DeviceId deviceId, | 122 | private DeviceEvent createDevice(ProviderId providerId, DeviceId deviceId, |
121 | DeviceDescription desc, Timestamp timestamp) { | 123 | DeviceDescription desc, Timestamp timestamp) { |
122 | - DefaultDevice device = new DefaultDevice(providerId, deviceId, desc.type(), | 124 | + Device device = new DefaultDevice(providerId, deviceId, desc.type(), |
123 | desc.manufacturer(), | 125 | desc.manufacturer(), |
124 | desc.hwVersion(), desc.swVersion(), | 126 | desc.hwVersion(), desc.swVersion(), |
125 | desc.serialNumber()); | 127 | desc.serialNumber()); |
126 | 128 | ||
127 | - devices.put(deviceId, new VersionedValue<Device>(device, true, timestamp)); | 129 | + devices.put(deviceId, new VersionedValue<>(device, true, timestamp)); |
128 | - // FIXME: broadcast a message telling peers of a device event. | 130 | + // TODO,FIXME: broadcast a message telling peers of a device event. |
129 | return new DeviceEvent(DEVICE_ADDED, device, null); | 131 | return new DeviceEvent(DEVICE_ADDED, device, null); |
130 | } | 132 | } |
131 | 133 | ||
... | @@ -148,7 +150,7 @@ public class OnosDistributedDeviceStore | ... | @@ -148,7 +150,7 @@ public class OnosDistributedDeviceStore |
148 | } | 150 | } |
149 | 151 | ||
150 | // Otherwise merely attempt to change availability | 152 | // Otherwise merely attempt to change availability |
151 | - DefaultDevice updated = new DefaultDevice(providerId, device.id(), | 153 | + Device updated = new DefaultDevice(providerId, device.id(), |
152 | desc.type(), | 154 | desc.type(), |
153 | desc.manufacturer(), | 155 | desc.manufacturer(), |
154 | desc.hwVersion(), | 156 | desc.hwVersion(), |
... | @@ -196,18 +198,18 @@ public class OnosDistributedDeviceStore | ... | @@ -196,18 +198,18 @@ public class OnosDistributedDeviceStore |
196 | VersionedValue<Device> device = devices.get(deviceId); | 198 | VersionedValue<Device> device = devices.get(deviceId); |
197 | checkArgument(device != null, DEVICE_NOT_FOUND, deviceId); | 199 | checkArgument(device != null, DEVICE_NOT_FOUND, deviceId); |
198 | Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId); | 200 | Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId); |
199 | - Timestamp timestamp = clockService.getTimestamp(deviceId); | 201 | + Timestamp newTimestamp = clockService.getTimestamp(deviceId); |
200 | 202 | ||
201 | // Add new ports | 203 | // Add new ports |
202 | Set<PortNumber> processed = new HashSet<>(); | 204 | Set<PortNumber> processed = new HashSet<>(); |
203 | for (PortDescription portDescription : portDescriptions) { | 205 | for (PortDescription portDescription : portDescriptions) { |
204 | VersionedValue<Port> port = ports.get(portDescription.portNumber()); | 206 | VersionedValue<Port> port = ports.get(portDescription.portNumber()); |
205 | if (port == null) { | 207 | if (port == null) { |
206 | - events.add(createPort(device, portDescription, ports, timestamp)); | 208 | + events.add(createPort(device, portDescription, ports, newTimestamp)); |
207 | } | 209 | } |
208 | - checkState(timestamp.compareTo(port.timestamp()) > 0, | 210 | + checkState(newTimestamp.compareTo(port.timestamp()) > 0, |
209 | "Existing port state has a timestamp in the future!"); | 211 | "Existing port state has a timestamp in the future!"); |
210 | - events.add(updatePort(device, port, portDescription, ports, timestamp)); | 212 | + events.add(updatePort(device.entity(), port.entity(), portDescription, ports, newTimestamp)); |
211 | processed.add(portDescription.portNumber()); | 213 | processed.add(portDescription.portNumber()); |
212 | } | 214 | } |
213 | 215 | ||
... | @@ -233,19 +235,19 @@ public class OnosDistributedDeviceStore | ... | @@ -233,19 +235,19 @@ public class OnosDistributedDeviceStore |
233 | // Checks if the specified port requires update and if so, it replaces the | 235 | // Checks if the specified port requires update and if so, it replaces the |
234 | // existing entry in the map and returns corresponding event. | 236 | // existing entry in the map and returns corresponding event. |
235 | //@GuardedBy("this") | 237 | //@GuardedBy("this") |
236 | - private DeviceEvent updatePort(VersionedValue<Device> device, VersionedValue<Port> port, | 238 | + private DeviceEvent updatePort(Device device, Port port, |
237 | PortDescription portDescription, | 239 | PortDescription portDescription, |
238 | Map<PortNumber, VersionedValue<Port>> ports, | 240 | Map<PortNumber, VersionedValue<Port>> ports, |
239 | Timestamp timestamp) { | 241 | Timestamp timestamp) { |
240 | - if (port.entity().isEnabled() != portDescription.isEnabled()) { | 242 | + if (port.isEnabled() != portDescription.isEnabled()) { |
241 | VersionedValue<Port> updatedPort = new VersionedValue<Port>( | 243 | VersionedValue<Port> updatedPort = new VersionedValue<Port>( |
242 | - new DefaultPort(device.entity(), portDescription.portNumber(), | 244 | + new DefaultPort(device, portDescription.portNumber(), |
243 | portDescription.isEnabled()), | 245 | portDescription.isEnabled()), |
244 | portDescription.isEnabled(), | 246 | portDescription.isEnabled(), |
245 | timestamp); | 247 | timestamp); |
246 | - ports.put(port.entity().number(), updatedPort); | 248 | + ports.put(port.number(), updatedPort); |
247 | - updatePortMap(device.entity().id(), ports); | 249 | + updatePortMap(device.id(), ports); |
248 | - return new DeviceEvent(PORT_UPDATED, device.entity(), updatedPort.entity()); | 250 | + return new DeviceEvent(PORT_UPDATED, device, updatedPort.entity()); |
249 | } | 251 | } |
250 | return null; | 252 | return null; |
251 | } | 253 | } |
... | @@ -300,7 +302,7 @@ public class OnosDistributedDeviceStore | ... | @@ -300,7 +302,7 @@ public class OnosDistributedDeviceStore |
300 | Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId); | 302 | Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId); |
301 | VersionedValue<Port> port = ports.get(portDescription.portNumber()); | 303 | VersionedValue<Port> port = ports.get(portDescription.portNumber()); |
302 | Timestamp timestamp = clockService.getTimestamp(deviceId); | 304 | Timestamp timestamp = clockService.getTimestamp(deviceId); |
303 | - return updatePort(device, port, portDescription, ports, timestamp); | 305 | + return updatePort(device.entity(), port.entity(), portDescription, ports, timestamp); |
304 | } | 306 | } |
305 | 307 | ||
306 | @Override | 308 | @Override | ... | ... |
core/store/dist/src/main/java/org/onlab/onos/store/link/impl/OnosDistributedLinkStore.java
0 → 100644
1 | +package org.onlab.onos.store.link.impl; | ||
2 | + | ||
3 | +import static org.onlab.onos.net.Link.Type.DIRECT; | ||
4 | +import static org.onlab.onos.net.Link.Type.INDIRECT; | ||
5 | +import static org.onlab.onos.net.link.LinkEvent.Type.LINK_ADDED; | ||
6 | +import static org.onlab.onos.net.link.LinkEvent.Type.LINK_REMOVED; | ||
7 | +import static org.onlab.onos.net.link.LinkEvent.Type.LINK_UPDATED; | ||
8 | +import static org.slf4j.LoggerFactory.getLogger; | ||
9 | + | ||
10 | +import java.util.HashSet; | ||
11 | +import java.util.Set; | ||
12 | +import java.util.concurrent.ConcurrentHashMap; | ||
13 | +import java.util.concurrent.ConcurrentMap; | ||
14 | + | ||
15 | +import org.apache.felix.scr.annotations.Activate; | ||
16 | +import org.apache.felix.scr.annotations.Component; | ||
17 | +import org.apache.felix.scr.annotations.Deactivate; | ||
18 | +import org.apache.felix.scr.annotations.Reference; | ||
19 | +import org.apache.felix.scr.annotations.ReferenceCardinality; | ||
20 | +import org.apache.felix.scr.annotations.Service; | ||
21 | +import org.onlab.onos.net.ConnectPoint; | ||
22 | +import org.onlab.onos.net.DefaultLink; | ||
23 | +import org.onlab.onos.net.DeviceId; | ||
24 | +import org.onlab.onos.net.Link; | ||
25 | +import org.onlab.onos.net.LinkKey; | ||
26 | +import org.onlab.onos.net.link.LinkDescription; | ||
27 | +import org.onlab.onos.net.link.LinkEvent; | ||
28 | +import org.onlab.onos.net.link.LinkStore; | ||
29 | +import org.onlab.onos.net.link.LinkStoreDelegate; | ||
30 | +import org.onlab.onos.net.provider.ProviderId; | ||
31 | +import org.onlab.onos.store.AbstractStore; | ||
32 | +import org.onlab.onos.store.ClockService; | ||
33 | +import org.onlab.onos.store.Timestamp; | ||
34 | +import org.onlab.onos.store.device.impl.VersionedValue; | ||
35 | +import org.slf4j.Logger; | ||
36 | + | ||
37 | +import com.google.common.collect.HashMultimap; | ||
38 | +import com.google.common.collect.ImmutableSet; | ||
39 | +import com.google.common.collect.Multimap; | ||
40 | +import com.google.common.collect.ImmutableSet.Builder; | ||
41 | + | ||
42 | +import static com.google.common.base.Preconditions.checkArgument; | ||
43 | +import static com.google.common.base.Preconditions.checkState; | ||
44 | + | ||
45 | +/** | ||
46 | + * Manages inventory of infrastructure links using a protocol that takes into consideration | ||
47 | + * the order in which events occur. | ||
48 | + */ | ||
49 | +// FIXME: This does not yet implement the full protocol. | ||
50 | +// The full protocol requires the sender of LLDP message to include the | ||
51 | +// version information of src device/port and the receiver to | ||
52 | +// take that into account when figuring out if a more recent src | ||
53 | +// device/port down event renders the link discovery obsolete. | ||
54 | +@Component(immediate = true) | ||
55 | +@Service | ||
56 | +public class OnosDistributedLinkStore | ||
57 | + extends AbstractStore<LinkEvent, LinkStoreDelegate> | ||
58 | + implements LinkStore { | ||
59 | + | ||
60 | + private final Logger log = getLogger(getClass()); | ||
61 | + | ||
62 | + // Link inventory | ||
63 | + private ConcurrentMap<LinkKey, VersionedValue<Link>> links; | ||
64 | + | ||
65 | + public static final String LINK_NOT_FOUND = "Link between %s and %s not found"; | ||
66 | + | ||
67 | + // TODO synchronize? | ||
68 | + // Egress and ingress link sets | ||
69 | + private final Multimap<DeviceId, VersionedValue<Link>> srcLinks = HashMultimap.create(); | ||
70 | + private final Multimap<DeviceId, VersionedValue<Link>> dstLinks = HashMultimap.create(); | ||
71 | + | ||
72 | + @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
73 | + protected ClockService clockService; | ||
74 | + | ||
75 | + @Activate | ||
76 | + public void activate() { | ||
77 | + | ||
78 | + links = new ConcurrentHashMap<>(); | ||
79 | + | ||
80 | + log.info("Started"); | ||
81 | + } | ||
82 | + | ||
83 | + @Deactivate | ||
84 | + public void deactivate() { | ||
85 | + log.info("Stopped"); | ||
86 | + } | ||
87 | + | ||
88 | + @Override | ||
89 | + public int getLinkCount() { | ||
90 | + return links.size(); | ||
91 | + } | ||
92 | + | ||
93 | + @Override | ||
94 | + public Iterable<Link> getLinks() { | ||
95 | + Builder<Link> builder = ImmutableSet.builder(); | ||
96 | + synchronized (this) { | ||
97 | + for (VersionedValue<Link> link : links.values()) { | ||
98 | + builder.add(link.entity()); | ||
99 | + } | ||
100 | + return builder.build(); | ||
101 | + } | ||
102 | + } | ||
103 | + | ||
104 | + @Override | ||
105 | + public Set<Link> getDeviceEgressLinks(DeviceId deviceId) { | ||
106 | + Set<VersionedValue<Link>> egressLinks = ImmutableSet.copyOf(srcLinks.get(deviceId)); | ||
107 | + Set<Link> rawEgressLinks = new HashSet<>(); | ||
108 | + for (VersionedValue<Link> link : egressLinks) { | ||
109 | + rawEgressLinks.add(link.entity()); | ||
110 | + } | ||
111 | + return rawEgressLinks; | ||
112 | + } | ||
113 | + | ||
114 | + @Override | ||
115 | + public Set<Link> getDeviceIngressLinks(DeviceId deviceId) { | ||
116 | + Set<VersionedValue<Link>> ingressLinks = ImmutableSet.copyOf(dstLinks.get(deviceId)); | ||
117 | + Set<Link> rawIngressLinks = new HashSet<>(); | ||
118 | + for (VersionedValue<Link> link : ingressLinks) { | ||
119 | + rawIngressLinks.add(link.entity()); | ||
120 | + } | ||
121 | + return rawIngressLinks; | ||
122 | + } | ||
123 | + | ||
124 | + @Override | ||
125 | + public Link getLink(ConnectPoint src, ConnectPoint dst) { | ||
126 | + VersionedValue<Link> link = links.get(new LinkKey(src, dst)); | ||
127 | + checkArgument(link != null, LINK_NOT_FOUND, src, dst); | ||
128 | + return link.entity(); | ||
129 | + } | ||
130 | + | ||
131 | + @Override | ||
132 | + public Set<Link> getEgressLinks(ConnectPoint src) { | ||
133 | + Set<Link> egressLinks = new HashSet<>(); | ||
134 | + for (VersionedValue<Link> link : srcLinks.get(src.deviceId())) { | ||
135 | + if (link.entity().src().equals(src)) { | ||
136 | + egressLinks.add(link.entity()); | ||
137 | + } | ||
138 | + } | ||
139 | + return egressLinks; | ||
140 | + } | ||
141 | + | ||
142 | + @Override | ||
143 | + public Set<Link> getIngressLinks(ConnectPoint dst) { | ||
144 | + Set<Link> ingressLinks = new HashSet<>(); | ||
145 | + for (VersionedValue<Link> link : dstLinks.get(dst.deviceId())) { | ||
146 | + if (link.entity().dst().equals(dst)) { | ||
147 | + ingressLinks.add(link.entity()); | ||
148 | + } | ||
149 | + } | ||
150 | + return ingressLinks; | ||
151 | + } | ||
152 | + | ||
153 | + @Override | ||
154 | + public LinkEvent createOrUpdateLink(ProviderId providerId, | ||
155 | + LinkDescription linkDescription) { | ||
156 | + | ||
157 | + final DeviceId destinationDeviceId = linkDescription.dst().deviceId(); | ||
158 | + final Timestamp newTimestamp = clockService.getTimestamp(destinationDeviceId); | ||
159 | + | ||
160 | + LinkKey key = new LinkKey(linkDescription.src(), linkDescription.dst()); | ||
161 | + VersionedValue<Link> link = links.get(key); | ||
162 | + if (link == null) { | ||
163 | + return createLink(providerId, key, linkDescription, newTimestamp); | ||
164 | + } | ||
165 | + | ||
166 | + checkState(newTimestamp.compareTo(link.timestamp()) > 0, | ||
167 | + "Existing Link has a timestamp in the future!"); | ||
168 | + | ||
169 | + return updateLink(providerId, link, key, linkDescription, newTimestamp); | ||
170 | + } | ||
171 | + | ||
172 | + // Creates and stores the link and returns the appropriate event. | ||
173 | + private LinkEvent createLink(ProviderId providerId, LinkKey key, | ||
174 | + LinkDescription linkDescription, Timestamp timestamp) { | ||
175 | + VersionedValue<Link> link = new VersionedValue<Link>(new DefaultLink(providerId, key.src(), key.dst(), | ||
176 | + linkDescription.type()), true, timestamp); | ||
177 | + synchronized (this) { | ||
178 | + links.put(key, link); | ||
179 | + addNewLink(link, timestamp); | ||
180 | + } | ||
181 | + // FIXME: notify peers. | ||
182 | + return new LinkEvent(LINK_ADDED, link.entity()); | ||
183 | + } | ||
184 | + | ||
185 | + // update Egress and ingress link sets | ||
186 | + private void addNewLink(VersionedValue<Link> link, Timestamp timestamp) { | ||
187 | + Link rawLink = link.entity(); | ||
188 | + synchronized (this) { | ||
189 | + srcLinks.put(rawLink.src().deviceId(), link); | ||
190 | + dstLinks.put(rawLink.dst().deviceId(), link); | ||
191 | + } | ||
192 | + } | ||
193 | + | ||
194 | + // Updates, if necessary the specified link and returns the appropriate event. | ||
195 | + private LinkEvent updateLink(ProviderId providerId, VersionedValue<Link> existingLink, | ||
196 | + LinkKey key, LinkDescription linkDescription, Timestamp timestamp) { | ||
197 | + // FIXME confirm Link update condition is OK | ||
198 | + if (existingLink.entity().type() == INDIRECT && linkDescription.type() == DIRECT) { | ||
199 | + synchronized (this) { | ||
200 | + | ||
201 | + VersionedValue<Link> updatedLink = new VersionedValue<Link>( | ||
202 | + new DefaultLink(providerId, existingLink.entity().src(), existingLink.entity().dst(), | ||
203 | + linkDescription.type()), true, timestamp); | ||
204 | + links.replace(key, existingLink, updatedLink); | ||
205 | + | ||
206 | + replaceLink(existingLink, updatedLink); | ||
207 | + // FIXME: notify peers. | ||
208 | + return new LinkEvent(LINK_UPDATED, updatedLink.entity()); | ||
209 | + } | ||
210 | + } | ||
211 | + return null; | ||
212 | + } | ||
213 | + | ||
214 | + // update Egress and ingress link sets | ||
215 | + private void replaceLink(VersionedValue<Link> current, VersionedValue<Link> updated) { | ||
216 | + synchronized (this) { | ||
217 | + srcLinks.remove(current.entity().src().deviceId(), current); | ||
218 | + dstLinks.remove(current.entity().dst().deviceId(), current); | ||
219 | + | ||
220 | + srcLinks.put(current.entity().src().deviceId(), updated); | ||
221 | + dstLinks.put(current.entity().dst().deviceId(), updated); | ||
222 | + } | ||
223 | + } | ||
224 | + | ||
225 | + @Override | ||
226 | + public LinkEvent removeLink(ConnectPoint src, ConnectPoint dst) { | ||
227 | + synchronized (this) { | ||
228 | + LinkKey key = new LinkKey(src, dst); | ||
229 | + VersionedValue<Link> link = links.remove(key); | ||
230 | + if (link != null) { | ||
231 | + removeLink(link); | ||
232 | + // notify peers | ||
233 | + return new LinkEvent(LINK_REMOVED, link.entity()); | ||
234 | + } | ||
235 | + return null; | ||
236 | + } | ||
237 | + } | ||
238 | + | ||
239 | + // Updates the egress and ingress link sets. | ||
240 | + private void removeLink(VersionedValue<Link> link) { | ||
241 | + synchronized (this) { | ||
242 | + srcLinks.remove(link.entity().src().deviceId(), link); | ||
243 | + dstLinks.remove(link.entity().dst().deviceId(), link); | ||
244 | + } | ||
245 | + } | ||
246 | +} |
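Editor's note: for orientation, the sketch below shows how a link provider service might drive the new createOrUpdateLink method. It is illustrative only; the DefaultLinkDescription constructor and the providerId/eventDispatcher references are assumed to be in scope and are not part of this change.

    // Illustrative sketch only -- not part of this diff.
    ConnectPoint src = new ConnectPoint(DeviceId.deviceId("of:0000000000000001"),
                                        PortNumber.portNumber(1));
    ConnectPoint dst = new ConnectPoint(DeviceId.deviceId("of:0000000000000002"),
                                        PortNumber.portNumber(2));
    LinkDescription desc = new DefaultLinkDescription(src, dst, Link.Type.DIRECT);

    LinkEvent event = linkStore.createOrUpdateLink(providerId, desc);
    if (event != null) {
        // LINK_ADDED on first sighting; LINK_UPDATED when an INDIRECT link is
        // later reported as DIRECT; null when nothing changed.
        eventDispatcher.post(event);
    }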
... | @@ -49,6 +49,7 @@ public class DistributedClusterStore | ... | @@ -49,6 +49,7 @@ public class DistributedClusterStore |
49 | private final MembershipListener listener = new InternalMembershipListener(); | 49 | private final MembershipListener listener = new InternalMembershipListener(); |
50 | private final Map<NodeId, State> states = new ConcurrentHashMap<>(); | 50 | private final Map<NodeId, State> states = new ConcurrentHashMap<>(); |
51 | 51 | ||
52 | + @Override | ||
52 | @Activate | 53 | @Activate |
53 | public void activate() { | 54 | public void activate() { |
54 | super.activate(); | 55 | super.activate(); |
... | @@ -56,9 +57,9 @@ public class DistributedClusterStore | ... | @@ -56,9 +57,9 @@ public class DistributedClusterStore |
56 | 57 | ||
57 | rawNodes = theInstance.getMap("nodes"); | 58 | rawNodes = theInstance.getMap("nodes"); |
58 | OptionalCacheLoader<NodeId, DefaultControllerNode> nodeLoader | 59 | OptionalCacheLoader<NodeId, DefaultControllerNode> nodeLoader |
59 | - = new OptionalCacheLoader<>(storeService, rawNodes); | 60 | + = new OptionalCacheLoader<>(kryoSerializationService, rawNodes); |
60 | nodes = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader)); | 61 | nodes = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader)); |
61 | - rawNodes.addEntryListener(new RemoteEventHandler<>(nodes), true); | 62 | + rawNodes.addEntryListener(new RemoteCacheEventHandler<>(nodes), true); |
62 | 63 | ||
63 | loadClusterNodes(); | 64 | loadClusterNodes(); |
64 | 65 | ... | ... |
... | @@ -52,7 +52,7 @@ implements MastershipStore { | ... | @@ -52,7 +52,7 @@ implements MastershipStore { |
52 | 52 | ||
53 | rawMasters = theInstance.getMap("masters"); | 53 | rawMasters = theInstance.getMap("masters"); |
54 | OptionalCacheLoader<DeviceId, NodeId> nodeLoader | 54 | OptionalCacheLoader<DeviceId, NodeId> nodeLoader |
55 | - = new OptionalCacheLoader<>(storeService, rawMasters); | 55 | + = new OptionalCacheLoader<>(kryoSerializationService, rawMasters); |
56 | masters = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader)); | 56 | masters = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader)); |
57 | rawMasters.addEntryListener(new RemoteMasterShipEventHandler(masters), true); | 57 | rawMasters.addEntryListener(new RemoteMasterShipEventHandler(masters), true); |
58 | 58 | ||
... | @@ -123,7 +123,7 @@ implements MastershipStore { | ... | @@ -123,7 +123,7 @@ implements MastershipStore { |
123 | return null; | 123 | return null; |
124 | } | 124 | } |
125 | 125 | ||
126 | - private class RemoteMasterShipEventHandler extends RemoteEventHandler<DeviceId, NodeId> { | 126 | + private class RemoteMasterShipEventHandler extends RemoteCacheEventHandler<DeviceId, NodeId> { |
127 | public RemoteMasterShipEventHandler(LoadingCache<DeviceId, Optional<NodeId>> cache) { | 127 | public RemoteMasterShipEventHandler(LoadingCache<DeviceId, Optional<NodeId>> cache) { |
128 | super(cache); | 128 | super(cache); |
129 | } | 129 | } | ... | ... |
... | @@ -6,6 +6,7 @@ import com.hazelcast.core.EntryAdapter; | ... | @@ -6,6 +6,7 @@ import com.hazelcast.core.EntryAdapter; |
6 | import com.hazelcast.core.EntryEvent; | 6 | import com.hazelcast.core.EntryEvent; |
7 | import com.hazelcast.core.HazelcastInstance; | 7 | import com.hazelcast.core.HazelcastInstance; |
8 | import com.hazelcast.core.MapEvent; | 8 | import com.hazelcast.core.MapEvent; |
9 | +import com.hazelcast.core.Member; | ||
9 | 10 | ||
10 | import org.apache.felix.scr.annotations.Activate; | 11 | import org.apache.felix.scr.annotations.Activate; |
11 | import org.apache.felix.scr.annotations.Component; | 12 | import org.apache.felix.scr.annotations.Component; |
... | @@ -14,6 +15,7 @@ import org.apache.felix.scr.annotations.ReferenceCardinality; | ... | @@ -14,6 +15,7 @@ import org.apache.felix.scr.annotations.ReferenceCardinality; |
14 | import org.onlab.onos.event.Event; | 15 | import org.onlab.onos.event.Event; |
15 | import org.onlab.onos.store.AbstractStore; | 16 | import org.onlab.onos.store.AbstractStore; |
16 | import org.onlab.onos.store.StoreDelegate; | 17 | import org.onlab.onos.store.StoreDelegate; |
18 | +import org.onlab.onos.store.serializers.KryoSerializationService; | ||
17 | import org.slf4j.Logger; | 19 | import org.slf4j.Logger; |
18 | 20 | ||
19 | import static com.google.common.base.Preconditions.checkNotNull; | 21 | import static com.google.common.base.Preconditions.checkNotNull; |
... | @@ -31,6 +33,9 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel | ... | @@ -31,6 +33,9 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel |
31 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | 33 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) |
32 | protected StoreService storeService; | 34 | protected StoreService storeService; |
33 | 35 | ||
36 | + @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
37 | + protected KryoSerializationService kryoSerializationService; | ||
38 | + | ||
34 | protected HazelcastInstance theInstance; | 39 | protected HazelcastInstance theInstance; |
35 | 40 | ||
36 | @Activate | 41 | @Activate |
... | @@ -45,7 +50,7 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel | ... | @@ -45,7 +50,7 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel |
45 | * @return serialized object | 50 | * @return serialized object |
46 | */ | 51 | */ |
47 | protected byte[] serialize(Object obj) { | 52 | protected byte[] serialize(Object obj) { |
48 | - return storeService.serialize(obj); | 53 | + return kryoSerializationService.serialize(obj); |
49 | } | 54 | } |
50 | 55 | ||
51 | /** | 56 | /** |
... | @@ -56,7 +61,7 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel | ... | @@ -56,7 +61,7 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel |
56 | * @return deserialized object | 61 | * @return deserialized object |
57 | */ | 62 | */ |
58 | protected <T> T deserialize(byte[] bytes) { | 63 | protected <T> T deserialize(byte[] bytes) { |
59 | - return storeService.deserialize(bytes); | 64 | + return kryoSerializationService.deserialize(bytes); |
60 | } | 65 | } |
61 | 66 | ||
62 | 67 | ||
... | @@ -66,8 +71,9 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel | ... | @@ -66,8 +71,9 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel |
66 | * @param <K> IMap key type after deserialization | 71 | * @param <K> IMap key type after deserialization |
67 | * @param <V> IMap value type after deserialization | 72 | * @param <V> IMap value type after deserialization |
68 | */ | 73 | */ |
69 | - public class RemoteEventHandler<K, V> extends EntryAdapter<byte[], byte[]> { | 74 | + public class RemoteCacheEventHandler<K, V> extends EntryAdapter<byte[], byte[]> { |
70 | 75 | ||
76 | + private final Member localMember; | ||
71 | private LoadingCache<K, Optional<V>> cache; | 77 | private LoadingCache<K, Optional<V>> cache; |
72 | 78 | ||
73 | /** | 79 | /** |
... | @@ -75,17 +81,26 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel | ... | @@ -75,17 +81,26 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel |
75 | * | 81 | * |
76 | * @param cache cache to update | 82 | * @param cache cache to update |
77 | */ | 83 | */ |
78 | - public RemoteEventHandler(LoadingCache<K, Optional<V>> cache) { | 84 | + public RemoteCacheEventHandler(LoadingCache<K, Optional<V>> cache) { |
85 | + this.localMember = theInstance.getCluster().getLocalMember(); | ||
79 | this.cache = checkNotNull(cache); | 86 | this.cache = checkNotNull(cache); |
80 | } | 87 | } |
81 | 88 | ||
82 | @Override | 89 | @Override |
83 | public void mapCleared(MapEvent event) { | 90 | public void mapCleared(MapEvent event) { |
91 | + if (localMember.equals(event.getMember())) { | ||
92 | + // ignore locally triggered event | ||
93 | + return; | ||
94 | + } | ||
84 | cache.invalidateAll(); | 95 | cache.invalidateAll(); |
85 | } | 96 | } |
86 | 97 | ||
87 | @Override | 98 | @Override |
88 | public void entryAdded(EntryEvent<byte[], byte[]> event) { | 99 | public void entryAdded(EntryEvent<byte[], byte[]> event) { |
100 | + if (localMember.equals(event.getMember())) { | ||
101 | + // ignore locally triggered event | ||
102 | + return; | ||
103 | + } | ||
89 | K key = deserialize(event.getKey()); | 104 | K key = deserialize(event.getKey()); |
90 | V newVal = deserialize(event.getValue()); | 105 | V newVal = deserialize(event.getValue()); |
91 | Optional<V> newValue = Optional.of(newVal); | 106 | Optional<V> newValue = Optional.of(newVal); |
... | @@ -95,6 +110,10 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel | ... | @@ -95,6 +110,10 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel |
95 | 110 | ||
96 | @Override | 111 | @Override |
97 | public void entryUpdated(EntryEvent<byte[], byte[]> event) { | 112 | public void entryUpdated(EntryEvent<byte[], byte[]> event) { |
113 | + if (localMember.equals(event.getMember())) { | ||
114 | + // ignore locally triggered event | ||
115 | + return; | ||
116 | + } | ||
98 | K key = deserialize(event.getKey()); | 117 | K key = deserialize(event.getKey()); |
99 | V oldVal = deserialize(event.getOldValue()); | 118 | V oldVal = deserialize(event.getOldValue()); |
100 | Optional<V> oldValue = Optional.fromNullable(oldVal); | 119 | Optional<V> oldValue = Optional.fromNullable(oldVal); |
... | @@ -106,6 +125,10 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel | ... | @@ -106,6 +125,10 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel |
106 | 125 | ||
107 | @Override | 126 | @Override |
108 | public void entryRemoved(EntryEvent<byte[], byte[]> event) { | 127 | public void entryRemoved(EntryEvent<byte[], byte[]> event) { |
128 | + if (localMember.equals(event.getMember())) { | ||
129 | + // ignore locally triggered event | ||
130 | + return; | ||
131 | + } | ||
109 | K key = deserialize(event.getKey()); | 132 | K key = deserialize(event.getKey()); |
110 | V val = deserialize(event.getOldValue()); | 133 | V val = deserialize(event.getOldValue()); |
111 | cache.invalidate(key); | 134 | cache.invalidate(key); |
... | @@ -141,4 +164,80 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel | ... | @@ -141,4 +164,80 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel |
141 | } | 164 | } |
142 | } | 165 | } |
143 | 166 | ||
167 | + /** | ||
168 | + * Distributed object remote event entry listener. | ||
169 | + * | ||
170 | + * @param <K> Entry key type after deserialization | ||
171 | + * @param <V> Entry value type after deserialization | ||
172 | + */ | ||
173 | + public class RemoteEventHandler<K, V> extends EntryAdapter<byte[], byte[]> { | ||
174 | + | ||
175 | + private final Member localMember; | ||
176 | + | ||
177 | + public RemoteEventHandler() { | ||
178 | + this.localMember = theInstance.getCluster().getLocalMember(); | ||
179 | + } | ||
180 | + @Override | ||
181 | + public void entryAdded(EntryEvent<byte[], byte[]> event) { | ||
182 | + if (localMember.equals(event.getMember())) { | ||
183 | + // ignore locally triggered event | ||
184 | + return; | ||
185 | + } | ||
186 | + K key = deserialize(event.getKey()); | ||
187 | + V newVal = deserialize(event.getValue()); | ||
188 | + onAdd(key, newVal); | ||
189 | + } | ||
190 | + | ||
191 | + @Override | ||
192 | + public void entryRemoved(EntryEvent<byte[], byte[]> event) { | ||
193 | + if (localMember.equals(event.getMember())) { | ||
194 | + // ignore locally triggered event | ||
195 | + return; | ||
196 | + } | ||
197 | + K key = deserialize(event.getKey()); | ||
198 | + V val = deserialize(event.getValue()); | ||
199 | + onRemove(key, val); | ||
200 | + } | ||
201 | + | ||
202 | + @Override | ||
203 | + public void entryUpdated(EntryEvent<byte[], byte[]> event) { | ||
204 | + if (localMember.equals(event.getMember())) { | ||
205 | + // ignore locally triggered event | ||
206 | + return; | ||
207 | + } | ||
208 | + K key = deserialize(event.getKey()); | ||
209 | + V oldVal = deserialize(event.getOldValue()); | ||
210 | + V newVal = deserialize(event.getValue()); | ||
211 | + onUpdate(key, oldVal, newVal); | ||
212 | + } | ||
213 | + | ||
214 | + /** | ||
215 | + * Remote entry addition hook. | ||
216 | + * | ||
217 | + * @param key new key | ||
218 | + * @param newVal new value | ||
219 | + */ | ||
220 | + protected void onAdd(K key, V newVal) { | ||
221 | + } | ||
222 | + | ||
223 | + /** | ||
224 | + * Remote entry update hook. | ||
225 | + * | ||
226 | + * @param key new key | ||
227 | + * @param oldValue old value | ||
228 | + * @param newVal new value | ||
229 | + */ | ||
230 | + protected void onUpdate(K key, V oldValue, V newVal) { | ||
231 | + } | ||
232 | + | ||
233 | + /** | ||
234 | + * Remote entry remove hook. | ||
235 | + * | ||
236 | + * @param key removed key | ||
237 | + * @param val removed value | ||
238 | + */ | ||
239 | + protected void onRemove(K key, V val) { | ||
240 | + } | ||
241 | + } | ||
242 | + | ||
144 | } | 243 | } | ... | ... |
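Editor's note: the new non-caching RemoteEventHandler is meant to be subclassed per store, overriding only the hooks of interest. A minimal, hypothetical subclass follows; Foo, FooEvent and FOO_ADDED are placeholders, and the notifyDelegate helper is assumed from AbstractStore.

    // Hypothetical subclass -- Foo/FooEvent/FOO_ADDED are placeholders.
    private class RemoteFooEventHandler extends RemoteEventHandler<DeviceId, Foo> {
        @Override
        protected void onAdd(DeviceId key, Foo newVal) {
            // The base class already filters out locally triggered events,
            // so this only fires for changes made by other cluster members.
            notifyDelegate(new FooEvent(FOO_ADDED, newVal));
        }
    }

    // Registration, typically done in activate():
    rawFoos.addEntryListener(new RemoteFooEventHandler(), true);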
... | @@ -2,6 +2,8 @@ package org.onlab.onos.store.common; | ... | @@ -2,6 +2,8 @@ package org.onlab.onos.store.common; |
2 | 2 | ||
3 | import static com.google.common.base.Preconditions.checkNotNull; | 3 | import static com.google.common.base.Preconditions.checkNotNull; |
4 | 4 | ||
5 | +import org.onlab.onos.store.serializers.KryoSerializationService; | ||
6 | + | ||
5 | import com.google.common.base.Optional; | 7 | import com.google.common.base.Optional; |
6 | import com.google.common.cache.CacheLoader; | 8 | import com.google.common.cache.CacheLoader; |
7 | import com.hazelcast.core.IMap; | 9 | import com.hazelcast.core.IMap; |
... | @@ -16,28 +18,28 @@ import com.hazelcast.core.IMap; | ... | @@ -16,28 +18,28 @@ import com.hazelcast.core.IMap; |
16 | public final class OptionalCacheLoader<K, V> extends | 18 | public final class OptionalCacheLoader<K, V> extends |
17 | CacheLoader<K, Optional<V>> { | 19 | CacheLoader<K, Optional<V>> { |
18 | 20 | ||
19 | - private final StoreService storeService; | 21 | + private final KryoSerializationService kryoSerializationService; |
20 | private IMap<byte[], byte[]> rawMap; | 22 | private IMap<byte[], byte[]> rawMap; |
21 | 23 | ||
22 | /** | 24 | /** |
23 | * Constructor. | 25 | * Constructor. |
24 | * | 26 | * |
25 | - * @param storeService to use for serialization | 27 | + * @param kryoSerializationService to use for serialization |
26 | * @param rawMap underlying IMap | 28 | * @param rawMap underlying IMap |
27 | */ | 29 | */ |
28 | - public OptionalCacheLoader(StoreService storeService, IMap<byte[], byte[]> rawMap) { | 30 | + public OptionalCacheLoader(KryoSerializationService kryoSerializationService, IMap<byte[], byte[]> rawMap) { |
29 | - this.storeService = checkNotNull(storeService); | 31 | + this.kryoSerializationService = checkNotNull(kryoSerializationService); |
30 | this.rawMap = checkNotNull(rawMap); | 32 | this.rawMap = checkNotNull(rawMap); |
31 | } | 33 | } |
32 | 34 | ||
33 | @Override | 35 | @Override |
34 | public Optional<V> load(K key) throws Exception { | 36 | public Optional<V> load(K key) throws Exception { |
35 | - byte[] keyBytes = storeService.serialize(key); | 37 | + byte[] keyBytes = kryoSerializationService.serialize(key); |
36 | byte[] valBytes = rawMap.get(keyBytes); | 38 | byte[] valBytes = rawMap.get(keyBytes); |
37 | if (valBytes == null) { | 39 | if (valBytes == null) { |
38 | return Optional.absent(); | 40 | return Optional.absent(); |
39 | } | 41 | } |
40 | - V dev = storeService.deserialize(valBytes); | 42 | + V dev = kryoSerializationService.deserialize(valBytes); |
41 | return Optional.of(dev); | 43 | return Optional.of(dev); |
42 | } | 44 | } |
43 | } | 45 | } | ... | ... |
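Editor's note: the loader is intended to sit behind a Guava LoadingCache keyed by the deserialized type, mirroring the wiring used by the device and link stores elsewhere in this change. A minimal sketch, assuming an injected kryoSerializationService and the Hazelcast instance held in theInstance:

    // Sketch of the intended wiring for a "devices" map.
    IMap<byte[], byte[]> rawDevices = theInstance.getMap("devices");
    OptionalCacheLoader<DeviceId, DefaultDevice> deviceLoader =
            new OptionalCacheLoader<>(kryoSerializationService, rawDevices);
    LoadingCache<DeviceId, Optional<DefaultDevice>> devices =
            new AbsentInvalidatingLoadingCache<>(CacheBuilder.newBuilder().build(deviceLoader));
    // A key absent from the raw map loads as Optional.absent() instead of
    // failing the cache lookup.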
... | @@ -5,46 +5,14 @@ import com.hazelcast.config.FileSystemXmlConfig; | ... | @@ -5,46 +5,14 @@ import com.hazelcast.config.FileSystemXmlConfig; |
5 | import com.hazelcast.core.Hazelcast; | 5 | import com.hazelcast.core.Hazelcast; |
6 | import com.hazelcast.core.HazelcastInstance; | 6 | import com.hazelcast.core.HazelcastInstance; |
7 | 7 | ||
8 | -import de.javakaffee.kryoserializers.URISerializer; | ||
9 | - | ||
10 | import org.apache.felix.scr.annotations.Activate; | 8 | import org.apache.felix.scr.annotations.Activate; |
11 | import org.apache.felix.scr.annotations.Component; | 9 | import org.apache.felix.scr.annotations.Component; |
12 | import org.apache.felix.scr.annotations.Deactivate; | 10 | import org.apache.felix.scr.annotations.Deactivate; |
13 | import org.apache.felix.scr.annotations.Service; | 11 | import org.apache.felix.scr.annotations.Service; |
14 | -import org.onlab.onos.cluster.ControllerNode; | ||
15 | -import org.onlab.onos.cluster.DefaultControllerNode; | ||
16 | -import org.onlab.onos.cluster.NodeId; | ||
17 | -import org.onlab.onos.net.ConnectPoint; | ||
18 | -import org.onlab.onos.net.DefaultDevice; | ||
19 | -import org.onlab.onos.net.DefaultLink; | ||
20 | -import org.onlab.onos.net.DefaultPort; | ||
21 | -import org.onlab.onos.net.Device; | ||
22 | -import org.onlab.onos.net.DeviceId; | ||
23 | -import org.onlab.onos.net.Element; | ||
24 | -import org.onlab.onos.net.Link; | ||
25 | -import org.onlab.onos.net.LinkKey; | ||
26 | -import org.onlab.onos.net.MastershipRole; | ||
27 | -import org.onlab.onos.net.Port; | ||
28 | -import org.onlab.onos.net.PortNumber; | ||
29 | -import org.onlab.onos.net.provider.ProviderId; | ||
30 | -import org.onlab.onos.store.serializers.ConnectPointSerializer; | ||
31 | -import org.onlab.onos.store.serializers.DefaultLinkSerializer; | ||
32 | -import org.onlab.onos.store.serializers.DefaultPortSerializer; | ||
33 | -import org.onlab.onos.store.serializers.DeviceIdSerializer; | ||
34 | -import org.onlab.onos.store.serializers.IpPrefixSerializer; | ||
35 | -import org.onlab.onos.store.serializers.LinkKeySerializer; | ||
36 | -import org.onlab.onos.store.serializers.NodeIdSerializer; | ||
37 | -import org.onlab.onos.store.serializers.PortNumberSerializer; | ||
38 | -import org.onlab.onos.store.serializers.ProviderIdSerializer; | ||
39 | -import org.onlab.packet.IpPrefix; | ||
40 | -import org.onlab.util.KryoPool; | ||
41 | import org.slf4j.Logger; | 12 | import org.slf4j.Logger; |
42 | import org.slf4j.LoggerFactory; | 13 | import org.slf4j.LoggerFactory; |
43 | 14 | ||
44 | import java.io.FileNotFoundException; | 15 | import java.io.FileNotFoundException; |
45 | -import java.net.URI; | ||
46 | -import java.util.ArrayList; | ||
47 | -import java.util.HashMap; | ||
48 | 16 | ||
49 | /** | 17 | /** |
50 | * Auxiliary bootstrap of distributed store. | 18 | * Auxiliary bootstrap of distributed store. |
... | @@ -58,55 +26,18 @@ public class StoreManager implements StoreService { | ... | @@ -58,55 +26,18 @@ public class StoreManager implements StoreService { |
58 | private final Logger log = LoggerFactory.getLogger(getClass()); | 26 | private final Logger log = LoggerFactory.getLogger(getClass()); |
59 | 27 | ||
60 | protected HazelcastInstance instance; | 28 | protected HazelcastInstance instance; |
61 | - private KryoPool serializerPool; | ||
62 | - | ||
63 | 29 | ||
64 | @Activate | 30 | @Activate |
65 | public void activate() { | 31 | public void activate() { |
66 | try { | 32 | try { |
67 | Config config = new FileSystemXmlConfig(HAZELCAST_XML_FILE); | 33 | Config config = new FileSystemXmlConfig(HAZELCAST_XML_FILE); |
68 | instance = Hazelcast.newHazelcastInstance(config); | 34 | instance = Hazelcast.newHazelcastInstance(config); |
69 | - setupKryoPool(); | ||
70 | log.info("Started"); | 35 | log.info("Started"); |
71 | } catch (FileNotFoundException e) { | 36 | } catch (FileNotFoundException e) { |
72 | log.error("Unable to configure Hazelcast", e); | 37 | log.error("Unable to configure Hazelcast", e); |
73 | } | 38 | } |
74 | } | 39 | } |
75 | 40 | ||
76 | - /** | ||
77 | - * Sets up the common serialzers pool. | ||
78 | - */ | ||
79 | - protected void setupKryoPool() { | ||
80 | - // FIXME Slice out types used in common to separate pool/namespace. | ||
81 | - serializerPool = KryoPool.newBuilder() | ||
82 | - .register(ArrayList.class, | ||
83 | - HashMap.class, | ||
84 | - | ||
85 | - ControllerNode.State.class, | ||
86 | - Device.Type.class, | ||
87 | - | ||
88 | - DefaultControllerNode.class, | ||
89 | - DefaultDevice.class, | ||
90 | - MastershipRole.class, | ||
91 | - Port.class, | ||
92 | - Element.class, | ||
93 | - | ||
94 | - Link.Type.class | ||
95 | - ) | ||
96 | - .register(IpPrefix.class, new IpPrefixSerializer()) | ||
97 | - .register(URI.class, new URISerializer()) | ||
98 | - .register(NodeId.class, new NodeIdSerializer()) | ||
99 | - .register(ProviderId.class, new ProviderIdSerializer()) | ||
100 | - .register(DeviceId.class, new DeviceIdSerializer()) | ||
101 | - .register(PortNumber.class, new PortNumberSerializer()) | ||
102 | - .register(DefaultPort.class, new DefaultPortSerializer()) | ||
103 | - .register(LinkKey.class, new LinkKeySerializer()) | ||
104 | - .register(ConnectPoint.class, new ConnectPointSerializer()) | ||
105 | - .register(DefaultLink.class, new DefaultLinkSerializer()) | ||
106 | - .build() | ||
107 | - .populate(10); | ||
108 | - } | ||
109 | - | ||
110 | @Deactivate | 41 | @Deactivate |
111 | public void deactivate() { | 42 | public void deactivate() { |
112 | instance.shutdown(); | 43 | instance.shutdown(); |
... | @@ -118,18 +49,4 @@ public class StoreManager implements StoreService { | ... | @@ -118,18 +49,4 @@ public class StoreManager implements StoreService { |
118 | return instance; | 49 | return instance; |
119 | } | 50 | } |
120 | 51 | ||
121 | - | ||
122 | - @Override | ||
123 | - public byte[] serialize(final Object obj) { | ||
124 | - return serializerPool.serialize(obj); | ||
125 | - } | ||
126 | - | ||
127 | - @Override | ||
128 | - public <T> T deserialize(final byte[] bytes) { | ||
129 | - if (bytes == null) { | ||
130 | - return null; | ||
131 | - } | ||
132 | - return serializerPool.deserialize(bytes); | ||
133 | - } | ||
134 | - | ||
135 | } | 52 | } | ... | ... |
... | @@ -15,22 +15,4 @@ public interface StoreService { | ... | @@ -15,22 +15,4 @@ public interface StoreService { |
15 | */ | 15 | */ |
16 | HazelcastInstance getHazelcastInstance(); | 16 | HazelcastInstance getHazelcastInstance(); |
17 | 17 | ||
18 | - /** | ||
19 | - * Serializes the specified object into bytes using one of the | ||
20 | - * pre-registered serializers. | ||
21 | - * | ||
22 | - * @param obj object to be serialized | ||
23 | - * @return serialized bytes | ||
24 | - */ | ||
25 | - public byte[] serialize(final Object obj); | ||
26 | - | ||
27 | - /** | ||
28 | - * Deserializes the specified bytes into an object using one of the | ||
29 | - * pre-registered serializers. | ||
30 | - * | ||
31 | - * @param bytes bytes to be deserialized | ||
32 | - * @return deserialized object | ||
33 | - */ | ||
34 | - public <T> T deserialize(final byte[] bytes); | ||
35 | - | ||
36 | } | 18 | } | ... | ... |
... | @@ -46,9 +46,8 @@ public class TestStoreManager extends StoreManager { | ... | @@ -46,9 +46,8 @@ public class TestStoreManager extends StoreManager { |
46 | this.instance = instance; | 46 | this.instance = instance; |
47 | } | 47 | } |
48 | 48 | ||
49 | - // Hazelcast setup removed from original code. | ||
50 | @Override | 49 | @Override |
51 | public void activate() { | 50 | public void activate() { |
52 | - setupKryoPool(); | 51 | + // Hazelcast setup removed from original code. |
53 | } | 52 | } |
54 | } | 53 | } | ... | ... |
... | @@ -72,6 +72,10 @@ public class DistributedDeviceStore | ... | @@ -72,6 +72,10 @@ public class DistributedDeviceStore |
72 | private IMap<byte[], byte[]> rawDevicePorts; | 72 | private IMap<byte[], byte[]> rawDevicePorts; |
73 | private LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> devicePorts; | 73 | private LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> devicePorts; |
74 | 74 | ||
75 | + private String devicesListener; | ||
76 | + | ||
77 | + private String portsListener; | ||
78 | + | ||
75 | @Override | 79 | @Override |
76 | @Activate | 80 | @Activate |
77 | public void activate() { | 81 | public void activate() { |
... | @@ -83,20 +87,20 @@ public class DistributedDeviceStore | ... | @@ -83,20 +87,20 @@ public class DistributedDeviceStore |
83 | // TODO decide on Map name scheme to avoid collision | 87 | // TODO decide on Map name scheme to avoid collision |
84 | rawDevices = theInstance.getMap("devices"); | 88 | rawDevices = theInstance.getMap("devices"); |
85 | final OptionalCacheLoader<DeviceId, DefaultDevice> deviceLoader | 89 | final OptionalCacheLoader<DeviceId, DefaultDevice> deviceLoader |
86 | - = new OptionalCacheLoader<>(storeService, rawDevices); | 90 | + = new OptionalCacheLoader<>(kryoSerializationService, rawDevices); |
87 | devices = new AbsentInvalidatingLoadingCache<>(newBuilder().build(deviceLoader)); | 91 | devices = new AbsentInvalidatingLoadingCache<>(newBuilder().build(deviceLoader)); |
88 | // refresh/populate cache based on notification from other instance | 92 | // refresh/populate cache based on notification from other instance |
89 | - rawDevices.addEntryListener(new RemoteDeviceEventHandler(devices), includeValue); | 93 | + devicesListener = rawDevices.addEntryListener(new RemoteDeviceEventHandler(devices), includeValue); |
90 | 94 | ||
91 | // TODO cache availableDevices | 95 | // TODO cache availableDevices |
92 | availableDevices = theInstance.getSet("availableDevices"); | 96 | availableDevices = theInstance.getSet("availableDevices"); |
93 | 97 | ||
94 | rawDevicePorts = theInstance.getMap("devicePorts"); | 98 | rawDevicePorts = theInstance.getMap("devicePorts"); |
95 | final OptionalCacheLoader<DeviceId, Map<PortNumber, Port>> devicePortLoader | 99 | final OptionalCacheLoader<DeviceId, Map<PortNumber, Port>> devicePortLoader |
96 | - = new OptionalCacheLoader<>(storeService, rawDevicePorts); | 100 | + = new OptionalCacheLoader<>(kryoSerializationService, rawDevicePorts); |
97 | devicePorts = new AbsentInvalidatingLoadingCache<>(newBuilder().build(devicePortLoader)); | 101 | devicePorts = new AbsentInvalidatingLoadingCache<>(newBuilder().build(devicePortLoader)); |
98 | // refresh/populate cache based on notification from other instance | 102 | // refresh/populate cache based on notification from other instance |
99 | - rawDevicePorts.addEntryListener(new RemotePortEventHandler(devicePorts), includeValue); | 103 | + portsListener = rawDevicePorts.addEntryListener(new RemotePortEventHandler(devicePorts), includeValue); |
100 | 104 | ||
101 | loadDeviceCache(); | 105 | loadDeviceCache(); |
102 | loadDevicePortsCache(); | 106 | loadDevicePortsCache(); |
... | @@ -106,6 +110,8 @@ public class DistributedDeviceStore | ... | @@ -106,6 +110,8 @@ public class DistributedDeviceStore |
106 | 110 | ||
107 | @Deactivate | 111 | @Deactivate |
108 | public void deactivate() { | 112 | public void deactivate() { |
113 | + rawDevicePorts.removeEntryListener(portsListener); | ||
114 | + rawDevices.removeEntryListener(devicesListener); | ||
109 | log.info("Stopped"); | 115 | log.info("Stopped"); |
110 | } | 116 | } |
111 | 117 | ||
... | @@ -354,7 +360,7 @@ public class DistributedDeviceStore | ... | @@ -354,7 +360,7 @@ public class DistributedDeviceStore |
354 | } | 360 | } |
355 | } | 361 | } |
356 | 362 | ||
357 | - private class RemoteDeviceEventHandler extends RemoteEventHandler<DeviceId, DefaultDevice> { | 363 | + private class RemoteDeviceEventHandler extends RemoteCacheEventHandler<DeviceId, DefaultDevice> { |
358 | public RemoteDeviceEventHandler(LoadingCache<DeviceId, Optional<DefaultDevice>> cache) { | 364 | public RemoteDeviceEventHandler(LoadingCache<DeviceId, Optional<DefaultDevice>> cache) { |
359 | super(cache); | 365 | super(cache); |
360 | } | 366 | } |
... | @@ -375,7 +381,7 @@ public class DistributedDeviceStore | ... | @@ -375,7 +381,7 @@ public class DistributedDeviceStore |
375 | } | 381 | } |
376 | } | 382 | } |
377 | 383 | ||
378 | - private class RemotePortEventHandler extends RemoteEventHandler<DeviceId, Map<PortNumber, Port>> { | 384 | + private class RemotePortEventHandler extends RemoteCacheEventHandler<DeviceId, Map<PortNumber, Port>> { |
379 | public RemotePortEventHandler(LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> cache) { | 385 | public RemotePortEventHandler(LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> cache) { |
380 | super(cache); | 386 | super(cache); |
381 | } | 387 | } | ... | ... |
... | @@ -58,6 +58,8 @@ public class DistributedLinkStore | ... | @@ -58,6 +58,8 @@ public class DistributedLinkStore |
58 | private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create(); | 58 | private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create(); |
59 | private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create(); | 59 | private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create(); |
60 | 60 | ||
61 | + private String linksListener; | ||
62 | + | ||
61 | @Override | 63 | @Override |
62 | @Activate | 64 | @Activate |
63 | public void activate() { | 65 | public void activate() { |
... | @@ -68,10 +70,10 @@ public class DistributedLinkStore | ... | @@ -68,10 +70,10 @@ public class DistributedLinkStore |
68 | // TODO decide on Map name scheme to avoid collision | 70 | // TODO decide on Map name scheme to avoid collision |
69 | rawLinks = theInstance.getMap("links"); | 71 | rawLinks = theInstance.getMap("links"); |
70 | final OptionalCacheLoader<LinkKey, DefaultLink> linkLoader | 72 | final OptionalCacheLoader<LinkKey, DefaultLink> linkLoader |
71 | - = new OptionalCacheLoader<>(storeService, rawLinks); | 73 | + = new OptionalCacheLoader<>(kryoSerializationService, rawLinks); |
72 | links = new AbsentInvalidatingLoadingCache<>(newBuilder().build(linkLoader)); | 74 | links = new AbsentInvalidatingLoadingCache<>(newBuilder().build(linkLoader)); |
73 | // refresh/populate cache based on notification from other instance | 75 | // refresh/populate cache based on notification from other instance |
74 | - rawLinks.addEntryListener(new RemoteLinkEventHandler(links), includeValue); | 76 | + linksListener = rawLinks.addEntryListener(new RemoteLinkEventHandler(links), includeValue); |
75 | 77 | ||
76 | loadLinkCache(); | 78 | loadLinkCache(); |
77 | 79 | ||
... | @@ -80,7 +82,7 @@ public class DistributedLinkStore | ... | @@ -80,7 +82,7 @@ public class DistributedLinkStore |
80 | 82 | ||
81 | @Deactivate | 83 | @Deactivate |
82 | public void deactivate() { | 84 | public void deactivate() { |
83 | - super.activate(); | 85 | + rawLinks.removeEntryListener(linksListener); |
84 | log.info("Stopped"); | 86 | log.info("Stopped"); |
85 | } | 87 | } |
86 | 88 | ||
... | @@ -233,7 +235,7 @@ public class DistributedLinkStore | ... | @@ -233,7 +235,7 @@ public class DistributedLinkStore |
233 | } | 235 | } |
234 | } | 236 | } |
235 | 237 | ||
236 | - private class RemoteLinkEventHandler extends RemoteEventHandler<LinkKey, DefaultLink> { | 238 | + private class RemoteLinkEventHandler extends RemoteCacheEventHandler<LinkKey, DefaultLink> { |
237 | public RemoteLinkEventHandler(LoadingCache<LinkKey, Optional<DefaultLink>> cache) { | 239 | public RemoteLinkEventHandler(LoadingCache<LinkKey, Optional<DefaultLink>> cache) { |
238 | super(cache); | 240 | super(cache); |
239 | } | 241 | } | ... | ... |
... | @@ -20,6 +20,7 @@ import org.junit.After; | ... | @@ -20,6 +20,7 @@ import org.junit.After; |
20 | import org.junit.AfterClass; | 20 | import org.junit.AfterClass; |
21 | import org.junit.Before; | 21 | import org.junit.Before; |
22 | import org.junit.BeforeClass; | 22 | import org.junit.BeforeClass; |
23 | +import org.junit.Ignore; | ||
23 | import org.junit.Test; | 24 | import org.junit.Test; |
24 | import org.onlab.onos.net.Device; | 25 | import org.onlab.onos.net.Device; |
25 | import org.onlab.onos.net.DeviceId; | 26 | import org.onlab.onos.net.DeviceId; |
... | @@ -35,12 +36,17 @@ import org.onlab.onos.net.provider.ProviderId; | ... | @@ -35,12 +36,17 @@ import org.onlab.onos.net.provider.ProviderId; |
35 | import org.onlab.onos.store.common.StoreManager; | 36 | import org.onlab.onos.store.common.StoreManager; |
36 | import org.onlab.onos.store.common.StoreService; | 37 | import org.onlab.onos.store.common.StoreService; |
37 | import org.onlab.onos.store.common.TestStoreManager; | 38 | import org.onlab.onos.store.common.TestStoreManager; |
39 | +import org.onlab.onos.store.serializers.KryoSerializationManager; | ||
40 | +import org.onlab.onos.store.serializers.KryoSerializationService; | ||
38 | 41 | ||
39 | import com.google.common.collect.Iterables; | 42 | import com.google.common.collect.Iterables; |
40 | import com.google.common.collect.Sets; | 43 | import com.google.common.collect.Sets; |
41 | import com.hazelcast.config.Config; | 44 | import com.hazelcast.config.Config; |
42 | import com.hazelcast.core.Hazelcast; | 45 | import com.hazelcast.core.Hazelcast; |
43 | 46 | ||
47 | +/** | ||
48 | + * Test of the Hazelcast based distributed DeviceStore implementation. | ||
49 | + */ | ||
44 | public class DistributedDeviceStoreTest { | 50 | public class DistributedDeviceStoreTest { |
45 | 51 | ||
46 | private static final ProviderId PID = new ProviderId("of", "foo"); | 52 | private static final ProviderId PID = new ProviderId("of", "foo"); |
... | @@ -57,6 +63,7 @@ public class DistributedDeviceStoreTest { | ... | @@ -57,6 +63,7 @@ public class DistributedDeviceStoreTest { |
57 | private static final PortNumber P3 = PortNumber.portNumber(3); | 63 | private static final PortNumber P3 = PortNumber.portNumber(3); |
58 | 64 | ||
59 | private DistributedDeviceStore deviceStore; | 65 | private DistributedDeviceStore deviceStore; |
66 | + private KryoSerializationManager serializationMgr; | ||
60 | 67 | ||
61 | private StoreManager storeManager; | 68 | private StoreManager storeManager; |
62 | 69 | ||
... | @@ -78,7 +85,10 @@ public class DistributedDeviceStoreTest { | ... | @@ -78,7 +85,10 @@ public class DistributedDeviceStoreTest { |
78 | storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config)); | 85 | storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config)); |
79 | storeManager.activate(); | 86 | storeManager.activate(); |
80 | 87 | ||
81 | - deviceStore = new TestDistributedDeviceStore(storeManager); | 88 | + serializationMgr = new KryoSerializationManager(); |
89 | + serializationMgr.activate(); | ||
90 | + | ||
91 | + deviceStore = new TestDistributedDeviceStore(storeManager, serializationMgr); | ||
82 | deviceStore.activate(); | 92 | deviceStore.activate(); |
83 | } | 93 | } |
84 | 94 | ||
... | @@ -86,6 +96,8 @@ public class DistributedDeviceStoreTest { | ... | @@ -86,6 +96,8 @@ public class DistributedDeviceStoreTest { |
86 | public void tearDown() throws Exception { | 96 | public void tearDown() throws Exception { |
87 | deviceStore.deactivate(); | 97 | deviceStore.deactivate(); |
88 | 98 | ||
99 | + serializationMgr.deactivate(); | ||
100 | + | ||
89 | storeManager.deactivate(); | 101 | storeManager.deactivate(); |
90 | } | 102 | } |
91 | 103 | ||
... | @@ -326,6 +338,7 @@ public class DistributedDeviceStoreTest { | ... | @@ -326,6 +338,7 @@ public class DistributedDeviceStoreTest { |
326 | } | 338 | } |
327 | 339 | ||
328 | // TODO add test for Port events when we have them | 340 | // TODO add test for Port events when we have them |
341 | + @Ignore("Ignore until Delegate spec. is clear.") | ||
329 | @Test | 342 | @Test |
330 | public final void testEvents() throws InterruptedException { | 343 | public final void testEvents() throws InterruptedException { |
331 | final CountDownLatch addLatch = new CountDownLatch(1); | 344 | final CountDownLatch addLatch = new CountDownLatch(1); |
... | @@ -379,8 +392,10 @@ public class DistributedDeviceStoreTest { | ... | @@ -379,8 +392,10 @@ public class DistributedDeviceStoreTest { |
379 | } | 392 | } |
380 | 393 | ||
381 | private class TestDistributedDeviceStore extends DistributedDeviceStore { | 394 | private class TestDistributedDeviceStore extends DistributedDeviceStore { |
382 | - public TestDistributedDeviceStore(StoreService storeService) { | 395 | + public TestDistributedDeviceStore(StoreService storeService, |
396 | + KryoSerializationService kryoSerializationService) { | ||
383 | this.storeService = storeService; | 397 | this.storeService = storeService; |
398 | + this.kryoSerializationService = kryoSerializationService; | ||
384 | } | 399 | } |
385 | } | 400 | } |
386 | } | 401 | } | ... | ... |
... | @@ -15,6 +15,7 @@ import org.junit.After; | ... | @@ -15,6 +15,7 @@ import org.junit.After; |
15 | import org.junit.AfterClass; | 15 | import org.junit.AfterClass; |
16 | import org.junit.Before; | 16 | import org.junit.Before; |
17 | import org.junit.BeforeClass; | 17 | import org.junit.BeforeClass; |
18 | +import org.junit.Ignore; | ||
18 | import org.junit.Test; | 19 | import org.junit.Test; |
19 | import org.onlab.onos.net.ConnectPoint; | 20 | import org.onlab.onos.net.ConnectPoint; |
20 | import org.onlab.onos.net.DeviceId; | 21 | import org.onlab.onos.net.DeviceId; |
... | @@ -29,27 +30,28 @@ import org.onlab.onos.net.provider.ProviderId; | ... | @@ -29,27 +30,28 @@ import org.onlab.onos.net.provider.ProviderId; |
29 | import org.onlab.onos.store.common.StoreManager; | 30 | import org.onlab.onos.store.common.StoreManager; |
30 | import org.onlab.onos.store.common.StoreService; | 31 | import org.onlab.onos.store.common.StoreService; |
31 | import org.onlab.onos.store.common.TestStoreManager; | 32 | import org.onlab.onos.store.common.TestStoreManager; |
33 | +import org.onlab.onos.store.serializers.KryoSerializationManager; | ||
34 | +import org.onlab.onos.store.serializers.KryoSerializationService; | ||
32 | 35 | ||
33 | import com.google.common.collect.Iterables; | 36 | import com.google.common.collect.Iterables; |
34 | import com.hazelcast.config.Config; | 37 | import com.hazelcast.config.Config; |
35 | import com.hazelcast.core.Hazelcast; | 38 | import com.hazelcast.core.Hazelcast; |
36 | 39 | ||
40 | +/** | ||
41 | + * Test of the Hazelcast based distributed LinkStore implementation. | ||
42 | + */ | ||
37 | public class DistributedLinkStoreTest { | 43 | public class DistributedLinkStoreTest { |
38 | 44 | ||
39 | private static final ProviderId PID = new ProviderId("of", "foo"); | 45 | private static final ProviderId PID = new ProviderId("of", "foo"); |
40 | private static final DeviceId DID1 = deviceId("of:foo"); | 46 | private static final DeviceId DID1 = deviceId("of:foo"); |
41 | private static final DeviceId DID2 = deviceId("of:bar"); | 47 | private static final DeviceId DID2 = deviceId("of:bar"); |
42 | -// private static final String MFR = "whitebox"; | ||
43 | -// private static final String HW = "1.1.x"; | ||
44 | -// private static final String SW1 = "3.8.1"; | ||
45 | -// private static final String SW2 = "3.9.5"; | ||
46 | -// private static final String SN = "43311-12345"; | ||
47 | 48 | ||
48 | private static final PortNumber P1 = PortNumber.portNumber(1); | 49 | private static final PortNumber P1 = PortNumber.portNumber(1); |
49 | private static final PortNumber P2 = PortNumber.portNumber(2); | 50 | private static final PortNumber P2 = PortNumber.portNumber(2); |
50 | private static final PortNumber P3 = PortNumber.portNumber(3); | 51 | private static final PortNumber P3 = PortNumber.portNumber(3); |
51 | 52 | ||
52 | private StoreManager storeManager; | 53 | private StoreManager storeManager; |
54 | + private KryoSerializationManager serializationMgr; | ||
53 | 55 | ||
54 | private DistributedLinkStore linkStore; | 56 | private DistributedLinkStore linkStore; |
55 | 57 | ||
... | @@ -69,13 +71,17 @@ public class DistributedLinkStoreTest { | ... | @@ -69,13 +71,17 @@ public class DistributedLinkStoreTest { |
69 | storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config)); | 71 | storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config)); |
70 | storeManager.activate(); | 72 | storeManager.activate(); |
71 | 73 | ||
72 | - linkStore = new TestDistributedLinkStore(storeManager); | 74 | + serializationMgr = new KryoSerializationManager(); |
75 | + serializationMgr.activate(); | ||
76 | + | ||
77 | + linkStore = new TestDistributedLinkStore(storeManager, serializationMgr); | ||
73 | linkStore.activate(); | 78 | linkStore.activate(); |
74 | } | 79 | } |
75 | 80 | ||
76 | @After | 81 | @After |
77 | public void tearDown() throws Exception { | 82 | public void tearDown() throws Exception { |
78 | linkStore.deactivate(); | 83 | linkStore.deactivate(); |
84 | + serializationMgr.deactivate(); | ||
79 | storeManager.deactivate(); | 85 | storeManager.deactivate(); |
80 | } | 86 | } |
81 | 87 | ||
... | @@ -302,6 +308,7 @@ public class DistributedLinkStoreTest { | ... | @@ -302,6 +308,7 @@ public class DistributedLinkStoreTest { |
302 | assertLink(linkId2, DIRECT, linkStore.getLink(d2P2, d1P1)); | 308 | assertLink(linkId2, DIRECT, linkStore.getLink(d2P2, d1P1)); |
303 | } | 309 | } |
304 | 310 | ||
311 | + @Ignore("Ignore until Delegate spec. is clear.") | ||
305 | @Test | 312 | @Test |
306 | public final void testEvents() throws InterruptedException { | 313 | public final void testEvents() throws InterruptedException { |
307 | 314 | ||
... | @@ -354,8 +361,10 @@ public class DistributedLinkStoreTest { | ... | @@ -354,8 +361,10 @@ public class DistributedLinkStoreTest { |
354 | 361 | ||
355 | 362 | ||
356 | class TestDistributedLinkStore extends DistributedLinkStore { | 363 | class TestDistributedLinkStore extends DistributedLinkStore { |
357 | - TestDistributedLinkStore(StoreService storeService) { | 364 | + TestDistributedLinkStore(StoreService storeService, |
365 | + KryoSerializationService kryoSerializationService) { | ||
358 | this.storeService = storeService; | 366 | this.storeService = storeService; |
367 | + this.kryoSerializationService = kryoSerializationService; | ||
359 | } | 368 | } |
360 | } | 369 | } |
361 | } | 370 | } | ... | ... |
core/store/serializers/src/main/java/org/onlab/onos/store/serializers/KryoSerializationManager.java
0 → 100644
1 | +package org.onlab.onos.store.serializers; | ||
2 | + | ||
3 | +import java.net.URI; | ||
4 | +import java.util.ArrayList; | ||
5 | +import java.util.HashMap; | ||
6 | + | ||
7 | +import org.apache.felix.scr.annotations.Activate; | ||
8 | +import org.apache.felix.scr.annotations.Component; | ||
9 | +import org.apache.felix.scr.annotations.Deactivate; | ||
10 | +import org.apache.felix.scr.annotations.Service; | ||
11 | +import org.onlab.onos.cluster.ControllerNode; | ||
12 | +import org.onlab.onos.cluster.DefaultControllerNode; | ||
13 | +import org.onlab.onos.cluster.NodeId; | ||
14 | +import org.onlab.onos.net.ConnectPoint; | ||
15 | +import org.onlab.onos.net.DefaultDevice; | ||
16 | +import org.onlab.onos.net.DefaultLink; | ||
17 | +import org.onlab.onos.net.DefaultPort; | ||
18 | +import org.onlab.onos.net.Device; | ||
19 | +import org.onlab.onos.net.DeviceId; | ||
20 | +import org.onlab.onos.net.Element; | ||
21 | +import org.onlab.onos.net.Link; | ||
22 | +import org.onlab.onos.net.LinkKey; | ||
23 | +import org.onlab.onos.net.MastershipRole; | ||
24 | +import org.onlab.onos.net.Port; | ||
25 | +import org.onlab.onos.net.PortNumber; | ||
26 | +import org.onlab.onos.net.provider.ProviderId; | ||
27 | +import org.onlab.packet.IpPrefix; | ||
28 | +import org.onlab.util.KryoPool; | ||
29 | +import org.slf4j.Logger; | ||
30 | +import org.slf4j.LoggerFactory; | ||
31 | + | ||
32 | +import de.javakaffee.kryoserializers.URISerializer; | ||
33 | + | ||
34 | +/** | ||
35 | + * Serialization service using Kryo. | ||
36 | + */ | ||
37 | +@Component(immediate = true) | ||
38 | +@Service | ||
39 | +public class KryoSerializationManager implements KryoSerializationService { | ||
40 | + | ||
41 | + private final Logger log = LoggerFactory.getLogger(getClass()); | ||
42 | + private KryoPool serializerPool; | ||
43 | + | ||
44 | + | ||
45 | + @Activate | ||
46 | + public void activate() { | ||
47 | + setupKryoPool(); | ||
48 | + log.info("Started"); | ||
49 | + } | ||
50 | + | ||
51 | + @Deactivate | ||
52 | + public void deactivate() { | ||
53 | + log.info("Stopped"); | ||
54 | + } | ||
55 | + | ||
56 | + /** | ||
57 | + * Sets up the common serializers pool. | ||
58 | + */ | ||
59 | + protected void setupKryoPool() { | ||
60 | + // FIXME Slice out types used in common to separate pool/namespace. | ||
61 | + serializerPool = KryoPool.newBuilder() | ||
62 | + .register(ArrayList.class, | ||
63 | + HashMap.class, | ||
64 | + | ||
65 | + ControllerNode.State.class, | ||
66 | + Device.Type.class, | ||
67 | + | ||
68 | + DefaultControllerNode.class, | ||
69 | + DefaultDevice.class, | ||
70 | + MastershipRole.class, | ||
71 | + Port.class, | ||
72 | + Element.class, | ||
73 | + | ||
74 | + Link.Type.class | ||
75 | + ) | ||
76 | + .register(IpPrefix.class, new IpPrefixSerializer()) | ||
77 | + .register(URI.class, new URISerializer()) | ||
78 | + .register(NodeId.class, new NodeIdSerializer()) | ||
79 | + .register(ProviderId.class, new ProviderIdSerializer()) | ||
80 | + .register(DeviceId.class, new DeviceIdSerializer()) | ||
81 | + .register(PortNumber.class, new PortNumberSerializer()) | ||
82 | + .register(DefaultPort.class, new DefaultPortSerializer()) | ||
83 | + .register(LinkKey.class, new LinkKeySerializer()) | ||
84 | + .register(ConnectPoint.class, new ConnectPointSerializer()) | ||
85 | + .register(DefaultLink.class, new DefaultLinkSerializer()) | ||
86 | + .build() | ||
87 | + .populate(1); | ||
88 | + } | ||
89 | + | ||
90 | + @Override | ||
91 | + public byte[] serialize(final Object obj) { | ||
92 | + return serializerPool.serialize(obj); | ||
93 | + } | ||
94 | + | ||
95 | + @Override | ||
96 | + public <T> T deserialize(final byte[] bytes) { | ||
97 | + if (bytes == null) { | ||
98 | + return null; | ||
99 | + } | ||
100 | + return serializerPool.deserialize(bytes); | ||
101 | + } | ||
102 | + | ||
103 | +} |
core/store/serializers/src/main/java/org/onlab/onos/store/serializers/KryoSerializationService.java
0 → 100644
1 | +package org.onlab.onos.store.serializers; | ||
2 | + | ||
3 | +// TODO: To be replaced with SerializationService from IOLoop activity | ||
4 | +/** | ||
5 | + * Service to serialize Objects into byte array. | ||
6 | + */ | ||
7 | +public interface KryoSerializationService { | ||
8 | + | ||
9 | + /** | ||
10 | + * Serializes the specified object into bytes using one of the | ||
11 | + * pre-registered serializers. | ||
12 | + * | ||
13 | + * @param obj object to be serialized | ||
14 | + * @return serialized bytes | ||
15 | + */ | ||
16 | + public byte[] serialize(final Object obj); | ||
17 | + | ||
18 | + /** | ||
19 | + * Deserializes the specified bytes into an object using one of the | ||
20 | + * pre-registered serializers. | ||
21 | + * | ||
22 | + * @param bytes bytes to be deserialized | ||
23 | + * @return deserialized object | ||
24 | + */ | ||
25 | + public <T> T deserialize(final byte[] bytes); | ||
26 | + | ||
27 | +} |
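Editor's note: a round-trip through the service is the simplest sanity check. The sketch below assumes an injected kryoSerializationService reference and uses only types registered in the pool above.

    // Minimal round-trip sketch; kryoSerializationService is assumed injected.
    DeviceId original = DeviceId.deviceId("of:0000000000000001");
    byte[] bytes = kryoSerializationService.serialize(original);
    DeviceId copy = kryoSerializationService.deserialize(bytes);
    // copy.equals(original) should hold, since DeviceId is registered
    // with the pool via DeviceIdSerializer above.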
... | @@ -20,7 +20,7 @@ import java.util.Set; | ... | @@ -20,7 +20,7 @@ import java.util.Set; |
20 | import static org.slf4j.LoggerFactory.getLogger; | 20 | import static org.slf4j.LoggerFactory.getLogger; |
21 | 21 | ||
22 | /** | 22 | /** |
23 | - * Manages inventory of infrastructure DEVICES using trivial in-memory | 23 | + * Manages inventory of infrastructure devices using trivial in-memory |
24 | * structures implementation. | 24 | * structures implementation. |
25 | */ | 25 | */ |
26 | @Component(immediate = true) | 26 | @Component(immediate = true) | ... | ... |
... | @@ -101,9 +101,6 @@ public class SimpleDeviceStore | ... | @@ -101,9 +101,6 @@ public class SimpleDeviceStore |
101 | synchronized (this) { | 101 | synchronized (this) { |
102 | devices.put(deviceId, device); | 102 | devices.put(deviceId, device); |
103 | availableDevices.add(deviceId); | 103 | availableDevices.add(deviceId); |
104 | - | ||
105 | - // For now claim the device as a master automatically. | ||
106 | - // roles.put(deviceId, MastershipRole.MASTER); | ||
107 | } | 104 | } |
108 | return new DeviceEvent(DeviceEvent.Type.DEVICE_ADDED, device, null); | 105 | return new DeviceEvent(DeviceEvent.Type.DEVICE_ADDED, device, null); |
109 | } | 106 | } |
... | @@ -189,7 +186,7 @@ public class SimpleDeviceStore | ... | @@ -189,7 +186,7 @@ public class SimpleDeviceStore |
189 | new DefaultPort(device, portDescription.portNumber(), | 186 | new DefaultPort(device, portDescription.portNumber(), |
190 | portDescription.isEnabled()); | 187 | portDescription.isEnabled()); |
191 | ports.put(port.number(), updatedPort); | 188 | ports.put(port.number(), updatedPort); |
192 | - return new DeviceEvent(PORT_UPDATED, device, port); | 189 | + return new DeviceEvent(PORT_UPDATED, device, updatedPort); |
193 | } | 190 | } |
194 | return null; | 191 | return null; |
195 | } | 192 | } | ... | ... |
... | @@ -51,8 +51,6 @@ public class SimpleLinkStore | ... | @@ -51,8 +51,6 @@ public class SimpleLinkStore |
51 | private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create(); | 51 | private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create(); |
52 | private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create(); | 52 | private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create(); |
53 | 53 | ||
54 | - private static final Set<Link> EMPTY = ImmutableSet.of(); | ||
55 | - | ||
56 | @Activate | 54 | @Activate |
57 | public void activate() { | 55 | public void activate() { |
58 | log.info("Started"); | 56 | log.info("Started"); | ... | ... |
core/store/trivial/src/test/java/org/onlab/onos/net/trivial/impl/SimpleDeviceStoreTest.java
0 → 100644
core/store/trivial/src/test/java/org/onlab/onos/net/trivial/impl/SimpleLinkStoreTest.java
0 → 100644
... | @@ -9,7 +9,6 @@ | ... | @@ -9,7 +9,6 @@ |
9 | <bundle>mvn:org.apache.commons/commons-lang3/3.3.2</bundle> | 9 | <bundle>mvn:org.apache.commons/commons-lang3/3.3.2</bundle> |
10 | <bundle>mvn:com.google.guava/guava/18.0</bundle> | 10 | <bundle>mvn:com.google.guava/guava/18.0</bundle> |
11 | <bundle>mvn:io.netty/netty/3.9.2.Final</bundle> | 11 | <bundle>mvn:io.netty/netty/3.9.2.Final</bundle> |
12 | - <bundle>mvn:org.livetribe.slp/livetribe-slp-osgi/2.2.1</bundle> | ||
13 | 12 | ||
14 | <bundle>mvn:com.hazelcast/hazelcast/3.3</bundle> | 13 | <bundle>mvn:com.hazelcast/hazelcast/3.3</bundle> |
15 | <bundle>mvn:com.eclipsesource.minimal-json/minimal-json/0.9.1</bundle> | 14 | <bundle>mvn:com.eclipsesource.minimal-json/minimal-json/0.9.1</bundle> |
... | @@ -20,6 +19,9 @@ | ... | @@ -20,6 +19,9 @@ |
20 | <bundle>mvn:de.javakaffee/kryo-serializers/0.27</bundle> | 19 | <bundle>mvn:de.javakaffee/kryo-serializers/0.27</bundle> |
21 | 20 | ||
22 | <bundle>mvn:org.onlab.onos/onlab-nio/1.0.0-SNAPSHOT</bundle> | 21 | <bundle>mvn:org.onlab.onos/onlab-nio/1.0.0-SNAPSHOT</bundle> |
22 | + | ||
23 | + <bundle>mvn:org.codehaus.jackson/jackson-core-asl/1.9.13</bundle> | ||
24 | + <bundle>mvn:org.codehaus.jackson/jackson-mapper-asl/1.9.13</bundle> | ||
23 | </feature> | 25 | </feature> |
24 | 26 | ||
25 | <feature name="onos-thirdparty-web" version="1.0.0" | 27 | <feature name="onos-thirdparty-web" version="1.0.0" |
... | @@ -49,20 +51,17 @@ | ... | @@ -49,20 +51,17 @@ |
49 | description="ONOS core components"> | 51 | description="ONOS core components"> |
50 | <feature>onos-api</feature> | 52 | <feature>onos-api</feature> |
51 | <bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle> | 53 | <bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle> |
52 | - <bundle>mvn:org.onlab.onos/onos-core-hz-common/1.0.0-SNAPSHOT</bundle> | 54 | + <bundle>mvn:org.onlab.onos/onos-core-dist/1.0.0-SNAPSHOT</bundle> |
53 | - <bundle>mvn:org.onlab.onos/onos-core-serializers/1.0.0-SNAPSHOT</bundle> | ||
54 | - <bundle>mvn:org.onlab.onos/onos-core-hz-cluster/1.0.0-SNAPSHOT</bundle> | ||
55 | - <bundle>mvn:org.onlab.onos/onos-core-hz-net/1.0.0-SNAPSHOT</bundle> | ||
56 | </feature> | 55 | </feature> |
57 | 56 | ||
58 | - <feature name="onos-core-dist" version="1.0.0" | 57 | + <feature name="onos-core-hazelcast" version="1.0.0" |
59 | - description="ONOS core components"> | 58 | + description="ONOS core components built on hazelcast"> |
60 | <feature>onos-api</feature> | 59 | <feature>onos-api</feature> |
61 | <bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle> | 60 | <bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle> |
62 | <bundle>mvn:org.onlab.onos/onos-core-hz-common/1.0.0-SNAPSHOT</bundle> | 61 | <bundle>mvn:org.onlab.onos/onos-core-hz-common/1.0.0-SNAPSHOT</bundle> |
63 | <bundle>mvn:org.onlab.onos/onos-core-serializers/1.0.0-SNAPSHOT</bundle> | 62 | <bundle>mvn:org.onlab.onos/onos-core-serializers/1.0.0-SNAPSHOT</bundle> |
64 | <bundle>mvn:org.onlab.onos/onos-core-hz-cluster/1.0.0-SNAPSHOT</bundle> | 63 | <bundle>mvn:org.onlab.onos/onos-core-hz-cluster/1.0.0-SNAPSHOT</bundle> |
65 | - <bundle>mvn:org.onlab.onos/onos-core-dist/1.0.0-SNAPSHOT</bundle> | 64 | + <bundle>mvn:org.onlab.onos/onos-core-hz-net/1.0.0-SNAPSHOT</bundle> |
66 | </feature> | 65 | </feature> |
67 | 66 | ||
68 | <feature name="onos-core-trivial" version="1.0.0" | 67 | <feature name="onos-core-trivial" version="1.0.0" |
... | @@ -126,4 +125,10 @@ | ... | @@ -126,4 +125,10 @@ |
126 | <bundle>mvn:org.onlab.onos/onos-app-foo/1.0.0-SNAPSHOT</bundle> | 125 | <bundle>mvn:org.onlab.onos/onos-app-foo/1.0.0-SNAPSHOT</bundle> |
127 | </feature> | 126 | </feature> |
128 | 127 | ||
128 | + <feature name="onos-app-config" version="1.0.0" | ||
129 | + description="ONOS network config reader"> | ||
130 | + <feature>onos-api</feature> | ||
131 | + <bundle>mvn:org.onlab.onos/onos-app-config/1.0.0-SNAPSHOT</bundle> | ||
132 | + </feature> | ||
133 | + | ||
129 | </features> | 134 | </features> | ... | ... |
... | @@ -92,6 +92,17 @@ | ... | @@ -92,6 +92,17 @@ |
92 | <version>3.3.2</version> | 92 | <version>3.3.2</version> |
93 | </dependency> | 93 | </dependency> |
94 | 94 | ||
95 | + <dependency> | ||
96 | + <groupId>org.codehaus.jackson</groupId> | ||
97 | + <artifactId>jackson-core-asl</artifactId> | ||
98 | + <version>1.9.13</version> | ||
99 | + </dependency> | ||
100 | + <dependency> | ||
101 | + <groupId>org.codehaus.jackson</groupId> | ||
102 | + <artifactId>jackson-mapper-asl</artifactId> | ||
103 | + <version>1.9.13</version> | ||
104 | + </dependency> | ||
105 | + | ||
95 | 106 | ||
96 | <!-- Web related --> | 107 | <!-- Web related --> |
97 | <dependency> | 108 | <dependency> | ... | ... |
... | @@ -106,10 +106,10 @@ public class OpenFlowPacketProvider extends AbstractProvider implements PacketPr | ... | @@ -106,10 +106,10 @@ public class OpenFlowPacketProvider extends AbstractProvider implements PacketPr |
106 | for (Instruction inst : packet.treatment().instructions()) { | 106 | for (Instruction inst : packet.treatment().instructions()) { |
107 | if (inst.type().equals(Instruction.Type.OUTPUT)) { | 107 | if (inst.type().equals(Instruction.Type.OUTPUT)) { |
108 | p = portDesc(((OutputInstruction) inst).port()); | 108 | p = portDesc(((OutputInstruction) inst).port()); |
109 | - if (!sw.getPorts().contains(p)) { | 109 | + /*if (!sw.getPorts().contains(p)) { |
110 | - log.warn("Tried to write out non-existint port {}", p.getPortNo()); | 110 | + log.warn("Tried to write out non-existent port {}", p.getPortNo()); |
111 | continue; | 111 | continue; |
112 | - } | 112 | + }*/ |
113 | OFPacketOut po = packetOut(sw, eth, p.getPortNo()); | 113 | OFPacketOut po = packetOut(sw, eth, p.getPortNo()); |
114 | sw.sendMsg(po); | 114 | sw.sendMsg(po); |
115 | } | 115 | } | ... | ... |
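The guard above is disabled rather than removed: the provider now builds and sends an OFPacketOut for every OUTPUT instruction without first checking that the port appears in sw.getPorts(), and the warning about a non-existent port (with its "non-existint" typo fixed) survives only inside the commented block. The matching "missing port" case in OpenFlowPacketProviderTest is commented out in the next hunk so the unit test stays consistent with the relaxed behaviour.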
... | @@ -154,9 +154,9 @@ public class OpenFlowPacketProviderTest { | ... | @@ -154,9 +154,9 @@ public class OpenFlowPacketProviderTest { |
154 | assertEquals("message sent incorrectly", 0, sw.sent.size()); | 154 | assertEquals("message sent incorrectly", 0, sw.sent.size()); |
155 | 155 | ||
156 | //to missing port | 156 | //to missing port |
157 | - OutboundPacket portFailPkt = outPacket(DID, TR_MISSING, eth); | 157 | + //OutboundPacket portFailPkt = outPacket(DID, TR_MISSING, eth); |
158 | - provider.emit(portFailPkt); | 158 | + //provider.emit(portFailPkt); |
159 | - assertEquals("extra message sent", 1, sw.sent.size()); | 159 | + //assertEquals("extra message sent", 1, sw.sent.size()); |
160 | 160 | ||
161 | } | 161 | } |
162 | 162 | ... | ... |
... | @@ -9,5 +9,5 @@ | ... | @@ -9,5 +9,5 @@ |
9 | nodes=$(env | sort | egrep "OC[0-9]+" | cut -d= -f2) | 9 | nodes=$(env | sort | egrep "OC[0-9]+" | cut -d= -f2) |
10 | 10 | ||
11 | onos-package | 11 | onos-package |
12 | -for node in $nodes; do printf "%s: " $node; onos-install -f $node; done | 12 | +for node in $nodes; do (printf "%s: %s\n" "$node" "`onos-install -f $node`")& done |
13 | for node in $nodes; do onos-wait-for-start $node; done | 13 | for node in $nodes; do onos-wait-for-start $node; done | ... | ... |
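onos-group-install now runs onos-install -f for all nodes in parallel: each install is launched in a backgrounded subshell whose output is prefixed with the node name, and the unchanged onos-wait-for-start loop that follows acts as the de-facto synchronization point (there is no explicit wait on the background jobs).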
... | @@ -15,7 +15,7 @@ env JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 | ... | @@ -15,7 +15,7 @@ env JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 |
15 | 15 | ||
16 | pre-stop script | 16 | pre-stop script |
17 | /opt/onos/bin/onos halt 2>/opt/onos/var/stderr.log | 17 | /opt/onos/bin/onos halt 2>/opt/onos/var/stderr.log |
18 | - sleep 3 | 18 | + sleep 2 |
19 | end script | 19 | end script |
20 | 20 | ||
21 | script | 21 | script | ... | ... |
... | @@ -8,8 +8,21 @@ | ... | @@ -8,8 +8,21 @@ |
8 | 8 | ||
9 | remote=$ONOS_USER@${1:-$OCI} | 9 | remote=$ONOS_USER@${1:-$OCI} |
10 | 10 | ||
11 | +# Generate a cluster.json from the ON* environment variables | ||
12 | +CDEF_FILE=/tmp/cluster.json | ||
13 | +echo "{ \"nodes\":[" > $CDEF_FILE | ||
14 | +for node in $(env | sort | egrep "OC[2-9]+" | cut -d= -f2); do | ||
15 | + echo " { \"id\": \"$node\", \"ip\": \"$node\", \"tcpPort\": 9876 }," >> $CDEF_FILE | ||
16 | +done | ||
17 | +echo " { \"id\": \"$OC1\", \"ip\": \"$OC1\", \"tcpPort\": 9876 }" >> $CDEF_FILE | ||
18 | +echo "]}" >> $CDEF_FILE | ||
19 | + | ||
11 | ssh $remote " | 20 | ssh $remote " |
12 | sudo perl -pi.bak -e \"s/ <interface>.*</ <interface>${ONOS_NIC:-192.168.56.*}</g\" \ | 21 | sudo perl -pi.bak -e \"s/ <interface>.*</ <interface>${ONOS_NIC:-192.168.56.*}</g\" \ |
13 | $ONOS_INSTALL_DIR/$KARAF_DIST/etc/hazelcast.xml | 22 | $ONOS_INSTALL_DIR/$KARAF_DIST/etc/hazelcast.xml |
14 | - echo \"onos.ip=\$(ifconfig | grep $ONOS_NIC | cut -d: -f2 | cut -d\\ -f1)\" >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/system.properties | 23 | + |
24 | + echo \"onos.ip = \$(ifconfig | grep $ONOS_NIC | cut -d: -f2 | cut -d\\ -f1)\" \ | ||
25 | + >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/system.properties | ||
15 | " | 26 | " |
27 | + | ||
28 | +scp -q $CDEF_FILE $remote:$ONOS_INSTALL_DIR/config/ | ||
... | \ No newline at end of file | ... | \ No newline at end of file | ... | ... |
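onos-config now generates a cluster definition before touching the remote node: every OC2..OC9 environment variable is emitted first, each entry with a trailing comma, and OC1 is emitted last without one so the file stays valid JSON; the result is then copied into the node's new $ONOS_INSTALL_DIR/config directory. With hypothetical values OC1=192.168.56.101 and OC2=192.168.56.102, /tmp/cluster.json would come out as:

    { "nodes":[
     { "id": "192.168.56.102", "ip": "192.168.56.102", "tcpPort": 9876 },
     { "id": "192.168.56.101", "ip": "192.168.56.101", "tcpPort": 9876 }
    ]}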
... | @@ -24,6 +24,7 @@ ssh $remote " | ... | @@ -24,6 +24,7 @@ ssh $remote " |
24 | # Make a link to the log file directory and make a home for auxiliaries | 24 | # Make a link to the log file directory and make a home for auxiliaries |
25 | ln -s $ONOS_INSTALL_DIR/$KARAF_DIST/data/log /opt/onos/log | 25 | ln -s $ONOS_INSTALL_DIR/$KARAF_DIST/data/log /opt/onos/log |
26 | mkdir $ONOS_INSTALL_DIR/var | 26 | mkdir $ONOS_INSTALL_DIR/var |
27 | + mkdir $ONOS_INSTALL_DIR/config | ||
27 | 28 | ||
28 | # Install the upstart configuration file and setup options for debugging | 29 | # Install the upstart configuration file and setup options for debugging |
29 | sudo cp $ONOS_INSTALL_DIR/debian/onos.conf /etc/init/onos.conf | 30 | sudo cp $ONOS_INSTALL_DIR/debian/onos.conf /etc/init/onos.conf | ... | ... |
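The extra mkdir here creates the $ONOS_INSTALL_DIR/config directory that onos-config (previous script) now targets when it copies cluster.json onto the node.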
1 | package org.onlab.util; | 1 | package org.onlab.util; |
2 | 2 | ||
3 | +import java.nio.ByteBuffer; | ||
3 | import java.util.ArrayList; | 4 | import java.util.ArrayList; |
4 | import java.util.List; | 5 | import java.util.List; |
5 | import java.util.concurrent.ConcurrentLinkedQueue; | 6 | import java.util.concurrent.ConcurrentLinkedQueue; |
... | @@ -8,6 +9,8 @@ import org.apache.commons.lang3.tuple.Pair; | ... | @@ -8,6 +9,8 @@ import org.apache.commons.lang3.tuple.Pair; |
8 | 9 | ||
9 | import com.esotericsoftware.kryo.Kryo; | 10 | import com.esotericsoftware.kryo.Kryo; |
10 | import com.esotericsoftware.kryo.Serializer; | 11 | import com.esotericsoftware.kryo.Serializer; |
12 | +import com.esotericsoftware.kryo.io.ByteBufferInput; | ||
13 | +import com.esotericsoftware.kryo.io.ByteBufferOutput; | ||
11 | import com.esotericsoftware.kryo.io.Input; | 14 | import com.esotericsoftware.kryo.io.Input; |
12 | import com.esotericsoftware.kryo.io.Output; | 15 | import com.esotericsoftware.kryo.io.Output; |
13 | import com.google.common.collect.ImmutableList; | 16 | import com.google.common.collect.ImmutableList; |
... | @@ -174,6 +177,22 @@ public final class KryoPool { | ... | @@ -174,6 +177,22 @@ public final class KryoPool { |
174 | } | 177 | } |
175 | 178 | ||
176 | /** | 179 | /** |
180 | + * Serializes given object to byte buffer using Kryo instance in pool. | ||
181 | + * | ||
182 | + * @param obj Object to serialize | ||
183 | + * @param buffer to write to | ||
184 | + */ | ||
185 | + public void serialize(final Object obj, final ByteBuffer buffer) { | ||
186 | + ByteBufferOutput out = new ByteBufferOutput(buffer); | ||
187 | + Kryo kryo = getKryo(); | ||
188 | + try { | ||
189 | + kryo.writeClassAndObject(out, obj); | ||
190 | + } finally { | ||
191 | + putKryo(kryo); | ||
192 | + } | ||
193 | + } | ||
194 | + | ||
195 | + /** | ||
177 | * Deserializes given byte array to Object using Kryo instance in pool. | 196 | * Deserializes given byte array to Object using Kryo instance in pool. |
178 | * | 197 | * |
179 | * @param bytes serialized bytes | 198 | * @param bytes serialized bytes |
... | @@ -192,6 +211,24 @@ public final class KryoPool { | ... | @@ -192,6 +211,24 @@ public final class KryoPool { |
192 | } | 211 | } |
193 | } | 212 | } |
194 | 213 | ||
214 | + /** | ||
215 | + * Deserializes given byte buffer to Object using Kryo instance in pool. | ||
216 | + * | ||
217 | + * @param buffer input with serialized bytes | ||
218 | + * @param <T> deserialized Object type | ||
219 | + * @return deserialized Object | ||
220 | + */ | ||
221 | + public <T> T deserialize(final ByteBuffer buffer) { | ||
222 | + ByteBufferInput in = new ByteBufferInput(buffer); | ||
223 | + Kryo kryo = getKryo(); | ||
224 | + try { | ||
225 | + @SuppressWarnings("unchecked") | ||
226 | + T obj = (T) kryo.readClassAndObject(in); | ||
227 | + return obj; | ||
228 | + } finally { | ||
229 | + putKryo(kryo); | ||
230 | + } | ||
231 | + } | ||
195 | 232 | ||
196 | /** | 233 | /** |
197 | * Creates a Kryo instance with {@link #registeredTypes} pre-registered. | 234 | * Creates a Kryo instance with {@link #registeredTypes} pre-registered. | ... | ... |
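The two new KryoPool overloads let callers serialize straight into, and deserialize straight from, a java.nio.ByteBuffer, avoiding an intermediate byte[] when the data is headed for (or coming from) an NIO channel. Below is a minimal round-trip sketch that uses only the methods shown in this hunk; how the KryoPool is built and which types it registers are outside this diff, so the pool is taken as a parameter and the class name is made up:

    import java.nio.ByteBuffer;

    import org.onlab.util.KryoPool;

    public final class KryoBufferExample {
        private KryoBufferExample() {}

        /** Round-trips a value through a ByteBuffer using the new overloads. */
        public static <T> T roundTrip(KryoPool pool, T value) {
            ByteBuffer buffer = ByteBuffer.allocate(1024); // assumes the serialized form fits
            pool.serialize(value, buffer);                 // writes class id + object into the buffer
            buffer.flip();                                 // switch the buffer from writing to reading
            return pool.deserialize(buffer);               // reads the object back out
        }
    }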
... | @@ -54,9 +54,9 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> | ... | @@ -54,9 +54,9 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> |
54 | } | 54 | } |
55 | 55 | ||
56 | /** | 56 | /** |
57 | - * Returns the number of streams in custody of the IO loop. | 57 | + * Returns the number of message stream in custody of the loop. |
58 | * | 58 | * |
59 | - * @return number of message streams using this loop | 59 | + * @return number of message streams |
60 | */ | 60 | */ |
61 | public int streamCount() { | 61 | public int streamCount() { |
62 | return streams.size(); | 62 | return streams.size(); |
... | @@ -93,14 +93,9 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> | ... | @@ -93,14 +93,9 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> |
93 | * | 93 | * |
94 | * @param key selection key holding the pending connect operation. | 94 | * @param key selection key holding the pending connect operation. |
95 | */ | 95 | */ |
96 | - protected void connect(SelectionKey key) { | 96 | + protected void connect(SelectionKey key) throws IOException { |
97 | - try { | ||
98 | SocketChannel ch = (SocketChannel) key.channel(); | 97 | SocketChannel ch = (SocketChannel) key.channel(); |
99 | ch.finishConnect(); | 98 | ch.finishConnect(); |
100 | - } catch (IOException | IllegalStateException e) { | ||
101 | - log.warn("Unable to complete connection", e); | ||
102 | - } | ||
103 | - | ||
104 | if (key.isValid()) { | 99 | if (key.isValid()) { |
105 | key.interestOps(SelectionKey.OP_READ); | 100 | key.interestOps(SelectionKey.OP_READ); |
106 | } | 101 | } |
... | @@ -124,7 +119,11 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> | ... | @@ -124,7 +119,11 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> |
124 | 119 | ||
125 | // If there is a pending connect operation, complete it. | 120 | // If there is a pending connect operation, complete it. |
126 | if (key.isConnectable()) { | 121 | if (key.isConnectable()) { |
122 | + try { | ||
127 | connect(key); | 123 | connect(key); |
124 | + } catch (IOException | IllegalStateException e) { | ||
125 | + log.warn("Unable to complete connection", e); | ||
126 | + } | ||
128 | } | 127 | } |
129 | 128 | ||
130 | // If there is a read operation, slurp as much data as possible. | 129 | // If there is a read operation, slurp as much data as possible. | ... | ... |
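The connect() refactoring inverts the error handling: connect() now declares throws IOException and simply finishes the connection, while the isConnectable() branch of the caller catches IOException and IllegalStateException and logs the warning. Subclasses that override connect() — such as IOLoopTestClient in the final hunk, whose override signature changes to match — can therefore let I/O failures propagate instead of having them silently swallowed inside the base class.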
... | @@ -10,6 +10,7 @@ import java.nio.channels.ByteChannel; | ... | @@ -10,6 +10,7 @@ import java.nio.channels.ByteChannel; |
10 | import java.nio.channels.SelectionKey; | 10 | import java.nio.channels.SelectionKey; |
11 | import java.util.ArrayList; | 11 | import java.util.ArrayList; |
12 | import java.util.List; | 12 | import java.util.List; |
13 | +import java.util.Objects; | ||
13 | 14 | ||
14 | import static com.google.common.base.Preconditions.checkArgument; | 15 | import static com.google.common.base.Preconditions.checkArgument; |
15 | import static com.google.common.base.Preconditions.checkNotNull; | 16 | import static com.google.common.base.Preconditions.checkNotNull; |
... | @@ -170,7 +171,7 @@ public abstract class MessageStream<M extends Message> { | ... | @@ -170,7 +171,7 @@ public abstract class MessageStream<M extends Message> { |
170 | } | 171 | } |
171 | 172 | ||
172 | /** | 173 | /** |
173 | - * Reads, withouth blocking, a list of messages from the stream. | 174 | + * Reads, without blocking, a list of messages from the stream. |
174 | * The list will be empty if there were not messages pending. | 175 | * The list will be empty if there were not messages pending. |
175 | * | 176 | * |
176 | * @return list of messages or null if backing channel has been closed | 177 | * @return list of messages or null if backing channel has been closed |
... | @@ -262,7 +263,7 @@ public abstract class MessageStream<M extends Message> { | ... | @@ -262,7 +263,7 @@ public abstract class MessageStream<M extends Message> { |
262 | try { | 263 | try { |
263 | channel.write(outbound); | 264 | channel.write(outbound); |
264 | } catch (IOException e) { | 265 | } catch (IOException e) { |
265 | - if (!closed && !e.getMessage().equals("Broken pipe")) { | 266 | + if (!closed && !Objects.equals(e.getMessage(), "Broken pipe")) { |
266 | log.warn("Unable to write data", e); | 267 | log.warn("Unable to write data", e); |
267 | ioError = e; | 268 | ioError = e; |
268 | } | 269 | } | ... | ... |
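Besides the "withouth" typo fix, the substantive change here is the null-safe comparison: IOException.getMessage() can legitimately be null, in which case the old e.getMessage().equals("Broken pipe") would itself throw a NullPointerException inside the catch block; Objects.equals tolerates the null. A tiny self-contained illustration (class name is made up):

    import java.io.IOException;
    import java.util.Objects;

    public class BrokenPipeCheck {
        public static void main(String[] args) {
            IOException noMessage = new IOException();   // getMessage() returns null
            // noMessage.getMessage().equals("Broken pipe") would throw a NullPointerException here
            System.out.println(Objects.equals(noMessage.getMessage(), "Broken pipe")); // prints false
        }
    }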
... | @@ -230,7 +230,7 @@ public class IOLoopTestClient { | ... | @@ -230,7 +230,7 @@ public class IOLoopTestClient { |
230 | } | 230 | } |
231 | 231 | ||
232 | @Override | 232 | @Override |
233 | - protected void connect(SelectionKey key) { | 233 | + protected void connect(SelectionKey key) throws IOException { |
234 | super.connect(key); | 234 | super.connect(key); |
235 | TestMessageStream b = (TestMessageStream) key.attachment(); | 235 | TestMessageStream b = (TestMessageStream) key.attachment(); |
236 | Worker w = ((CustomIOLoop) b.loop()).worker; | 236 | Worker w = ((CustomIOLoop) b.loop()).worker; | ... | ... |