remove Hazelcast based Device, Link, .. Store implementation bundle
Change-Id: I352ebaed2d51b51201a8f3abc609be7c793cc3be
Showing 13 changed files with 0 additions and 2596 deletions
core/store/hz/net/pom.xml (deleted, 100644 → 0)
1 | -<?xml version="1.0" encoding="UTF-8"?> | ||
2 | -<project xmlns="http://maven.apache.org/POM/4.0.0" | ||
3 | - xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" | ||
4 | - xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> | ||
5 | - <modelVersion>4.0.0</modelVersion> | ||
6 | - | ||
7 | - <parent> | ||
8 | - <groupId>org.onlab.onos</groupId> | ||
9 | - <artifactId>onos-core-hz</artifactId> | ||
10 | - <version>1.0.0-SNAPSHOT</version> | ||
11 | - <relativePath>../pom.xml</relativePath> | ||
12 | - </parent> | ||
13 | - | ||
14 | - <artifactId>onos-core-hz-net</artifactId> | ||
15 | - <packaging>bundle</packaging> | ||
16 | - | ||
17 | - <description>ONOS Hazelcast based distributed store subsystems</description> | ||
18 | - | ||
19 | - <dependencies> | ||
20 | - <dependency> | ||
21 | - <groupId>org.onlab.onos</groupId> | ||
22 | - <artifactId>onos-api</artifactId> | ||
23 | - </dependency> | ||
24 | - <dependency> | ||
25 | - <groupId>org.onlab.onos</groupId> | ||
26 | - <artifactId>onos-core-hz-common</artifactId> | ||
27 | - <version>${project.version}</version> | ||
28 | - </dependency> | ||
29 | - <dependency> | ||
30 | - <groupId>org.onlab.onos</groupId> | ||
31 | - <artifactId>onos-core-hz-common</artifactId> | ||
32 | - <classifier>tests</classifier> | ||
33 | - <scope>test</scope> | ||
34 | - <version>${project.version}</version> | ||
35 | - </dependency> | ||
36 | - <dependency> | ||
37 | - <groupId>org.apache.felix</groupId> | ||
38 | - <artifactId>org.apache.felix.scr.annotations</artifactId> | ||
39 | - </dependency> | ||
40 | - <dependency> | ||
41 | - <groupId>com.hazelcast</groupId> | ||
42 | - <artifactId>hazelcast</artifactId> | ||
43 | - </dependency> | ||
44 | - </dependencies> | ||
45 | - | ||
46 | - <build> | ||
47 | - <plugins> | ||
48 | - <plugin> | ||
49 | - <groupId>org.apache.felix</groupId> | ||
50 | - <artifactId>maven-scr-plugin</artifactId> | ||
51 | - </plugin> | ||
52 | - </plugins> | ||
53 | - </build> | ||
54 | - | ||
55 | -</project> |
core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/DistributedDeviceStore.java (deleted, 100644 → 0)
1 | -package org.onlab.onos.store.device.impl; | ||
2 | - | ||
3 | -import static com.google.common.base.Predicates.notNull; | ||
4 | - | ||
5 | -import com.google.common.base.Optional; | ||
6 | -import com.google.common.cache.LoadingCache; | ||
7 | -import com.google.common.collect.FluentIterable; | ||
8 | -import com.google.common.collect.ImmutableList; | ||
9 | -import com.google.common.collect.ImmutableSet; | ||
10 | -import com.google.common.collect.ImmutableSet.Builder; | ||
11 | -import com.hazelcast.core.IMap; | ||
12 | -import com.hazelcast.core.ISet; | ||
13 | - | ||
14 | -import org.apache.felix.scr.annotations.Activate; | ||
15 | -import org.apache.felix.scr.annotations.Component; | ||
16 | -import org.apache.felix.scr.annotations.Deactivate; | ||
17 | -import org.apache.felix.scr.annotations.Service; | ||
18 | -import org.onlab.onos.net.DefaultDevice; | ||
19 | -import org.onlab.onos.net.DefaultPort; | ||
20 | -import org.onlab.onos.net.Device; | ||
21 | -import org.onlab.onos.net.DeviceId; | ||
22 | -import org.onlab.onos.net.Port; | ||
23 | -import org.onlab.onos.net.PortNumber; | ||
24 | -import org.onlab.onos.net.device.DeviceDescription; | ||
25 | -import org.onlab.onos.net.device.DeviceEvent; | ||
26 | -import org.onlab.onos.net.device.DeviceStore; | ||
27 | -import org.onlab.onos.net.device.DeviceStoreDelegate; | ||
28 | -import org.onlab.onos.net.device.PortDescription; | ||
29 | -import org.onlab.onos.net.provider.ProviderId; | ||
30 | -import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache; | ||
31 | -import org.onlab.onos.store.common.AbstractHazelcastStore; | ||
32 | -import org.onlab.onos.store.common.OptionalCacheLoader; | ||
33 | -import org.slf4j.Logger; | ||
34 | - | ||
35 | -import java.util.ArrayList; | ||
36 | -import java.util.Collections; | ||
37 | -import java.util.HashMap; | ||
38 | -import java.util.HashSet; | ||
39 | -import java.util.Iterator; | ||
40 | -import java.util.List; | ||
41 | -import java.util.Map; | ||
42 | -import java.util.Objects; | ||
43 | -import java.util.Set; | ||
44 | - | ||
45 | -import static com.google.common.base.Preconditions.checkArgument; | ||
46 | -import static com.google.common.cache.CacheBuilder.newBuilder; | ||
47 | -import static org.onlab.onos.net.device.DeviceEvent.Type.*; | ||
48 | -import static org.slf4j.LoggerFactory.getLogger; | ||
49 | - | ||
50 | -//TODO: Add support for multiple provider and annotations | ||
51 | -/** | ||
52 | - * Manages inventory of infrastructure devices using Hazelcast-backed map. | ||
53 | - */ | ||
54 | -@Component(immediate = true) | ||
55 | -@Service | ||
56 | -public class DistributedDeviceStore | ||
57 | - extends AbstractHazelcastStore<DeviceEvent, DeviceStoreDelegate> | ||
58 | - implements DeviceStore { | ||
59 | - | ||
60 | - private final Logger log = getLogger(getClass()); | ||
61 | - | ||
62 | - public static final String DEVICE_NOT_FOUND = "Device with ID %s not found"; | ||
63 | - | ||
64 | - // private IMap<DeviceId, DefaultDevice> cache; | ||
65 | - private IMap<byte[], byte[]> rawDevices; | ||
66 | - private LoadingCache<DeviceId, Optional<DefaultDevice>> devices; | ||
67 | - | ||
68 | - // private ISet<DeviceId> availableDevices; | ||
69 | - private ISet<byte[]> availableDevices; | ||
70 | - | ||
71 | - // TODO DevicePorts is very inefficient consider restructuring. | ||
72 | - // private IMap<DeviceId, Map<PortNumber, Port>> devicePorts; | ||
73 | - private IMap<byte[], byte[]> rawDevicePorts; | ||
74 | - private LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> devicePorts; | ||
75 | - | ||
76 | - private String devicesListener; | ||
77 | - | ||
78 | - private String portsListener; | ||
79 | - | ||
80 | - @Override | ||
81 | - @Activate | ||
82 | - public void activate() { | ||
83 | - super.activate(); | ||
84 | - | ||
85 | - // IMap event handler needs value | ||
86 | - final boolean includeValue = true; | ||
87 | - | ||
88 | - // TODO decide on Map name scheme to avoid collision | ||
89 | - rawDevices = theInstance.getMap("devices"); | ||
90 | - final OptionalCacheLoader<DeviceId, DefaultDevice> deviceLoader | ||
91 | - = new OptionalCacheLoader<>(serializer, rawDevices); | ||
92 | - devices = new AbsentInvalidatingLoadingCache<>(newBuilder().build(deviceLoader)); | ||
93 | - // refresh/populate cache based on notification from other instance | ||
94 | - devicesListener = rawDevices.addEntryListener(new RemoteDeviceEventHandler(devices), includeValue); | ||
95 | - | ||
96 | - // TODO cache availableDevices | ||
97 | - availableDevices = theInstance.getSet("availableDevices"); | ||
98 | - | ||
99 | - rawDevicePorts = theInstance.getMap("devicePorts"); | ||
100 | - final OptionalCacheLoader<DeviceId, Map<PortNumber, Port>> devicePortLoader | ||
101 | - = new OptionalCacheLoader<>(serializer, rawDevicePorts); | ||
102 | - devicePorts = new AbsentInvalidatingLoadingCache<>(newBuilder().build(devicePortLoader)); | ||
103 | - // refresh/populate cache based on notification from other instance | ||
104 | - portsListener = rawDevicePorts.addEntryListener(new RemotePortEventHandler(devicePorts), includeValue); | ||
105 | - | ||
106 | - loadDeviceCache(); | ||
107 | - loadDevicePortsCache(); | ||
108 | - | ||
109 | - log.info("Started"); | ||
110 | - } | ||
111 | - | ||
112 | - @Deactivate | ||
113 | - public void deactivate() { | ||
114 | - rawDevicePorts.removeEntryListener(portsListener); | ||
115 | - rawDevices.removeEntryListener(devicesListener); | ||
116 | - log.info("Stopped"); | ||
117 | - } | ||
118 | - | ||
119 | - @Override | ||
120 | - public int getDeviceCount() { | ||
121 | - return devices.asMap().size(); | ||
122 | - } | ||
123 | - | ||
124 | - @Override | ||
125 | - public Iterable<Device> getDevices() { | ||
126 | - // TODO builder v.s. copyOf. Guava semms to be using copyOf? | ||
127 | - Builder<Device> builder = ImmutableSet.builder(); | ||
128 | - for (Optional<DefaultDevice> e : devices.asMap().values()) { | ||
129 | - if (e.isPresent()) { | ||
130 | - builder.add(e.get()); | ||
131 | - } | ||
132 | - } | ||
133 | - return builder.build(); | ||
134 | - } | ||
135 | - | ||
136 | - private void loadDeviceCache() { | ||
137 | - for (byte[] keyBytes : rawDevices.keySet()) { | ||
138 | - final DeviceId id = deserialize(keyBytes); | ||
139 | - devices.refresh(id); | ||
140 | - } | ||
141 | - } | ||
142 | - | ||
143 | - private void loadDevicePortsCache() { | ||
144 | - for (byte[] keyBytes : rawDevicePorts.keySet()) { | ||
145 | - final DeviceId id = deserialize(keyBytes); | ||
146 | - devicePorts.refresh(id); | ||
147 | - } | ||
148 | - } | ||
149 | - | ||
150 | - @Override | ||
151 | - public Device getDevice(DeviceId deviceId) { | ||
152 | - // TODO revisit if ignoring exception is safe. | ||
153 | - return devices.getUnchecked(deviceId).orNull(); | ||
154 | - } | ||
155 | - | ||
156 | - @Override | ||
157 | - public DeviceEvent createOrUpdateDevice(ProviderId providerId, DeviceId deviceId, | ||
158 | - DeviceDescription deviceDescription) { | ||
159 | - DefaultDevice device = devices.getUnchecked(deviceId).orNull(); | ||
160 | - if (device == null) { | ||
161 | - return createDevice(providerId, deviceId, deviceDescription); | ||
162 | - } | ||
163 | - return updateDevice(providerId, device, deviceDescription); | ||
164 | - } | ||
165 | - | ||
166 | - // Creates the device and returns the appropriate event if necessary. | ||
167 | - private DeviceEvent createDevice(ProviderId providerId, DeviceId deviceId, | ||
168 | - DeviceDescription desc) { | ||
169 | - DefaultDevice device = new DefaultDevice(providerId, deviceId, desc.type(), | ||
170 | - desc.manufacturer(), | ||
171 | - desc.hwVersion(), desc.swVersion(), | ||
172 | - desc.serialNumber()); | ||
173 | - | ||
174 | - synchronized (this) { | ||
175 | - final byte[] deviceIdBytes = serialize(deviceId); | ||
176 | - rawDevices.put(deviceIdBytes, serialize(device)); | ||
177 | - devices.put(deviceId, Optional.of(device)); | ||
178 | - | ||
179 | - availableDevices.add(deviceIdBytes); | ||
180 | - } | ||
181 | - return new DeviceEvent(DEVICE_ADDED, device, null); | ||
182 | - } | ||
183 | - | ||
184 | - // Updates the device and returns the appropriate event if necessary. | ||
185 | - private DeviceEvent updateDevice(ProviderId providerId, DefaultDevice device, | ||
186 | - DeviceDescription desc) { | ||
187 | - // We allow only certain attributes to trigger update | ||
188 | - if (!Objects.equals(device.hwVersion(), desc.hwVersion()) || | ||
189 | - !Objects.equals(device.swVersion(), desc.swVersion())) { | ||
190 | - | ||
191 | - DefaultDevice updated = new DefaultDevice(providerId, device.id(), | ||
192 | - desc.type(), | ||
193 | - desc.manufacturer(), | ||
194 | - desc.hwVersion(), | ||
195 | - desc.swVersion(), | ||
196 | - desc.serialNumber()); | ||
197 | - synchronized (this) { | ||
198 | - final byte[] deviceIdBytes = serialize(device.id()); | ||
199 | - rawDevices.put(deviceIdBytes, serialize(updated)); | ||
200 | - devices.put(device.id(), Optional.of(updated)); | ||
201 | - availableDevices.add(serialize(device.id())); | ||
202 | - } | ||
203 | - return new DeviceEvent(DeviceEvent.Type.DEVICE_UPDATED, updated, null); | ||
204 | - } | ||
205 | - | ||
206 | - // Otherwise merely attempt to change availability | ||
207 | - synchronized (this) { | ||
208 | - boolean added = availableDevices.add(serialize(device.id())); | ||
209 | - return !added ? null : | ||
210 | - new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, device, null); | ||
211 | - } | ||
212 | - } | ||
213 | - | ||
214 | - @Override | ||
215 | - public DeviceEvent markOffline(DeviceId deviceId) { | ||
216 | - synchronized (this) { | ||
217 | - Device device = devices.getUnchecked(deviceId).orNull(); | ||
218 | - boolean removed = device != null && availableDevices.remove(serialize(deviceId)); | ||
219 | - return !removed ? null : | ||
220 | - new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, device, null); | ||
221 | - } | ||
222 | - } | ||
223 | - | ||
224 | - @Override | ||
225 | - public List<DeviceEvent> updatePorts(ProviderId providerId, DeviceId deviceId, | ||
226 | - List<PortDescription> portDescriptions) { | ||
227 | - List<DeviceEvent> events = new ArrayList<>(); | ||
228 | - synchronized (this) { | ||
229 | - Device device = devices.getUnchecked(deviceId).orNull(); | ||
230 | - checkArgument(device != null, DEVICE_NOT_FOUND, deviceId); | ||
231 | - Map<PortNumber, Port> ports = getPortMap(deviceId); | ||
232 | - | ||
233 | - // Add new ports | ||
234 | - Set<PortNumber> processed = new HashSet<>(); | ||
235 | - for (PortDescription portDescription : portDescriptions) { | ||
236 | - Port port = ports.get(portDescription.portNumber()); | ||
237 | - events.add(port == null ? | ||
238 | - createPort(device, portDescription, ports) : | ||
239 | - updatePort(device, port, portDescription, ports)); | ||
240 | - processed.add(portDescription.portNumber()); | ||
241 | - } | ||
242 | - | ||
243 | - updatePortMap(deviceId, ports); | ||
244 | - | ||
245 | - events.addAll(pruneOldPorts(device, ports, processed)); | ||
246 | - } | ||
247 | - return FluentIterable.from(events).filter(notNull()).toList(); | ||
248 | - } | ||
249 | - | ||
250 | - // Creates a new port based on the port description adds it to the map and | ||
251 | - // Returns corresponding event. | ||
252 | - //@GuardedBy("this") | ||
253 | - private DeviceEvent createPort(Device device, PortDescription portDescription, | ||
254 | - Map<PortNumber, Port> ports) { | ||
255 | - DefaultPort port = new DefaultPort(device, portDescription.portNumber(), | ||
256 | - portDescription.isEnabled()); | ||
257 | - ports.put(port.number(), port); | ||
258 | - updatePortMap(device.id(), ports); | ||
259 | - return new DeviceEvent(PORT_ADDED, device, port); | ||
260 | - } | ||
261 | - | ||
262 | - // Checks if the specified port requires update and if so, it replaces the | ||
263 | - // existing entry in the map and returns corresponding event. | ||
264 | - //@GuardedBy("this") | ||
265 | - private DeviceEvent updatePort(Device device, Port port, | ||
266 | - PortDescription portDescription, | ||
267 | - Map<PortNumber, Port> ports) { | ||
268 | - if (port.isEnabled() != portDescription.isEnabled()) { | ||
269 | - DefaultPort updatedPort = | ||
270 | - new DefaultPort(device, portDescription.portNumber(), | ||
271 | - portDescription.isEnabled()); | ||
272 | - ports.put(port.number(), updatedPort); | ||
273 | - updatePortMap(device.id(), ports); | ||
274 | - return new DeviceEvent(PORT_UPDATED, device, updatedPort); | ||
275 | - } | ||
276 | - return null; | ||
277 | - } | ||
278 | - | ||
279 | - // Prunes the specified list of ports based on which ports are in the | ||
280 | - // processed list and returns list of corresponding events. | ||
281 | - //@GuardedBy("this") | ||
282 | - private List<DeviceEvent> pruneOldPorts(Device device, | ||
283 | - Map<PortNumber, Port> ports, | ||
284 | - Set<PortNumber> processed) { | ||
285 | - List<DeviceEvent> events = new ArrayList<>(); | ||
286 | - Iterator<PortNumber> iterator = ports.keySet().iterator(); | ||
287 | - while (iterator.hasNext()) { | ||
288 | - PortNumber portNumber = iterator.next(); | ||
289 | - if (!processed.contains(portNumber)) { | ||
290 | - events.add(new DeviceEvent(PORT_REMOVED, device, | ||
291 | - ports.get(portNumber))); | ||
292 | - iterator.remove(); | ||
293 | - } | ||
294 | - } | ||
295 | - if (!events.isEmpty()) { | ||
296 | - updatePortMap(device.id(), ports); | ||
297 | - } | ||
298 | - return events; | ||
299 | - } | ||
300 | - | ||
301 | - // Gets the map of ports for the specified device; if one does not already | ||
302 | - // exist, it creates and registers a new one. | ||
303 | - // WARN: returned value is a copy, changes made to the Map | ||
304 | - // needs to be written back using updatePortMap | ||
305 | - //@GuardedBy("this") | ||
306 | - private Map<PortNumber, Port> getPortMap(DeviceId deviceId) { | ||
307 | - Map<PortNumber, Port> ports = devicePorts.getUnchecked(deviceId).orNull(); | ||
308 | - if (ports == null) { | ||
309 | - ports = new HashMap<>(); | ||
310 | - // this probably is waste of time in most cases. | ||
311 | - updatePortMap(deviceId, ports); | ||
312 | - } | ||
313 | - return ports; | ||
314 | - } | ||
315 | - | ||
316 | - //@GuardedBy("this") | ||
317 | - private void updatePortMap(DeviceId deviceId, Map<PortNumber, Port> ports) { | ||
318 | - rawDevicePorts.put(serialize(deviceId), serialize(ports)); | ||
319 | - devicePorts.put(deviceId, Optional.of(ports)); | ||
320 | - } | ||
321 | - | ||
322 | - @Override | ||
323 | - public DeviceEvent updatePortStatus(ProviderId providerId, DeviceId deviceId, | ||
324 | - PortDescription portDescription) { | ||
325 | - synchronized (this) { | ||
326 | - Device device = devices.getUnchecked(deviceId).orNull(); | ||
327 | - checkArgument(device != null, DEVICE_NOT_FOUND, deviceId); | ||
328 | - Map<PortNumber, Port> ports = getPortMap(deviceId); | ||
329 | - Port port = ports.get(portDescription.portNumber()); | ||
330 | - return updatePort(device, port, portDescription, ports); | ||
331 | - } | ||
332 | - } | ||
333 | - | ||
334 | - @Override | ||
335 | - public List<Port> getPorts(DeviceId deviceId) { | ||
336 | - Map<PortNumber, Port> ports = devicePorts.getUnchecked(deviceId).orNull(); | ||
337 | - return ports == null ? Collections.<Port>emptyList() : ImmutableList.copyOf(ports.values()); | ||
338 | - } | ||
339 | - | ||
340 | - @Override | ||
341 | - public Port getPort(DeviceId deviceId, PortNumber portNumber) { | ||
342 | - Map<PortNumber, Port> ports = devicePorts.getUnchecked(deviceId).orNull(); | ||
343 | - return ports == null ? null : ports.get(portNumber); | ||
344 | - } | ||
345 | - | ||
346 | - @Override | ||
347 | - public boolean isAvailable(DeviceId deviceId) { | ||
348 | - return availableDevices.contains(serialize(deviceId)); | ||
349 | - } | ||
350 | - | ||
351 | - @Override | ||
352 | - public DeviceEvent removeDevice(DeviceId deviceId) { | ||
353 | - synchronized (this) { | ||
354 | - byte[] deviceIdBytes = serialize(deviceId); | ||
355 | - | ||
356 | - // TODO conditional remove? | ||
357 | - Device device = deserialize(rawDevices.remove(deviceIdBytes)); | ||
358 | - devices.invalidate(deviceId); | ||
359 | - return device == null ? null : | ||
360 | - new DeviceEvent(DEVICE_REMOVED, device, null); | ||
361 | - } | ||
362 | - } | ||
363 | - | ||
364 | - private class RemoteDeviceEventHandler extends RemoteCacheEventHandler<DeviceId, DefaultDevice> { | ||
365 | - public RemoteDeviceEventHandler(LoadingCache<DeviceId, Optional<DefaultDevice>> cache) { | ||
366 | - super(cache); | ||
367 | - } | ||
368 | - | ||
369 | - @Override | ||
370 | - protected void onAdd(DeviceId deviceId, DefaultDevice device) { | ||
371 | - notifyDelegate(new DeviceEvent(DEVICE_ADDED, device)); | ||
372 | - } | ||
373 | - | ||
374 | - @Override | ||
375 | - protected void onRemove(DeviceId deviceId, DefaultDevice device) { | ||
376 | - notifyDelegate(new DeviceEvent(DEVICE_REMOVED, device)); | ||
377 | - } | ||
378 | - | ||
379 | - @Override | ||
380 | - protected void onUpdate(DeviceId deviceId, DefaultDevice oldDevice, DefaultDevice device) { | ||
381 | - notifyDelegate(new DeviceEvent(DEVICE_UPDATED, device)); | ||
382 | - } | ||
383 | - } | ||
384 | - | ||
385 | - private class RemotePortEventHandler extends RemoteCacheEventHandler<DeviceId, Map<PortNumber, Port>> { | ||
386 | - public RemotePortEventHandler(LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> cache) { | ||
387 | - super(cache); | ||
388 | - } | ||
389 | - | ||
390 | - @Override | ||
391 | - protected void onAdd(DeviceId deviceId, Map<PortNumber, Port> ports) { | ||
392 | -// notifyDelegate(new DeviceEvent(PORT_ADDED, getDevice(deviceId))); | ||
393 | - } | ||
394 | - | ||
395 | - @Override | ||
396 | - protected void onRemove(DeviceId deviceId, Map<PortNumber, Port> ports) { | ||
397 | -// notifyDelegate(new DeviceEvent(PORT_REMOVED, getDevice(deviceId))); | ||
398 | - } | ||
399 | - | ||
400 | - @Override | ||
401 | - protected void onUpdate(DeviceId deviceId, Map<PortNumber, Port> oldPorts, Map<PortNumber, Port> ports) { | ||
402 | -// notifyDelegate(new DeviceEvent(PORT_UPDATED, getDevice(deviceId))); | ||
403 | - } | ||
404 | - } | ||
405 | - | ||
406 | - | ||
407 | - // TODO cache serialized DeviceID if we suffer from serialization cost | ||
408 | -} |
core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/NoOpClockProviderService.java (deleted, 100644 → 0)
1 | -package org.onlab.onos.store.device.impl; | ||
2 | - | ||
3 | -import org.apache.felix.scr.annotations.Component; | ||
4 | -import org.apache.felix.scr.annotations.Service; | ||
5 | -import org.onlab.onos.mastership.MastershipTerm; | ||
6 | -import org.onlab.onos.net.DeviceId; | ||
7 | -import org.onlab.onos.net.device.DeviceClockProviderService; | ||
8 | - | ||
9 | -// FIXME: Code clone in onos-core-trivial, onos-core-hz-net | ||
10 | -/** | ||
11 | - * Dummy implementation of {@link DeviceClockProviderService}. | ||
12 | - */ | ||
13 | -@Component(immediate = true) | ||
14 | -@Service | ||
15 | -public class NoOpClockProviderService implements DeviceClockProviderService { | ||
16 | - | ||
17 | - @Override | ||
18 | - public void setMastershipTerm(DeviceId deviceId, MastershipTerm term) { | ||
19 | - } | ||
20 | -} |
core/store/hz/net/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java (deleted, 100644 → 0)
1 | -package org.onlab.onos.store.flow.impl; | ||
2 | - | ||
3 | -import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_REMOVED; | ||
4 | -import static org.slf4j.LoggerFactory.getLogger; | ||
5 | - | ||
6 | -import java.util.Collection; | ||
7 | -import java.util.Collections; | ||
8 | - | ||
9 | -import org.apache.felix.scr.annotations.Activate; | ||
10 | -import org.apache.felix.scr.annotations.Component; | ||
11 | -import org.apache.felix.scr.annotations.Deactivate; | ||
12 | -import org.apache.felix.scr.annotations.Service; | ||
13 | -import org.onlab.onos.ApplicationId; | ||
14 | -import org.onlab.onos.net.DeviceId; | ||
15 | -import org.onlab.onos.net.flow.DefaultFlowEntry; | ||
16 | -import org.onlab.onos.net.flow.FlowEntry; | ||
17 | -import org.onlab.onos.net.flow.FlowEntry.FlowEntryState; | ||
18 | -import org.onlab.onos.net.flow.FlowRule; | ||
19 | -import org.onlab.onos.net.flow.FlowRuleEvent; | ||
20 | -import org.onlab.onos.net.flow.FlowRuleEvent.Type; | ||
21 | -import org.onlab.onos.net.flow.FlowRuleStore; | ||
22 | -import org.onlab.onos.net.flow.FlowRuleStoreDelegate; | ||
23 | -import org.onlab.onos.store.AbstractStore; | ||
24 | -import org.slf4j.Logger; | ||
25 | - | ||
26 | -import com.google.common.collect.ArrayListMultimap; | ||
27 | -import com.google.common.collect.ImmutableSet; | ||
28 | -import com.google.common.collect.Multimap; | ||
29 | - | ||
30 | -/** | ||
31 | - * Manages inventory of flow rules using trivial in-memory implementation. | ||
32 | - */ | ||
33 | -//FIXME I LIE. I AIN'T DISTRIBUTED | ||
34 | -@Component(immediate = true) | ||
35 | -@Service | ||
36 | -public class DistributedFlowRuleStore | ||
37 | - extends AbstractStore<FlowRuleEvent, FlowRuleStoreDelegate> | ||
38 | - implements FlowRuleStore { | ||
39 | - | ||
40 | - private final Logger log = getLogger(getClass()); | ||
41 | - | ||
42 | - // store entries as a pile of rules, no info about device tables | ||
43 | - private final Multimap<DeviceId, FlowEntry> flowEntries = | ||
44 | - ArrayListMultimap.<DeviceId, FlowEntry>create(); | ||
45 | - | ||
46 | - private final Multimap<Short, FlowRule> flowEntriesById = | ||
47 | - ArrayListMultimap.<Short, FlowRule>create(); | ||
48 | - | ||
49 | - @Activate | ||
50 | - public void activate() { | ||
51 | - log.info("Started"); | ||
52 | - } | ||
53 | - | ||
54 | - @Deactivate | ||
55 | - public void deactivate() { | ||
56 | - log.info("Stopped"); | ||
57 | - } | ||
58 | - | ||
59 | - | ||
60 | - @Override | ||
61 | - public int getFlowRuleCount() { | ||
62 | - return flowEntries.size(); | ||
63 | - } | ||
64 | - | ||
65 | - @Override | ||
66 | - public synchronized FlowEntry getFlowEntry(FlowRule rule) { | ||
67 | - for (FlowEntry f : flowEntries.get(rule.deviceId())) { | ||
68 | - if (f.equals(rule)) { | ||
69 | - return f; | ||
70 | - } | ||
71 | - } | ||
72 | - return null; | ||
73 | - } | ||
74 | - | ||
75 | - @Override | ||
76 | - public synchronized Iterable<FlowEntry> getFlowEntries(DeviceId deviceId) { | ||
77 | - Collection<FlowEntry> rules = flowEntries.get(deviceId); | ||
78 | - if (rules == null) { | ||
79 | - return Collections.emptyList(); | ||
80 | - } | ||
81 | - return ImmutableSet.copyOf(rules); | ||
82 | - } | ||
83 | - | ||
84 | - @Override | ||
85 | - public synchronized Iterable<FlowRule> getFlowRulesByAppId(ApplicationId appId) { | ||
86 | - Collection<FlowRule> rules = flowEntriesById.get(appId.id()); | ||
87 | - if (rules == null) { | ||
88 | - return Collections.emptyList(); | ||
89 | - } | ||
90 | - return ImmutableSet.copyOf(rules); | ||
91 | - } | ||
92 | - | ||
93 | - @Override | ||
94 | - public synchronized void storeFlowRule(FlowRule rule) { | ||
95 | - FlowEntry f = new DefaultFlowEntry(rule); | ||
96 | - DeviceId did = f.deviceId(); | ||
97 | - if (!flowEntries.containsEntry(did, f)) { | ||
98 | - flowEntries.put(did, f); | ||
99 | - flowEntriesById.put(rule.appId(), f); | ||
100 | - } | ||
101 | - } | ||
102 | - | ||
103 | - @Override | ||
104 | - public synchronized void deleteFlowRule(FlowRule rule) { | ||
105 | - FlowEntry entry = getFlowEntry(rule); | ||
106 | - if (entry == null) { | ||
107 | - return; | ||
108 | - } | ||
109 | - entry.setState(FlowEntryState.PENDING_REMOVE); | ||
110 | - } | ||
111 | - | ||
112 | - @Override | ||
113 | - public synchronized FlowRuleEvent addOrUpdateFlowRule(FlowEntry rule) { | ||
114 | - DeviceId did = rule.deviceId(); | ||
115 | - | ||
116 | - // check if this new rule is an update to an existing entry | ||
117 | - FlowEntry stored = getFlowEntry(rule); | ||
118 | - if (stored != null) { | ||
119 | - stored.setBytes(rule.bytes()); | ||
120 | - stored.setLife(rule.life()); | ||
121 | - stored.setPackets(rule.packets()); | ||
122 | - if (stored.state() == FlowEntryState.PENDING_ADD) { | ||
123 | - stored.setState(FlowEntryState.ADDED); | ||
124 | - return new FlowRuleEvent(Type.RULE_ADDED, rule); | ||
125 | - } | ||
126 | - return new FlowRuleEvent(Type.RULE_UPDATED, rule); | ||
127 | - } | ||
128 | - | ||
129 | - flowEntries.put(did, rule); | ||
130 | - return null; | ||
131 | - } | ||
132 | - | ||
133 | - @Override | ||
134 | - public synchronized FlowRuleEvent removeFlowRule(FlowEntry rule) { | ||
135 | - // This is where one could mark a rule as removed and still keep it in the store. | ||
136 | - if (flowEntries.remove(rule.deviceId(), rule)) { | ||
137 | - return new FlowRuleEvent(RULE_REMOVED, rule); | ||
138 | - } else { | ||
139 | - return null; | ||
140 | - } | ||
141 | - } | ||
142 | -} |
core/store/hz/net/src/main/java/org/onlab/onos/store/host/impl/DistributedHostStore.java (deleted, 100644 → 0)
1 | -package org.onlab.onos.store.host.impl; | ||
2 | - | ||
3 | -import com.google.common.collect.HashMultimap; | ||
4 | -import com.google.common.collect.ImmutableSet; | ||
5 | -import com.google.common.collect.Multimap; | ||
6 | -import com.google.common.collect.Sets; | ||
7 | -import org.apache.felix.scr.annotations.Activate; | ||
8 | -import org.apache.felix.scr.annotations.Component; | ||
9 | -import org.apache.felix.scr.annotations.Deactivate; | ||
10 | -import org.apache.felix.scr.annotations.Service; | ||
11 | -import org.onlab.onos.net.Annotations; | ||
12 | -import org.onlab.onos.net.ConnectPoint; | ||
13 | -import org.onlab.onos.net.DefaultHost; | ||
14 | -import org.onlab.onos.net.DeviceId; | ||
15 | -import org.onlab.onos.net.Host; | ||
16 | -import org.onlab.onos.net.HostId; | ||
17 | -import org.onlab.onos.net.HostLocation; | ||
18 | -import org.onlab.onos.net.host.HostDescription; | ||
19 | -import org.onlab.onos.net.host.HostEvent; | ||
20 | -import org.onlab.onos.net.host.HostStore; | ||
21 | -import org.onlab.onos.net.host.HostStoreDelegate; | ||
22 | -import org.onlab.onos.net.host.PortAddresses; | ||
23 | -import org.onlab.onos.net.provider.ProviderId; | ||
24 | -import org.onlab.onos.store.AbstractStore; | ||
25 | -import org.onlab.packet.IpPrefix; | ||
26 | -import org.onlab.packet.MacAddress; | ||
27 | -import org.onlab.packet.VlanId; | ||
28 | -import org.slf4j.Logger; | ||
29 | - | ||
30 | -import java.util.HashSet; | ||
31 | -import java.util.Map; | ||
32 | -import java.util.Set; | ||
33 | -import java.util.concurrent.ConcurrentHashMap; | ||
34 | - | ||
35 | -import static org.onlab.onos.net.host.HostEvent.Type.*; | ||
36 | -import static org.slf4j.LoggerFactory.getLogger; | ||
37 | - | ||
38 | -/** | ||
39 | - * TEMPORARY: Manages inventory of end-station hosts using distributed | ||
40 | - * structures implementation. | ||
41 | - */ | ||
42 | -//FIXME: I LIE I AM NOT DISTRIBUTED | ||
43 | -@Component(immediate = true) | ||
44 | -@Service | ||
45 | -public class DistributedHostStore | ||
46 | - extends AbstractStore<HostEvent, HostStoreDelegate> | ||
47 | - implements HostStore { | ||
48 | - | ||
49 | - private final Logger log = getLogger(getClass()); | ||
50 | - | ||
51 | - // Host inventory | ||
52 | - private final Map<HostId, StoredHost> hosts = new ConcurrentHashMap<>(2000000, 0.75f, 16); | ||
53 | - | ||
54 | - // Hosts tracked by their location | ||
55 | - private final Multimap<ConnectPoint, Host> locations = HashMultimap.create(); | ||
56 | - | ||
57 | - private final Map<ConnectPoint, PortAddresses> portAddresses = | ||
58 | - new ConcurrentHashMap<>(); | ||
59 | - | ||
60 | - @Activate | ||
61 | - public void activate() { | ||
62 | - log.info("Started"); | ||
63 | - } | ||
64 | - | ||
65 | - @Deactivate | ||
66 | - public void deactivate() { | ||
67 | - log.info("Stopped"); | ||
68 | - } | ||
69 | - | ||
70 | - @Override | ||
71 | - public HostEvent createOrUpdateHost(ProviderId providerId, HostId hostId, | ||
72 | - HostDescription hostDescription) { | ||
73 | - StoredHost host = hosts.get(hostId); | ||
74 | - if (host == null) { | ||
75 | - return createHost(providerId, hostId, hostDescription); | ||
76 | - } | ||
77 | - return updateHost(providerId, host, hostDescription); | ||
78 | - } | ||
79 | - | ||
80 | - // creates a new host and sends HOST_ADDED | ||
81 | - private HostEvent createHost(ProviderId providerId, HostId hostId, | ||
82 | - HostDescription descr) { | ||
83 | - StoredHost newhost = new StoredHost(providerId, hostId, | ||
84 | - descr.hwAddress(), | ||
85 | - descr.vlan(), | ||
86 | - descr.location(), | ||
87 | - ImmutableSet.copyOf(descr.ipAddress())); | ||
88 | - synchronized (this) { | ||
89 | - hosts.put(hostId, newhost); | ||
90 | - locations.put(descr.location(), newhost); | ||
91 | - } | ||
92 | - return new HostEvent(HOST_ADDED, newhost); | ||
93 | - } | ||
94 | - | ||
95 | - // checks for type of update to host, sends appropriate event | ||
96 | - private HostEvent updateHost(ProviderId providerId, StoredHost host, | ||
97 | - HostDescription descr) { | ||
98 | - HostEvent event; | ||
99 | - if (!host.location().equals(descr.location())) { | ||
100 | - host.setLocation(descr.location()); | ||
101 | - return new HostEvent(HOST_MOVED, host); | ||
102 | - } | ||
103 | - | ||
104 | - if (host.ipAddresses().containsAll(descr.ipAddress())) { | ||
105 | - return null; | ||
106 | - } | ||
107 | - | ||
108 | - Set<IpPrefix> addresses = new HashSet<>(host.ipAddresses()); | ||
109 | - addresses.addAll(descr.ipAddress()); | ||
110 | - StoredHost updated = new StoredHost(providerId, host.id(), | ||
111 | - host.mac(), host.vlan(), | ||
112 | - descr.location(), addresses); | ||
113 | - event = new HostEvent(HOST_UPDATED, updated); | ||
114 | - synchronized (this) { | ||
115 | - hosts.put(host.id(), updated); | ||
116 | - locations.remove(host.location(), host); | ||
117 | - locations.put(updated.location(), updated); | ||
118 | - } | ||
119 | - return event; | ||
120 | - } | ||
121 | - | ||
122 | - @Override | ||
123 | - public HostEvent removeHost(HostId hostId) { | ||
124 | - synchronized (this) { | ||
125 | - Host host = hosts.remove(hostId); | ||
126 | - if (host != null) { | ||
127 | - locations.remove((host.location()), host); | ||
128 | - return new HostEvent(HOST_REMOVED, host); | ||
129 | - } | ||
130 | - return null; | ||
131 | - } | ||
132 | - } | ||
133 | - | ||
134 | - @Override | ||
135 | - public int getHostCount() { | ||
136 | - return hosts.size(); | ||
137 | - } | ||
138 | - | ||
139 | - @Override | ||
140 | - public Iterable<Host> getHosts() { | ||
141 | - return ImmutableSet.<Host>copyOf(hosts.values()); | ||
142 | - } | ||
143 | - | ||
144 | - @Override | ||
145 | - public Host getHost(HostId hostId) { | ||
146 | - return hosts.get(hostId); | ||
147 | - } | ||
148 | - | ||
149 | - @Override | ||
150 | - public Set<Host> getHosts(VlanId vlanId) { | ||
151 | - Set<Host> vlanset = new HashSet<>(); | ||
152 | - for (Host h : hosts.values()) { | ||
153 | - if (h.vlan().equals(vlanId)) { | ||
154 | - vlanset.add(h); | ||
155 | - } | ||
156 | - } | ||
157 | - return vlanset; | ||
158 | - } | ||
159 | - | ||
160 | - @Override | ||
161 | - public Set<Host> getHosts(MacAddress mac) { | ||
162 | - Set<Host> macset = new HashSet<>(); | ||
163 | - for (Host h : hosts.values()) { | ||
164 | - if (h.mac().equals(mac)) { | ||
165 | - macset.add(h); | ||
166 | - } | ||
167 | - } | ||
168 | - return macset; | ||
169 | - } | ||
170 | - | ||
171 | - @Override | ||
172 | - public Set<Host> getHosts(IpPrefix ip) { | ||
173 | - Set<Host> ipset = new HashSet<>(); | ||
174 | - for (Host h : hosts.values()) { | ||
175 | - if (h.ipAddresses().contains(ip)) { | ||
176 | - ipset.add(h); | ||
177 | - } | ||
178 | - } | ||
179 | - return ipset; | ||
180 | - } | ||
181 | - | ||
182 | - @Override | ||
183 | - public Set<Host> getConnectedHosts(ConnectPoint connectPoint) { | ||
184 | - return ImmutableSet.copyOf(locations.get(connectPoint)); | ||
185 | - } | ||
186 | - | ||
187 | - @Override | ||
188 | - public Set<Host> getConnectedHosts(DeviceId deviceId) { | ||
189 | - Set<Host> hostset = new HashSet<>(); | ||
190 | - for (ConnectPoint p : locations.keySet()) { | ||
191 | - if (p.deviceId().equals(deviceId)) { | ||
192 | - hostset.addAll(locations.get(p)); | ||
193 | - } | ||
194 | - } | ||
195 | - return hostset; | ||
196 | - } | ||
197 | - | ||
198 | - @Override | ||
199 | - public void updateAddressBindings(PortAddresses addresses) { | ||
200 | - synchronized (portAddresses) { | ||
201 | - PortAddresses existing = portAddresses.get(addresses.connectPoint()); | ||
202 | - if (existing == null) { | ||
203 | - portAddresses.put(addresses.connectPoint(), addresses); | ||
204 | - } else { | ||
205 | - Set<IpPrefix> union = Sets.union(existing.ips(), addresses.ips()) | ||
206 | - .immutableCopy(); | ||
207 | - | ||
208 | - MacAddress newMac = (addresses.mac() == null) ? existing.mac() | ||
209 | - : addresses.mac(); | ||
210 | - | ||
211 | - PortAddresses newAddresses = | ||
212 | - new PortAddresses(addresses.connectPoint(), union, newMac); | ||
213 | - | ||
214 | - portAddresses.put(newAddresses.connectPoint(), newAddresses); | ||
215 | - } | ||
216 | - } | ||
217 | - } | ||
218 | - | ||
219 | - @Override | ||
220 | - public void removeAddressBindings(PortAddresses addresses) { | ||
221 | - synchronized (portAddresses) { | ||
222 | - PortAddresses existing = portAddresses.get(addresses.connectPoint()); | ||
223 | - if (existing != null) { | ||
224 | - Set<IpPrefix> difference = | ||
225 | - Sets.difference(existing.ips(), addresses.ips()).immutableCopy(); | ||
226 | - | ||
227 | - // If they removed the existing mac, set the new mac to null. | ||
228 | - // Otherwise, keep the existing mac. | ||
229 | - MacAddress newMac = existing.mac(); | ||
230 | - if (addresses.mac() != null && addresses.mac().equals(existing.mac())) { | ||
231 | - newMac = null; | ||
232 | - } | ||
233 | - | ||
234 | - PortAddresses newAddresses = | ||
235 | - new PortAddresses(addresses.connectPoint(), difference, newMac); | ||
236 | - | ||
237 | - portAddresses.put(newAddresses.connectPoint(), newAddresses); | ||
238 | - } | ||
239 | - } | ||
240 | - } | ||
241 | - | ||
242 | - @Override | ||
243 | - public void clearAddressBindings(ConnectPoint connectPoint) { | ||
244 | - synchronized (portAddresses) { | ||
245 | - portAddresses.remove(connectPoint); | ||
246 | - } | ||
247 | - } | ||
248 | - | ||
249 | - @Override | ||
250 | - public Set<PortAddresses> getAddressBindings() { | ||
251 | - synchronized (portAddresses) { | ||
252 | - return new HashSet<>(portAddresses.values()); | ||
253 | - } | ||
254 | - } | ||
255 | - | ||
256 | - @Override | ||
257 | - public PortAddresses getAddressBindingsForPort(ConnectPoint connectPoint) { | ||
258 | - PortAddresses addresses; | ||
259 | - | ||
260 | - synchronized (portAddresses) { | ||
261 | - addresses = portAddresses.get(connectPoint); | ||
262 | - } | ||
263 | - | ||
264 | - if (addresses == null) { | ||
265 | - addresses = new PortAddresses(connectPoint, null, null); | ||
266 | - } | ||
267 | - | ||
268 | - return addresses; | ||
269 | - } | ||
270 | - | ||
271 | - // Auxiliary extension to allow location to mutate. | ||
272 | - private class StoredHost extends DefaultHost { | ||
273 | - private HostLocation location; | ||
274 | - | ||
275 | - /** | ||
276 | - * Creates an end-station host using the supplied information. | ||
277 | - * | ||
278 | - * @param providerId provider identity | ||
279 | - * @param id host identifier | ||
280 | - * @param mac host MAC address | ||
281 | - * @param vlan host VLAN identifier | ||
282 | - * @param location host location | ||
283 | - * @param ips host IP addresses | ||
284 | - * @param annotations optional key/value annotations | ||
285 | - */ | ||
286 | - public StoredHost(ProviderId providerId, HostId id, | ||
287 | - MacAddress mac, VlanId vlan, HostLocation location, | ||
288 | - Set<IpPrefix> ips, Annotations... annotations) { | ||
289 | - super(providerId, id, mac, vlan, location, ips, annotations); | ||
290 | - this.location = location; | ||
291 | - } | ||
292 | - | ||
293 | - void setLocation(HostLocation location) { | ||
294 | - this.location = location; | ||
295 | - } | ||
296 | - | ||
297 | - @Override | ||
298 | - public HostLocation location() { | ||
299 | - return location; | ||
300 | - } | ||
301 | - } | ||
302 | -} |
core/store/hz/net/src/main/java/org/onlab/onos/store/link/impl/DistributedLinkStore.java (deleted, 100644 → 0)
1 | -package org.onlab.onos.store.link.impl; | ||
2 | - | ||
3 | -import static com.google.common.cache.CacheBuilder.newBuilder; | ||
4 | -import static org.onlab.onos.net.Link.Type.DIRECT; | ||
5 | -import static org.onlab.onos.net.Link.Type.INDIRECT; | ||
6 | -import static org.onlab.onos.net.LinkKey.linkKey; | ||
7 | -import static org.onlab.onos.net.link.LinkEvent.Type.LINK_ADDED; | ||
8 | -import static org.onlab.onos.net.link.LinkEvent.Type.LINK_REMOVED; | ||
9 | -import static org.onlab.onos.net.link.LinkEvent.Type.LINK_UPDATED; | ||
10 | -import static org.slf4j.LoggerFactory.getLogger; | ||
11 | - | ||
12 | -import java.util.HashSet; | ||
13 | -import java.util.Set; | ||
14 | - | ||
15 | -import org.apache.felix.scr.annotations.Activate; | ||
16 | -import org.apache.felix.scr.annotations.Component; | ||
17 | -import org.apache.felix.scr.annotations.Deactivate; | ||
18 | -import org.apache.felix.scr.annotations.Service; | ||
19 | -import org.onlab.onos.net.ConnectPoint; | ||
20 | -import org.onlab.onos.net.DefaultLink; | ||
21 | -import org.onlab.onos.net.DeviceId; | ||
22 | -import org.onlab.onos.net.Link; | ||
23 | -import org.onlab.onos.net.LinkKey; | ||
24 | -import org.onlab.onos.net.link.LinkDescription; | ||
25 | -import org.onlab.onos.net.link.LinkEvent; | ||
26 | -import org.onlab.onos.net.link.LinkStore; | ||
27 | -import org.onlab.onos.net.link.LinkStoreDelegate; | ||
28 | -import org.onlab.onos.net.provider.ProviderId; | ||
29 | -import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache; | ||
30 | -import org.onlab.onos.store.common.AbstractHazelcastStore; | ||
31 | -import org.onlab.onos.store.common.OptionalCacheLoader; | ||
32 | -import org.slf4j.Logger; | ||
33 | - | ||
34 | -import com.google.common.base.Optional; | ||
35 | -import com.google.common.cache.LoadingCache; | ||
36 | -import com.google.common.collect.HashMultimap; | ||
37 | -import com.google.common.collect.ImmutableSet; | ||
38 | -import com.google.common.collect.Multimap; | ||
39 | -import com.google.common.collect.ImmutableSet.Builder; | ||
40 | -import com.hazelcast.core.IMap; | ||
41 | - | ||
42 | -//TODO: Add support for multiple provider and annotations | ||
43 | -/** | ||
44 | - * Manages inventory of infrastructure links using Hazelcast-backed map. | ||
45 | - */ | ||
46 | -@Component(immediate = true) | ||
47 | -@Service | ||
48 | -public class DistributedLinkStore | ||
49 | - extends AbstractHazelcastStore<LinkEvent, LinkStoreDelegate> | ||
50 | - implements LinkStore { | ||
51 | - | ||
52 | - private final Logger log = getLogger(getClass()); | ||
53 | - | ||
54 | - // Link inventory | ||
55 | - private IMap<byte[], byte[]> rawLinks; | ||
56 | - private LoadingCache<LinkKey, Optional<DefaultLink>> links; | ||
57 | - | ||
58 | - // TODO synchronize? | ||
59 | - // Egress and ingress link sets | ||
60 | - private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create(); | ||
61 | - private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create(); | ||
62 | - | ||
63 | - private String linksListener; | ||
64 | - | ||
65 | - @Override | ||
66 | - @Activate | ||
67 | - public void activate() { | ||
68 | - super.activate(); | ||
69 | - | ||
70 | - boolean includeValue = true; | ||
71 | - | ||
72 | - // TODO decide on Map name scheme to avoid collision | ||
73 | - rawLinks = theInstance.getMap("links"); | ||
74 | - final OptionalCacheLoader<LinkKey, DefaultLink> linkLoader | ||
75 | - = new OptionalCacheLoader<>(serializer, rawLinks); | ||
76 | - links = new AbsentInvalidatingLoadingCache<>(newBuilder().build(linkLoader)); | ||
77 | - // refresh/populate cache based on notification from other instance | ||
78 | - linksListener = rawLinks.addEntryListener(new RemoteLinkEventHandler(links), includeValue); | ||
79 | - | ||
80 | - loadLinkCache(); | ||
81 | - | ||
82 | - log.info("Started"); | ||
83 | - } | ||
84 | - | ||
85 | - @Deactivate | ||
86 | - public void deactivate() { | ||
87 | - rawLinks.removeEntryListener(linksListener); | ||
88 | - log.info("Stopped"); | ||
89 | - } | ||
90 | - | ||
91 | - private void loadLinkCache() { | ||
92 | - for (byte[] keyBytes : rawLinks.keySet()) { | ||
93 | - final LinkKey id = deserialize(keyBytes); | ||
94 | - links.refresh(id); | ||
95 | - } | ||
96 | - } | ||
97 | - | ||
98 | - @Override | ||
99 | - public int getLinkCount() { | ||
100 | - return links.asMap().size(); | ||
101 | - } | ||
102 | - | ||
103 | - @Override | ||
104 | - public Iterable<Link> getLinks() { | ||
105 | - Builder<Link> builder = ImmutableSet.builder(); | ||
106 | - for (Optional<DefaultLink> e : links.asMap().values()) { | ||
107 | - if (e.isPresent()) { | ||
108 | - builder.add(e.get()); | ||
109 | - } | ||
110 | - } | ||
111 | - return builder.build(); | ||
112 | - } | ||
113 | - | ||
114 | - @Override | ||
115 | - public Set<Link> getDeviceEgressLinks(DeviceId deviceId) { | ||
116 | - return ImmutableSet.copyOf(srcLinks.get(deviceId)); | ||
117 | - } | ||
118 | - | ||
119 | - @Override | ||
120 | - public Set<Link> getDeviceIngressLinks(DeviceId deviceId) { | ||
121 | - return ImmutableSet.copyOf(dstLinks.get(deviceId)); | ||
122 | - } | ||
123 | - | ||
124 | - @Override | ||
125 | - public Link getLink(ConnectPoint src, ConnectPoint dst) { | ||
126 | - return links.getUnchecked(linkKey(src, dst)).orNull(); | ||
127 | - } | ||
128 | - | ||
129 | - @Override | ||
130 | - public Set<Link> getEgressLinks(ConnectPoint src) { | ||
131 | - Set<Link> egress = new HashSet<>(); | ||
132 | - for (Link link : srcLinks.get(src.deviceId())) { | ||
133 | - if (link.src().equals(src)) { | ||
134 | - egress.add(link); | ||
135 | - } | ||
136 | - } | ||
137 | - return egress; | ||
138 | - } | ||
139 | - | ||
140 | - @Override | ||
141 | - public Set<Link> getIngressLinks(ConnectPoint dst) { | ||
142 | - Set<Link> ingress = new HashSet<>(); | ||
143 | - for (Link link : dstLinks.get(dst.deviceId())) { | ||
144 | - if (link.dst().equals(dst)) { | ||
145 | - ingress.add(link); | ||
146 | - } | ||
147 | - } | ||
148 | - return ingress; | ||
149 | - } | ||
150 | - | ||
151 | - @Override | ||
152 | - public LinkEvent createOrUpdateLink(ProviderId providerId, | ||
153 | - LinkDescription linkDescription) { | ||
154 | - LinkKey key = linkKey(linkDescription.src(), linkDescription.dst()); | ||
155 | - Optional<DefaultLink> link = links.getUnchecked(key); | ||
156 | - if (!link.isPresent()) { | ||
157 | - return createLink(providerId, key, linkDescription); | ||
158 | - } | ||
159 | - return updateLink(providerId, link.get(), key, linkDescription); | ||
160 | - } | ||
161 | - | ||
162 | - // Creates and stores the link and returns the appropriate event. | ||
163 | - private LinkEvent createLink(ProviderId providerId, LinkKey key, | ||
164 | - LinkDescription linkDescription) { | ||
165 | - DefaultLink link = new DefaultLink(providerId, key.src(), key.dst(), | ||
166 | - linkDescription.type()); | ||
167 | - synchronized (this) { | ||
168 | - final byte[] keyBytes = serialize(key); | ||
169 | - rawLinks.put(keyBytes, serialize(link)); | ||
170 | - links.asMap().putIfAbsent(key, Optional.of(link)); | ||
171 | - | ||
172 | - addNewLink(link); | ||
173 | - } | ||
174 | - return new LinkEvent(LINK_ADDED, link); | ||
175 | - } | ||
176 | - | ||
177 | - // update Egress and ingress link sets | ||
178 | - private void addNewLink(DefaultLink link) { | ||
179 | - synchronized (this) { | ||
180 | - srcLinks.put(link.src().deviceId(), link); | ||
181 | - dstLinks.put(link.dst().deviceId(), link); | ||
182 | - } | ||
183 | - } | ||
184 | - | ||
185 | - // Updates, if necessary the specified link and returns the appropriate event. | ||
186 | - private LinkEvent updateLink(ProviderId providerId, DefaultLink link, | ||
187 | - LinkKey key, LinkDescription linkDescription) { | ||
188 | - // FIXME confirm Link update condition is OK | ||
189 | - if (link.type() == INDIRECT && linkDescription.type() == DIRECT) { | ||
190 | - synchronized (this) { | ||
191 | - | ||
192 | - DefaultLink updated = | ||
193 | - new DefaultLink(providerId, link.src(), link.dst(), | ||
194 | - linkDescription.type()); | ||
195 | - final byte[] keyBytes = serialize(key); | ||
196 | - rawLinks.put(keyBytes, serialize(updated)); | ||
197 | - links.asMap().replace(key, Optional.of(link), Optional.of(updated)); | ||
198 | - | ||
199 | - replaceLink(link, updated); | ||
200 | - return new LinkEvent(LINK_UPDATED, updated); | ||
201 | - } | ||
202 | - } | ||
203 | - return null; | ||
204 | - } | ||
205 | - | ||
206 | - // update Egress and ingress link sets | ||
207 | - private void replaceLink(DefaultLink link, DefaultLink updated) { | ||
208 | - synchronized (this) { | ||
209 | - srcLinks.remove(link.src().deviceId(), link); | ||
210 | - dstLinks.remove(link.dst().deviceId(), link); | ||
211 | - | ||
212 | - srcLinks.put(link.src().deviceId(), updated); | ||
213 | - dstLinks.put(link.dst().deviceId(), updated); | ||
214 | - } | ||
215 | - } | ||
216 | - | ||
217 | - @Override | ||
218 | - public LinkEvent removeLink(ConnectPoint src, ConnectPoint dst) { | ||
219 | - synchronized (this) { | ||
220 | - LinkKey key = linkKey(src, dst); | ||
221 | - byte[] keyBytes = serialize(key); | ||
222 | - Link link = deserialize(rawLinks.remove(keyBytes)); | ||
223 | - links.invalidate(key); | ||
224 | - if (link != null) { | ||
225 | - removeLink(link); | ||
226 | - return new LinkEvent(LINK_REMOVED, link); | ||
227 | - } | ||
228 | - return null; | ||
229 | - } | ||
230 | - } | ||
231 | - | ||
232 | - // update Egress and ingress link sets | ||
233 | - private void removeLink(Link link) { | ||
234 | - synchronized (this) { | ||
235 | - srcLinks.remove(link.src().deviceId(), link); | ||
236 | - dstLinks.remove(link.dst().deviceId(), link); | ||
237 | - } | ||
238 | - } | ||
239 | - | ||
240 | - private class RemoteLinkEventHandler extends RemoteCacheEventHandler<LinkKey, DefaultLink> { | ||
241 | - public RemoteLinkEventHandler(LoadingCache<LinkKey, Optional<DefaultLink>> cache) { | ||
242 | - super(cache); | ||
243 | - } | ||
244 | - | ||
245 | - @Override | ||
246 | - protected void onAdd(LinkKey key, DefaultLink newVal) { | ||
247 | - addNewLink(newVal); | ||
248 | - notifyDelegate(new LinkEvent(LINK_ADDED, newVal)); | ||
249 | - } | ||
250 | - | ||
251 | - @Override | ||
252 | - protected void onUpdate(LinkKey key, DefaultLink oldVal, DefaultLink newVal) { | ||
253 | - replaceLink(oldVal, newVal); | ||
254 | - notifyDelegate(new LinkEvent(LINK_UPDATED, newVal)); | ||
255 | - } | ||
256 | - | ||
257 | - @Override | ||
258 | - protected void onRemove(LinkKey key, DefaultLink val) { | ||
259 | - removeLink(val); | ||
260 | - notifyDelegate(new LinkEvent(LINK_REMOVED, val)); | ||
261 | - } | ||
262 | - } | ||
263 | -} |
core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopology.java (deleted, 100644 → 0)
1 | -package org.onlab.onos.store.topology.impl; | ||
2 | - | ||
3 | -import com.google.common.collect.ImmutableMap; | ||
4 | -import com.google.common.collect.ImmutableSet; | ||
5 | -import com.google.common.collect.ImmutableSetMultimap; | ||
6 | -import org.onlab.graph.DijkstraGraphSearch; | ||
7 | -import org.onlab.graph.GraphPathSearch; | ||
8 | -import org.onlab.graph.TarjanGraphSearch; | ||
9 | -import org.onlab.onos.net.AbstractModel; | ||
10 | -import org.onlab.onos.net.ConnectPoint; | ||
11 | -import org.onlab.onos.net.DefaultPath; | ||
12 | -import org.onlab.onos.net.DeviceId; | ||
13 | -import org.onlab.onos.net.Link; | ||
14 | -import org.onlab.onos.net.Path; | ||
15 | -import org.onlab.onos.net.provider.ProviderId; | ||
16 | -import org.onlab.onos.net.topology.ClusterId; | ||
17 | -import org.onlab.onos.net.topology.DefaultTopologyCluster; | ||
18 | -import org.onlab.onos.net.topology.DefaultTopologyVertex; | ||
19 | -import org.onlab.onos.net.topology.GraphDescription; | ||
20 | -import org.onlab.onos.net.topology.LinkWeight; | ||
21 | -import org.onlab.onos.net.topology.Topology; | ||
22 | -import org.onlab.onos.net.topology.TopologyCluster; | ||
23 | -import org.onlab.onos.net.topology.TopologyEdge; | ||
24 | -import org.onlab.onos.net.topology.TopologyGraph; | ||
25 | -import org.onlab.onos.net.topology.TopologyVertex; | ||
26 | - | ||
27 | -import java.util.ArrayList; | ||
28 | -import java.util.List; | ||
29 | -import java.util.Map; | ||
30 | -import java.util.Set; | ||
31 | - | ||
32 | -import static com.google.common.base.MoreObjects.toStringHelper; | ||
33 | -import static com.google.common.collect.ImmutableSetMultimap.Builder; | ||
34 | -import static org.onlab.graph.GraphPathSearch.Result; | ||
35 | -import static org.onlab.graph.TarjanGraphSearch.SCCResult; | ||
36 | -import static org.onlab.onos.net.Link.Type.INDIRECT; | ||
37 | - | ||
38 | -/** | ||
39 | - * Default implementation of the topology descriptor. This carries the | ||
40 | - * backing topology data. | ||
41 | - */ | ||
42 | -public class DefaultTopology extends AbstractModel implements Topology { | ||
43 | - | ||
44 | - private static final DijkstraGraphSearch<TopologyVertex, TopologyEdge> DIJKSTRA = | ||
45 | - new DijkstraGraphSearch<>(); | ||
46 | - private static final TarjanGraphSearch<TopologyVertex, TopologyEdge> TARJAN = | ||
47 | - new TarjanGraphSearch<>(); | ||
48 | - | ||
49 | - private static final ProviderId PID = new ProviderId("core", "org.onlab.onos.net"); | ||
50 | - | ||
51 | - private final long time; | ||
52 | - private final TopologyGraph graph; | ||
53 | - | ||
54 | - private final SCCResult<TopologyVertex, TopologyEdge> clusterResults; | ||
55 | - private final ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> results; | ||
56 | - private final ImmutableSetMultimap<PathKey, Path> paths; | ||
57 | - | ||
58 | - private final ImmutableMap<ClusterId, TopologyCluster> clusters; | ||
59 | - private final ImmutableSet<ConnectPoint> infrastructurePoints; | ||
60 | - private final ImmutableSetMultimap<ClusterId, ConnectPoint> broadcastSets; | ||
61 | - | ||
62 | - private ImmutableMap<DeviceId, TopologyCluster> clustersByDevice; | ||
63 | - private ImmutableSetMultimap<TopologyCluster, DeviceId> devicesByCluster; | ||
64 | - private ImmutableSetMultimap<TopologyCluster, Link> linksByCluster; | ||
65 | - | ||
66 | - | ||
67 | - /** | ||
68 | - * Creates a topology descriptor attributed to the specified provider. | ||
69 | - * | ||
70 | - * @param providerId identity of the provider | ||
71 | - * @param description data describing the new topology | ||
72 | - */ | ||
73 | - DefaultTopology(ProviderId providerId, GraphDescription description) { | ||
74 | - super(providerId); | ||
75 | - this.time = description.timestamp(); | ||
76 | - | ||
77 | - // Build the graph | ||
78 | - this.graph = new DefaultTopologyGraph(description.vertexes(), | ||
79 | - description.edges()); | ||
80 | - | ||
81 | - this.results = searchForShortestPaths(); | ||
82 | - this.paths = buildPaths(); | ||
83 | - | ||
84 | - this.clusterResults = searchForClusters(); | ||
85 | - this.clusters = buildTopologyClusters(); | ||
86 | - | ||
87 | - buildIndexes(); | ||
88 | - | ||
89 | - this.broadcastSets = buildBroadcastSets(); | ||
90 | - this.infrastructurePoints = findInfrastructurePoints(); | ||
91 | - } | ||
92 | - | ||
93 | - @Override | ||
94 | - public long time() { | ||
95 | - return time; | ||
96 | - } | ||
97 | - | ||
98 | - @Override | ||
99 | - public int clusterCount() { | ||
100 | - return clusters.size(); | ||
101 | - } | ||
102 | - | ||
103 | - @Override | ||
104 | - public int deviceCount() { | ||
105 | - return graph.getVertexes().size(); | ||
106 | - } | ||
107 | - | ||
108 | - @Override | ||
109 | - public int linkCount() { | ||
110 | - return graph.getEdges().size(); | ||
111 | - } | ||
112 | - | ||
113 | - @Override | ||
114 | - public int pathCount() { | ||
115 | - return paths.size(); | ||
116 | - } | ||
117 | - | ||
118 | - /** | ||
119 | - * Returns the backing topology graph. | ||
120 | - * | ||
121 | - * @return topology graph | ||
122 | - */ | ||
123 | - TopologyGraph getGraph() { | ||
124 | - return graph; | ||
125 | - } | ||
126 | - | ||
127 | - /** | ||
128 | - * Returns the set of topology clusters. | ||
129 | - * | ||
130 | - * @return set of clusters | ||
131 | - */ | ||
132 | - Set<TopologyCluster> getClusters() { | ||
133 | - return ImmutableSet.copyOf(clusters.values()); | ||
134 | - } | ||
135 | - | ||
136 | - /** | ||
137 | - * Returns the specified topology cluster. | ||
138 | - * | ||
139 | - * @param clusterId cluster identifier | ||
140 | - * @return topology cluster | ||
141 | - */ | ||
142 | - TopologyCluster getCluster(ClusterId clusterId) { | ||
143 | - return clusters.get(clusterId); | ||
144 | - } | ||
145 | - | ||
146 | - /** | ||
147 | - * Returns the topology cluster that contains the given device. | ||
148 | - * | ||
149 | - * @param deviceId device identifier | ||
150 | - * @return topology cluster | ||
151 | - */ | ||
152 | - TopologyCluster getCluster(DeviceId deviceId) { | ||
153 | - return clustersByDevice.get(deviceId); | ||
154 | - } | ||
155 | - | ||
156 | - /** | ||
157 | - * Returns the set of cluster devices. | ||
158 | - * | ||
159 | - * @param cluster topology cluster | ||
160 | - * @return cluster devices | ||
161 | - */ | ||
162 | - Set<DeviceId> getClusterDevices(TopologyCluster cluster) { | ||
163 | - return devicesByCluster.get(cluster); | ||
164 | - } | ||
165 | - | ||
166 | - /** | ||
167 | - * Returns the set of cluster links. | ||
168 | - * | ||
169 | - * @param cluster topology cluster | ||
170 | - * @return cluster links | ||
171 | - */ | ||
172 | - Set<Link> getClusterLinks(TopologyCluster cluster) { | ||
173 | - return linksByCluster.get(cluster); | ||
174 | - } | ||
175 | - | ||
176 | - /** | ||
177 | - * Indicates whether the given point is an infrastructure link end-point. | ||
178 | - * | ||
179 | - * @param connectPoint connection point | ||
180 | - * @return true if infrastructure | ||
181 | - */ | ||
182 | - boolean isInfrastructure(ConnectPoint connectPoint) { | ||
183 | - return infrastructurePoints.contains(connectPoint); | ||
184 | - } | ||
185 | - | ||
186 | - /** | ||
187 | - * Indicates whether the given point is part of a broadcast set. | ||
188 | - * | ||
189 | - * @param connectPoint connection point | ||
190 | - * @return true if in broadcast set | ||
191 | - */ | ||
192 | - boolean isBroadcastPoint(ConnectPoint connectPoint) { | ||
193 | -        // Any non-infrastructure (i.e., edge) points are assumed to be OK. | ||
194 | - if (!isInfrastructure(connectPoint)) { | ||
195 | - return true; | ||
196 | - } | ||
197 | - | ||
198 | - // Find the cluster to which the device belongs. | ||
199 | - TopologyCluster cluster = clustersByDevice.get(connectPoint.deviceId()); | ||
200 | - if (cluster == null) { | ||
201 | - throw new IllegalArgumentException("No cluster found for device " + connectPoint.deviceId()); | ||
202 | - } | ||
203 | - | ||
204 | - // If the broadcast set is null or empty, or if the point explicitly | ||
205 | -        // belongs to it, return true. | ||
206 | - Set<ConnectPoint> points = broadcastSets.get(cluster.id()); | ||
207 | - return points == null || points.isEmpty() || points.contains(connectPoint); | ||
208 | - } | ||
209 | - | ||
210 | - /** | ||
211 | - * Returns the size of the cluster broadcast set. | ||
212 | - * | ||
213 | - * @param clusterId cluster identifier | ||
214 | - * @return size of the cluster broadcast set | ||
215 | - */ | ||
216 | - int broadcastSetSize(ClusterId clusterId) { | ||
217 | - return broadcastSets.get(clusterId).size(); | ||
218 | - } | ||
219 | - | ||
220 | - /** | ||
221 | - * Returns the set of pre-computed shortest paths between source and | ||
222 | - * destination devices. | ||
223 | - * | ||
224 | - * @param src source device | ||
225 | - * @param dst destination device | ||
226 | - * @return set of shortest paths | ||
227 | - */ | ||
228 | - Set<Path> getPaths(DeviceId src, DeviceId dst) { | ||
229 | - return paths.get(new PathKey(src, dst)); | ||
230 | - } | ||
231 | - | ||
232 | - /** | ||
233 | - * Computes on-demand the set of shortest paths between source and | ||
234 | - * destination devices. | ||
235 | - * | ||
236 | - * @param src source device | ||
237 | - * @param dst destination device | ||
238 | - * @return set of shortest paths | ||
239 | - */ | ||
240 | - Set<Path> getPaths(DeviceId src, DeviceId dst, LinkWeight weight) { | ||
241 | - GraphPathSearch.Result<TopologyVertex, TopologyEdge> result = | ||
242 | - DIJKSTRA.search(graph, new DefaultTopologyVertex(src), | ||
243 | - new DefaultTopologyVertex(dst), weight); | ||
244 | - ImmutableSet.Builder<Path> builder = ImmutableSet.builder(); | ||
245 | - for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) { | ||
246 | - builder.add(networkPath(path)); | ||
247 | - } | ||
248 | - return builder.build(); | ||
249 | - } | ||
250 | - | ||
251 | - | ||
252 | - // Searches the graph for all shortest paths and returns the search results. | ||
253 | - private ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> searchForShortestPaths() { | ||
254 | - ImmutableMap.Builder<DeviceId, Result<TopologyVertex, TopologyEdge>> builder = ImmutableMap.builder(); | ||
255 | - | ||
256 | - // Search graph paths for each source to all destinations. | ||
257 | - LinkWeight weight = new HopCountLinkWeight(graph.getVertexes().size()); | ||
258 | - for (TopologyVertex src : graph.getVertexes()) { | ||
259 | - builder.put(src.deviceId(), DIJKSTRA.search(graph, src, null, weight)); | ||
260 | - } | ||
261 | - return builder.build(); | ||
262 | - } | ||
263 | - | ||
264 | - // Builds network paths from the graph path search results | ||
265 | - private ImmutableSetMultimap<PathKey, Path> buildPaths() { | ||
266 | - Builder<PathKey, Path> builder = ImmutableSetMultimap.builder(); | ||
267 | - for (DeviceId deviceId : results.keySet()) { | ||
268 | - Result<TopologyVertex, TopologyEdge> result = results.get(deviceId); | ||
269 | - for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) { | ||
270 | - builder.put(new PathKey(path.src().deviceId(), path.dst().deviceId()), | ||
271 | - networkPath(path)); | ||
272 | - } | ||
273 | - } | ||
274 | - return builder.build(); | ||
275 | - } | ||
276 | - | ||
277 | - // Converts graph path to a network path with the same cost. | ||
278 | - private Path networkPath(org.onlab.graph.Path<TopologyVertex, TopologyEdge> path) { | ||
279 | - List<Link> links = new ArrayList<>(); | ||
280 | - for (TopologyEdge edge : path.edges()) { | ||
281 | - links.add(edge.link()); | ||
282 | - } | ||
283 | - return new DefaultPath(PID, links, path.cost()); | ||
284 | - } | ||
285 | - | ||
286 | - | ||
287 | - // Searches for SCC clusters in the network topology graph using Tarjan | ||
288 | - // algorithm. | ||
289 | - private SCCResult<TopologyVertex, TopologyEdge> searchForClusters() { | ||
290 | - return TARJAN.search(graph, new NoIndirectLinksWeight()); | ||
291 | - } | ||
292 | - | ||
293 | - // Builds the topology clusters and returns the id-cluster bindings. | ||
294 | - private ImmutableMap<ClusterId, TopologyCluster> buildTopologyClusters() { | ||
295 | - ImmutableMap.Builder<ClusterId, TopologyCluster> clusterBuilder = ImmutableMap.builder(); | ||
296 | - SCCResult<TopologyVertex, TopologyEdge> result = | ||
297 | - TARJAN.search(graph, new NoIndirectLinksWeight()); | ||
298 | - | ||
299 | - // Extract both vertexes and edges from the results; the lists form | ||
300 | - // pairs along the same index. | ||
301 | - List<Set<TopologyVertex>> clusterVertexes = result.clusterVertexes(); | ||
302 | - List<Set<TopologyEdge>> clusterEdges = result.clusterEdges(); | ||
303 | - | ||
304 | - // Scan over the lists and create a cluster from the results. | ||
305 | - for (int i = 0, n = result.clusterCount(); i < n; i++) { | ||
306 | - Set<TopologyVertex> vertexSet = clusterVertexes.get(i); | ||
307 | - Set<TopologyEdge> edgeSet = clusterEdges.get(i); | ||
308 | - | ||
309 | - ClusterId cid = ClusterId.clusterId(i); | ||
310 | - DefaultTopologyCluster cluster = | ||
311 | - new DefaultTopologyCluster(cid, vertexSet.size(), edgeSet.size(), | ||
312 | - findRoot(vertexSet).deviceId()); | ||
313 | - clusterBuilder.put(cid, cluster); | ||
314 | - } | ||
315 | - return clusterBuilder.build(); | ||
316 | - } | ||
317 | - | ||
318 | - // Finds the vertex whose device id is the lexicographical minimum in the | ||
319 | - // specified set. | ||
320 | - private TopologyVertex findRoot(Set<TopologyVertex> vertexSet) { | ||
321 | - TopologyVertex minVertex = null; | ||
322 | - for (TopologyVertex vertex : vertexSet) { | ||
323 | - if (minVertex == null || | ||
324 | -                    vertex.deviceId().toString() | ||
325 | - .compareTo(minVertex.deviceId().toString()) < 0) { | ||
326 | - minVertex = vertex; | ||
327 | - } | ||
328 | - } | ||
329 | - return minVertex; | ||
330 | - } | ||
331 | - | ||
332 | -    // Builds a map of broadcast sets for each cluster. | ||
333 | - private ImmutableSetMultimap<ClusterId, ConnectPoint> buildBroadcastSets() { | ||
334 | - Builder<ClusterId, ConnectPoint> builder = ImmutableSetMultimap.builder(); | ||
335 | - for (TopologyCluster cluster : clusters.values()) { | ||
336 | - addClusterBroadcastSet(cluster, builder); | ||
337 | - } | ||
338 | - return builder.build(); | ||
339 | - } | ||
340 | - | ||
341 | - // Finds all broadcast points for the cluster. These are those connection | ||
342 | - // points which lie along the shortest paths between the cluster root and | ||
343 | - // all other devices within the cluster. | ||
344 | - private void addClusterBroadcastSet(TopologyCluster cluster, | ||
345 | - Builder<ClusterId, ConnectPoint> builder) { | ||
346 | - // Use the graph root search results to build the broadcast set. | ||
347 | - Result<TopologyVertex, TopologyEdge> result = results.get(cluster.root()); | ||
348 | - for (Map.Entry<TopologyVertex, Set<TopologyEdge>> entry : result.parents().entrySet()) { | ||
349 | - TopologyVertex vertex = entry.getKey(); | ||
350 | - | ||
351 | - // Ignore any parents that lead outside the cluster. | ||
352 | - if (clustersByDevice.get(vertex.deviceId()) != cluster) { | ||
353 | - continue; | ||
354 | - } | ||
355 | - | ||
356 | - // Ignore any back-link sets that are empty. | ||
357 | - Set<TopologyEdge> parents = entry.getValue(); | ||
358 | - if (parents.isEmpty()) { | ||
359 | - continue; | ||
360 | - } | ||
361 | - | ||
362 | - // Use the first back-link source and destinations to add to the | ||
363 | - // broadcast set. | ||
364 | - Link link = parents.iterator().next().link(); | ||
365 | - builder.put(cluster.id(), link.src()); | ||
366 | - builder.put(cluster.id(), link.dst()); | ||
367 | - } | ||
368 | - } | ||
369 | - | ||
370 | -    // Collects and returns a set of all infrastructure link end-points. | ||
371 | - private ImmutableSet<ConnectPoint> findInfrastructurePoints() { | ||
372 | - ImmutableSet.Builder<ConnectPoint> builder = ImmutableSet.builder(); | ||
373 | - for (TopologyEdge edge : graph.getEdges()) { | ||
374 | - builder.add(edge.link().src()); | ||
375 | - builder.add(edge.link().dst()); | ||
376 | - } | ||
377 | - return builder.build(); | ||
378 | - } | ||
379 | - | ||
380 | - // Builds cluster-devices, cluster-links and device-cluster indexes. | ||
381 | - private void buildIndexes() { | ||
382 | - // Prepare the index builders | ||
383 | - ImmutableMap.Builder<DeviceId, TopologyCluster> clusterBuilder = ImmutableMap.builder(); | ||
384 | - ImmutableSetMultimap.Builder<TopologyCluster, DeviceId> devicesBuilder = ImmutableSetMultimap.builder(); | ||
385 | - ImmutableSetMultimap.Builder<TopologyCluster, Link> linksBuilder = ImmutableSetMultimap.builder(); | ||
386 | - | ||
387 | - // Now scan through all the clusters | ||
388 | - for (TopologyCluster cluster : clusters.values()) { | ||
389 | - int i = cluster.id().index(); | ||
390 | - | ||
391 | - // Scan through all the cluster vertexes. | ||
392 | - for (TopologyVertex vertex : clusterResults.clusterVertexes().get(i)) { | ||
393 | - devicesBuilder.put(cluster, vertex.deviceId()); | ||
394 | - clusterBuilder.put(vertex.deviceId(), cluster); | ||
395 | - } | ||
396 | - | ||
397 | - // Scan through all the cluster edges. | ||
398 | - for (TopologyEdge edge : clusterResults.clusterEdges().get(i)) { | ||
399 | - linksBuilder.put(cluster, edge.link()); | ||
400 | - } | ||
401 | - } | ||
402 | - | ||
403 | - // Finalize all indexes. | ||
404 | - clustersByDevice = clusterBuilder.build(); | ||
405 | - devicesByCluster = devicesBuilder.build(); | ||
406 | - linksByCluster = linksBuilder.build(); | ||
407 | - } | ||
408 | - | ||
409 | - // Link weight for measuring link cost as hop count with indirect links | ||
410 | - // being as expensive as traversing the entire graph to assume the worst. | ||
411 | - private static class HopCountLinkWeight implements LinkWeight { | ||
412 | - private final int indirectLinkCost; | ||
413 | - | ||
414 | - HopCountLinkWeight(int indirectLinkCost) { | ||
415 | - this.indirectLinkCost = indirectLinkCost; | ||
416 | - } | ||
417 | - | ||
418 | - @Override | ||
419 | - public double weight(TopologyEdge edge) { | ||
420 | - // To force preference to use direct paths first, make indirect | ||
421 | - // links as expensive as the linear vertex traversal. | ||
422 | - return edge.link().type() == INDIRECT ? indirectLinkCost : 1; | ||
423 | - } | ||
424 | - } | ||
425 | - | ||
426 | - // Link weight for preventing traversal over indirect links. | ||
427 | - private static class NoIndirectLinksWeight implements LinkWeight { | ||
428 | - @Override | ||
429 | - public double weight(TopologyEdge edge) { | ||
430 | - return edge.link().type() == INDIRECT ? -1 : 1; | ||
431 | - } | ||
432 | - } | ||
433 | - | ||
434 | - @Override | ||
435 | - public String toString() { | ||
436 | - return toStringHelper(this) | ||
437 | - .add("time", time) | ||
438 | - .add("clusters", clusterCount()) | ||
439 | - .add("devices", deviceCount()) | ||
440 | - .add("links", linkCount()) | ||
441 | - .add("pathCount", pathCount()) | ||
442 | - .toString(); | ||
443 | - } | ||
444 | -} |
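The two LinkWeight implementations deleted above encode the path-selection policy: direct links cost one hop, indirect links cost as much as visiting every vertex (so direct routes always win), and the Tarjan cluster search rejects indirect links outright with a negative weight. The following is a minimal, self-contained sketch of that weighting idea; the Edge and LinkType names are hypothetical stand-ins, not the ONOS API.

// Minimal, self-contained sketch of the two weighting strategies used above.
// Edge and LinkType are hypothetical stand-ins, not ONOS types.
public class LinkWeightSketch {

    enum LinkType { DIRECT, INDIRECT }

    // A tiny stand-in for a topology edge.
    static final class Edge {
        final LinkType type;
        Edge(LinkType type) { this.type = type; }
    }

    // Hop-count weight: indirect links cost as much as traversing every
    // vertex, so direct paths are always preferred when one exists.
    static double hopCountWeight(Edge edge, int vertexCount) {
        return edge.type == LinkType.INDIRECT ? vertexCount : 1;
    }

    // Cluster-search weight: a negative weight marks the edge as unusable,
    // so indirect links never join two devices into the same cluster.
    static double noIndirectLinksWeight(Edge edge) {
        return edge.type == LinkType.INDIRECT ? -1 : 1;
    }

    public static void main(String[] args) {
        Edge direct = new Edge(LinkType.DIRECT);
        Edge indirect = new Edge(LinkType.INDIRECT);
        int vertexCount = 10;
        System.out.println(hopCountWeight(direct, vertexCount));    // 1.0
        System.out.println(hopCountWeight(indirect, vertexCount));  // 10.0
        System.out.println(noIndirectLinksWeight(indirect));        // -1.0
    }
}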
core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopologyGraph.java
deleted
100644 → 0
1 | -package org.onlab.onos.store.topology.impl; | ||
2 | - | ||
3 | -import org.onlab.graph.AdjacencyListsGraph; | ||
4 | -import org.onlab.onos.net.topology.TopologyEdge; | ||
5 | -import org.onlab.onos.net.topology.TopologyGraph; | ||
6 | -import org.onlab.onos.net.topology.TopologyVertex; | ||
7 | - | ||
8 | -import java.util.Set; | ||
9 | - | ||
10 | -/** | ||
11 | - * Default implementation of an immutable topology graph based on a generic | ||
12 | - * implementation of adjacency lists graph. | ||
13 | - */ | ||
14 | -public class DefaultTopologyGraph | ||
15 | - extends AdjacencyListsGraph<TopologyVertex, TopologyEdge> | ||
16 | - implements TopologyGraph { | ||
17 | - | ||
18 | - /** | ||
19 | - * Creates a topology graph comprising of the specified vertexes and edges. | ||
20 | - * | ||
21 | - * @param vertexes set of graph vertexes | ||
22 | - * @param edges set of graph edges | ||
23 | - */ | ||
24 | - public DefaultTopologyGraph(Set<TopologyVertex> vertexes, Set<TopologyEdge> edges) { | ||
25 | - super(vertexes, edges); | ||
26 | - } | ||
27 | - | ||
28 | -} |
core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DistributedTopologyStore.java
deleted
100644 → 0
1 | -package org.onlab.onos.store.topology.impl; | ||
2 | - | ||
3 | -import static org.slf4j.LoggerFactory.getLogger; | ||
4 | - | ||
5 | -import java.util.List; | ||
6 | -import java.util.Set; | ||
7 | - | ||
8 | -import org.apache.felix.scr.annotations.Activate; | ||
9 | -import org.apache.felix.scr.annotations.Component; | ||
10 | -import org.apache.felix.scr.annotations.Deactivate; | ||
11 | -import org.apache.felix.scr.annotations.Service; | ||
12 | -import org.onlab.onos.event.Event; | ||
13 | -import org.onlab.onos.net.ConnectPoint; | ||
14 | -import org.onlab.onos.net.DeviceId; | ||
15 | -import org.onlab.onos.net.Link; | ||
16 | -import org.onlab.onos.net.Path; | ||
17 | -import org.onlab.onos.net.provider.ProviderId; | ||
18 | -import org.onlab.onos.net.topology.ClusterId; | ||
19 | -import org.onlab.onos.net.topology.GraphDescription; | ||
20 | -import org.onlab.onos.net.topology.LinkWeight; | ||
21 | -import org.onlab.onos.net.topology.Topology; | ||
22 | -import org.onlab.onos.net.topology.TopologyCluster; | ||
23 | -import org.onlab.onos.net.topology.TopologyEvent; | ||
24 | -import org.onlab.onos.net.topology.TopologyGraph; | ||
25 | -import org.onlab.onos.net.topology.TopologyStore; | ||
26 | -import org.onlab.onos.net.topology.TopologyStoreDelegate; | ||
27 | -import org.onlab.onos.store.AbstractStore; | ||
28 | -import org.slf4j.Logger; | ||
29 | - | ||
30 | -/** | ||
31 | - * TEMPORARY: Manages inventory of topology snapshots using distributed | ||
32 | - * structures implementation. | ||
33 | - */ | ||
34 | -//FIXME: I LIE I AM NOT DISTRIBUTED | ||
35 | -@Component(immediate = true) | ||
36 | -@Service | ||
37 | -public class DistributedTopologyStore | ||
38 | -        extends AbstractStore<TopologyEvent, TopologyStoreDelegate> | ||
39 | -        implements TopologyStore { | ||
40 | - | ||
41 | - private final Logger log = getLogger(getClass()); | ||
42 | - | ||
43 | - private volatile DefaultTopology current; | ||
44 | - | ||
45 | - @Activate | ||
46 | - public void activate() { | ||
47 | - log.info("Started"); | ||
48 | - } | ||
49 | - | ||
50 | - @Deactivate | ||
51 | - public void deactivate() { | ||
52 | - log.info("Stopped"); | ||
53 | - } | ||
54 | - @Override | ||
55 | - public Topology currentTopology() { | ||
56 | - return current; | ||
57 | - } | ||
58 | - | ||
59 | - @Override | ||
60 | - public boolean isLatest(Topology topology) { | ||
61 | - // Topology is current only if it is the same as our current topology | ||
62 | - return topology == current; | ||
63 | - } | ||
64 | - | ||
65 | - @Override | ||
66 | - public TopologyGraph getGraph(Topology topology) { | ||
67 | - return defaultTopology(topology).getGraph(); | ||
68 | - } | ||
69 | - | ||
70 | - @Override | ||
71 | - public Set<TopologyCluster> getClusters(Topology topology) { | ||
72 | - return defaultTopology(topology).getClusters(); | ||
73 | - } | ||
74 | - | ||
75 | - @Override | ||
76 | - public TopologyCluster getCluster(Topology topology, ClusterId clusterId) { | ||
77 | - return defaultTopology(topology).getCluster(clusterId); | ||
78 | - } | ||
79 | - | ||
80 | - @Override | ||
81 | - public Set<DeviceId> getClusterDevices(Topology topology, TopologyCluster cluster) { | ||
82 | - return defaultTopology(topology).getClusterDevices(cluster); | ||
83 | - } | ||
84 | - | ||
85 | - @Override | ||
86 | - public Set<Link> getClusterLinks(Topology topology, TopologyCluster cluster) { | ||
87 | - return defaultTopology(topology).getClusterLinks(cluster); | ||
88 | - } | ||
89 | - | ||
90 | - @Override | ||
91 | - public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst) { | ||
92 | - return defaultTopology(topology).getPaths(src, dst); | ||
93 | - } | ||
94 | - | ||
95 | - @Override | ||
96 | - public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst, | ||
97 | - LinkWeight weight) { | ||
98 | - return defaultTopology(topology).getPaths(src, dst, weight); | ||
99 | - } | ||
100 | - | ||
101 | - @Override | ||
102 | - public boolean isInfrastructure(Topology topology, ConnectPoint connectPoint) { | ||
103 | - return defaultTopology(topology).isInfrastructure(connectPoint); | ||
104 | - } | ||
105 | - | ||
106 | - @Override | ||
107 | - public boolean isBroadcastPoint(Topology topology, ConnectPoint connectPoint) { | ||
108 | - return defaultTopology(topology).isBroadcastPoint(connectPoint); | ||
109 | - } | ||
110 | - | ||
111 | - @Override | ||
112 | - public TopologyEvent updateTopology(ProviderId providerId, | ||
113 | - GraphDescription graphDescription, | ||
114 | - List<Event> reasons) { | ||
115 | - // First off, make sure that what we're given is indeed newer than | ||
116 | - // what we already have. | ||
117 | - if (current != null && graphDescription.timestamp() < current.time()) { | ||
118 | - return null; | ||
119 | - } | ||
120 | - | ||
121 | - // Have the default topology construct self from the description data. | ||
122 | - DefaultTopology newTopology = | ||
123 | - new DefaultTopology(providerId, graphDescription); | ||
124 | - | ||
125 | - // Promote the new topology to current and return a ready-to-send event. | ||
126 | - synchronized (this) { | ||
127 | - current = newTopology; | ||
128 | - return new TopologyEvent(TopologyEvent.Type.TOPOLOGY_CHANGED, | ||
129 | - current, reasons); | ||
130 | - } | ||
131 | - } | ||
132 | - | ||
133 | - // Validates the specified topology and returns it as a default | ||
134 | - private DefaultTopology defaultTopology(Topology topology) { | ||
135 | - if (topology instanceof DefaultTopology) { | ||
136 | - return (DefaultTopology) topology; | ||
137 | - } | ||
138 | - throw new IllegalArgumentException("Topology class " + topology.getClass() + | ||
139 | - " not supported"); | ||
140 | - } | ||
141 | - | ||
142 | -} |
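The deleted DistributedTopologyStore keeps a single volatile snapshot: updateTopology rejects graph descriptions older than the current snapshot's timestamp and promotes newer ones under a lock. Below is a self-contained sketch of that compare-then-promote pattern; SnapshotHolder and Snapshot are hypothetical stand-ins for the store and DefaultTopology, not ONOS types.

// Self-contained sketch of the "compare timestamp, then promote" pattern the
// deleted store uses for its current topology snapshot (hypothetical types).
public class SnapshotHolder {

    static final class Snapshot {
        final long time;
        Snapshot(long time) { this.time = time; }
    }

    // Readers see the latest promoted snapshot without locking.
    private volatile Snapshot current;

    Snapshot current() {
        return current;
    }

    // Returns true if the candidate was promoted, false if it was stale.
    boolean promote(Snapshot candidate) {
        // Check and promote under one lock so concurrent updates cannot race.
        synchronized (this) {
            // Reject descriptions older than the snapshot we already hold.
            if (current != null && candidate.time < current.time) {
                return false;
            }
            current = candidate;
            return true;
        }
    }

    public static void main(String[] args) {
        SnapshotHolder holder = new SnapshotHolder();
        System.out.println(holder.promote(new Snapshot(100))); // true
        System.out.println(holder.promote(new Snapshot(50)));  // false (stale)
        System.out.println(holder.current().time);             // 100
    }
}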
core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/PathKey.java
deleted
100644 → 0
1 | -package org.onlab.onos.store.topology.impl; | ||
2 | - | ||
3 | -import org.onlab.onos.net.DeviceId; | ||
4 | - | ||
5 | -import java.util.Objects; | ||
6 | - | ||
7 | -/** | ||
8 | - * Key for filing pre-computed paths between source and destination devices. | ||
9 | - */ | ||
10 | -class PathKey { | ||
11 | - private final DeviceId src; | ||
12 | - private final DeviceId dst; | ||
13 | - | ||
14 | - /** | ||
15 | - * Creates a path key from the given source/dest pair. | ||
16 | - * @param src source device | ||
17 | - * @param dst destination device | ||
18 | - */ | ||
19 | - PathKey(DeviceId src, DeviceId dst) { | ||
20 | - this.src = src; | ||
21 | - this.dst = dst; | ||
22 | - } | ||
23 | - | ||
24 | - @Override | ||
25 | - public int hashCode() { | ||
26 | - return Objects.hash(src, dst); | ||
27 | - } | ||
28 | - | ||
29 | - @Override | ||
30 | - public boolean equals(Object obj) { | ||
31 | - if (this == obj) { | ||
32 | - return true; | ||
33 | - } | ||
34 | - if (obj instanceof PathKey) { | ||
35 | - final PathKey other = (PathKey) obj; | ||
36 | - return Objects.equals(this.src, other.src) && Objects.equals(this.dst, other.dst); | ||
37 | - } | ||
38 | - return false; | ||
39 | - } | ||
40 | -} |
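PathKey above works as the key of the pre-computed path multimap only because hashCode and equals cover both endpoints, and because direction matters. A small self-contained sketch of the same value-key idea follows; EndpointKey and the string device ids are hypothetical, not the ONOS classes.

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

// Self-contained sketch of why a (src, dst) value key works for the
// pre-computed path table. EndpointKey is a hypothetical stand-in for PathKey.
public class EndpointKeyDemo {

    static final class EndpointKey {
        final String src;
        final String dst;

        EndpointKey(String src, String dst) {
            this.src = src;
            this.dst = dst;
        }

        // Both endpoints participate in hashing and equality, so lookups with
        // an equal-but-distinct key instance still hit the same map entry.
        @Override
        public int hashCode() {
            return Objects.hash(src, dst);
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) {
                return true;
            }
            if (!(obj instanceof EndpointKey)) {
                return false;
            }
            EndpointKey other = (EndpointKey) obj;
            return Objects.equals(src, other.src) && Objects.equals(dst, other.dst);
        }
    }

    public static void main(String[] args) {
        Map<EndpointKey, String> paths = new HashMap<>();
        paths.put(new EndpointKey("of:foo", "of:bar"), "path-1");
        // A fresh key with the same endpoints finds the stored value.
        System.out.println(paths.get(new EndpointKey("of:foo", "of:bar"))); // path-1
        // Direction matters: the reverse pair is a different key.
        System.out.println(paths.get(new EndpointKey("of:bar", "of:foo"))); // null
    }
}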
core/store/hz/net/src/test/java/org/onlab/onos/store/device/impl/DistributedDeviceStoreTest.java
deleted
100644 → 0
1 | -/** | ||
2 | - * | ||
3 | - */ | ||
4 | -package org.onlab.onos.store.device.impl; | ||
5 | - | ||
6 | -import static org.junit.Assert.*; | ||
7 | -import static org.onlab.onos.net.Device.Type.SWITCH; | ||
8 | -import static org.onlab.onos.net.DeviceId.deviceId; | ||
9 | -import static org.onlab.onos.net.device.DeviceEvent.Type.*; | ||
10 | - | ||
11 | -import java.util.Arrays; | ||
12 | -import java.util.HashMap; | ||
13 | -import java.util.List; | ||
14 | -import java.util.Map; | ||
15 | -import java.util.Set; | ||
16 | -import java.util.concurrent.CountDownLatch; | ||
17 | -import java.util.concurrent.TimeUnit; | ||
18 | - | ||
19 | -import org.junit.After; | ||
20 | -import org.junit.AfterClass; | ||
21 | -import org.junit.Before; | ||
22 | -import org.junit.BeforeClass; | ||
23 | -import org.junit.Ignore; | ||
24 | -import org.junit.Test; | ||
25 | -import org.onlab.onos.net.Device; | ||
26 | -import org.onlab.onos.net.DeviceId; | ||
27 | -import org.onlab.onos.net.Port; | ||
28 | -import org.onlab.onos.net.PortNumber; | ||
29 | -import org.onlab.onos.net.device.DefaultDeviceDescription; | ||
30 | -import org.onlab.onos.net.device.DefaultPortDescription; | ||
31 | -import org.onlab.onos.net.device.DeviceDescription; | ||
32 | -import org.onlab.onos.net.device.DeviceEvent; | ||
33 | -import org.onlab.onos.net.device.DeviceStoreDelegate; | ||
34 | -import org.onlab.onos.net.device.PortDescription; | ||
35 | -import org.onlab.onos.net.provider.ProviderId; | ||
36 | -import org.onlab.onos.store.common.StoreManager; | ||
37 | -import org.onlab.onos.store.common.StoreService; | ||
38 | -import org.onlab.onos.store.common.TestStoreManager; | ||
39 | -import com.google.common.collect.Iterables; | ||
40 | -import com.google.common.collect.Sets; | ||
41 | -import com.hazelcast.config.Config; | ||
42 | -import com.hazelcast.core.Hazelcast; | ||
43 | - | ||
44 | -/** | ||
45 | - * Test of the Hazelcast based distributed DeviceStore implementation. | ||
46 | - */ | ||
47 | -public class DistributedDeviceStoreTest { | ||
48 | - | ||
49 | - private static final ProviderId PID = new ProviderId("of", "foo"); | ||
50 | - private static final DeviceId DID1 = deviceId("of:foo"); | ||
51 | - private static final DeviceId DID2 = deviceId("of:bar"); | ||
52 | - private static final String MFR = "whitebox"; | ||
53 | - private static final String HW = "1.1.x"; | ||
54 | - private static final String SW1 = "3.8.1"; | ||
55 | - private static final String SW2 = "3.9.5"; | ||
56 | - private static final String SN = "43311-12345"; | ||
57 | - | ||
58 | - private static final PortNumber P1 = PortNumber.portNumber(1); | ||
59 | - private static final PortNumber P2 = PortNumber.portNumber(2); | ||
60 | - private static final PortNumber P3 = PortNumber.portNumber(3); | ||
61 | - | ||
62 | - private DistributedDeviceStore deviceStore; | ||
63 | - | ||
64 | - private StoreManager storeManager; | ||
65 | - | ||
66 | - | ||
67 | - @BeforeClass | ||
68 | - public static void setUpBeforeClass() throws Exception { | ||
69 | - } | ||
70 | - | ||
71 | - @AfterClass | ||
72 | - public static void tearDownAfterClass() throws Exception { | ||
73 | - } | ||
74 | - | ||
75 | - | ||
76 | - @Before | ||
77 | - public void setUp() throws Exception { | ||
78 | - // TODO should find a way to clean Hazelcast instance without shutdown. | ||
79 | - Config config = TestStoreManager.getTestConfig(); | ||
80 | - | ||
81 | - storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config)); | ||
82 | - storeManager.activate(); | ||
83 | - | ||
84 | - deviceStore = new TestDistributedDeviceStore(storeManager); | ||
85 | - deviceStore.activate(); | ||
86 | - } | ||
87 | - | ||
88 | - @After | ||
89 | - public void tearDown() throws Exception { | ||
90 | - deviceStore.deactivate(); | ||
91 | - | ||
92 | - storeManager.deactivate(); | ||
93 | - } | ||
94 | - | ||
95 | - private void putDevice(DeviceId deviceId, String swVersion) { | ||
96 | - DeviceDescription description = | ||
97 | - new DefaultDeviceDescription(deviceId.uri(), SWITCH, MFR, | ||
98 | - HW, swVersion, SN); | ||
99 | - deviceStore.createOrUpdateDevice(PID, deviceId, description); | ||
100 | - } | ||
101 | - | ||
102 | - private static void assertDevice(DeviceId id, String swVersion, Device device) { | ||
103 | - assertNotNull(device); | ||
104 | - assertEquals(id, device.id()); | ||
105 | - assertEquals(MFR, device.manufacturer()); | ||
106 | - assertEquals(HW, device.hwVersion()); | ||
107 | - assertEquals(swVersion, device.swVersion()); | ||
108 | - assertEquals(SN, device.serialNumber()); | ||
109 | - } | ||
110 | - | ||
111 | - @Test | ||
112 | - public final void testGetDeviceCount() { | ||
113 | -        assertEquals("initially empty", 0, deviceStore.getDeviceCount()); | ||
114 | - | ||
115 | - putDevice(DID1, SW1); | ||
116 | - putDevice(DID2, SW2); | ||
117 | - putDevice(DID1, SW1); | ||
118 | - | ||
119 | -        assertEquals("expect 2 unique devices", 2, deviceStore.getDeviceCount()); | ||
120 | - } | ||
121 | - | ||
122 | - @Test | ||
123 | - public final void testGetDevices() { | ||
124 | -        assertEquals("initially empty", 0, Iterables.size(deviceStore.getDevices())); | ||
125 | - | ||
126 | - putDevice(DID1, SW1); | ||
127 | - putDevice(DID2, SW2); | ||
128 | - putDevice(DID1, SW1); | ||
129 | - | ||
130 | -        assertEquals("expect 2 unique devices", | ||
131 | - 2, Iterables.size(deviceStore.getDevices())); | ||
132 | - | ||
133 | - Map<DeviceId, Device> devices = new HashMap<>(); | ||
134 | - for (Device device : deviceStore.getDevices()) { | ||
135 | - devices.put(device.id(), device); | ||
136 | - } | ||
137 | - | ||
138 | - assertDevice(DID1, SW1, devices.get(DID1)); | ||
139 | - assertDevice(DID2, SW2, devices.get(DID2)); | ||
140 | - | ||
141 | - // add case for new node? | ||
142 | - } | ||
143 | - | ||
144 | - @Test | ||
145 | - public final void testGetDevice() { | ||
146 | - | ||
147 | - putDevice(DID1, SW1); | ||
148 | - | ||
149 | - assertDevice(DID1, SW1, deviceStore.getDevice(DID1)); | ||
150 | - assertNull("DID2 shouldn't be there", deviceStore.getDevice(DID2)); | ||
151 | - } | ||
152 | - | ||
153 | - @Test | ||
154 | - public final void testCreateOrUpdateDevice() { | ||
155 | - DeviceDescription description = | ||
156 | - new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR, | ||
157 | - HW, SW1, SN); | ||
158 | - DeviceEvent event = deviceStore.createOrUpdateDevice(PID, DID1, description); | ||
159 | - assertEquals(DEVICE_ADDED, event.type()); | ||
160 | - assertDevice(DID1, SW1, event.subject()); | ||
161 | - | ||
162 | - DeviceDescription description2 = | ||
163 | - new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR, | ||
164 | - HW, SW2, SN); | ||
165 | - DeviceEvent event2 = deviceStore.createOrUpdateDevice(PID, DID1, description2); | ||
166 | - assertEquals(DEVICE_UPDATED, event2.type()); | ||
167 | - assertDevice(DID1, SW2, event2.subject()); | ||
168 | - | ||
169 | - assertNull("No change expected", deviceStore.createOrUpdateDevice(PID, DID1, description2)); | ||
170 | - } | ||
171 | - | ||
172 | - @Test | ||
173 | - public final void testMarkOffline() { | ||
174 | - | ||
175 | - putDevice(DID1, SW1); | ||
176 | - assertTrue(deviceStore.isAvailable(DID1)); | ||
177 | - | ||
178 | - DeviceEvent event = deviceStore.markOffline(DID1); | ||
179 | - assertEquals(DEVICE_AVAILABILITY_CHANGED, event.type()); | ||
180 | - assertDevice(DID1, SW1, event.subject()); | ||
181 | - assertFalse(deviceStore.isAvailable(DID1)); | ||
182 | - | ||
183 | - DeviceEvent event2 = deviceStore.markOffline(DID1); | ||
184 | - assertNull("No change, no event", event2); | ||
185 | -    } | ||
186 | - | ||
187 | - @Test | ||
188 | - public final void testUpdatePorts() { | ||
189 | - putDevice(DID1, SW1); | ||
190 | - List<PortDescription> pds = Arrays.<PortDescription>asList( | ||
191 | - new DefaultPortDescription(P1, true), | ||
192 | - new DefaultPortDescription(P2, true) | ||
193 | - ); | ||
194 | - | ||
195 | - List<DeviceEvent> events = deviceStore.updatePorts(PID, DID1, pds); | ||
196 | - | ||
197 | - Set<PortNumber> expectedPorts = Sets.newHashSet(P1, P2); | ||
198 | - for (DeviceEvent event : events) { | ||
199 | - assertEquals(PORT_ADDED, event.type()); | ||
200 | - assertDevice(DID1, SW1, event.subject()); | ||
201 | - assertTrue("PortNumber is one of expected", | ||
202 | - expectedPorts.remove(event.port().number())); | ||
203 | - assertTrue("Port is enabled", event.port().isEnabled()); | ||
204 | - } | ||
205 | -        assertTrue("Events for all expected ports appeared", expectedPorts.isEmpty()); | ||
206 | - | ||
207 | - | ||
208 | - List<PortDescription> pds2 = Arrays.<PortDescription>asList( | ||
209 | - new DefaultPortDescription(P1, false), | ||
210 | - new DefaultPortDescription(P2, true), | ||
211 | - new DefaultPortDescription(P3, true) | ||
212 | - ); | ||
213 | - | ||
214 | - events = deviceStore.updatePorts(PID, DID1, pds2); | ||
215 | - assertFalse("event should be triggered", events.isEmpty()); | ||
216 | - for (DeviceEvent event : events) { | ||
217 | - PortNumber num = event.port().number(); | ||
218 | - if (P1.equals(num)) { | ||
219 | - assertEquals(PORT_UPDATED, event.type()); | ||
220 | - assertDevice(DID1, SW1, event.subject()); | ||
221 | - assertFalse("Port is disabled", event.port().isEnabled()); | ||
222 | - } else if (P2.equals(num)) { | ||
223 | - fail("P2 event not expected."); | ||
224 | - } else if (P3.equals(num)) { | ||
225 | - assertEquals(PORT_ADDED, event.type()); | ||
226 | - assertDevice(DID1, SW1, event.subject()); | ||
227 | - assertTrue("Port is enabled", event.port().isEnabled()); | ||
228 | - } else { | ||
229 | - fail("Unknown port number encountered: " + num); | ||
230 | - } | ||
231 | - } | ||
232 | - | ||
233 | - List<PortDescription> pds3 = Arrays.<PortDescription>asList( | ||
234 | - new DefaultPortDescription(P1, false), | ||
235 | - new DefaultPortDescription(P2, true) | ||
236 | - ); | ||
237 | - events = deviceStore.updatePorts(PID, DID1, pds3); | ||
238 | - assertFalse("event should be triggered", events.isEmpty()); | ||
239 | - for (DeviceEvent event : events) { | ||
240 | - PortNumber num = event.port().number(); | ||
241 | - if (P1.equals(num)) { | ||
242 | - fail("P1 event not expected."); | ||
243 | - } else if (P2.equals(num)) { | ||
244 | - fail("P2 event not expected."); | ||
245 | - } else if (P3.equals(num)) { | ||
246 | - assertEquals(PORT_REMOVED, event.type()); | ||
247 | - assertDevice(DID1, SW1, event.subject()); | ||
248 | - assertTrue("Port was enabled", event.port().isEnabled()); | ||
249 | - } else { | ||
250 | - fail("Unknown port number encountered: " + num); | ||
251 | - } | ||
252 | - } | ||
253 | - | ||
254 | - } | ||
255 | - | ||
256 | - @Test | ||
257 | - public final void testUpdatePortStatus() { | ||
258 | - putDevice(DID1, SW1); | ||
259 | - List<PortDescription> pds = Arrays.<PortDescription>asList( | ||
260 | - new DefaultPortDescription(P1, true) | ||
261 | - ); | ||
262 | - deviceStore.updatePorts(PID, DID1, pds); | ||
263 | - | ||
264 | - DeviceEvent event = deviceStore.updatePortStatus(PID, DID1, | ||
265 | - new DefaultPortDescription(P1, false)); | ||
266 | - assertEquals(PORT_UPDATED, event.type()); | ||
267 | - assertDevice(DID1, SW1, event.subject()); | ||
268 | - assertEquals(P1, event.port().number()); | ||
269 | - assertFalse("Port is disabled", event.port().isEnabled()); | ||
270 | - } | ||
271 | - | ||
272 | - @Test | ||
273 | - public final void testGetPorts() { | ||
274 | - putDevice(DID1, SW1); | ||
275 | - putDevice(DID2, SW1); | ||
276 | - List<PortDescription> pds = Arrays.<PortDescription>asList( | ||
277 | - new DefaultPortDescription(P1, true), | ||
278 | - new DefaultPortDescription(P2, true) | ||
279 | - ); | ||
280 | - deviceStore.updatePorts(PID, DID1, pds); | ||
281 | - | ||
282 | - Set<PortNumber> expectedPorts = Sets.newHashSet(P1, P2); | ||
283 | - List<Port> ports = deviceStore.getPorts(DID1); | ||
284 | - for (Port port : ports) { | ||
285 | - assertTrue("Port is enabled", port.isEnabled()); | ||
286 | - assertTrue("PortNumber is one of expected", | ||
287 | - expectedPorts.remove(port.number())); | ||
288 | - } | ||
289 | -        assertTrue("Events for all expected ports appeared", expectedPorts.isEmpty()); | ||
290 | - | ||
291 | - | ||
292 | - assertTrue("DID2 has no ports", deviceStore.getPorts(DID2).isEmpty()); | ||
293 | - } | ||
294 | - | ||
295 | - @Test | ||
296 | - public final void testGetPort() { | ||
297 | - putDevice(DID1, SW1); | ||
298 | - putDevice(DID2, SW1); | ||
299 | - List<PortDescription> pds = Arrays.<PortDescription>asList( | ||
300 | - new DefaultPortDescription(P1, true), | ||
301 | - new DefaultPortDescription(P2, false) | ||
302 | - ); | ||
303 | - deviceStore.updatePorts(PID, DID1, pds); | ||
304 | - | ||
305 | - Port port1 = deviceStore.getPort(DID1, P1); | ||
306 | - assertEquals(P1, port1.number()); | ||
307 | - assertTrue("Port is enabled", port1.isEnabled()); | ||
308 | - | ||
309 | - Port port2 = deviceStore.getPort(DID1, P2); | ||
310 | - assertEquals(P2, port2.number()); | ||
311 | - assertFalse("Port is disabled", port2.isEnabled()); | ||
312 | - | ||
313 | - Port port3 = deviceStore.getPort(DID1, P3); | ||
314 | - assertNull("P3 not expected", port3); | ||
315 | - } | ||
316 | - | ||
317 | - @Test | ||
318 | - public final void testRemoveDevice() { | ||
319 | - putDevice(DID1, SW1); | ||
320 | - putDevice(DID2, SW1); | ||
321 | - | ||
322 | - assertEquals(2, deviceStore.getDeviceCount()); | ||
323 | - | ||
324 | - DeviceEvent event = deviceStore.removeDevice(DID1); | ||
325 | - assertEquals(DEVICE_REMOVED, event.type()); | ||
326 | - assertDevice(DID1, SW1, event.subject()); | ||
327 | - | ||
328 | - assertEquals(1, deviceStore.getDeviceCount()); | ||
329 | - } | ||
330 | - | ||
331 | - // TODO add test for Port events when we have them | ||
332 | - @Ignore("Ignore until Delegate spec. is clear.") | ||
333 | - @Test | ||
334 | - public final void testEvents() throws InterruptedException { | ||
335 | - final CountDownLatch addLatch = new CountDownLatch(1); | ||
336 | - DeviceStoreDelegate checkAdd = new DeviceStoreDelegate() { | ||
337 | - @Override | ||
338 | - public void notify(DeviceEvent event) { | ||
339 | - assertEquals(DEVICE_ADDED, event.type()); | ||
340 | - assertDevice(DID1, SW1, event.subject()); | ||
341 | - addLatch.countDown(); | ||
342 | - } | ||
343 | - }; | ||
344 | - final CountDownLatch updateLatch = new CountDownLatch(1); | ||
345 | - DeviceStoreDelegate checkUpdate = new DeviceStoreDelegate() { | ||
346 | - @Override | ||
347 | - public void notify(DeviceEvent event) { | ||
348 | - assertEquals(DEVICE_UPDATED, event.type()); | ||
349 | - assertDevice(DID1, SW2, event.subject()); | ||
350 | - updateLatch.countDown(); | ||
351 | - } | ||
352 | - }; | ||
353 | - final CountDownLatch removeLatch = new CountDownLatch(1); | ||
354 | - DeviceStoreDelegate checkRemove = new DeviceStoreDelegate() { | ||
355 | - @Override | ||
356 | - public void notify(DeviceEvent event) { | ||
357 | - assertEquals(DEVICE_REMOVED, event.type()); | ||
358 | - assertDevice(DID1, SW2, event.subject()); | ||
359 | - removeLatch.countDown(); | ||
360 | - } | ||
361 | - }; | ||
362 | - | ||
363 | - DeviceDescription description = | ||
364 | - new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR, | ||
365 | - HW, SW1, SN); | ||
366 | - deviceStore.setDelegate(checkAdd); | ||
367 | - deviceStore.createOrUpdateDevice(PID, DID1, description); | ||
368 | - assertTrue("Add event fired", addLatch.await(1, TimeUnit.SECONDS)); | ||
369 | - | ||
370 | - | ||
371 | - DeviceDescription description2 = | ||
372 | - new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR, | ||
373 | - HW, SW2, SN); | ||
374 | - deviceStore.unsetDelegate(checkAdd); | ||
375 | - deviceStore.setDelegate(checkUpdate); | ||
376 | - deviceStore.createOrUpdateDevice(PID, DID1, description2); | ||
377 | - assertTrue("Update event fired", updateLatch.await(1, TimeUnit.SECONDS)); | ||
378 | - | ||
379 | - deviceStore.unsetDelegate(checkUpdate); | ||
380 | - deviceStore.setDelegate(checkRemove); | ||
381 | - deviceStore.removeDevice(DID1); | ||
382 | - assertTrue("Remove event fired", removeLatch.await(1, TimeUnit.SECONDS)); | ||
383 | - } | ||
384 | - | ||
385 | - private class TestDistributedDeviceStore extends DistributedDeviceStore { | ||
386 | - public TestDistributedDeviceStore(StoreService storeService) { | ||
387 | - this.storeService = storeService; | ||
388 | - } | ||
389 | - } | ||
390 | -} |
core/store/hz/net/src/test/java/org/onlab/onos/store/link/impl/DistributedLinkStoreTest.java
deleted
100644 → 0
1 | -package org.onlab.onos.store.link.impl; | ||
2 | - | ||
3 | -import static org.junit.Assert.*; | ||
4 | -import static org.onlab.onos.net.DeviceId.deviceId; | ||
5 | -import static org.onlab.onos.net.Link.Type.*; | ||
6 | -import static org.onlab.onos.net.LinkKey.linkKey; | ||
7 | -import static org.onlab.onos.net.link.LinkEvent.Type.*; | ||
8 | - | ||
9 | -import java.util.HashMap; | ||
10 | -import java.util.Map; | ||
11 | -import java.util.Set; | ||
12 | -import java.util.concurrent.CountDownLatch; | ||
13 | -import java.util.concurrent.TimeUnit; | ||
14 | - | ||
15 | -import org.junit.After; | ||
16 | -import org.junit.AfterClass; | ||
17 | -import org.junit.Before; | ||
18 | -import org.junit.BeforeClass; | ||
19 | -import org.junit.Ignore; | ||
20 | -import org.junit.Test; | ||
21 | -import org.onlab.onos.net.ConnectPoint; | ||
22 | -import org.onlab.onos.net.DeviceId; | ||
23 | -import org.onlab.onos.net.Link; | ||
24 | -import org.onlab.onos.net.LinkKey; | ||
25 | -import org.onlab.onos.net.PortNumber; | ||
26 | -import org.onlab.onos.net.Link.Type; | ||
27 | -import org.onlab.onos.net.link.DefaultLinkDescription; | ||
28 | -import org.onlab.onos.net.link.LinkEvent; | ||
29 | -import org.onlab.onos.net.link.LinkStoreDelegate; | ||
30 | -import org.onlab.onos.net.provider.ProviderId; | ||
31 | -import org.onlab.onos.store.common.StoreManager; | ||
32 | -import org.onlab.onos.store.common.StoreService; | ||
33 | -import org.onlab.onos.store.common.TestStoreManager; | ||
34 | -import com.google.common.collect.Iterables; | ||
35 | -import com.hazelcast.config.Config; | ||
36 | -import com.hazelcast.core.Hazelcast; | ||
37 | - | ||
38 | -/** | ||
39 | - * Test of the Hazelcast based distributed LinkStore implementation. | ||
40 | - */ | ||
41 | -public class DistributedLinkStoreTest { | ||
42 | - | ||
43 | - private static final ProviderId PID = new ProviderId("of", "foo"); | ||
44 | - private static final DeviceId DID1 = deviceId("of:foo"); | ||
45 | - private static final DeviceId DID2 = deviceId("of:bar"); | ||
46 | - | ||
47 | - private static final PortNumber P1 = PortNumber.portNumber(1); | ||
48 | - private static final PortNumber P2 = PortNumber.portNumber(2); | ||
49 | - private static final PortNumber P3 = PortNumber.portNumber(3); | ||
50 | - | ||
51 | - private StoreManager storeManager; | ||
52 | - | ||
53 | - private DistributedLinkStore linkStore; | ||
54 | - | ||
55 | - @BeforeClass | ||
56 | - public static void setUpBeforeClass() throws Exception { | ||
57 | - } | ||
58 | - | ||
59 | - @AfterClass | ||
60 | - public static void tearDownAfterClass() throws Exception { | ||
61 | - } | ||
62 | - | ||
63 | - @Before | ||
64 | - public void setUp() throws Exception { | ||
65 | - // TODO should find a way to clean Hazelcast instance without shutdown. | ||
66 | - Config config = TestStoreManager.getTestConfig(); | ||
67 | - | ||
68 | - storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config)); | ||
69 | - storeManager.activate(); | ||
70 | - | ||
71 | - linkStore = new TestDistributedLinkStore(storeManager); | ||
72 | - linkStore.activate(); | ||
73 | - } | ||
74 | - | ||
75 | - @After | ||
76 | - public void tearDown() throws Exception { | ||
77 | - linkStore.deactivate(); | ||
78 | - storeManager.deactivate(); | ||
79 | - } | ||
80 | - | ||
81 | - private void putLink(DeviceId srcId, PortNumber srcNum, | ||
82 | - DeviceId dstId, PortNumber dstNum, Type type) { | ||
83 | - ConnectPoint src = new ConnectPoint(srcId, srcNum); | ||
84 | - ConnectPoint dst = new ConnectPoint(dstId, dstNum); | ||
85 | - linkStore.createOrUpdateLink(PID, new DefaultLinkDescription(src, dst, type)); | ||
86 | - } | ||
87 | - | ||
88 | - private void putLink(LinkKey key, Type type) { | ||
89 | - putLink(key.src().deviceId(), key.src().port(), | ||
90 | - key.dst().deviceId(), key.dst().port(), | ||
91 | - type); | ||
92 | - } | ||
93 | - | ||
94 | - private static void assertLink(DeviceId srcId, PortNumber srcNum, | ||
95 | - DeviceId dstId, PortNumber dstNum, Type type, | ||
96 | - Link link) { | ||
97 | - assertEquals(srcId, link.src().deviceId()); | ||
98 | - assertEquals(srcNum, link.src().port()); | ||
99 | - assertEquals(dstId, link.dst().deviceId()); | ||
100 | - assertEquals(dstNum, link.dst().port()); | ||
101 | - assertEquals(type, link.type()); | ||
102 | - } | ||
103 | - | ||
104 | - private static void assertLink(LinkKey key, Type type, Link link) { | ||
105 | - assertLink(key.src().deviceId(), key.src().port(), | ||
106 | - key.dst().deviceId(), key.dst().port(), | ||
107 | - type, link); | ||
108 | - } | ||
109 | - | ||
110 | - @Test | ||
111 | - public final void testGetLinkCount() { | ||
112 | -        assertEquals("initially empty", 0, linkStore.getLinkCount()); | ||
113 | - | ||
114 | - putLink(DID1, P1, DID2, P2, DIRECT); | ||
115 | - putLink(DID2, P2, DID1, P1, DIRECT); | ||
116 | - putLink(DID1, P1, DID2, P2, DIRECT); | ||
117 | - | ||
118 | -        assertEquals("expecting 2 unique links", 2, linkStore.getLinkCount()); | ||
119 | - } | ||
120 | - | ||
121 | - @Test | ||
122 | - public final void testGetLinks() { | ||
123 | -        assertEquals("initially empty", 0, | ||
124 | - Iterables.size(linkStore.getLinks())); | ||
125 | - | ||
126 | - LinkKey linkId1 = linkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2)); | ||
127 | - LinkKey linkId2 = linkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1)); | ||
128 | - | ||
129 | - putLink(linkId1, DIRECT); | ||
130 | - putLink(linkId2, DIRECT); | ||
131 | - putLink(linkId1, DIRECT); | ||
132 | - | ||
133 | -        assertEquals("expecting 2 unique links", 2, | ||
134 | - Iterables.size(linkStore.getLinks())); | ||
135 | - | ||
136 | - Map<LinkKey, Link> links = new HashMap<>(); | ||
137 | - for (Link link : linkStore.getLinks()) { | ||
138 | - links.put(linkKey(link), link); | ||
139 | - } | ||
140 | - | ||
141 | - assertLink(linkId1, DIRECT, links.get(linkId1)); | ||
142 | - assertLink(linkId2, DIRECT, links.get(linkId2)); | ||
143 | - } | ||
144 | - | ||
145 | - @Test | ||
146 | - public final void testGetDeviceEgressLinks() { | ||
147 | - LinkKey linkId1 = linkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2)); | ||
148 | - LinkKey linkId2 = linkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1)); | ||
149 | - LinkKey linkId3 = linkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3)); | ||
150 | - | ||
151 | - putLink(linkId1, DIRECT); | ||
152 | - putLink(linkId2, DIRECT); | ||
153 | - putLink(linkId3, DIRECT); | ||
154 | - | ||
155 | - // DID1,P1 => DID2,P2 | ||
156 | - // DID2,P2 => DID1,P1 | ||
157 | - // DID1,P2 => DID2,P3 | ||
158 | - | ||
159 | - Set<Link> links1 = linkStore.getDeviceEgressLinks(DID1); | ||
160 | - assertEquals(2, links1.size()); | ||
161 | - // check | ||
162 | - | ||
163 | - Set<Link> links2 = linkStore.getDeviceEgressLinks(DID2); | ||
164 | - assertEquals(1, links2.size()); | ||
165 | - assertLink(linkId2, DIRECT, links2.iterator().next()); | ||
166 | - } | ||
167 | - | ||
168 | - @Test | ||
169 | - public final void testGetDeviceIngressLinks() { | ||
170 | - LinkKey linkId1 = linkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2)); | ||
171 | - LinkKey linkId2 = linkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1)); | ||
172 | - LinkKey linkId3 = linkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3)); | ||
173 | - | ||
174 | - putLink(linkId1, DIRECT); | ||
175 | - putLink(linkId2, DIRECT); | ||
176 | - putLink(linkId3, DIRECT); | ||
177 | - | ||
178 | - // DID1,P1 => DID2,P2 | ||
179 | - // DID2,P2 => DID1,P1 | ||
180 | - // DID1,P2 => DID2,P3 | ||
181 | - | ||
182 | - Set<Link> links1 = linkStore.getDeviceIngressLinks(DID2); | ||
183 | - assertEquals(2, links1.size()); | ||
184 | - // check | ||
185 | - | ||
186 | - Set<Link> links2 = linkStore.getDeviceIngressLinks(DID1); | ||
187 | - assertEquals(1, links2.size()); | ||
188 | - assertLink(linkId2, DIRECT, links2.iterator().next()); | ||
189 | - } | ||
190 | - | ||
191 | - @Test | ||
192 | - public final void testGetLink() { | ||
193 | - ConnectPoint src = new ConnectPoint(DID1, P1); | ||
194 | - ConnectPoint dst = new ConnectPoint(DID2, P2); | ||
195 | - LinkKey linkId1 = linkKey(src, dst); | ||
196 | - | ||
197 | - putLink(linkId1, DIRECT); | ||
198 | - | ||
199 | - Link link = linkStore.getLink(src, dst); | ||
200 | - assertLink(linkId1, DIRECT, link); | ||
201 | - | ||
202 | -        assertNull("There shouldn't be a reverse link", | ||
203 | - linkStore.getLink(dst, src)); | ||
204 | - } | ||
205 | - | ||
206 | - @Test | ||
207 | - public final void testGetEgressLinks() { | ||
208 | - final ConnectPoint d1P1 = new ConnectPoint(DID1, P1); | ||
209 | - final ConnectPoint d2P2 = new ConnectPoint(DID2, P2); | ||
210 | - LinkKey linkId1 = linkKey(d1P1, d2P2); | ||
211 | - LinkKey linkId2 = linkKey(d2P2, d1P1); | ||
212 | - LinkKey linkId3 = linkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3)); | ||
213 | - | ||
214 | - putLink(linkId1, DIRECT); | ||
215 | - putLink(linkId2, DIRECT); | ||
216 | - putLink(linkId3, DIRECT); | ||
217 | - | ||
218 | - // DID1,P1 => DID2,P2 | ||
219 | - // DID2,P2 => DID1,P1 | ||
220 | - // DID1,P2 => DID2,P3 | ||
221 | - | ||
222 | - Set<Link> links1 = linkStore.getEgressLinks(d1P1); | ||
223 | - assertEquals(1, links1.size()); | ||
224 | - assertLink(linkId1, DIRECT, links1.iterator().next()); | ||
225 | - | ||
226 | - Set<Link> links2 = linkStore.getEgressLinks(d2P2); | ||
227 | - assertEquals(1, links2.size()); | ||
228 | - assertLink(linkId2, DIRECT, links2.iterator().next()); | ||
229 | - } | ||
230 | - | ||
231 | - @Test | ||
232 | - public final void testGetIngressLinks() { | ||
233 | - final ConnectPoint d1P1 = new ConnectPoint(DID1, P1); | ||
234 | - final ConnectPoint d2P2 = new ConnectPoint(DID2, P2); | ||
235 | - LinkKey linkId1 = linkKey(d1P1, d2P2); | ||
236 | - LinkKey linkId2 = linkKey(d2P2, d1P1); | ||
237 | - LinkKey linkId3 = linkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3)); | ||
238 | - | ||
239 | - putLink(linkId1, DIRECT); | ||
240 | - putLink(linkId2, DIRECT); | ||
241 | - putLink(linkId3, DIRECT); | ||
242 | - | ||
243 | - // DID1,P1 => DID2,P2 | ||
244 | - // DID2,P2 => DID1,P1 | ||
245 | - // DID1,P2 => DID2,P3 | ||
246 | - | ||
247 | - Set<Link> links1 = linkStore.getIngressLinks(d2P2); | ||
248 | - assertEquals(1, links1.size()); | ||
249 | - assertLink(linkId1, DIRECT, links1.iterator().next()); | ||
250 | - | ||
251 | - Set<Link> links2 = linkStore.getIngressLinks(d1P1); | ||
252 | - assertEquals(1, links2.size()); | ||
253 | - assertLink(linkId2, DIRECT, links2.iterator().next()); | ||
254 | - } | ||
255 | - | ||
256 | - @Test | ||
257 | - public final void testCreateOrUpdateLink() { | ||
258 | - ConnectPoint src = new ConnectPoint(DID1, P1); | ||
259 | - ConnectPoint dst = new ConnectPoint(DID2, P2); | ||
260 | - | ||
261 | - // add link | ||
262 | - LinkEvent event = linkStore.createOrUpdateLink(PID, | ||
263 | - new DefaultLinkDescription(src, dst, INDIRECT)); | ||
264 | - | ||
265 | - assertLink(DID1, P1, DID2, P2, INDIRECT, event.subject()); | ||
266 | - assertEquals(LINK_ADDED, event.type()); | ||
267 | - | ||
268 | - // update link type | ||
269 | - LinkEvent event2 = linkStore.createOrUpdateLink(PID, | ||
270 | - new DefaultLinkDescription(src, dst, DIRECT)); | ||
271 | - | ||
272 | - assertLink(DID1, P1, DID2, P2, DIRECT, event2.subject()); | ||
273 | - assertEquals(LINK_UPDATED, event2.type()); | ||
274 | - | ||
275 | - // no change | ||
276 | - LinkEvent event3 = linkStore.createOrUpdateLink(PID, | ||
277 | - new DefaultLinkDescription(src, dst, DIRECT)); | ||
278 | - | ||
279 | - assertNull("No change event expected", event3); | ||
280 | - } | ||
281 | - | ||
282 | - @Test | ||
283 | - public final void testRemoveLink() { | ||
284 | - final ConnectPoint d1P1 = new ConnectPoint(DID1, P1); | ||
285 | - final ConnectPoint d2P2 = new ConnectPoint(DID2, P2); | ||
286 | - LinkKey linkId1 = linkKey(d1P1, d2P2); | ||
287 | - LinkKey linkId2 = linkKey(d2P2, d1P1); | ||
288 | - | ||
289 | - putLink(linkId1, DIRECT); | ||
290 | - putLink(linkId2, DIRECT); | ||
291 | - | ||
292 | -        // DID1,P1 => DID2,P2 | ||
293 | -        // DID2,P2 => DID1,P1 | ||
294 | - | ||
295 | - | ||
296 | - LinkEvent event = linkStore.removeLink(d1P1, d2P2); | ||
297 | - assertEquals(LINK_REMOVED, event.type()); | ||
298 | - LinkEvent event2 = linkStore.removeLink(d1P1, d2P2); | ||
299 | - assertNull(event2); | ||
300 | - | ||
301 | - assertLink(linkId2, DIRECT, linkStore.getLink(d2P2, d1P1)); | ||
302 | - } | ||
303 | - | ||
304 | - @Ignore("Ignore until Delegate spec. is clear.") | ||
305 | - @Test | ||
306 | - public final void testEvents() throws InterruptedException { | ||
307 | - | ||
308 | - final ConnectPoint d1P1 = new ConnectPoint(DID1, P1); | ||
309 | - final ConnectPoint d2P2 = new ConnectPoint(DID2, P2); | ||
310 | - final LinkKey linkId1 = linkKey(d1P1, d2P2); | ||
311 | - | ||
312 | - final CountDownLatch addLatch = new CountDownLatch(1); | ||
313 | - LinkStoreDelegate checkAdd = new LinkStoreDelegate() { | ||
314 | - @Override | ||
315 | - public void notify(LinkEvent event) { | ||
316 | - assertEquals(LINK_ADDED, event.type()); | ||
317 | - assertLink(linkId1, INDIRECT, event.subject()); | ||
318 | - addLatch.countDown(); | ||
319 | - } | ||
320 | - }; | ||
321 | - final CountDownLatch updateLatch = new CountDownLatch(1); | ||
322 | - LinkStoreDelegate checkUpdate = new LinkStoreDelegate() { | ||
323 | - @Override | ||
324 | - public void notify(LinkEvent event) { | ||
325 | - assertEquals(LINK_UPDATED, event.type()); | ||
326 | - assertLink(linkId1, DIRECT, event.subject()); | ||
327 | - updateLatch.countDown(); | ||
328 | - } | ||
329 | - }; | ||
330 | - final CountDownLatch removeLatch = new CountDownLatch(1); | ||
331 | - LinkStoreDelegate checkRemove = new LinkStoreDelegate() { | ||
332 | - @Override | ||
333 | - public void notify(LinkEvent event) { | ||
334 | - assertEquals(LINK_REMOVED, event.type()); | ||
335 | - assertLink(linkId1, DIRECT, event.subject()); | ||
336 | - removeLatch.countDown(); | ||
337 | - } | ||
338 | - }; | ||
339 | - | ||
340 | - linkStore.setDelegate(checkAdd); | ||
341 | - putLink(linkId1, INDIRECT); | ||
342 | - assertTrue("Add event fired", addLatch.await(1, TimeUnit.SECONDS)); | ||
343 | - | ||
344 | - linkStore.unsetDelegate(checkAdd); | ||
345 | - linkStore.setDelegate(checkUpdate); | ||
346 | - putLink(linkId1, DIRECT); | ||
347 | - assertTrue("Update event fired", updateLatch.await(1, TimeUnit.SECONDS)); | ||
348 | - | ||
349 | - linkStore.unsetDelegate(checkUpdate); | ||
350 | - linkStore.setDelegate(checkRemove); | ||
351 | - linkStore.removeLink(d1P1, d2P2); | ||
352 | - assertTrue("Remove event fired", removeLatch.await(1, TimeUnit.SECONDS)); | ||
353 | - } | ||
354 | - | ||
355 | - | ||
356 | - class TestDistributedLinkStore extends DistributedLinkStore { | ||
357 | - TestDistributedLinkStore(StoreService storeService) { | ||
358 | - this.storeService = storeService; | ||
359 | - } | ||
360 | - } | ||
361 | -} |
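Both deleted test classes verify asynchronous store delegate notifications the same way: register a delegate that runs its assertions and releases a CountDownLatch, trigger the store operation, then await the latch with a timeout so a missing event fails the test instead of hanging it. A self-contained sketch of that pattern follows, with hypothetical Listener and EventSource stand-ins rather than the ONOS delegate types.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Self-contained sketch of the latch-based pattern both test classes above use
// to verify that an asynchronous delegate callback fired (hypothetical types).
public class LatchPatternDemo {

    // Hypothetical stand-in for the store delegate interface.
    interface Listener {
        void onEvent(String event);
    }

    // A trivial event source that invokes its listener on another thread,
    // like the store notifying its delegate asynchronously.
    static final class EventSource {
        void emit(Listener listener, String event) {
            new Thread(() -> listener.onEvent(event)).start();
        }
    }

    public static void main(String[] args) throws InterruptedException {
        CountDownLatch fired = new CountDownLatch(1);

        // The listener runs its checks, then releases the latch.
        Listener check = event -> {
            // In the real tests this is where assertEquals/assertLink run.
            if ("LINK_ADDED".equals(event)) {
                fired.countDown();
            }
        };

        new EventSource().emit(check, "LINK_ADDED");

        // Bounded wait: the test fails instead of hanging if the event never arrives.
        boolean ok = fired.await(1, TimeUnit.SECONDS);
        System.out.println("event fired: " + ok);
    }
}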