tom

Merge remote-tracking branch 'origin/master'

...@@ -5,7 +5,7 @@ import java.util.Objects;
5 public final class MastershipTerm { 5 public final class MastershipTerm {
6 6
7 private final NodeId master; 7 private final NodeId master;
8 - private int termNumber; 8 + private final int termNumber;
9 9
10 private MastershipTerm(NodeId master, int term) { 10 private MastershipTerm(NodeId master, int term) {
11 this.master = master; 11 this.master = master;
......
1 package org.onlab.onos.cluster.impl; 1 package org.onlab.onos.cluster.impl;
2 2
3 +import static com.google.common.base.Preconditions.checkNotNull;
4 +import static org.slf4j.LoggerFactory.getLogger;
5 +
6 +import java.util.Set;
7 +
3 import org.apache.felix.scr.annotations.Activate; 8 import org.apache.felix.scr.annotations.Activate;
4 import org.apache.felix.scr.annotations.Component; 9 import org.apache.felix.scr.annotations.Component;
5 import org.apache.felix.scr.annotations.Deactivate; 10 import org.apache.felix.scr.annotations.Deactivate;
...@@ -12,6 +17,7 @@ import org.onlab.onos.cluster.MastershipEvent;
12 import org.onlab.onos.cluster.MastershipListener; 17 import org.onlab.onos.cluster.MastershipListener;
13 import org.onlab.onos.cluster.MastershipService; 18 import org.onlab.onos.cluster.MastershipService;
14 import org.onlab.onos.cluster.MastershipStore; 19 import org.onlab.onos.cluster.MastershipStore;
20 +import org.onlab.onos.cluster.MastershipStoreDelegate;
15 import org.onlab.onos.cluster.MastershipTerm; 21 import org.onlab.onos.cluster.MastershipTerm;
16 import org.onlab.onos.cluster.MastershipTermService; 22 import org.onlab.onos.cluster.MastershipTermService;
17 import org.onlab.onos.cluster.NodeId; 23 import org.onlab.onos.cluster.NodeId;
...@@ -21,15 +27,10 @@ import org.onlab.onos.net.DeviceId;
21 import org.onlab.onos.net.MastershipRole; 27 import org.onlab.onos.net.MastershipRole;
22 import org.slf4j.Logger; 28 import org.slf4j.Logger;
23 29
24 -import java.util.Set;
25 -
26 -import static com.google.common.base.Preconditions.checkNotNull;
27 -import static org.slf4j.LoggerFactory.getLogger;
28 -
29 @Component(immediate = true) 30 @Component(immediate = true)
30 @Service 31 @Service
31 public class MastershipManager 32 public class MastershipManager
32 - implements MastershipService, MastershipAdminService { 33 +implements MastershipService, MastershipAdminService {
33 34
34 private static final String NODE_ID_NULL = "Node ID cannot be null"; 35 private static final String NODE_ID_NULL = "Node ID cannot be null";
35 private static final String DEVICE_ID_NULL = "Device ID cannot be null"; 36 private static final String DEVICE_ID_NULL = "Device ID cannot be null";
...@@ -40,6 +41,8 @@ public class MastershipManager
40 protected final AbstractListenerRegistry<MastershipEvent, MastershipListener> 41 protected final AbstractListenerRegistry<MastershipEvent, MastershipListener>
41 listenerRegistry = new AbstractListenerRegistry<>(); 42 listenerRegistry = new AbstractListenerRegistry<>();
42 43
44 + private final MastershipStoreDelegate delegate = new InternalDelegate();
45 +
43 @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) 46 @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
44 protected MastershipStore store; 47 protected MastershipStore store;
45 48
...@@ -52,12 +55,14 @@ public class MastershipManager
52 @Activate 55 @Activate
53 public void activate() { 56 public void activate() {
54 eventDispatcher.addSink(MastershipEvent.class, listenerRegistry); 57 eventDispatcher.addSink(MastershipEvent.class, listenerRegistry);
58 + store.setDelegate(delegate);
55 log.info("Started"); 59 log.info("Started");
56 } 60 }
57 61
58 @Deactivate 62 @Deactivate
59 public void deactivate() { 63 public void deactivate() {
60 eventDispatcher.removeSink(MastershipEvent.class); 64 eventDispatcher.removeSink(MastershipEvent.class);
65 + store.unsetDelegate(delegate);
61 log.info("Stopped"); 66 log.info("Stopped");
62 } 67 }
63 68
...@@ -141,4 +146,14 @@ public class MastershipManager
141 146
142 } 147 }
143 148
149 + public class InternalDelegate implements MastershipStoreDelegate {
150 +
151 + @Override
152 + public void notify(MastershipEvent event) {
153 + log.info("dispatching mastership event {}", event);
154 + eventDispatcher.post(event);
155 + }
156 +
157 + }
158 +
144 } 159 }
......
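With the store delegate wired in above, mastership events raised by the store now flow through MastershipManager's InternalDelegate into the event dispatcher and on to registered listeners. A minimal consumer sketch, assuming the usual addListener registration on MastershipService (mastershipService and log stand in for injected fields):

    // Sketch only: hypothetical consumer of the events dispatched by InternalDelegate.
    mastershipService.addListener(new MastershipListener() {
        @Override
        public void event(MastershipEvent event) {
            // subject() is the affected DeviceId, master() the NodeId that now owns it
            log.info("device {} is now mastered by {}", event.subject(), event.master());
        }
    });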
1 package org.onlab.onos.net.device.impl; 1 package org.onlab.onos.net.device.impl;
2 2
3 +import static com.google.common.base.Preconditions.checkNotNull;
4 +import static org.onlab.onos.net.device.DeviceEvent.Type.DEVICE_MASTERSHIP_CHANGED;
5 +import static org.slf4j.LoggerFactory.getLogger;
6 +
7 +import java.util.List;
8 +
3 import org.apache.felix.scr.annotations.Activate; 9 import org.apache.felix.scr.annotations.Activate;
4 import org.apache.felix.scr.annotations.Component; 10 import org.apache.felix.scr.annotations.Component;
5 import org.apache.felix.scr.annotations.Deactivate; 11 import org.apache.felix.scr.annotations.Deactivate;
...@@ -32,20 +38,14 @@ import org.onlab.onos.net.provider.AbstractProviderRegistry;
32 import org.onlab.onos.net.provider.AbstractProviderService; 38 import org.onlab.onos.net.provider.AbstractProviderService;
33 import org.slf4j.Logger; 39 import org.slf4j.Logger;
34 40
35 -import java.util.List;
36 -
37 -import static com.google.common.base.Preconditions.checkNotNull;
38 -import static org.onlab.onos.net.device.DeviceEvent.Type.DEVICE_MASTERSHIP_CHANGED;
39 -import static org.slf4j.LoggerFactory.getLogger;
40 -
41 /** 41 /**
42 * Provides implementation of the device SB & NB APIs. 42 * Provides implementation of the device SB & NB APIs.
43 */ 43 */
44 @Component(immediate = true) 44 @Component(immediate = true)
45 @Service 45 @Service
46 public class DeviceManager 46 public class DeviceManager
47 - extends AbstractProviderRegistry<DeviceProvider, DeviceProviderService> 47 +extends AbstractProviderRegistry<DeviceProvider, DeviceProviderService>
48 - implements DeviceService, DeviceAdminService, DeviceProviderRegistry { 48 +implements DeviceService, DeviceAdminService, DeviceProviderRegistry {
49 49
50 private static final String DEVICE_ID_NULL = "Device ID cannot be null"; 50 private static final String DEVICE_ID_NULL = "Device ID cannot be null";
51 private static final String PORT_NUMBER_NULL = "Port number cannot be null"; 51 private static final String PORT_NUMBER_NULL = "Port number cannot be null";
...@@ -58,7 +58,7 @@ public class DeviceManager
58 protected final AbstractListenerRegistry<DeviceEvent, DeviceListener> 58 protected final AbstractListenerRegistry<DeviceEvent, DeviceListener>
59 listenerRegistry = new AbstractListenerRegistry<>(); 59 listenerRegistry = new AbstractListenerRegistry<>();
60 60
61 - private DeviceStoreDelegate delegate = new InternalStoreDelegate(); 61 + private final DeviceStoreDelegate delegate = new InternalStoreDelegate();
62 62
63 private final MastershipListener mastershipListener = new InternalMastershipListener(); 63 private final MastershipListener mastershipListener = new InternalMastershipListener();
64 64
...@@ -249,9 +249,10 @@ public class DeviceManager
249 private class InternalMastershipListener implements MastershipListener { 249 private class InternalMastershipListener implements MastershipListener {
250 @Override 250 @Override
251 public void event(MastershipEvent event) { 251 public void event(MastershipEvent event) {
252 - // FIXME: for now we're taking action only on becoming master
253 if (event.master().equals(clusterService.getLocalNode().id())) { 252 if (event.master().equals(clusterService.getLocalNode().id())) {
254 applyRole(event.subject(), MastershipRole.MASTER); 253 applyRole(event.subject(), MastershipRole.MASTER);
254 + } else {
255 + applyRole(event.subject(), MastershipRole.STANDBY);
255 } 256 }
256 } 257 }
257 } 258 }
......
1 package org.onlab.onos.store.cluster.impl; 1 package org.onlab.onos.store.cluster.impl;
2 2
3 -import com.google.common.base.Optional; 3 +import static com.google.common.cache.CacheBuilder.newBuilder;
4 -import com.google.common.cache.LoadingCache; 4 +import static org.onlab.onos.cluster.MastershipEvent.Type.MASTER_CHANGED;
5 -import com.google.common.collect.ImmutableSet; 5 +
6 -import com.hazelcast.core.IMap; 6 +import java.util.Map;
7 +import java.util.Objects;
8 +import java.util.Set;
7 9
8 import org.apache.felix.scr.annotations.Activate; 10 import org.apache.felix.scr.annotations.Activate;
9 import org.apache.felix.scr.annotations.Component; 11 import org.apache.felix.scr.annotations.Component;
...@@ -23,11 +25,10 @@ import org.onlab.onos.store.impl.AbsentInvalidatingLoadingCache;
23 import org.onlab.onos.store.impl.AbstractDistributedStore; 25 import org.onlab.onos.store.impl.AbstractDistributedStore;
24 import org.onlab.onos.store.impl.OptionalCacheLoader; 26 import org.onlab.onos.store.impl.OptionalCacheLoader;
25 27
26 -import java.util.Map; 28 +import com.google.common.base.Optional;
27 -import java.util.Objects; 29 +import com.google.common.cache.LoadingCache;
28 -import java.util.Set; 30 +import com.google.common.collect.ImmutableSet;
29 - 31 +import com.hazelcast.core.IMap;
30 -import static com.google.common.cache.CacheBuilder.newBuilder;
31 32
32 /** 33 /**
33 * Distributed implementation of the cluster nodes store. 34 * Distributed implementation of the cluster nodes store.
...@@ -35,8 +36,8 @@ import static com.google.common.cache.CacheBuilder.newBuilder;
35 @Component(immediate = true) 36 @Component(immediate = true)
36 @Service 37 @Service
37 public class DistributedMastershipStore 38 public class DistributedMastershipStore
38 - extends AbstractDistributedStore<MastershipEvent, MastershipStoreDelegate> 39 +extends AbstractDistributedStore<MastershipEvent, MastershipStoreDelegate>
39 - implements MastershipStore { 40 +implements MastershipStore {
40 41
41 private IMap<byte[], byte[]> rawMasters; 42 private IMap<byte[], byte[]> rawMasters;
42 private LoadingCache<DeviceId, Optional<NodeId>> masters; 43 private LoadingCache<DeviceId, Optional<NodeId>> masters;
...@@ -53,7 +54,7 @@ public class DistributedMastershipStore
53 OptionalCacheLoader<DeviceId, NodeId> nodeLoader 54 OptionalCacheLoader<DeviceId, NodeId> nodeLoader
54 = new OptionalCacheLoader<>(storeService, rawMasters); 55 = new OptionalCacheLoader<>(storeService, rawMasters);
55 masters = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader)); 56 masters = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader));
56 - rawMasters.addEntryListener(new RemoteEventHandler<>(masters), true); 57 + rawMasters.addEntryListener(new RemoteMastershipEventHandler(masters), true);
57 58
58 loadMasters(); 59 loadMasters();
59 60
...@@ -122,4 +123,25 @@ public class DistributedMastershipStore
122 return null; 123 return null;
123 } 124 }
124 125
126 + private class RemoteMastershipEventHandler extends RemoteEventHandler<DeviceId, NodeId> {
127 + public RemoteMastershipEventHandler(LoadingCache<DeviceId, Optional<NodeId>> cache) {
128 + super(cache);
129 + }
130 +
131 + @Override
132 + protected void onAdd(DeviceId deviceId, NodeId nodeId) {
133 + notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, nodeId));
134 + }
135 +
136 + @Override
137 + protected void onRemove(DeviceId deviceId, NodeId nodeId) {
138 + notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, nodeId));
139 + }
140 +
141 + @Override
142 + protected void onUpdate(DeviceId deviceId, NodeId oldNodeId, NodeId nodeId) {
143 + notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, nodeId));
144 + }
145 + }
146 +
125 } 147 }
......
1 +package org.onlab.onos.store.flow.impl;
2 +
3 +import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_ADDED;
4 +import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_REMOVED;
5 +import static org.slf4j.LoggerFactory.getLogger;
6 +
7 +import java.util.Collection;
8 +import java.util.Collections;
9 +
10 +import org.apache.felix.scr.annotations.Activate;
11 +import org.apache.felix.scr.annotations.Component;
12 +import org.apache.felix.scr.annotations.Deactivate;
13 +import org.apache.felix.scr.annotations.Service;
14 +import org.onlab.onos.ApplicationId;
15 +import org.onlab.onos.net.DeviceId;
16 +import org.onlab.onos.net.flow.DefaultFlowRule;
17 +import org.onlab.onos.net.flow.FlowRule;
18 +import org.onlab.onos.net.flow.FlowRule.FlowRuleState;
19 +import org.onlab.onos.net.flow.FlowRuleEvent;
20 +import org.onlab.onos.net.flow.FlowRuleEvent.Type;
21 +import org.onlab.onos.net.flow.FlowRuleStore;
22 +import org.onlab.onos.net.flow.FlowRuleStoreDelegate;
23 +import org.onlab.onos.store.AbstractStore;
24 +import org.slf4j.Logger;
25 +
26 +import com.google.common.collect.ArrayListMultimap;
27 +import com.google.common.collect.ImmutableSet;
28 +import com.google.common.collect.Multimap;
29 +
30 +/**
31 + * Manages inventory of flow rules using a trivial in-memory implementation.
32 + */
33 +//FIXME: I LIE I AM NOT DISTRIBUTED
34 +@Component(immediate = true)
35 +@Service
36 +public class DistributedFlowRuleStore
37 +extends AbstractStore<FlowRuleEvent, FlowRuleStoreDelegate>
38 +implements FlowRuleStore {
39 +
40 + private final Logger log = getLogger(getClass());
41 +
42 + // store entries as a pile of rules, no info about device tables
43 + private final Multimap<DeviceId, FlowRule> flowEntries =
44 + ArrayListMultimap.<DeviceId, FlowRule>create();
45 +
46 + private final Multimap<ApplicationId, FlowRule> flowEntriesById =
47 + ArrayListMultimap.<ApplicationId, FlowRule>create();
48 +
49 + @Activate
50 + public void activate() {
51 + log.info("Started");
52 + }
53 +
54 + @Deactivate
55 + public void deactivate() {
56 + log.info("Stopped");
57 + }
58 +
59 +
60 + @Override
61 + public synchronized FlowRule getFlowRule(FlowRule rule) {
62 + for (FlowRule f : flowEntries.get(rule.deviceId())) {
63 + if (f.equals(rule)) {
64 + return f;
65 + }
66 + }
67 + return null;
68 + }
69 +
70 + @Override
71 + public synchronized Iterable<FlowRule> getFlowEntries(DeviceId deviceId) {
72 + Collection<FlowRule> rules = flowEntries.get(deviceId);
73 + if (rules == null) {
74 + return Collections.emptyList();
75 + }
76 + return ImmutableSet.copyOf(rules);
77 + }
78 +
79 + @Override
80 + public synchronized Iterable<FlowRule> getFlowEntriesByAppId(ApplicationId appId) {
81 + Collection<FlowRule> rules = flowEntriesById.get(appId);
82 + if (rules == null) {
83 + return Collections.emptyList();
84 + }
85 + return ImmutableSet.copyOf(rules);
86 + }
87 +
88 + @Override
89 + public synchronized void storeFlowRule(FlowRule rule) {
90 + FlowRule f = new DefaultFlowRule(rule, FlowRuleState.PENDING_ADD);
91 + DeviceId did = f.deviceId();
92 + if (!flowEntries.containsEntry(did, f)) {
93 + flowEntries.put(did, f);
94 + flowEntriesById.put(rule.appId(), f);
95 + }
96 + }
97 +
98 + @Override
99 + public synchronized void deleteFlowRule(FlowRule rule) {
100 + FlowRule f = new DefaultFlowRule(rule, FlowRuleState.PENDING_REMOVE);
101 + DeviceId did = f.deviceId();
102 +
103 + /*
104 + * Find the rule and mark it for deletion.
105 + * Ultimately a flow-removed event will come and remove it.
106 + */
107 +
108 + if (flowEntries.containsEntry(did, f)) {
109 + //synchronized (flowEntries) {
110 + flowEntries.remove(did, f);
111 + flowEntries.put(did, f);
112 + flowEntriesById.remove(rule.appId(), rule);
113 + //}
114 + }
115 + }
116 +
117 + @Override
118 + public synchronized FlowRuleEvent addOrUpdateFlowRule(FlowRule rule) {
119 + DeviceId did = rule.deviceId();
120 +
121 + // check if this new rule is an update to an existing entry
122 + if (flowEntries.containsEntry(did, rule)) {
123 + //synchronized (flowEntries) {
124 + // Multimaps support duplicates so we have to remove our rule
125 + // and replace it with the current version.
126 + flowEntries.remove(did, rule);
127 + flowEntries.put(did, rule);
128 + //}
129 + return new FlowRuleEvent(Type.RULE_UPDATED, rule);
130 + }
131 +
132 + flowEntries.put(did, rule);
133 + return new FlowRuleEvent(RULE_ADDED, rule);
134 + }
135 +
136 + @Override
137 + public synchronized FlowRuleEvent removeFlowRule(FlowRule rule) {
138 + //synchronized (this) {
139 + if (flowEntries.remove(rule.deviceId(), rule)) {
140 + return new FlowRuleEvent(RULE_REMOVED, rule);
141 + } else {
142 + return null;
143 + }
144 + //}
145 + }
146 +
147 +
148 +
149 +
150 +
151 +
152 +
153 +}
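For orientation, a rough sketch of the two-phase lifecycle this store models, with a hypothetical caller standing in for the flow rule manager (the manager itself is not part of this change):

    // Sketch only: 'store' is the DistributedFlowRuleStore, 'rule' a FlowRule built elsewhere.
    store.storeFlowRule(rule);                       // recorded as PENDING_ADD for the device
    // ... once the device reports the rule back:
    FlowRuleEvent added = store.addOrUpdateFlowRule(rule);   // RULE_ADDED, or RULE_UPDATED on refresh

    store.deleteFlowRule(rule);                      // re-marked as PENDING_REMOVE
    // ... once the device confirms removal:
    FlowRuleEvent removed = store.removeFlowRule(rule);      // RULE_REMOVED, or null if already gone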
1 +package org.onlab.onos.store.host.impl;
2 +
3 +import static org.onlab.onos.net.host.HostEvent.Type.HOST_ADDED;
4 +import static org.onlab.onos.net.host.HostEvent.Type.HOST_MOVED;
5 +import static org.onlab.onos.net.host.HostEvent.Type.HOST_REMOVED;
6 +import static org.onlab.onos.net.host.HostEvent.Type.HOST_UPDATED;
7 +import static org.slf4j.LoggerFactory.getLogger;
8 +
9 +import java.util.Collections;
10 +import java.util.HashSet;
11 +import java.util.Map;
12 +import java.util.Set;
13 +import java.util.concurrent.ConcurrentHashMap;
14 +
15 +import org.apache.felix.scr.annotations.Activate;
16 +import org.apache.felix.scr.annotations.Component;
17 +import org.apache.felix.scr.annotations.Deactivate;
18 +import org.apache.felix.scr.annotations.Service;
19 +import org.onlab.onos.net.ConnectPoint;
20 +import org.onlab.onos.net.DefaultHost;
21 +import org.onlab.onos.net.DeviceId;
22 +import org.onlab.onos.net.Host;
23 +import org.onlab.onos.net.HostId;
24 +import org.onlab.onos.net.host.HostDescription;
25 +import org.onlab.onos.net.host.HostEvent;
26 +import org.onlab.onos.net.host.HostStore;
27 +import org.onlab.onos.net.host.HostStoreDelegate;
28 +import org.onlab.onos.net.host.PortAddresses;
29 +import org.onlab.onos.net.provider.ProviderId;
30 +import org.onlab.onos.store.AbstractStore;
31 +import org.onlab.packet.IpPrefix;
32 +import org.onlab.packet.MacAddress;
33 +import org.onlab.packet.VlanId;
34 +import org.slf4j.Logger;
35 +
36 +import com.google.common.collect.HashMultimap;
37 +import com.google.common.collect.ImmutableSet;
38 +import com.google.common.collect.Multimap;
39 +import com.google.common.collect.Sets;
40 +
41 +/**
42 + * Manages inventory of end-station hosts using a trivial in-memory
43 + * implementation.
44 + */
45 +//FIXME: I LIE I AM NOT DISTRIBUTED
46 +@Component(immediate = true)
47 +@Service
48 +public class DistributedHostStore
49 +extends AbstractStore<HostEvent, HostStoreDelegate>
50 +implements HostStore {
51 +
52 + private final Logger log = getLogger(getClass());
53 +
54 + // Host inventory
55 + private final Map<HostId, Host> hosts = new ConcurrentHashMap<>();
56 +
57 + // Hosts tracked by their location
58 + private final Multimap<ConnectPoint, Host> locations = HashMultimap.create();
59 +
60 + private final Map<ConnectPoint, PortAddresses> portAddresses =
61 + new ConcurrentHashMap<>();
62 +
63 + @Activate
64 + public void activate() {
65 + log.info("Started");
66 + }
67 +
68 + @Deactivate
69 + public void deactivate() {
70 + log.info("Stopped");
71 + }
72 +
73 + @Override
74 + public HostEvent createOrUpdateHost(ProviderId providerId, HostId hostId,
75 + HostDescription hostDescription) {
76 + Host host = hosts.get(hostId);
77 + if (host == null) {
78 + return createHost(providerId, hostId, hostDescription);
79 + }
80 + return updateHost(providerId, host, hostDescription);
81 + }
82 +
83 + // creates a new host and sends HOST_ADDED
84 + private HostEvent createHost(ProviderId providerId, HostId hostId,
85 + HostDescription descr) {
86 + DefaultHost newhost = new DefaultHost(providerId, hostId,
87 + descr.hwAddress(),
88 + descr.vlan(),
89 + descr.location(),
90 + descr.ipAddresses());
91 + synchronized (this) {
92 + hosts.put(hostId, newhost);
93 + locations.put(descr.location(), newhost);
94 + }
95 + return new HostEvent(HOST_ADDED, newhost);
96 + }
97 +
98 + // checks for type of update to host, sends appropriate event
99 + private HostEvent updateHost(ProviderId providerId, Host host,
100 + HostDescription descr) {
101 + DefaultHost updated;
102 + HostEvent event;
103 + if (!host.location().equals(descr.location())) {
104 + updated = new DefaultHost(providerId, host.id(),
105 + host.mac(),
106 + host.vlan(),
107 + descr.location(),
108 + host.ipAddresses());
109 + event = new HostEvent(HOST_MOVED, updated);
110 +
111 + } else if (!(host.ipAddresses().equals(descr.ipAddresses()))) {
112 + updated = new DefaultHost(providerId, host.id(),
113 + host.mac(),
114 + host.vlan(),
115 + descr.location(),
116 + descr.ipAddresses());
117 + event = new HostEvent(HOST_UPDATED, updated);
118 + } else {
119 + return null;
120 + }
121 + synchronized (this) {
122 + hosts.put(host.id(), updated);
123 + locations.remove(host.location(), host);
124 + locations.put(updated.location(), updated);
125 + }
126 + return event;
127 + }
128 +
129 + @Override
130 + public HostEvent removeHost(HostId hostId) {
131 + synchronized (this) {
132 + Host host = hosts.remove(hostId);
133 + if (host != null) {
134 + locations.remove((host.location()), host);
135 + return new HostEvent(HOST_REMOVED, host);
136 + }
137 + return null;
138 + }
139 + }
140 +
141 + @Override
142 + public int getHostCount() {
143 + return hosts.size();
144 + }
145 +
146 + @Override
147 + public Iterable<Host> getHosts() {
148 + return Collections.unmodifiableSet(new HashSet<>(hosts.values()));
149 + }
150 +
151 + @Override
152 + public Host getHost(HostId hostId) {
153 + return hosts.get(hostId);
154 + }
155 +
156 + @Override
157 + public Set<Host> getHosts(VlanId vlanId) {
158 + Set<Host> vlanset = new HashSet<>();
159 + for (Host h : hosts.values()) {
160 + if (h.vlan().equals(vlanId)) {
161 + vlanset.add(h);
162 + }
163 + }
164 + return vlanset;
165 + }
166 +
167 + @Override
168 + public Set<Host> getHosts(MacAddress mac) {
169 + Set<Host> macset = new HashSet<>();
170 + for (Host h : hosts.values()) {
171 + if (h.mac().equals(mac)) {
172 + macset.add(h);
173 + }
174 + }
175 + return macset;
176 + }
177 +
178 + @Override
179 + public Set<Host> getHosts(IpPrefix ip) {
180 + Set<Host> ipset = new HashSet<>();
181 + for (Host h : hosts.values()) {
182 + if (h.ipAddresses().contains(ip)) {
183 + ipset.add(h);
184 + }
185 + }
186 + return ipset;
187 + }
188 +
189 + @Override
190 + public Set<Host> getConnectedHosts(ConnectPoint connectPoint) {
191 + return ImmutableSet.copyOf(locations.get(connectPoint));
192 + }
193 +
194 + @Override
195 + public Set<Host> getConnectedHosts(DeviceId deviceId) {
196 + Set<Host> hostset = new HashSet<>();
197 + for (ConnectPoint p : locations.keySet()) {
198 + if (p.deviceId().equals(deviceId)) {
199 + hostset.addAll(locations.get(p));
200 + }
201 + }
202 + return hostset;
203 + }
204 +
205 + @Override
206 + public void updateAddressBindings(PortAddresses addresses) {
207 + synchronized (portAddresses) {
208 + PortAddresses existing = portAddresses.get(addresses.connectPoint());
209 + if (existing == null) {
210 + portAddresses.put(addresses.connectPoint(), addresses);
211 + } else {
212 + Set<IpPrefix> union = Sets.union(existing.ips(), addresses.ips())
213 + .immutableCopy();
214 +
215 + MacAddress newMac = (addresses.mac() == null) ? existing.mac()
216 + : addresses.mac();
217 +
218 + PortAddresses newAddresses =
219 + new PortAddresses(addresses.connectPoint(), union, newMac);
220 +
221 + portAddresses.put(newAddresses.connectPoint(), newAddresses);
222 + }
223 + }
224 + }
225 +
226 + @Override
227 + public void removeAddressBindings(PortAddresses addresses) {
228 + synchronized (portAddresses) {
229 + PortAddresses existing = portAddresses.get(addresses.connectPoint());
230 + if (existing != null) {
231 + Set<IpPrefix> difference =
232 + Sets.difference(existing.ips(), addresses.ips()).immutableCopy();
233 +
234 + // If they removed the existing mac, set the new mac to null.
235 + // Otherwise, keep the existing mac.
236 + MacAddress newMac = existing.mac();
237 + if (addresses.mac() != null && addresses.mac().equals(existing.mac())) {
238 + newMac = null;
239 + }
240 +
241 + PortAddresses newAddresses =
242 + new PortAddresses(addresses.connectPoint(), difference, newMac);
243 +
244 + portAddresses.put(newAddresses.connectPoint(), newAddresses);
245 + }
246 + }
247 + }
248 +
249 + @Override
250 + public void clearAddressBindings(ConnectPoint connectPoint) {
251 + synchronized (portAddresses) {
252 + portAddresses.remove(connectPoint);
253 + }
254 + }
255 +
256 + @Override
257 + public Set<PortAddresses> getAddressBindings() {
258 + synchronized (portAddresses) {
259 + return new HashSet<>(portAddresses.values());
260 + }
261 + }
262 +
263 + @Override
264 + public PortAddresses getAddressBindingsForPort(ConnectPoint connectPoint) {
265 + PortAddresses addresses;
266 +
267 + synchronized (portAddresses) {
268 + addresses = portAddresses.get(connectPoint);
269 + }
270 +
271 + if (addresses == null) {
272 + addresses = new PortAddresses(connectPoint, null, null);
273 + }
274 +
275 + return addresses;
276 + }
277 +
278 +}
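Note that updateAddressBindings merges rather than replaces: IP sets are unioned and a non-null MAC wins. A small sketch of that behaviour, assuming cp, ip1, ip2 and mac are built elsewhere (Sets is the Guava helper already imported above):

    // Sketch only: two successive updates against the same connect point.
    store.updateAddressBindings(new PortAddresses(cp, Sets.newHashSet(ip1), null));
    store.updateAddressBindings(new PortAddresses(cp, Sets.newHashSet(ip2), mac));

    PortAddresses merged = store.getAddressBindingsForPort(cp);
    // merged.ips() now contains both ip1 and ip2; merged.mac() is mac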
1 +package org.onlab.onos.store.topology.impl;
2 +
3 +import com.google.common.collect.ImmutableMap;
4 +import com.google.common.collect.ImmutableSet;
5 +import com.google.common.collect.ImmutableSetMultimap;
6 +import org.onlab.graph.DijkstraGraphSearch;
7 +import org.onlab.graph.GraphPathSearch;
8 +import org.onlab.graph.TarjanGraphSearch;
9 +import org.onlab.onos.net.AbstractModel;
10 +import org.onlab.onos.net.ConnectPoint;
11 +import org.onlab.onos.net.DefaultPath;
12 +import org.onlab.onos.net.DeviceId;
13 +import org.onlab.onos.net.Link;
14 +import org.onlab.onos.net.Path;
15 +import org.onlab.onos.net.provider.ProviderId;
16 +import org.onlab.onos.net.topology.ClusterId;
17 +import org.onlab.onos.net.topology.DefaultTopologyCluster;
18 +import org.onlab.onos.net.topology.DefaultTopologyVertex;
19 +import org.onlab.onos.net.topology.GraphDescription;
20 +import org.onlab.onos.net.topology.LinkWeight;
21 +import org.onlab.onos.net.topology.Topology;
22 +import org.onlab.onos.net.topology.TopologyCluster;
23 +import org.onlab.onos.net.topology.TopologyEdge;
24 +import org.onlab.onos.net.topology.TopologyGraph;
25 +import org.onlab.onos.net.topology.TopologyVertex;
26 +
27 +import java.util.ArrayList;
28 +import java.util.List;
29 +import java.util.Map;
30 +import java.util.Set;
31 +
32 +import static com.google.common.base.MoreObjects.toStringHelper;
33 +import static com.google.common.collect.ImmutableSetMultimap.Builder;
34 +import static org.onlab.graph.GraphPathSearch.Result;
35 +import static org.onlab.graph.TarjanGraphSearch.SCCResult;
36 +import static org.onlab.onos.net.Link.Type.INDIRECT;
37 +
38 +/**
39 + * Default implementation of the topology descriptor. This carries the
40 + * backing topology data.
41 + */
42 +public class DefaultTopology extends AbstractModel implements Topology {
43 +
44 + private static final DijkstraGraphSearch<TopologyVertex, TopologyEdge> DIJKSTRA =
45 + new DijkstraGraphSearch<>();
46 + private static final TarjanGraphSearch<TopologyVertex, TopologyEdge> TARJAN =
47 + new TarjanGraphSearch<>();
48 +
49 + private static final ProviderId PID = new ProviderId("core", "org.onlab.onos.net");
50 +
51 + private final long time;
52 + private final TopologyGraph graph;
53 +
54 + private final SCCResult<TopologyVertex, TopologyEdge> clusterResults;
55 + private final ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> results;
56 + private final ImmutableSetMultimap<PathKey, Path> paths;
57 +
58 + private final ImmutableMap<ClusterId, TopologyCluster> clusters;
59 + private final ImmutableSet<ConnectPoint> infrastructurePoints;
60 + private final ImmutableSetMultimap<ClusterId, ConnectPoint> broadcastSets;
61 +
62 + private ImmutableMap<DeviceId, TopologyCluster> clustersByDevice;
63 + private ImmutableSetMultimap<TopologyCluster, DeviceId> devicesByCluster;
64 + private ImmutableSetMultimap<TopologyCluster, Link> linksByCluster;
65 +
66 +
67 + /**
68 + * Creates a topology descriptor attributed to the specified provider.
69 + *
70 + * @param providerId identity of the provider
71 + * @param description data describing the new topology
72 + */
73 + DefaultTopology(ProviderId providerId, GraphDescription description) {
74 + super(providerId);
75 + this.time = description.timestamp();
76 +
77 + // Build the graph
78 + this.graph = new DefaultTopologyGraph(description.vertexes(),
79 + description.edges());
80 +
81 + this.results = searchForShortestPaths();
82 + this.paths = buildPaths();
83 +
84 + this.clusterResults = searchForClusters();
85 + this.clusters = buildTopologyClusters();
86 +
87 + buildIndexes();
88 +
89 + this.broadcastSets = buildBroadcastSets();
90 + this.infrastructurePoints = findInfrastructurePoints();
91 + }
92 +
93 + @Override
94 + public long time() {
95 + return time;
96 + }
97 +
98 + @Override
99 + public int clusterCount() {
100 + return clusters.size();
101 + }
102 +
103 + @Override
104 + public int deviceCount() {
105 + return graph.getVertexes().size();
106 + }
107 +
108 + @Override
109 + public int linkCount() {
110 + return graph.getEdges().size();
111 + }
112 +
113 + @Override
114 + public int pathCount() {
115 + return paths.size();
116 + }
117 +
118 + /**
119 + * Returns the backing topology graph.
120 + *
121 + * @return topology graph
122 + */
123 + TopologyGraph getGraph() {
124 + return graph;
125 + }
126 +
127 + /**
128 + * Returns the set of topology clusters.
129 + *
130 + * @return set of clusters
131 + */
132 + Set<TopologyCluster> getClusters() {
133 + return ImmutableSet.copyOf(clusters.values());
134 + }
135 +
136 + /**
137 + * Returns the specified topology cluster.
138 + *
139 + * @param clusterId cluster identifier
140 + * @return topology cluster
141 + */
142 + TopologyCluster getCluster(ClusterId clusterId) {
143 + return clusters.get(clusterId);
144 + }
145 +
146 + /**
147 + * Returns the topology cluster that contains the given device.
148 + *
149 + * @param deviceId device identifier
150 + * @return topology cluster
151 + */
152 + TopologyCluster getCluster(DeviceId deviceId) {
153 + return clustersByDevice.get(deviceId);
154 + }
155 +
156 + /**
157 + * Returns the set of cluster devices.
158 + *
159 + * @param cluster topology cluster
160 + * @return cluster devices
161 + */
162 + Set<DeviceId> getClusterDevices(TopologyCluster cluster) {
163 + return devicesByCluster.get(cluster);
164 + }
165 +
166 + /**
167 + * Returns the set of cluster links.
168 + *
169 + * @param cluster topology cluster
170 + * @return cluster links
171 + */
172 + Set<Link> getClusterLinks(TopologyCluster cluster) {
173 + return linksByCluster.get(cluster);
174 + }
175 +
176 + /**
177 + * Indicates whether the given point is an infrastructure link end-point.
178 + *
179 + * @param connectPoint connection point
180 + * @return true if infrastructure
181 + */
182 + boolean isInfrastructure(ConnectPoint connectPoint) {
183 + return infrastructurePoints.contains(connectPoint);
184 + }
185 +
186 + /**
187 + * Indicates whether the given point is part of a broadcast set.
188 + *
189 + * @param connectPoint connection point
190 + * @return true if in broadcast set
191 + */
192 + boolean isBroadcastPoint(ConnectPoint connectPoint) {
193 + // Any non-infrastructure points, i.e. edge points, are assumed to be OK.
194 + if (!isInfrastructure(connectPoint)) {
195 + return true;
196 + }
197 +
198 + // Find the cluster to which the device belongs.
199 + TopologyCluster cluster = clustersByDevice.get(connectPoint.deviceId());
200 + if (cluster == null) {
201 + throw new IllegalArgumentException("No cluster found for device " + connectPoint.deviceId());
202 + }
203 +
204 + // If the broadcast set is null or empty, or if the point explicitly
205 + // belongs to it, return true.
206 + Set<ConnectPoint> points = broadcastSets.get(cluster.id());
207 + return points == null || points.isEmpty() || points.contains(connectPoint);
208 + }
209 +
210 + /**
211 + * Returns the size of the cluster broadcast set.
212 + *
213 + * @param clusterId cluster identifier
214 + * @return size of the cluster broadcast set
215 + */
216 + int broadcastSetSize(ClusterId clusterId) {
217 + return broadcastSets.get(clusterId).size();
218 + }
219 +
220 + /**
221 + * Returns the set of pre-computed shortest paths between source and
222 + * destination devices.
223 + *
224 + * @param src source device
225 + * @param dst destination device
226 + * @return set of shortest paths
227 + */
228 + Set<Path> getPaths(DeviceId src, DeviceId dst) {
229 + return paths.get(new PathKey(src, dst));
230 + }
231 +
232 + /**
233 + * Computes on-demand the set of shortest paths between source and
234 + * destination devices.
235 + *
236 + * @param src source device
237 + * @param dst destination device
238 + * @return set of shortest paths
239 + */
240 + Set<Path> getPaths(DeviceId src, DeviceId dst, LinkWeight weight) {
241 + GraphPathSearch.Result<TopologyVertex, TopologyEdge> result =
242 + DIJKSTRA.search(graph, new DefaultTopologyVertex(src),
243 + new DefaultTopologyVertex(dst), weight);
244 + ImmutableSet.Builder<Path> builder = ImmutableSet.builder();
245 + for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
246 + builder.add(networkPath(path));
247 + }
248 + return builder.build();
249 + }
250 +
251 +
252 + // Searches the graph for all shortest paths and returns the search results.
253 + private ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> searchForShortestPaths() {
254 + ImmutableMap.Builder<DeviceId, Result<TopologyVertex, TopologyEdge>> builder = ImmutableMap.builder();
255 +
256 + // Search graph paths for each source to all destinations.
257 + LinkWeight weight = new HopCountLinkWeight(graph.getVertexes().size());
258 + for (TopologyVertex src : graph.getVertexes()) {
259 + builder.put(src.deviceId(), DIJKSTRA.search(graph, src, null, weight));
260 + }
261 + return builder.build();
262 + }
263 +
264 + // Builds network paths from the graph path search results
265 + private ImmutableSetMultimap<PathKey, Path> buildPaths() {
266 + Builder<PathKey, Path> builder = ImmutableSetMultimap.builder();
267 + for (DeviceId deviceId : results.keySet()) {
268 + Result<TopologyVertex, TopologyEdge> result = results.get(deviceId);
269 + for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
270 + builder.put(new PathKey(path.src().deviceId(), path.dst().deviceId()),
271 + networkPath(path));
272 + }
273 + }
274 + return builder.build();
275 + }
276 +
277 + // Converts graph path to a network path with the same cost.
278 + private Path networkPath(org.onlab.graph.Path<TopologyVertex, TopologyEdge> path) {
279 + List<Link> links = new ArrayList<>();
280 + for (TopologyEdge edge : path.edges()) {
281 + links.add(edge.link());
282 + }
283 + return new DefaultPath(PID, links, path.cost());
284 + }
285 +
286 +
287 + // Searches for SCC clusters in the network topology graph using the Tarjan
288 + // algorithm.
289 + private SCCResult<TopologyVertex, TopologyEdge> searchForClusters() {
290 + return TARJAN.search(graph, new NoIndirectLinksWeight());
291 + }
292 +
293 + // Builds the topology clusters and returns the id-cluster bindings.
294 + private ImmutableMap<ClusterId, TopologyCluster> buildTopologyClusters() {
295 + ImmutableMap.Builder<ClusterId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
296 + SCCResult<TopologyVertex, TopologyEdge> result =
297 + TARJAN.search(graph, new NoIndirectLinksWeight());
298 +
299 + // Extract both vertexes and edges from the results; the lists form
300 + // pairs along the same index.
301 + List<Set<TopologyVertex>> clusterVertexes = result.clusterVertexes();
302 + List<Set<TopologyEdge>> clusterEdges = result.clusterEdges();
303 +
304 + // Scan over the lists and create a cluster from the results.
305 + for (int i = 0, n = result.clusterCount(); i < n; i++) {
306 + Set<TopologyVertex> vertexSet = clusterVertexes.get(i);
307 + Set<TopologyEdge> edgeSet = clusterEdges.get(i);
308 +
309 + ClusterId cid = ClusterId.clusterId(i);
310 + DefaultTopologyCluster cluster =
311 + new DefaultTopologyCluster(cid, vertexSet.size(), edgeSet.size(),
312 + findRoot(vertexSet).deviceId());
313 + clusterBuilder.put(cid, cluster);
314 + }
315 + return clusterBuilder.build();
316 + }
317 +
318 + // Finds the vertex whose device id is the lexicographical minimum in the
319 + // specified set.
320 + private TopologyVertex findRoot(Set<TopologyVertex> vertexSet) {
321 + TopologyVertex minVertex = null;
322 + for (TopologyVertex vertex : vertexSet) {
323 + if (minVertex == null ||
324 + minVertex.deviceId().toString()
325 + .compareTo(minVertex.deviceId().toString()) < 0) {
326 + minVertex = vertex;
327 + }
328 + }
329 + return minVertex;
330 + }
331 +
332 + // Processes a map of broadcast sets for each cluster.
333 + private ImmutableSetMultimap<ClusterId, ConnectPoint> buildBroadcastSets() {
334 + Builder<ClusterId, ConnectPoint> builder = ImmutableSetMultimap.builder();
335 + for (TopologyCluster cluster : clusters.values()) {
336 + addClusterBroadcastSet(cluster, builder);
337 + }
338 + return builder.build();
339 + }
340 +
341 + // Finds all broadcast points for the cluster. These are those connection
342 + // points which lie along the shortest paths between the cluster root and
343 + // all other devices within the cluster.
344 + private void addClusterBroadcastSet(TopologyCluster cluster,
345 + Builder<ClusterId, ConnectPoint> builder) {
346 + // Use the graph root search results to build the broadcast set.
347 + Result<TopologyVertex, TopologyEdge> result = results.get(cluster.root());
348 + for (Map.Entry<TopologyVertex, Set<TopologyEdge>> entry : result.parents().entrySet()) {
349 + TopologyVertex vertex = entry.getKey();
350 +
351 + // Ignore any parents that lead outside the cluster.
352 + if (clustersByDevice.get(vertex.deviceId()) != cluster) {
353 + continue;
354 + }
355 +
356 + // Ignore any back-link sets that are empty.
357 + Set<TopologyEdge> parents = entry.getValue();
358 + if (parents.isEmpty()) {
359 + continue;
360 + }
361 +
362 + // Use the first back-link source and destinations to add to the
363 + // broadcast set.
364 + Link link = parents.iterator().next().link();
365 + builder.put(cluster.id(), link.src());
366 + builder.put(cluster.id(), link.dst());
367 + }
368 + }
369 +
370 + // Collects and returns a set of all infrastructure link end-points.
371 + private ImmutableSet<ConnectPoint> findInfrastructurePoints() {
372 + ImmutableSet.Builder<ConnectPoint> builder = ImmutableSet.builder();
373 + for (TopologyEdge edge : graph.getEdges()) {
374 + builder.add(edge.link().src());
375 + builder.add(edge.link().dst());
376 + }
377 + return builder.build();
378 + }
379 +
380 + // Builds cluster-devices, cluster-links and device-cluster indexes.
381 + private void buildIndexes() {
382 + // Prepare the index builders
383 + ImmutableMap.Builder<DeviceId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
384 + ImmutableSetMultimap.Builder<TopologyCluster, DeviceId> devicesBuilder = ImmutableSetMultimap.builder();
385 + ImmutableSetMultimap.Builder<TopologyCluster, Link> linksBuilder = ImmutableSetMultimap.builder();
386 +
387 + // Now scan through all the clusters
388 + for (TopologyCluster cluster : clusters.values()) {
389 + int i = cluster.id().index();
390 +
391 + // Scan through all the cluster vertexes.
392 + for (TopologyVertex vertex : clusterResults.clusterVertexes().get(i)) {
393 + devicesBuilder.put(cluster, vertex.deviceId());
394 + clusterBuilder.put(vertex.deviceId(), cluster);
395 + }
396 +
397 + // Scan through all the cluster edges.
398 + for (TopologyEdge edge : clusterResults.clusterEdges().get(i)) {
399 + linksBuilder.put(cluster, edge.link());
400 + }
401 + }
402 +
403 + // Finalize all indexes.
404 + clustersByDevice = clusterBuilder.build();
405 + devicesByCluster = devicesBuilder.build();
406 + linksByCluster = linksBuilder.build();
407 + }
408 +
409 + // Link weight for measuring link cost as hop count with indirect links
410 + // being as expensive as traversing the entire graph to assume the worst.
411 + private static class HopCountLinkWeight implements LinkWeight {
412 + private final int indirectLinkCost;
413 +
414 + HopCountLinkWeight(int indirectLinkCost) {
415 + this.indirectLinkCost = indirectLinkCost;
416 + }
417 +
418 + @Override
419 + public double weight(TopologyEdge edge) {
420 + // To force preference to use direct paths first, make indirect
421 + // links as expensive as the linear vertex traversal.
422 + return edge.link().type() == INDIRECT ? indirectLinkCost : 1;
423 + }
424 + }
425 +
426 + // Link weight for preventing traversal over indirect links.
427 + private static class NoIndirectLinksWeight implements LinkWeight {
428 + @Override
429 + public double weight(TopologyEdge edge) {
430 + return edge.link().type() == INDIRECT ? -1 : 1;
431 + }
432 + }
433 +
434 + @Override
435 + public String toString() {
436 + return toStringHelper(this)
437 + .add("time", time)
438 + .add("clusters", clusterCount())
439 + .add("devices", deviceCount())
440 + .add("links", linkCount())
441 + .add("pathCount", pathCount())
442 + .toString();
443 + }
444 +}
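The pre-computed getPaths(src, dst) uses the hop-count weight above; the on-demand variant accepts any LinkWeight. A hedged example of a caller-supplied weight that penalizes links leaving one device ('topology', 'avoid', 'srcId' and 'dstId' are hypothetical values from the caller):

    // Sketch only: heavily penalize links whose source is the device we want to avoid.
    LinkWeight avoiding = new LinkWeight() {
        @Override
        public double weight(TopologyEdge edge) {
            return edge.link().src().deviceId().equals(avoid) ? 1000 : 1;
        }
    };
    Set<Path> paths = topology.getPaths(srcId, dstId, avoiding);  // the on-demand variant above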
1 +package org.onlab.onos.store.topology.impl;
2 +
3 +import org.onlab.graph.AdjacencyListsGraph;
4 +import org.onlab.onos.net.topology.TopologyEdge;
5 +import org.onlab.onos.net.topology.TopologyGraph;
6 +import org.onlab.onos.net.topology.TopologyVertex;
7 +
8 +import java.util.Set;
9 +
10 +/**
11 + * Default implementation of an immutable topology graph based on a generic
12 + * implementation of adjacency lists graph.
13 + */
14 +public class DefaultTopologyGraph
15 + extends AdjacencyListsGraph<TopologyVertex, TopologyEdge>
16 + implements TopologyGraph {
17 +
18 + /**
19 + * Creates a topology graph comprising the specified vertexes and edges.
20 + *
21 + * @param vertexes set of graph vertexes
22 + * @param edges set of graph edges
23 + */
24 + public DefaultTopologyGraph(Set<TopologyVertex> vertexes, Set<TopologyEdge> edges) {
25 + super(vertexes, edges);
26 + }
27 +
28 +}
1 +package org.onlab.onos.store.topology.impl;
2 +
3 +import static org.slf4j.LoggerFactory.getLogger;
4 +
5 +import java.util.List;
6 +import java.util.Set;
7 +
8 +import org.apache.felix.scr.annotations.Activate;
9 +import org.apache.felix.scr.annotations.Component;
10 +import org.apache.felix.scr.annotations.Deactivate;
11 +import org.apache.felix.scr.annotations.Service;
12 +import org.onlab.onos.event.Event;
13 +import org.onlab.onos.net.ConnectPoint;
14 +import org.onlab.onos.net.DeviceId;
15 +import org.onlab.onos.net.Link;
16 +import org.onlab.onos.net.Path;
17 +import org.onlab.onos.net.provider.ProviderId;
18 +import org.onlab.onos.net.topology.ClusterId;
19 +import org.onlab.onos.net.topology.GraphDescription;
20 +import org.onlab.onos.net.topology.LinkWeight;
21 +import org.onlab.onos.net.topology.Topology;
22 +import org.onlab.onos.net.topology.TopologyCluster;
23 +import org.onlab.onos.net.topology.TopologyEvent;
24 +import org.onlab.onos.net.topology.TopologyGraph;
25 +import org.onlab.onos.net.topology.TopologyStore;
26 +import org.onlab.onos.net.topology.TopologyStoreDelegate;
27 +import org.onlab.onos.store.AbstractStore;
28 +import org.slf4j.Logger;
29 +
30 +/**
31 + * Manages inventory of topology snapshots using trivial in-memory
32 + * structures.
33 + */
34 +//FIXME: I LIE I AM NOT DISTRIBUTED
35 +@Component(immediate = true)
36 +@Service
37 +public class DistributedTopologyStore
38 +extends AbstractStore<TopologyEvent, TopologyStoreDelegate>
39 +implements TopologyStore {
40 +
41 + private final Logger log = getLogger(getClass());
42 +
43 + private volatile DefaultTopology current;
44 +
45 + @Activate
46 + public void activate() {
47 + log.info("Started");
48 + }
49 +
50 + @Deactivate
51 + public void deactivate() {
52 + log.info("Stopped");
53 + }
54 + @Override
55 + public Topology currentTopology() {
56 + return current;
57 + }
58 +
59 + @Override
60 + public boolean isLatest(Topology topology) {
61 + // Topology is current only if it is the same as our current topology
62 + return topology == current;
63 + }
64 +
65 + @Override
66 + public TopologyGraph getGraph(Topology topology) {
67 + return defaultTopology(topology).getGraph();
68 + }
69 +
70 + @Override
71 + public Set<TopologyCluster> getClusters(Topology topology) {
72 + return defaultTopology(topology).getClusters();
73 + }
74 +
75 + @Override
76 + public TopologyCluster getCluster(Topology topology, ClusterId clusterId) {
77 + return defaultTopology(topology).getCluster(clusterId);
78 + }
79 +
80 + @Override
81 + public Set<DeviceId> getClusterDevices(Topology topology, TopologyCluster cluster) {
82 + return defaultTopology(topology).getClusterDevices(cluster);
83 + }
84 +
85 + @Override
86 + public Set<Link> getClusterLinks(Topology topology, TopologyCluster cluster) {
87 + return defaultTopology(topology).getClusterLinks(cluster);
88 + }
89 +
90 + @Override
91 + public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst) {
92 + return defaultTopology(topology).getPaths(src, dst);
93 + }
94 +
95 + @Override
96 + public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst,
97 + LinkWeight weight) {
98 + return defaultTopology(topology).getPaths(src, dst, weight);
99 + }
100 +
101 + @Override
102 + public boolean isInfrastructure(Topology topology, ConnectPoint connectPoint) {
103 + return defaultTopology(topology).isInfrastructure(connectPoint);
104 + }
105 +
106 + @Override
107 + public boolean isBroadcastPoint(Topology topology, ConnectPoint connectPoint) {
108 + return defaultTopology(topology).isBroadcastPoint(connectPoint);
109 + }
110 +
111 + @Override
112 + public TopologyEvent updateTopology(ProviderId providerId,
113 + GraphDescription graphDescription,
114 + List<Event> reasons) {
115 + // First off, make sure that what we're given is indeed newer than
116 + // what we already have.
117 + if (current != null && graphDescription.timestamp() < current.time()) {
118 + return null;
119 + }
120 +
121 + // Have the default topology construct self from the description data.
122 + DefaultTopology newTopology =
123 + new DefaultTopology(providerId, graphDescription);
124 +
125 + // Promote the new topology to current and return a ready-to-send event.
126 + synchronized (this) {
127 + current = newTopology;
128 + return new TopologyEvent(TopologyEvent.Type.TOPOLOGY_CHANGED, current);
129 + }
130 + }
131 +
132 + // Validates the specified topology and returns it as a default
133 + private DefaultTopology defaultTopology(Topology topology) {
134 + if (topology instanceof DefaultTopology) {
135 + return (DefaultTopology) topology;
136 + }
137 + throw new IllegalArgumentException("Topology class " + topology.getClass() +
138 + " not supported");
139 + }
140 +
141 +}
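A sketch of how a topology manager (hypothetical caller, not part of this change) would drive the store: hand it a fresh GraphDescription and post the returned event only when a newer topology was actually promoted:

    // Sketch only: providerId, description and reasons come from the caller.
    TopologyEvent event = store.updateTopology(providerId, description, reasons);
    // a null return means the description was older than the current topology and was ignored
    if (event != null) {
        eventDispatcher.post(event);   // eventDispatcher assumed, as in the managers above
    }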
1 +package org.onlab.onos.store.topology.impl;
2 +
3 +import org.onlab.onos.net.DeviceId;
4 +
5 +import java.util.Objects;
6 +
7 +/**
8 + * Key for filing pre-computed paths between source and destination devices.
9 + */
10 +class PathKey {
11 + private final DeviceId src;
12 + private final DeviceId dst;
13 +
14 + /**
15 + * Creates a path key from the given source/dest pair.
16 + * @param src source device
17 + * @param dst destination device
18 + */
19 + PathKey(DeviceId src, DeviceId dst) {
20 + this.src = src;
21 + this.dst = dst;
22 + }
23 +
24 + @Override
25 + public int hashCode() {
26 + return Objects.hash(src, dst);
27 + }
28 +
29 + @Override
30 + public boolean equals(Object obj) {
31 + if (this == obj) {
32 + return true;
33 + }
34 + if (obj instanceof PathKey) {
35 + final PathKey other = (PathKey) obj;
36 + return Objects.equals(this.src, other.src) && Objects.equals(this.dst, other.dst);
37 + }
38 + return false;
39 + }
40 +}
...@@ -93,13 +93,17 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
93 93
94 @Override 94 @Override
95 public final void sendMsg(OFMessage m) { 95 public final void sendMsg(OFMessage m) {
96 + if (role == RoleState.MASTER) {
96 this.write(m); 97 this.write(m);
97 } 98 }
99 + }
98 100
99 @Override 101 @Override
100 public final void sendMsg(List<OFMessage> msgs) { 102 public final void sendMsg(List<OFMessage> msgs) {
103 + if (role == RoleState.MASTER) {
101 this.write(msgs); 104 this.write(msgs);
102 } 105 }
106 + }
103 107
104 @Override 108 @Override
105 public abstract void write(OFMessage msg); 109 public abstract void write(OFMessage msg);
...@@ -164,8 +168,10 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
164 */ 168 */
165 @Override 169 @Override
166 public final void handleMessage(OFMessage m) { 170 public final void handleMessage(OFMessage m) {
171 + if (this.role == RoleState.MASTER) {
167 this.agent.processMessage(dpid, m); 172 this.agent.processMessage(dpid, m);
168 } 173 }
174 + }
169 175
170 @Override 176 @Override
171 public RoleState getRole() { 177 public RoleState getRole() {
...@@ -226,19 +232,34 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
226 @Override 232 @Override
227 public abstract void processDriverHandshakeMessage(OFMessage m); 233 public abstract void processDriverHandshakeMessage(OFMessage m);
228 234
235 +
236 + // Role Handling
237 +
229 @Override 238 @Override
230 public void setRole(RoleState role) { 239 public void setRole(RoleState role) {
231 try { 240 try {
232 - log.info("Sending role {} to switch {}", role, getStringId());
233 if (this.roleMan.sendRoleRequest(role, RoleRecvStatus.MATCHED_SET_ROLE)) { 241 if (this.roleMan.sendRoleRequest(role, RoleRecvStatus.MATCHED_SET_ROLE)) {
242 + log.info("Sending role {} to switch {}", role, getStringId());
243 + if (role == RoleState.SLAVE || role == RoleState.EQUAL) {
234 this.role = role; 244 this.role = role;
235 } 245 }
246 + }
236 } catch (IOException e) { 247 } catch (IOException e) {
237 log.error("Unable to write to switch {}.", this.dpid); 248 log.error("Unable to write to switch {}.", this.dpid);
238 } 249 }
239 } 250 }
240 251
241 - // Role Handling 252 + @Override
253 + public void reassertRole() {
254 + if (this.getRole() == RoleState.MASTER) {
255 + log.warn("Received permission error from switch {} while " +
256 + "being master. Reasserting master role.",
257 + this.getStringId());
258 + this.setRole(RoleState.MASTER);
259 + }
260 + }
261 +
262 +
242 263
243 @Override 264 @Override
244 public void handleRole(OFMessage m) throws SwitchStateException { 265 public void handleRole(OFMessage m) throws SwitchStateException {
...@@ -246,11 +267,15 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
246 RoleRecvStatus rrs = roleMan.deliverRoleReply(rri); 267 RoleRecvStatus rrs = roleMan.deliverRoleReply(rri);
247 if (rrs == RoleRecvStatus.MATCHED_SET_ROLE) { 268 if (rrs == RoleRecvStatus.MATCHED_SET_ROLE) {
248 if (rri.getRole() == RoleState.MASTER) { 269 if (rri.getRole() == RoleState.MASTER) {
270 + this.role = rri.getRole();
249 this.transitionToMasterSwitch(); 271 this.transitionToMasterSwitch();
250 } else if (rri.getRole() == RoleState.EQUAL || 272 } else if (rri.getRole() == RoleState.EQUAL ||
251 - rri.getRole() == RoleState.MASTER) { 273 + rri.getRole() == RoleState.SLAVE) {
252 this.transitionToEqualSwitch(); 274 this.transitionToEqualSwitch();
253 } 275 }
276 + } else {
277 + return;
278 + //TODO: tell people that we failed.
254 } 279 }
255 } 280 }
256 281
...@@ -267,11 +292,15 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
267 new RoleReplyInfo(r, null, m.getXid())); 292 new RoleReplyInfo(r, null, m.getXid()));
268 if (rrs == RoleRecvStatus.MATCHED_SET_ROLE) { 293 if (rrs == RoleRecvStatus.MATCHED_SET_ROLE) {
269 if (r == RoleState.MASTER) { 294 if (r == RoleState.MASTER) {
295 + this.role = r;
270 this.transitionToMasterSwitch(); 296 this.transitionToMasterSwitch();
271 } else if (r == RoleState.EQUAL || 297 } else if (r == RoleState.EQUAL ||
272 r == RoleState.SLAVE) { 298 r == RoleState.SLAVE) {
273 this.transitionToEqualSwitch(); 299 this.transitionToEqualSwitch();
274 } 300 }
301 + } else {
302 + return;
303 + //TODO: tell people that we failed.
275 } 304 }
276 } 305 }
277 306
...@@ -285,12 +314,7 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
285 return true; 314 return true;
286 } 315 }
287 316
288 - @Override 317 +
289 - public void reassertRole() {
290 - if (this.getRole() == RoleState.MASTER) {
291 - this.setRole(RoleState.MASTER);
292 - }
293 - }
294 318
295 @Override 319 @Override
296 public final void setAgent(OpenFlowAgent ag) { 320 public final void setAgent(OpenFlowAgent ag) {
......
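Taken together, the guards added to sendMsg() and handleMessage() mean a controller instance that is not MASTER for a switch silently drops traffic to and from it. A hedged sketch of the resulting behaviour ('sw' and 'msg' are hypothetical):

    // Sketch only, using the RoleState handling shown above.
    sw.setRole(RoleState.SLAVE);    // role field is updated immediately for SLAVE/EQUAL
    sw.sendMsg(msg);                // dropped: local role is not MASTER

    sw.setRole(RoleState.MASTER);   // for MASTER the field is only set in handleRole(), on the reply
    // ... once the switch's role reply has been processed:
    sw.sendMsg(msg);                // now actually written to the switch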
...@@ -521,9 +521,7 @@ class OFChannelHandler extends IdleStateAwareChannelHandler {
521 // if two controllers are master (even if its only for 521 // if two controllers are master (even if its only for
522 // a brief period). We might need to see if these errors 522 // a brief period). We might need to see if these errors
523 // persist before we reassert 523 // persist before we reassert
524 - log.warn("Received permission error from switch {} while" + 524 +
525 - "being master. Reasserting master role.",
526 - h.getSwitchInfoString());
527 h.sw.reassertRole(); 525 h.sw.reassertRole();
528 } else if (m.getErrType() == OFErrorType.FLOW_MOD_FAILED && 526 } else if (m.getErrType() == OFErrorType.FLOW_MOD_FAILED &&
529 ((OFFlowModFailedErrorMsg) m).getCode() == 527 ((OFFlowModFailedErrorMsg) m).getCode() ==
......
...@@ -142,9 +142,9 @@ class RoleManager implements RoleHandler {
142 } 142 }
143 // OF1.0 switch with support for NX_ROLE_REQUEST vendor extn. 143 // OF1.0 switch with support for NX_ROLE_REQUEST vendor extn.
144 // make Role.EQUAL become Role.SLAVE 144 // make Role.EQUAL become Role.SLAVE
145 + pendingRole = role;
145 role = (role == RoleState.EQUAL) ? RoleState.SLAVE : role; 146 role = (role == RoleState.EQUAL) ? RoleState.SLAVE : role;
146 pendingXid = sendNxRoleRequest(role); 147 pendingXid = sendNxRoleRequest(role);
147 - pendingRole = role;
148 requestPending = true; 148 requestPending = true;
149 } else { 149 } else {
150 // OF1.3 switch, use OFPT_ROLE_REQUEST message 150 // OF1.3 switch, use OFPT_ROLE_REQUEST message
......
1 #!/usr/bin/python 1 #!/usr/bin/python
2 import sys, solar 2 import sys, solar
3 -topo = solar.Solar(cip=sys.argv[1]) 3 +topo = solar.Solar(cips=sys.argv[1:])
4 topo.run() 4 topo.run()
......
...@@ -17,22 +17,22 @@ class CustomCLI(CLI):
17 class Solar(object): 17 class Solar(object):
18 """ Create a tiered topology from semi-scratch in Mininet """ 18 """ Create a tiered topology from semi-scratch in Mininet """
19 19
20 - def __init__(self, cname='onos', cip='192.168.56.1', islands=3, edges=2, hosts=2, 20 + def __init__(self, cname='onos', cips=['192.168.56.1'], islands=3, edges=2, hosts=2):
21 - proto=None):
22 """Create tower topology for mininet""" 21 """Create tower topology for mininet"""
23 22
24 # We are creating the controller with local-loopback on purpose to avoid 23 # We are creating the controller with local-loopback on purpose to avoid
25 # having the switches connect immediately. Instead, we'll set controller 24 # having the switches connect immediately. Instead, we'll set controller
26 # explicitly for each switch after configuring it as we want. 25 # explicitly for each switch after configuring it as we want.
27 - self.flare = RemoteController(cname, cip, 6633) 26 + self.ctrls = [ RemoteController(cname, cip, 6633) for cip in cips ]
28 - self.net = Mininet(controller=self.flare, switch = OVSKernelSwitch, 27 + self.net = Mininet(controller=RemoteController, switch = OVSKernelSwitch,
29 build=False) 28 build=False)
30 29
31 - self.cip = cip 30 + self.cips = cips
32 self.spines = [] 31 self.spines = []
33 self.leaves = [] 32 self.leaves = []
34 self.hosts = [] 33 self.hosts = []
35 - self.proto = proto 34 + for ctrl in self.ctrls:
35 + self.net.addController(ctrl)
36 36
37 # Create the two core switches and links between them 37 # Create the two core switches and links between them
38 c1 = self.net.addSwitch('c1',dpid='1111000000000000') 38 c1 = self.net.addSwitch('c1',dpid='1111000000000000')
...@@ -83,29 +83,11 @@ class Solar(object):
83 83
84 def run(self): 84 def run(self):
85 """ Runs the created network topology and launches mininet cli""" 85 """ Runs the created network topology and launches mininet cli"""
86 - self.run_silent() 86 + self.net.build()
87 + self.net.start()
87 CustomCLI(self.net) 88 CustomCLI(self.net)
88 self.net.stop() 89 self.net.stop()
89 90
90 - def run_silent(self):
91 - """ Runs silently - for unit testing """
92 - self.net.build()
93 -
94 - # Start the switches, configure them with desired protocols and only
95 - # then set the controller
96 - for sw in self.spines:
97 - sw.start([self.flare])
98 - if self.proto:
99 - sw.cmd('ovs-vsctl set bridge %(sw)s protocols=%(proto)s' % \
100 - { 'sw': sw.name, 'proto': self.proto})
101 - sw.cmdPrint('ovs-vsctl set-controller %(sw)s tcp:%(ctl)s:6633' % \
102 - {'sw': sw.name, 'ctl': self.cip})
103 -
104 - for sw in self.leaves:
105 - sw.start([self.flare])
106 - sw.cmdPrint('ovs-vsctl set-controller %(sw)s tcp:%(ctl)s:6633' % \
107 - {'sw': sw.name, 'ctl': self.cip})
108 -
109 def pingAll(self): 91 def pingAll(self):
110 """ PingAll to create flows - for unit testing """ 92 """ PingAll to create flows - for unit testing """
111 self.net.pingAll() 93 self.net.pingAll()
......