Thomas Vachuska
Committed by Gerrit Code Review

Removing hazelcast dependency throughout.

Change-Id: I738050fda142418d2956f613035892dac82ef098
Showing 30 changed files with 34 additions and 5510 deletions
...@@ -71,7 +71,7 @@
71         <dependency>
72             <groupId>org.mapdb</groupId>
73             <artifactId>mapdb</artifactId>
74 -           <version>1.0.7</version>
74 +           <version>1.0.7</version>
75         </dependency>
76
77         <dependency>
...@@ -93,31 +93,21 @@
93             <artifactId>commons-lang3</artifactId>
94         </dependency>
95         <dependency>
96 -           <groupId>org.easymock</groupId>
97 -           <artifactId>easymock</artifactId>
98 -           <scope>test</scope>
99 -       </dependency>
100 -      <dependency>
101 -          <groupId>org.onosproject</groupId>
102 -          <artifactId>onos-api</artifactId>
103 -          <classifier>tests</classifier>
104 -          <scope>test</scope>
105 -      </dependency>
106 -      <dependency>
107 -          <groupId>com.hazelcast</groupId>
108 -          <artifactId>hazelcast</artifactId>
96 +           <groupId>org.easymock</groupId>
97 +           <artifactId>easymock</artifactId>
98 +           <scope>test</scope>
109        </dependency>
110        <dependency>
111 -          <groupId>com.hazelcast</groupId>
112 -          <artifactId>hazelcast</artifactId>
113 -          <classifier>tests</classifier>
114 -          <scope>test</scope>
101 +          <groupId>org.onosproject</groupId>
102 +          <artifactId>onos-api</artifactId>
103 +          <classifier>tests</classifier>
104 +          <scope>test</scope>
115        </dependency>
116
117        <!-- for shaded copycat -->
118        <dependency>
119 -          <groupId>org.onosproject</groupId>
120 -          <artifactId>onlab-thirdparty</artifactId>
109 +          <groupId>org.onosproject</groupId>
110 +          <artifactId>onlab-thirdparty</artifactId>
121        </dependency>
122    </dependencies>
123
......
1   package org.onosproject.store.cluster.impl;
2
3 - import static com.hazelcast.util.AddressUtil.matchInterface;
4   import static java.net.NetworkInterface.getNetworkInterfaces;
5   import static java.util.Collections.list;
6   import static org.onosproject.cluster.DefaultControllerNode.DEFAULT_PORT;
...@@ -31,7 +30,6 @@ import org.slf4j.Logger;
31
32   import com.google.common.collect.ImmutableSet;
33   import com.google.common.collect.Sets;
34 - import com.hazelcast.util.AddressUtil;
35
36   /**
37    * Implementation of ClusterDefinitionService.
...@@ -115,7 +113,7 @@ public class ClusterDefinitionManager implements ClusterDefinitionService {
115           Enumeration<InetAddress> inetAddresses = iface.getInetAddresses();
116           while (inetAddresses.hasMoreElements()) {
117               IpAddress ip = IpAddress.valueOf(inetAddresses.nextElement());
118 -             if (AddressUtil.matchInterface(ip.toString(), clusterDefinition.getIpPrefix())) {
116 +             if (matchInterface(ip.toString(), clusterDefinition.getIpPrefix())) {
119                   return ip;
120               }
121           }
...@@ -169,4 +167,11 @@ public class ClusterDefinitionManager implements ClusterDefinitionService {
169
170           return IpAddress.valueOf(InetAddress.getLoopbackAddress()).toString();
171       }
170 +
171 +     // Indicates whether the specified interface address matches the given prefix.
172 +     // FIXME: Add a facility to IpPrefix to make this more robust
173 +     private static boolean matchInterface(String ip, String ipPrefix) {
174 +         String s = ipPrefix.replaceAll("\\.\\*", "");
175 +         return ip.startsWith(s);
176 +     }
172   }
......
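For reference, the new matchInterface helper does a plain string comparison after stripping the ".*" wildcard from the configured prefix. A minimal, illustrative sketch of its behavior (the demo class is hypothetical; only the helper's logic comes from the change above):

// Illustrative only: shows what the string-based prefix match above accepts.
public final class MatchInterfaceDemo {

    // Same logic as the helper added to ClusterDefinitionManager.
    static boolean matchInterface(String ip, String ipPrefix) {
        String s = ipPrefix.replaceAll("\\.\\*", "");
        return ip.startsWith(s);
    }

    public static void main(String[] args) {
        System.out.println(matchInterface("192.168.1.7", "192.168.*"));   // true: "192.168.*" -> "192.168"
        System.out.println(matchInterface("10.0.0.5", "192.168.*"));      // false
        // Not octet-aware: "192.1689..." also starts with "192.168",
        // which is what the FIXME about moving this into IpPrefix refers to.
        System.out.println(matchInterface("192.1689.1.7", "192.168.*"));  // true
    }
}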
1 -/*
2 - * Copyright 2014-2015 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.cluster.impl;
17 -
18 -import com.google.common.base.Optional;
19 -import com.google.common.cache.LoadingCache;
20 -import com.google.common.collect.ImmutableSet;
21 -import com.google.common.collect.Maps;
22 -import com.hazelcast.core.IMap;
23 -import com.hazelcast.core.Member;
24 -import com.hazelcast.core.MemberAttributeEvent;
25 -import com.hazelcast.core.MembershipEvent;
26 -import com.hazelcast.core.MembershipListener;
27 -import org.apache.felix.scr.annotations.Activate;
28 -import org.apache.felix.scr.annotations.Component;
29 -import org.apache.felix.scr.annotations.Deactivate;
30 -import org.apache.felix.scr.annotations.Service;
31 -import org.joda.time.DateTime;
32 -import org.onlab.packet.IpAddress;
33 -import org.onosproject.cluster.ClusterEvent;
34 -import org.onosproject.cluster.ClusterStore;
35 -import org.onosproject.cluster.ClusterStoreDelegate;
36 -import org.onosproject.cluster.ControllerNode;
37 -import org.onosproject.cluster.DefaultControllerNode;
38 -import org.onosproject.cluster.NodeId;
39 -import org.onosproject.store.hz.AbsentInvalidatingLoadingCache;
40 -import org.onosproject.store.hz.AbstractHazelcastStore;
41 -import org.onosproject.store.hz.OptionalCacheLoader;
42 -
43 -import java.util.Map;
44 -import java.util.Set;
45 -import java.util.concurrent.ConcurrentHashMap;
46 -
47 -import static com.google.common.cache.CacheBuilder.newBuilder;
48 -import static org.onosproject.cluster.ClusterEvent.Type.INSTANCE_ACTIVATED;
49 -import static org.onosproject.cluster.ClusterEvent.Type.INSTANCE_DEACTIVATED;
50 -import static org.onosproject.cluster.ControllerNode.State;
51 -
52 -/**
53 - * Distributed, Hazelcast-based implementation of the cluster nodes store.
54 - */
55 -@Component(immediate = true, enabled = false)
56 -@Service
57 -public class HazelcastClusterStore
58 - extends AbstractHazelcastStore<ClusterEvent, ClusterStoreDelegate>
59 - implements ClusterStore {
60 -
61 - private IMap<byte[], byte[]> rawNodes;
62 - private LoadingCache<NodeId, Optional<DefaultControllerNode>> nodes;
63 -
64 - private String listenerId;
65 - private final MembershipListener listener = new InternalMembershipListener();
66 - private final Map<NodeId, State> states = new ConcurrentHashMap<>();
67 - private final Map<NodeId, DateTime> lastUpdatedTimes = Maps.newConcurrentMap();
68 -
69 - private String nodesListenerId;
70 -
71 - @Override
72 - @Activate
73 - public void activate() {
74 - super.activate();
75 - listenerId = theInstance.getCluster().addMembershipListener(listener);
76 -
77 - rawNodes = theInstance.getMap("nodes");
78 - OptionalCacheLoader<NodeId, DefaultControllerNode> nodeLoader
79 - = new OptionalCacheLoader<>(serializer, rawNodes);
80 - nodes = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader));
81 - nodesListenerId = rawNodes.addEntryListener(new RemoteCacheEventHandler<>(nodes), true);
82 -
83 - loadClusterNodes();
84 -
85 - log.info("Started");
86 - }
87 -
88 - // Loads the initial set of cluster nodes
89 - private void loadClusterNodes() {
90 - for (Member member : theInstance.getCluster().getMembers()) {
91 - addNode(node(member));
92 - }
93 - }
94 -
95 - @Deactivate
96 - public void deactivate() {
97 - rawNodes.removeEntryListener(nodesListenerId);
98 - theInstance.getCluster().removeMembershipListener(listenerId);
99 - log.info("Stopped");
100 - }
101 -
102 - @Override
103 - public ControllerNode getLocalNode() {
104 - return node(theInstance.getCluster().getLocalMember());
105 - }
106 -
107 - @Override
108 - public Set<ControllerNode> getNodes() {
109 - ImmutableSet.Builder<ControllerNode> builder = ImmutableSet.builder();
110 - for (Optional<DefaultControllerNode> optional : nodes.asMap().values()) {
111 - builder.add(optional.get());
112 - }
113 - return builder.build();
114 - }
115 -
116 - @Override
117 - public ControllerNode getNode(NodeId nodeId) {
118 - return nodes.getUnchecked(nodeId).orNull();
119 - }
120 -
121 - @Override
122 - public State getState(NodeId nodeId) {
123 - State state = states.get(nodeId);
124 - return state == null ? State.INACTIVE : state;
125 - }
126 -
127 - @Override
128 - public DateTime getLastUpdated(NodeId nodeId) {
129 - return lastUpdatedTimes.get(nodeId);
130 - }
131 -
132 - @Override
133 - public ControllerNode addNode(NodeId nodeId, IpAddress ip, int tcpPort) {
134 - return addNode(new DefaultControllerNode(nodeId, ip, tcpPort));
135 - }
136 -
137 - @Override
138 - public void removeNode(NodeId nodeId) {
139 - synchronized (this) {
140 - rawNodes.remove(serialize(nodeId));
141 - nodes.invalidate(nodeId);
142 - }
143 - }
144 -
145 - // Adds a new node based on the specified member
146 - private synchronized ControllerNode addNode(DefaultControllerNode node) {
147 - rawNodes.put(serialize(node.id()), serialize(node));
148 - nodes.put(node.id(), Optional.of(node));
149 - updateState(node.id(), State.ACTIVE);
150 - return node;
151 - }
152 -
153 - // Creates a controller node descriptor from the Hazelcast member.
154 - private DefaultControllerNode node(Member member) {
155 - IpAddress ip = memberAddress(member);
156 - return new DefaultControllerNode(new NodeId(ip.toString()), ip);
157 - }
158 -
159 - private IpAddress memberAddress(Member member) {
160 - return IpAddress.valueOf(member.getSocketAddress().getAddress());
161 - }
162 -
163 - private void updateState(NodeId nodeId, State newState) {
164 - states.put(nodeId, newState);
165 - lastUpdatedTimes.put(nodeId, DateTime.now());
166 - }
167 -
168 - // Interceptor for membership events.
169 - private class InternalMembershipListener implements MembershipListener {
170 - @Override
171 - public void memberAdded(MembershipEvent membershipEvent) {
172 - log.info("Member {} added", membershipEvent.getMember());
173 - ControllerNode node = addNode(node(membershipEvent.getMember()));
174 - notifyDelegate(new ClusterEvent(INSTANCE_ACTIVATED, node));
175 - }
176 -
177 - @Override
178 - public void memberRemoved(MembershipEvent membershipEvent) {
179 - log.info("Member {} removed", membershipEvent.getMember());
180 - NodeId nodeId = new NodeId(memberAddress(membershipEvent.getMember()).toString());
181 - updateState(nodeId, State.INACTIVE);
182 - notifyDelegate(new ClusterEvent(INSTANCE_DEACTIVATED, getNode(nodeId)));
183 - }
184 -
185 - @Override
186 - public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) {
187 - log.info("Member {} attribute {} changed to {}",
188 - memberAttributeEvent.getMember(),
189 - memberAttributeEvent.getKey(),
190 - memberAttributeEvent.getValue());
191 - }
192 - }
193 -}
1 -/*
2 - * Copyright 2014-2015 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.cluster.impl;
17 -
18 -import com.google.common.collect.Maps;
19 -import com.hazelcast.config.TopicConfig;
20 -import com.hazelcast.core.IAtomicLong;
21 -import com.hazelcast.core.ILock;
22 -
23 -import org.apache.felix.scr.annotations.Activate;
24 -import org.apache.felix.scr.annotations.Component;
25 -import org.apache.felix.scr.annotations.Deactivate;
26 -import org.apache.felix.scr.annotations.Reference;
27 -import org.apache.felix.scr.annotations.ReferenceCardinality;
28 -import org.apache.felix.scr.annotations.Service;
29 -import org.onlab.util.KryoNamespace;
30 -import org.onosproject.cluster.ClusterService;
31 -import org.onosproject.cluster.Leadership;
32 -import org.onosproject.cluster.LeadershipEvent;
33 -import org.onosproject.cluster.LeadershipEventListener;
34 -import org.onosproject.cluster.LeadershipService;
35 -import org.onosproject.cluster.NodeId;
36 -import org.onosproject.event.ListenerRegistry;
37 -import org.onosproject.event.EventDeliveryService;
38 -import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
39 -import org.onosproject.store.cluster.messaging.ClusterMessage;
40 -import org.onosproject.store.cluster.messaging.ClusterMessageHandler;
41 -import org.onosproject.store.cluster.messaging.MessageSubject;
42 -import org.onosproject.store.hz.StoreService;
43 -import org.onosproject.store.serializers.KryoNamespaces;
44 -import org.onosproject.store.serializers.KryoSerializer;
45 -import org.slf4j.Logger;
46 -import org.slf4j.LoggerFactory;
47 -
48 -import java.util.HashMap;
49 -import java.util.List;
50 -import java.util.Map;
51 -import java.util.Set;
52 -import java.util.concurrent.CompletableFuture;
53 -import java.util.concurrent.ExecutorService;
54 -import java.util.concurrent.Executors;
55 -import java.util.concurrent.Future;
56 -import java.util.stream.Collectors;
57 -
58 -import static com.google.common.base.Preconditions.checkArgument;
59 -import static org.onlab.util.Tools.groupedThreads;
60 -
61 -/**
62 - * Distributed implementation of LeadershipService that is based on Hazelcast.
63 - * <p>
64 - * The election is eventually-consistent: if there is Hazelcast partitioning,
65 - * and the partitioning is healed, there could be a short window of time
66 - * until the leaders in each partition discover each other. If this happens,
67 - * the leaders release the leadership and run again for election.
68 - * </p>
69 - * <p>
70 - * The leader election is based on Hazelcast's Global Lock, which is strongly
71 - * consistent. In addition, each leader periodically advertises events
72 - * (using a Hazelcast Topic) that it is the elected leader. Those events are
73 - * used for two purposes: (1) Discover multi-leader collisions (in case of
74 - * healed Hazelcast partitions), and (2) Inform all listeners who is
75 - * the current leader (e.g., for informational purpose).
76 - * </p>
77 - */
78 -@Component(immediate = true, enabled = false)
79 -@Service
80 -public class HazelcastLeadershipService implements LeadershipService {
81 - private static final Logger log =
82 - LoggerFactory.getLogger(HazelcastLeadershipService.class);
83 -
84 - private static final KryoSerializer SERIALIZER = new KryoSerializer() {
85 - @Override
86 - protected void setupKryoPool() {
87 - serializerPool = KryoNamespace.newBuilder()
88 - .register(KryoNamespaces.API)
89 - .build()
90 - .populate(1);
91 - }
92 - };
93 -
94 - private static final long LEADERSHIP_PERIODIC_INTERVAL_MS = 5 * 1000; // 5s
95 - private static final long LEADERSHIP_REMOTE_TIMEOUT_MS = 15 * 1000; // 15s
96 - private static final String TOPIC_HZ_ID = "LeadershipService/AllTopics";
97 -
98 - // indicates there is no term value yet
99 - private static final long NO_TERM = 0;
100 -
101 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
102 - protected ClusterCommunicationService clusterCommunicator;
103 -
104 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
105 - protected ClusterService clusterService;
106 -
107 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
108 - protected StoreService storeService;
109 -
110 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
111 - protected EventDeliveryService eventDispatcher;
112 -
113 - private ListenerRegistry<LeadershipEvent, LeadershipEventListener>
114 - listenerRegistry;
115 - private final Map<String, Topic> topics = Maps.newConcurrentMap();
116 - private NodeId localNodeId;
117 -
118 - private static final MessageSubject LEADERSHIP_EVENT_MESSAGE_SUBJECT =
119 - new MessageSubject("hz-leadership-events");
120 -
121 - private ExecutorService messageHandlingExecutor;
122 -
123 - @Activate
124 - protected void activate() {
125 - localNodeId = clusterService.getLocalNode().id();
126 - listenerRegistry = new ListenerRegistry<>();
127 - eventDispatcher.addSink(LeadershipEvent.class, listenerRegistry);
128 -
129 - TopicConfig topicConfig = new TopicConfig();
130 - topicConfig.setGlobalOrderingEnabled(true);
131 - topicConfig.setName(TOPIC_HZ_ID);
132 - storeService.getHazelcastInstance().getConfig().addTopicConfig(topicConfig);
133 -
134 - messageHandlingExecutor = Executors.newSingleThreadExecutor(
135 - groupedThreads("onos/store/leadership", "message-handler"));
136 -
137 - clusterCommunicator.addSubscriber(
138 - LEADERSHIP_EVENT_MESSAGE_SUBJECT,
139 - new InternalLeadershipEventListener(),
140 - messageHandlingExecutor);
141 -
142 - log.info("Hazelcast Leadership Service started");
143 - }
144 -
145 - @Deactivate
146 - protected void deactivate() {
147 - eventDispatcher.removeSink(LeadershipEvent.class);
148 - messageHandlingExecutor.shutdown();
149 - clusterCommunicator.removeSubscriber(LEADERSHIP_EVENT_MESSAGE_SUBJECT);
150 -
151 - for (Topic topic : topics.values()) {
152 - topic.stop();
153 - }
154 - topics.clear();
155 -
156 - log.info("Hazelcast Leadership Service stopped");
157 - }
158 -
159 - @Override
160 - public NodeId getLeader(String path) {
161 - Topic topic = topics.get(path);
162 - if (topic == null) {
163 - return null;
164 - }
165 - return topic.leader();
166 - }
167 -
168 - @Override
169 - public Leadership getLeadership(String path) {
170 - checkArgument(path != null);
171 - Topic topic = topics.get(path);
172 - if (topic != null) {
173 - return new Leadership(topic.topicName(),
174 - topic.leader(),
175 - topic.term(),
176 - 0);
177 - }
178 - return null;
179 - }
180 -
181 - @Override
182 - public Set<String> ownedTopics(NodeId nodeId) {
183 - checkArgument(nodeId != null);
184 - return topics.values()
185 - .stream()
186 - .filter(topic -> nodeId.equals(topic.leader()))
187 - .map(topic -> topic.topicName)
188 - .collect(Collectors.toSet());
189 - }
190 -
191 - @Override
192 - public CompletableFuture<Leadership> runForLeadership(String path) {
193 - checkArgument(path != null);
194 - Topic topic = new Topic(path);
195 - Topic oldTopic = topics.putIfAbsent(path, topic);
196 - if (oldTopic == null) {
197 - topic.start();
198 - topic.runForLeadership();
199 - } else {
200 - oldTopic.runForLeadership();
201 - }
202 - return CompletableFuture.completedFuture(getLeadership(path));
203 - }
204 -
205 - @Override
206 - public CompletableFuture<Void> withdraw(String path) {
207 - checkArgument(path != null);
208 - Topic topic = topics.get(path);
209 - if (topic != null) {
210 - topics.remove(path, topic);
211 - topic.stop();
212 - }
213 - return CompletableFuture.completedFuture(null);
214 - }
215 -
216 - @Override
217 - public Map<String, Leadership> getLeaderBoard() {
218 - Map<String, Leadership> result = new HashMap<>();
219 -
220 - for (Topic topic : topics.values()) {
221 - Leadership leadership = new Leadership(topic.topicName(),
222 - topic.leader(),
223 - topic.term(),
224 - 0);
225 - result.put(topic.topicName(), leadership);
226 - }
227 - return result;
228 - }
229 -
230 - @Override
231 - public void addListener(LeadershipEventListener listener) {
232 - listenerRegistry.addListener(listener);
233 - }
234 -
235 - @Override
236 - public void removeListener(LeadershipEventListener listener) {
237 - listenerRegistry.removeListener(listener);
238 - }
239 -
240 - /**
241 - * Class for keeping per-topic information.
242 - */
243 - private final class Topic {
244 - private final String topicName;
245 - private volatile boolean isShutdown = true;
246 - private volatile boolean isRunningForLeadership = false;
247 - private volatile long lastLeadershipUpdateMs = 0;
248 - private ExecutorService leaderElectionExecutor;
249 -
250 - private volatile IAtomicLong term;
251 - // This is local state, recording the term number for the last time
252 - // this instance was leader for this topic. The current term could be
253 - // higher if the mastership has changed since then.
254 - private long myLastLeaderTerm = NO_TERM;
255 -
256 - private volatile NodeId leader;
257 - private ILock leaderLock;
258 - private Future<?> getLockFuture;
259 - private Future<?> periodicProcessingFuture;
260 -
261 - /**
262 - * Constructor.
263 - *
264 - * @param topicName the topic name
265 - */
266 - private Topic(String topicName) {
267 - this.topicName = topicName;
268 - }
269 -
270 - /**
271 - * Gets the topic name.
272 - *
273 - * @return the topic name
274 - */
275 - private String topicName() {
276 - return topicName;
277 - }
278 -
279 - /**
280 - * Gets the leader for the topic.
281 - *
282 - * @return the leader for the topic
283 - */
284 - private NodeId leader() {
285 - return leader;
286 - }
287 -
288 - /**
289 - * Gets the current term for the topic.
290 - *
291 - * @return the term for the topic
292 - */
293 - private long term() {
294 - if (term == null) {
295 - return NO_TERM;
296 - }
297 - return term.get();
298 - }
299 -
300 - /**
301 - * Starts operation.
302 - */
303 - private synchronized void start() {
304 - if (!isShutdown) {
305 - // already running
306 - return;
307 - }
308 - isShutdown = false;
309 - String threadPoolName = "election-" + topicName + "-%d";
310 - leaderElectionExecutor = Executors.newScheduledThreadPool(2,
311 - groupedThreads("onos/leadership", threadPoolName));
312 -
313 - periodicProcessingFuture =
314 - leaderElectionExecutor.submit(new Runnable() {
315 - @Override
316 - public void run() {
317 - doPeriodicProcessing();
318 - }
319 - });
320 - }
321 -
322 - /**
323 - * Runs for leadership.
324 - */
325 - private synchronized void runForLeadership() {
326 - if (isRunningForLeadership) {
327 - return; // Nothing to do: already running
328 - }
329 - if (isShutdown) {
330 - start();
331 - }
332 - isRunningForLeadership = true;
333 - String lockHzId = "LeadershipService/" + topicName + "/lock";
334 - String termHzId = "LeadershipService/" + topicName + "/term";
335 - leaderLock = storeService.getHazelcastInstance().getLock(lockHzId);
336 - term = storeService.getHazelcastInstance().getAtomicLong(termHzId);
337 -
338 - getLockFuture = leaderElectionExecutor.submit(new Runnable() {
339 - @Override
340 - public void run() {
341 - doLeaderElectionThread();
342 - }
343 - });
344 - }
345 -
346 - /**
347 - * Stops leadership election for the topic.
348 - */
349 - private synchronized void stop() {
350 - isShutdown = true;
351 - isRunningForLeadership = false;
352 - // getLockFuture.cancel(true);
353 - // periodicProcessingFuture.cancel(true);
354 - leaderElectionExecutor.shutdownNow();
355 - }
356 -
357 - /**
358 - * Received a Leadership Event.
359 - *
360 - * @param leadershipEvent the received Leadership Event
361 - */
362 - private void receivedLeadershipEvent(LeadershipEvent leadershipEvent) {
363 - NodeId eventLeaderId = leadershipEvent.subject().leader();
364 - if (!leadershipEvent.subject().topic().equals(topicName)) {
365 - return; // Not our topic: ignore
366 - }
367 - if (eventLeaderId.equals(localNodeId)) {
368 - return; // My own message: ignore
369 - }
370 -
371 - synchronized (this) {
372 - switch (leadershipEvent.type()) {
373 - case LEADER_ELECTED:
374 - // FALLTHROUGH
375 - case LEADER_REELECTED:
376 - //
377 - // Another leader: if we are also a leader, then give up
378 - // leadership and run for re-election.
379 - //
380 - if ((leader != null) && leader.equals(localNodeId)) {
381 - if (getLockFuture != null) {
382 - getLockFuture.cancel(true);
383 - }
384 - } else {
385 - // Just update the current leader
386 - leader = leadershipEvent.subject().leader();
387 - lastLeadershipUpdateMs = System.currentTimeMillis();
388 - }
389 - break;
390 - case LEADER_BOOTED:
391 - // Remove the state for the current leader
392 - if ((leader != null) && eventLeaderId.equals(leader)) {
393 - leader = null;
394 - }
395 - break;
396 - default:
397 - break;
398 - }
399 - }
400 - }
401 -
402 - private void doPeriodicProcessing() {
403 -
404 - while (!isShutdown) {
405 -
406 - //
407 - // Periodic tasks:
408 - // (a) Advertise ourselves as the leader
409 - // OR
410 - // (b) Expire a stale (remote) leader
411 - //
412 - synchronized (this) {
413 - LeadershipEvent leadershipEvent;
414 - if (leader != null) {
415 - if (leader.equals(localNodeId)) {
416 - //
417 - // Advertise ourselves as the leader
418 - //
419 - leadershipEvent = new LeadershipEvent(
420 - LeadershipEvent.Type.LEADER_REELECTED,
421 - new Leadership(topicName, localNodeId, myLastLeaderTerm, 0));
422 - // Dispatch to all instances
423 -
424 - clusterCommunicator.broadcastIncludeSelf(
425 - leadershipEvent,
426 - LEADERSHIP_EVENT_MESSAGE_SUBJECT,
427 - SERIALIZER::encode);
428 - } else {
429 - //
430 - // Test if time to expire a stale leader
431 - //
432 - long delta = System.currentTimeMillis() -
433 - lastLeadershipUpdateMs;
434 - if (delta > LEADERSHIP_REMOTE_TIMEOUT_MS) {
435 - log.debug("Topic {} leader {} booted due to heartbeat timeout",
436 - topicName, leader);
437 - leadershipEvent = new LeadershipEvent(
438 - LeadershipEvent.Type.LEADER_BOOTED,
439 - new Leadership(topicName, leader, myLastLeaderTerm, 0));
440 - // Dispatch only to the local listener(s)
441 - eventDispatcher.post(leadershipEvent);
442 - leader = null;
443 - }
444 - }
445 - }
446 - }
447 -
448 - // Sleep before re-advertising
449 - try {
450 - Thread.sleep(LEADERSHIP_PERIODIC_INTERVAL_MS);
451 - } catch (InterruptedException e) {
452 - log.debug("Leader Election periodic thread interrupted");
453 - }
454 - }
455 - }
456 -
457 - /**
458 - * Performs the leader election by using Hazelcast.
459 - */
460 - private void doLeaderElectionThread() {
461 -
462 - while (!isShutdown) {
463 - LeadershipEvent leadershipEvent;
464 - //
465 - // Try to acquire the lock and keep it until the instance is
466 - // shutdown.
467 - //
468 - log.debug("Leader Election begin for topic {}",
469 - topicName);
470 - try {
471 - // Block until it becomes the leader
472 - leaderLock.lockInterruptibly();
473 - } catch (InterruptedException e) {
474 - //
475 - // Thread interrupted. Either shutdown or run for
476 - // re-election.
477 - //
478 - log.debug("Election interrupted for topic {}",
479 - topicName);
480 - continue;
481 - }
482 -
483 - try {
484 - synchronized (this) {
485 - //
486 - // This instance is now the leader
487 - //
488 - log.info("Leader Elected for topic {}", topicName);
489 -
490 - updateTerm();
491 -
492 - leader = localNodeId;
493 - leadershipEvent = new LeadershipEvent(
494 - LeadershipEvent.Type.LEADER_ELECTED,
495 - new Leadership(topicName, localNodeId, myLastLeaderTerm, 0));
496 -
497 - clusterCommunicator.broadcastIncludeSelf(
498 - leadershipEvent,
499 - LEADERSHIP_EVENT_MESSAGE_SUBJECT,
500 - SERIALIZER::encode);
501 - }
502 -
503 - // Sleep forever until interrupted
504 - Thread.sleep(Long.MAX_VALUE);
505 - } catch (InterruptedException e) {
506 - //
507 - // Thread interrupted. Either shutdown or run for
508 - // re-election.
509 - //
510 - log.debug("Leader Interrupted for topic {}",
511 - topicName);
512 -
513 - } finally {
514 - synchronized (this) {
515 - // If we reach here, we should release the leadership
516 - log.debug("Leader Lock Released for topic {}", topicName);
517 - if ((leader != null) &&
518 - leader.equals(localNodeId)) {
519 - leader = null;
520 - }
521 - leadershipEvent = new LeadershipEvent(
522 - LeadershipEvent.Type.LEADER_BOOTED,
523 - new Leadership(topicName, localNodeId, myLastLeaderTerm, 0));
524 -
525 - clusterCommunicator.broadcastIncludeSelf(
526 - leadershipEvent,
527 - LEADERSHIP_EVENT_MESSAGE_SUBJECT,
528 - SERIALIZER::encode);
529 -
530 - if (leaderLock.isLockedByCurrentThread()) {
531 - leaderLock.unlock();
532 - }
533 - }
534 - }
535 - }
536 - isRunningForLeadership = false;
537 - }
538 -
539 - // Globally guarded by the leadership lock for this term
540 - // Locally guarded by synchronized (this)
541 - private void updateTerm() {
542 - long oldTerm = term.get();
543 - long newTerm = term.incrementAndGet();
544 - myLastLeaderTerm = newTerm;
545 - log.debug("Topic {} updated term from {} to {}", topicName,
546 - oldTerm, newTerm);
547 - }
548 - }
549 -
550 - private class InternalLeadershipEventListener implements ClusterMessageHandler {
551 -
552 - @Override
553 - public void handle(ClusterMessage message) {
554 - LeadershipEvent leadershipEvent =
555 - SERIALIZER.decode(message.payload());
556 -
557 - log.trace("Leadership Event: time = {} type = {} event = {}",
558 - leadershipEvent.time(), leadershipEvent.type(),
559 - leadershipEvent);
560 - //
561 - // If there is no entry for the topic, then create a new one to
562 - // keep track of the leadership, but don't run for leadership itself.
563 - //
564 - String topicName = leadershipEvent.subject().topic();
565 - Topic topic = topics.get(topicName);
566 - if (topic == null) {
567 - topic = new Topic(topicName);
568 - Topic oldTopic = topics.putIfAbsent(topicName, topic);
569 - if (oldTopic == null) {
570 - // encountered new topic, start periodic processing
571 - topic.start();
572 - } else {
573 - topic = oldTopic;
574 - }
575 - }
576 - topic.receivedLeadershipEvent(leadershipEvent);
577 - eventDispatcher.post(leadershipEvent);
578 - }
579 - }
580 -
581 - @Override
582 - public Map<String, List<NodeId>> getCandidates() {
583 - return null;
584 - }
585 -
586 - @Override
587 - public List<NodeId> getCandidates(String path) {
588 - return null;
589 - }
590 -
591 - @Override
592 - public boolean stepdown(String path) {
593 - throw new UnsupportedOperationException();
594 - }
595 -
596 - @Override
597 - public boolean makeTopCandidate(String path, NodeId nodeId) {
598 - throw new UnsupportedOperationException();
599 - }
600 -}
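The removed HazelcastLeadershipService implemented the election pattern its class comment describes: block on a global lock to become leader, advertise periodically over a topic, and boot a stale remote leader after a heartbeat timeout. A rough sketch of that loop, using a plain JDK lock as a stand-in for Hazelcast's ILock (all names below are illustrative, not ONOS APIs):

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Illustrative sketch of the lock-based election loop; not ONOS code.
class ElectionLoopSketch {
    private final Lock leaderLock = new ReentrantLock();   // stand-in for the distributed ILock
    private volatile boolean shutdown = false;

    void run() {
        while (!shutdown) {
            try {
                leaderLock.lockInterruptibly();             // block until this node wins the election
                try {
                    announce("LEADER_ELECTED");
                    while (!shutdown) {
                        Thread.sleep(5_000);                // periodic advertisement
                        announce("LEADER_REELECTED");
                    }
                } finally {
                    announce("LEADER_BOOTED");              // give up leadership
                    leaderLock.unlock();                    // let another node acquire the lock
                }
            } catch (InterruptedException e) {
                // interrupted: either shutting down or running for re-election
            }
        }
    }

    private void announce(String event) { /* broadcast to cluster peers */ }
}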
1 -/*
2 - * Copyright 2014-2015 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.core.impl;
17 -
18 -import static org.apache.commons.lang3.concurrent.ConcurrentUtils.putIfAbsent;
19 -
20 -import com.google.common.collect.ImmutableSet;
21 -import com.hazelcast.core.EntryEvent;
22 -import com.hazelcast.core.EntryListener;
23 -import com.hazelcast.core.IAtomicLong;
24 -import com.hazelcast.core.MapEvent;
25 -
26 -import org.apache.felix.scr.annotations.Activate;
27 -import org.apache.felix.scr.annotations.Component;
28 -import org.apache.felix.scr.annotations.Deactivate;
29 -import org.apache.felix.scr.annotations.Service;
30 -import org.onosproject.core.ApplicationId;
31 -import org.onosproject.core.ApplicationIdStore;
32 -import org.onosproject.core.DefaultApplicationId;
33 -import org.onosproject.store.hz.AbstractHazelcastStore;
34 -import org.onosproject.store.hz.SMap;
35 -import org.onosproject.store.serializers.KryoNamespaces;
36 -import org.onosproject.store.serializers.KryoSerializer;
37 -import org.onlab.util.KryoNamespace;
38 -
39 -import java.util.Map;
40 -import java.util.Set;
41 -import java.util.concurrent.ConcurrentHashMap;
42 -
43 -/**
44 - * Simple implementation of the application ID registry using in-memory
45 - * structures.
46 - */
47 -@Component(immediate = false, enabled = false)
48 -@Service
49 -public class DistributedApplicationIdStore
50 - extends AbstractHazelcastStore<AppIdEvent, AppIdStoreDelegate>
51 - implements ApplicationIdStore {
52 -
53 - protected IAtomicLong lastAppId;
54 - protected SMap<String, DefaultApplicationId> appIdsByName;
55 -
56 - protected Map<Short, DefaultApplicationId> appIds = new ConcurrentHashMap<>();
57 -
58 - private String listenerId;
59 -
60 -
61 - @Override
62 - @Activate
63 - public void activate() {
64 - super.activate();
65 -
66 - this.serializer = new KryoSerializer() {
67 - @Override
68 - protected void setupKryoPool() {
69 - serializerPool = KryoNamespace.newBuilder()
70 - .register(KryoNamespaces.API)
71 - .nextId(KryoNamespaces.BEGIN_USER_CUSTOM_ID)
72 - .build();
73 - }
74 - };
75 -
76 - lastAppId = theInstance.getAtomicLong("applicationId");
77 -
78 - appIdsByName = new SMap<>(theInstance.<byte[], byte[]>getMap("appIdsByName"), this.serializer);
79 - listenerId = appIdsByName.addEntryListener((new RemoteAppIdEventHandler()), true);
80 -
81 - primeAppIds();
82 -
83 - log.info("Started");
84 - }
85 -
86 - @Deactivate
87 - public void deactivate() {
88 - appIdsByName.removeEntryListener(listenerId);
89 - log.info("Stopped");
90 - }
91 -
92 - @Override
93 - public Set<ApplicationId> getAppIds() {
94 - return ImmutableSet.<ApplicationId>copyOf(appIds.values());
95 - }
96 -
97 - @Override
98 - public ApplicationId getAppId(Short id) {
99 - ApplicationId appId = appIds.get(id);
100 - if (appId == null) {
101 - primeAppIds();
102 - return appIds.get(id);
103 - }
104 - return appId;
105 - }
106 -
107 - @Override
108 - public ApplicationId getAppId(String name) {
109 - return appIdsByName.get(name);
110 - }
111 -
112 - private void primeAppIds() {
113 - for (DefaultApplicationId appId : appIdsByName.values()) {
114 - appIds.putIfAbsent(appId.id(), appId);
115 - }
116 - }
117 -
118 - @Override
119 - public ApplicationId registerApplication(String name) {
120 - DefaultApplicationId appId = appIdsByName.get(name);
121 - if (appId == null) {
122 - int id = (int) lastAppId.getAndIncrement();
123 - appId = putIfAbsent(appIdsByName, name,
124 - new DefaultApplicationId(id, name));
125 - }
126 - return appId;
127 - }
128 -
129 - private class RemoteAppIdEventHandler implements EntryListener<String, DefaultApplicationId> {
130 - @Override
131 - public void entryAdded(EntryEvent<String, DefaultApplicationId> event) {
132 - DefaultApplicationId appId = event.getValue();
133 - appIds.put(appId.id(), appId);
134 - }
135 -
136 - @Override
137 - public void entryRemoved(EntryEvent<String, DefaultApplicationId> event) {
138 - }
139 -
140 - @Override
141 - public void entryUpdated(EntryEvent<String, DefaultApplicationId> event) {
142 - entryAdded(event);
143 - }
144 -
145 - @Override
146 - public void entryEvicted(EntryEvent<String, DefaultApplicationId> event) {
147 - }
148 -
149 - @Override
150 - public void mapEvicted(MapEvent event) {
151 - }
152 -
153 - @Override
154 - public void mapCleared(MapEvent event) {
155 - }
156 - }
157 -}
1 -/*
2 - * Copyright 2014-2015 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.core.impl;
17 -
18 -import com.hazelcast.core.HazelcastInstance;
19 -import com.hazelcast.core.IAtomicLong;
20 -import org.apache.felix.scr.annotations.Activate;
21 -import org.apache.felix.scr.annotations.Component;
22 -import org.apache.felix.scr.annotations.Reference;
23 -import org.apache.felix.scr.annotations.ReferenceCardinality;
24 -import org.apache.felix.scr.annotations.Service;
25 -import org.onosproject.core.IdBlock;
26 -import org.onosproject.core.IdBlockStore;
27 -import org.onosproject.store.hz.StoreService;
28 -
29 -import java.util.Map;
30 -
31 -/**
32 - * Distributed implementation of id block store using Hazelcast.
33 - */
34 -@Component(immediate = false, enabled = false)
35 -@Service
36 -public class DistributedIdBlockStore implements IdBlockStore {
37 -
38 - private static final long DEFAULT_BLOCK_SIZE = 0x100000L;
39 -
40 - protected Map<String, IAtomicLong> topicBlocks;
41 -
42 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
43 - protected StoreService storeService;
44 -
45 - protected HazelcastInstance theInstance;
46 -
47 - @Activate
48 - public void activate() {
49 - theInstance = storeService.getHazelcastInstance();
50 - }
51 -
52 - @Override
53 - public IdBlock getIdBlock(String topic) {
54 - Long blockBase = theInstance.getAtomicLong(topic).getAndAdd(DEFAULT_BLOCK_SIZE);
55 - return new IdBlock(blockBase, DEFAULT_BLOCK_SIZE);
56 - }
57 -}
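The removed DistributedIdBlockStore hands out disjoint ID ranges by atomically advancing a per-topic counter: each call reserves [base, base + DEFAULT_BLOCK_SIZE), where base is the counter's value before the add. A self-contained illustration with a plain AtomicLong standing in for the Hazelcast IAtomicLong (illustrative, not ONOS code):

import java.util.concurrent.atomic.AtomicLong;

// Illustrative sketch of block allocation via getAndAdd; not ONOS code.
class IdBlockAllocatorSketch {
    static final long BLOCK_SIZE = 0x100000L;                 // same constant as above (1,048,576)
    static final AtomicLong counter = new AtomicLong();       // stand-in for the per-topic IAtomicLong

    // Returns the base of a freshly reserved range [base, base + BLOCK_SIZE).
    static long nextBlockBase() {
        return counter.getAndAdd(BLOCK_SIZE);
    }

    public static void main(String[] args) {
        long a = nextBlockBase();   // 0        -> range [0, 1048576)
        long b = nextBlockBase();   // 1048576  -> range [1048576, 2097152)
        System.out.println(a + " " + b);
    }
}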
1 -/*
2 - * Copyright 2014-2015 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.flow.impl;
17 -
18 -import com.google.common.cache.CacheBuilder;
19 -import com.google.common.cache.CacheLoader;
20 -import com.google.common.cache.LoadingCache;
21 -import com.google.common.collect.ImmutableList;
22 -import com.google.common.collect.Iterables;
23 -import com.google.common.collect.Maps;
24 -import com.google.common.collect.Sets;
25 -import com.google.common.util.concurrent.Futures;
26 -import com.hazelcast.core.IMap;
27 -
28 -import org.apache.felix.scr.annotations.Activate;
29 -import org.apache.felix.scr.annotations.Component;
30 -import org.apache.felix.scr.annotations.Deactivate;
31 -import org.apache.felix.scr.annotations.Modified;
32 -import org.apache.felix.scr.annotations.Property;
33 -import org.apache.felix.scr.annotations.Reference;
34 -import org.apache.felix.scr.annotations.ReferenceCardinality;
35 -import org.apache.felix.scr.annotations.Service;
36 -import org.onlab.util.BoundedThreadPool;
37 -import org.onlab.util.KryoNamespace;
38 -import org.onlab.util.NewConcurrentHashMap;
39 -import org.onlab.util.Tools;
40 -import org.onosproject.cfg.ComponentConfigService;
41 -import org.onosproject.cluster.ClusterService;
42 -import org.onosproject.cluster.NodeId;
43 -import org.onosproject.core.CoreService;
44 -import org.onosproject.core.IdGenerator;
45 -import org.onosproject.net.Device;
46 -import org.onosproject.net.DeviceId;
47 -import org.onosproject.net.device.DeviceService;
48 -import org.onosproject.net.flow.CompletedBatchOperation;
49 -import org.onosproject.net.flow.DefaultFlowEntry;
50 -import org.onosproject.net.flow.FlowEntry;
51 -import org.onosproject.net.flow.FlowEntry.FlowEntryState;
52 -import org.onosproject.net.flow.FlowId;
53 -import org.onosproject.net.flow.FlowRule;
54 -import org.onosproject.net.flow.FlowRuleBatchEntry;
55 -import org.onosproject.net.flow.FlowRuleBatchEntry.FlowRuleOperation;
56 -import org.onosproject.net.flow.FlowRuleBatchEvent;
57 -import org.onosproject.net.flow.FlowRuleBatchOperation;
58 -import org.onosproject.net.flow.FlowRuleBatchRequest;
59 -import org.onosproject.net.flow.FlowRuleEvent;
60 -import org.onosproject.net.flow.FlowRuleEvent.Type;
61 -import org.onosproject.net.flow.FlowRuleService;
62 -import org.onosproject.net.flow.FlowRuleStore;
63 -import org.onosproject.net.flow.FlowRuleStoreDelegate;
64 -import org.onosproject.net.flow.StoredFlowEntry;
65 -import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
66 -import org.onosproject.store.cluster.messaging.ClusterMessage;
67 -import org.onosproject.store.cluster.messaging.ClusterMessageHandler;
68 -import org.onosproject.store.flow.ReplicaInfo;
69 -import org.onosproject.store.flow.ReplicaInfoEvent;
70 -import org.onosproject.store.flow.ReplicaInfoEventListener;
71 -import org.onosproject.store.flow.ReplicaInfoService;
72 -import org.onosproject.store.hz.AbstractHazelcastStore;
73 -import org.onosproject.store.hz.SMap;
74 -import org.onosproject.store.serializers.KryoSerializer;
75 -import org.onosproject.store.serializers.StoreSerializer;
76 -import org.onosproject.store.serializers.impl.DistributedStoreSerializers;
77 -import org.osgi.service.component.ComponentContext;
78 -import org.slf4j.Logger;
79 -
80 -import java.util.ArrayList;
81 -import java.util.Collections;
82 -import java.util.Dictionary;
83 -import java.util.HashSet;
84 -import java.util.List;
85 -import java.util.Map;
86 -import java.util.Map.Entry;
87 -import java.util.Set;
88 -import java.util.concurrent.ConcurrentHashMap;
89 -import java.util.concurrent.ConcurrentMap;
90 -import java.util.concurrent.CopyOnWriteArraySet;
91 -import java.util.concurrent.ExecutionException;
92 -import java.util.concurrent.ExecutorService;
93 -import java.util.concurrent.Executors;
94 -import java.util.concurrent.Future;
95 -import java.util.concurrent.TimeUnit;
96 -import java.util.stream.Collectors;
97 -
98 -import static com.google.common.base.Preconditions.checkNotNull;
99 -import static org.apache.commons.lang3.concurrent.ConcurrentUtils.createIfAbsentUnchecked;
100 -import static com.google.common.base.Strings.isNullOrEmpty;
101 -import static org.onlab.util.Tools.get;
102 -import static org.onlab.util.Tools.groupedThreads;
103 -import static org.onosproject.net.flow.FlowRuleEvent.Type.RULE_REMOVED;
104 -import static org.onosproject.store.flow.impl.FlowStoreMessageSubjects.*;
105 -import static org.slf4j.LoggerFactory.getLogger;
106 -
107 -/**
108 - * Manages inventory of flow rules using a distributed state management protocol.
109 - */
110 -@Component(immediate = false, enabled = false)
111 -@Service
112 -public class DistributedFlowRuleStore
113 - extends AbstractHazelcastStore<FlowRuleBatchEvent, FlowRuleStoreDelegate>
114 - implements FlowRuleStore {
115 -
116 - private final Logger log = getLogger(getClass());
117 -
118 - private static final int MESSAGE_HANDLER_THREAD_POOL_SIZE = 8;
119 - private static final boolean DEFAULT_BACKUP_ENABLED = true;
120 - private static final long FLOW_RULE_STORE_TIMEOUT_MILLIS = 5000;
121 -
122 - @Property(name = "msgHandlerPoolSize", intValue = MESSAGE_HANDLER_THREAD_POOL_SIZE,
123 - label = "Number of threads in the message handler pool")
124 - private int msgHandlerPoolSize = MESSAGE_HANDLER_THREAD_POOL_SIZE;
125 -
126 - @Property(name = "backupEnabled", boolValue = DEFAULT_BACKUP_ENABLED,
127 - label = "Indicates whether backups are enabled or not")
128 - private boolean backupEnabled = DEFAULT_BACKUP_ENABLED;
129 -
130 - private InternalFlowTable flowTable = new InternalFlowTable();
131 -
132 - /*private final ConcurrentMap<DeviceId, ConcurrentMap<FlowId, Set<StoredFlowEntry>>>
133 - flowEntries = new ConcurrentHashMap<>();*/
134 -
135 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
136 - protected ReplicaInfoService replicaInfoManager;
137 -
138 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
139 - protected ClusterCommunicationService clusterCommunicator;
140 -
141 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
142 - protected ClusterService clusterService;
143 -
144 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
145 - protected DeviceService deviceService;
146 -
147 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
148 - protected CoreService coreService;
149 -
150 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
151 - protected ComponentConfigService configService;
152 -
153 - private Map<Long, NodeId> pendingResponses = Maps.newConcurrentMap();
154 -
155 - // Cache of SMaps used for backup data. Each SMap contains a device flow table.
156 - private LoadingCache<DeviceId, SMap<FlowId, ImmutableList<StoredFlowEntry>>> smaps;
157 -
158 - private ExecutorService messageHandlingExecutor;
159 -
160 - private final ExecutorService backupExecutors =
161 - BoundedThreadPool.newSingleThreadExecutor(groupedThreads("onos/flow", "async-backups"));
162 - //Executors.newSingleThreadExecutor(groupedThreads("onos/flow", "async-backups"));
163 -
164 - private boolean syncBackup = false;
165 -
166 - protected static final StoreSerializer SERIALIZER = new KryoSerializer() {
167 - @Override
168 - protected void setupKryoPool() {
169 - serializerPool = KryoNamespace.newBuilder()
170 - .register(DistributedStoreSerializers.STORE_COMMON)
171 - .nextId(DistributedStoreSerializers.STORE_CUSTOM_BEGIN)
172 - .register(FlowRuleEvent.class)
173 - .register(FlowRuleEvent.Type.class)
174 - .build();
175 - }
176 - };
177 -
178 - private ReplicaInfoEventListener replicaInfoEventListener;
179 -
180 - private IdGenerator idGenerator;
181 -
182 - private NodeId local;
183 -
184 - @Activate
185 - public void activate(ComponentContext context) {
186 - configService.registerProperties(getClass());
187 - super.serializer = SERIALIZER;
188 - super.theInstance = storeService.getHazelcastInstance();
189 -
190 - idGenerator = coreService.getIdGenerator(FlowRuleService.FLOW_OP_TOPIC);
191 -
192 - local = clusterService.getLocalNode().id();
193 -
194 - // Cache to create SMap on demand
195 - smaps = CacheBuilder.newBuilder()
196 - .softValues()
197 - .build(new SMapLoader());
198 -
199 - messageHandlingExecutor = Executors.newFixedThreadPool(
200 - msgHandlerPoolSize, groupedThreads("onos/store/flow", "message-handlers"));
201 -
202 - registerMessageHandlers(messageHandlingExecutor);
203 -
204 - replicaInfoEventListener = new InternalReplicaInfoEventListener();
205 -
206 - replicaInfoManager.addListener(replicaInfoEventListener);
207 -
208 - logConfig("Started");
209 - }
210 -
211 - @Deactivate
212 - public void deactivate(ComponentContext context) {
213 - configService.unregisterProperties(getClass(), false);
214 - unregisterMessageHandlers();
215 - messageHandlingExecutor.shutdownNow();
216 - replicaInfoManager.removeListener(replicaInfoEventListener);
217 - log.info("Stopped");
218 - }
219 -
220 - @Modified
221 - public void modified(ComponentContext context) {
222 - if (context == null) {
223 - backupEnabled = DEFAULT_BACKUP_ENABLED;
224 - logConfig("Default config");
225 - return;
226 - }
227 -
228 - Dictionary properties = context.getProperties();
229 - int newPoolSize;
230 - boolean newBackupEnabled;
231 - try {
232 - String s = get(properties, "msgHandlerPoolSize");
233 - newPoolSize = isNullOrEmpty(s) ? msgHandlerPoolSize : Integer.parseInt(s.trim());
234 -
235 - s = get(properties, "backupEnabled");
236 - newBackupEnabled = isNullOrEmpty(s) ? backupEnabled : Boolean.parseBoolean(s.trim());
237 -
238 - } catch (NumberFormatException | ClassCastException e) {
239 - newPoolSize = MESSAGE_HANDLER_THREAD_POOL_SIZE;
240 - newBackupEnabled = DEFAULT_BACKUP_ENABLED;
241 - }
242 -
243 - if (newBackupEnabled != backupEnabled) {
244 - backupEnabled = newBackupEnabled;
245 - }
246 - if (newPoolSize != msgHandlerPoolSize) {
247 - msgHandlerPoolSize = newPoolSize;
248 - ExecutorService oldMsgHandler = messageHandlingExecutor;
249 - messageHandlingExecutor = Executors.newFixedThreadPool(
250 - msgHandlerPoolSize, groupedThreads("onos/store/flow", "message-handlers"));
251 -
252 - // replace previously registered handlers.
253 - registerMessageHandlers(messageHandlingExecutor);
254 - oldMsgHandler.shutdown();
255 - }
256 - logConfig("Reconfigured");
257 - }
258 -
259 - private void registerMessageHandlers(ExecutorService executor) {
260 -
261 - clusterCommunicator.addSubscriber(APPLY_BATCH_FLOWS, new OnStoreBatch(local), executor);
262 -
263 - clusterCommunicator.addSubscriber(REMOTE_APPLY_COMPLETED, new ClusterMessageHandler() {
264 - @Override
265 - public void handle(ClusterMessage message) {
266 - FlowRuleBatchEvent event = SERIALIZER.decode(message.payload());
267 - log.trace("received completed notification for {}", event);
268 - notifyDelegate(event);
269 - }
270 - }, executor);
271 -
272 - clusterCommunicator.addSubscriber(GET_FLOW_ENTRY, new ClusterMessageHandler() {
273 -
274 - @Override
275 - public void handle(ClusterMessage message) {
276 - FlowRule rule = SERIALIZER.decode(message.payload());
277 - log.trace("received get flow entry request for {}", rule);
278 - FlowEntry flowEntry = flowTable.getFlowEntry(rule); //getFlowEntryInternal(rule);
279 - message.respond(SERIALIZER.encode(flowEntry));
280 - }
281 - }, executor);
282 -
283 - clusterCommunicator.addSubscriber(GET_DEVICE_FLOW_ENTRIES, new ClusterMessageHandler() {
284 -
285 - @Override
286 - public void handle(ClusterMessage message) {
287 - DeviceId deviceId = SERIALIZER.decode(message.payload());
288 - log.trace("Received get flow entries request for {} from {}", deviceId, message.sender());
289 - Set<FlowEntry> flowEntries = flowTable.getFlowEntries(deviceId);
290 - message.respond(SERIALIZER.encode(flowEntries));
291 - }
292 - }, executor);
293 -
294 - clusterCommunicator.addSubscriber(REMOVE_FLOW_ENTRY, new ClusterMessageHandler() {
295 -
296 - @Override
297 - public void handle(ClusterMessage message) {
298 - FlowEntry rule = SERIALIZER.decode(message.payload());
299 - log.trace("received get flow entry request for {}", rule);
300 - FlowRuleEvent event = removeFlowRuleInternal(rule);
301 - message.respond(SERIALIZER.encode(event));
302 - }
303 - }, executor);
304 - }
305 -
306 - private void unregisterMessageHandlers() {
307 - clusterCommunicator.removeSubscriber(REMOVE_FLOW_ENTRY);
308 - clusterCommunicator.removeSubscriber(GET_DEVICE_FLOW_ENTRIES);
309 - clusterCommunicator.removeSubscriber(GET_FLOW_ENTRY);
310 - clusterCommunicator.removeSubscriber(APPLY_BATCH_FLOWS);
311 - clusterCommunicator.removeSubscriber(REMOTE_APPLY_COMPLETED);
312 - }
313 -
314 - private void logConfig(String prefix) {
315 - log.info("{} with msgHandlerPoolSize = {}; backupEnabled = {}",
316 - prefix, msgHandlerPoolSize, backupEnabled);
317 - }
318 -
319 -
320 - // This is not an efficient operation on a distributed sharded
321 - // flow store. We need to revisit the need for this operation or at least
322 - // make it device specific.
323 - @Override
324 - public int getFlowRuleCount() {
325 - // implementing inefficient operation for debugging purposes.
326 - int sum = 0;
327 - for (Device device : deviceService.getDevices()) {
328 - final DeviceId did = device.id();
329 - sum += Iterables.size(getFlowEntries(did));
330 - }
331 - return sum;
332 - }
333 -
334 - @Override
335 - public FlowEntry getFlowEntry(FlowRule rule) {
336 - ReplicaInfo replicaInfo = replicaInfoManager.getReplicaInfoFor(rule.deviceId());
337 -
338 - if (!replicaInfo.master().isPresent()) {
339 - log.warn("Failed to getFlowEntry: No master for {}", rule.deviceId());
340 - return null;
341 - }
342 -
343 - if (replicaInfo.master().get().equals(clusterService.getLocalNode().id())) {
344 - return flowTable.getFlowEntry(rule);
345 - }
346 -
347 - log.trace("Forwarding getFlowEntry to {}, which is the primary (master) for device {}",
348 - replicaInfo.master().orNull(), rule.deviceId());
349 -
350 - return Tools.futureGetOrElse(clusterCommunicator.sendAndReceive(rule,
351 - FlowStoreMessageSubjects.GET_FLOW_ENTRY,
352 - SERIALIZER::encode,
353 - SERIALIZER::decode,
354 - replicaInfo.master().get()),
355 - FLOW_RULE_STORE_TIMEOUT_MILLIS,
356 - TimeUnit.MILLISECONDS,
357 - null);
358 - }
359 -
360 - @Override
361 - public Iterable<FlowEntry> getFlowEntries(DeviceId deviceId) {
362 -
363 - ReplicaInfo replicaInfo = replicaInfoManager.getReplicaInfoFor(deviceId);
364 -
365 - if (!replicaInfo.master().isPresent()) {
366 - log.warn("Failed to getFlowEntries: No master for {}", deviceId);
367 - return Collections.emptyList();
368 - }
369 -
370 - if (replicaInfo.master().get().equals(clusterService.getLocalNode().id())) {
371 - return flowTable.getFlowEntries(deviceId);
372 - }
373 -
374 - log.trace("Forwarding getFlowEntries to {}, which is the primary (master) for device {}",
375 - replicaInfo.master().orNull(), deviceId);
376 -
377 - return Tools.futureGetOrElse(clusterCommunicator.sendAndReceive(deviceId,
378 - FlowStoreMessageSubjects.GET_DEVICE_FLOW_ENTRIES,
379 - SERIALIZER::encode,
380 - SERIALIZER::decode,
381 - replicaInfo.master().get()),
382 - FLOW_RULE_STORE_TIMEOUT_MILLIS,
383 - TimeUnit.MILLISECONDS,
384 - Collections.emptyList());
385 - }
386 -
387 - @Override
388 - public void storeFlowRule(FlowRule rule) {
389 - storeBatch(new FlowRuleBatchOperation(
390 - Collections.singletonList(new FlowRuleBatchEntry(FlowRuleOperation.ADD, rule)),
391 - rule.deviceId(), idGenerator.getNewId()));
392 - }
393 -
394 - @Override
395 - public void storeBatch(FlowRuleBatchOperation operation) {
396 -
397 -
398 - if (operation.getOperations().isEmpty()) {
399 -
400 - notifyDelegate(FlowRuleBatchEvent.completed(
401 - new FlowRuleBatchRequest(operation.id(), Collections.emptySet()),
402 - new CompletedBatchOperation(true, Collections.emptySet(),
403 - operation.deviceId())));
404 - return;
405 - }
406 -
407 - DeviceId deviceId = operation.deviceId();
408 -
409 - ReplicaInfo replicaInfo = replicaInfoManager.getReplicaInfoFor(deviceId);
410 -
411 - if (!replicaInfo.master().isPresent()) {
412 - log.warn("No master for {} : flows will be marked for removal", deviceId);
413 -
414 - updateStoreInternal(operation);
415 -
416 - notifyDelegate(FlowRuleBatchEvent.completed(
417 - new FlowRuleBatchRequest(operation.id(), Collections.emptySet()),
418 - new CompletedBatchOperation(true, Collections.emptySet(), operation.deviceId())));
419 - return;
420 - }
421 -
422 - final NodeId local = clusterService.getLocalNode().id();
423 - if (replicaInfo.master().get().equals(local)) {
424 - storeBatchInternal(operation);
425 - return;
426 - }
427 -
428 - log.trace("Forwarding storeBatch to {}, which is the primary (master) for device {}",
429 - replicaInfo.master().orNull(), deviceId);
430 -
431 - if (!clusterCommunicator.unicast(operation,
432 - APPLY_BATCH_FLOWS, SERIALIZER::encode,
433 - replicaInfo.master().get())) {
434 - log.warn("Failed to storeBatch: {} to {}", operation, replicaInfo.master());
435 -
436 - Set<FlowRule> allFailures = operation.getOperations().stream()
437 - .map(op -> op.target())
438 - .collect(Collectors.toSet());
439 -
440 - notifyDelegate(FlowRuleBatchEvent.completed(
441 - new FlowRuleBatchRequest(operation.id(), Collections.emptySet()),
442 - new CompletedBatchOperation(false, allFailures, deviceId)));
443 - return;
444 - }
445 - }
446 -
447 - private void storeBatchInternal(FlowRuleBatchOperation operation) {
448 -
449 - final DeviceId did = operation.deviceId();
450 - //final Collection<FlowEntry> ft = flowTable.getFlowEntries(did);
451 - Set<FlowRuleBatchEntry> currentOps = updateStoreInternal(operation);
452 - if (currentOps.isEmpty()) {
453 - batchOperationComplete(FlowRuleBatchEvent.completed(
454 - new FlowRuleBatchRequest(operation.id(), Collections.emptySet()),
455 - new CompletedBatchOperation(true, Collections.emptySet(), did)));
456 - return;
457 - }
458 - updateBackup(did, currentOps);
459 -
460 - notifyDelegate(FlowRuleBatchEvent.requested(new
461 - FlowRuleBatchRequest(operation.id(),
462 - currentOps), operation.deviceId()));
463 -
464 - }
465 -
466 - private Set<FlowRuleBatchEntry> updateStoreInternal(FlowRuleBatchOperation operation) {
467 - return operation.getOperations().stream().map(
468 - op -> {
469 - StoredFlowEntry entry;
470 - switch (op.operator()) {
471 - case ADD:
472 - entry = new DefaultFlowEntry(op.target());
473 - // always add requested FlowRule
474 - // Note: 2 equal FlowEntry may have different treatment
475 - flowTable.remove(entry.deviceId(), entry);
476 - flowTable.add(entry);
477 -
478 - return op;
479 - case REMOVE:
480 - entry = flowTable.getFlowEntry(op.target());
481 - if (entry != null) {
482 - entry.setState(FlowEntryState.PENDING_REMOVE);
483 - return op;
484 - }
485 - break;
486 - case MODIFY:
487 - //TODO: figure this out at some point
488 - break;
489 - default:
490 - log.warn("Unknown flow operation operator: {}", op.operator());
491 - }
492 - return null;
493 - }
494 - ).filter(op -> op != null).collect(Collectors.toSet());
495 - }
496 -
497 - private void updateBackup(DeviceId deviceId, final Set<FlowRuleBatchEntry> entries) {
498 - if (!backupEnabled) {
499 - return;
500 - }
501 -
502 - Future<?> backup = backupExecutors.submit(new UpdateBackup(deviceId, entries));
503 -
504 - if (syncBackup) {
505 - // wait for backup to complete
506 - try {
507 - backup.get();
508 - } catch (InterruptedException | ExecutionException e) {
509 - log.error("Failed to create backups", e);
510 - }
511 - }
512 - }
513 -
514 - @Override
515 - public void deleteFlowRule(FlowRule rule) {
516 - storeBatch(
517 - new FlowRuleBatchOperation(
518 - Collections.singletonList(
519 - new FlowRuleBatchEntry(
520 - FlowRuleOperation.REMOVE,
521 - rule)), rule.deviceId(), idGenerator.getNewId()));
522 - }
523 -
524 - @Override
525 - public FlowRuleEvent addOrUpdateFlowRule(FlowEntry rule) {
526 - ReplicaInfo replicaInfo = replicaInfoManager.getReplicaInfoFor(rule.deviceId());
527 - final NodeId localId = clusterService.getLocalNode().id();
528 - if (localId.equals(replicaInfo.master().orNull())) {
529 - return addOrUpdateFlowRuleInternal(rule);
530 - }
531 -
532 - log.warn("Tried to update FlowRule {} state,"
533 - + " while the Node was not the master.", rule);
534 - return null;
535 - }
536 -
537 - private FlowRuleEvent addOrUpdateFlowRuleInternal(FlowEntry rule) {
538 - final DeviceId did = rule.deviceId();
539 -
540 -
541 - // check if this new rule is an update to an existing entry
542 - StoredFlowEntry stored = flowTable.getFlowEntry(rule);
543 - if (stored != null) {
544 - stored.setBytes(rule.bytes());
545 - stored.setLife(rule.life());
546 - stored.setPackets(rule.packets());
547 - if (stored.state() == FlowEntryState.PENDING_ADD) {
548 - stored.setState(FlowEntryState.ADDED);
549 - FlowRuleBatchEntry entry =
550 - new FlowRuleBatchEntry(FlowRuleOperation.ADD, stored);
551 - updateBackup(did, Sets.newHashSet(entry));
552 - return new FlowRuleEvent(Type.RULE_ADDED, rule);
553 - }
554 - return new FlowRuleEvent(Type.RULE_UPDATED, rule);
555 - }
556 -
557 - // TODO: Confirm if this behavior is correct. See SimpleFlowRuleStore
558 - // TODO: also update backup if the behavior is correct.
559 - flowTable.add(rule);
560 -
561 -
562 - return null;
563 -
564 - }
565 -
566 - @Override
567 - public FlowRuleEvent removeFlowRule(FlowEntry rule) {
568 - final DeviceId deviceId = rule.deviceId();
569 - ReplicaInfo replicaInfo = replicaInfoManager.getReplicaInfoFor(deviceId);
570 -
571 - final NodeId localId = clusterService.getLocalNode().id();
572 - if (localId.equals(replicaInfo.master().orNull())) {
573 - // bypass and handle it locally
574 - return removeFlowRuleInternal(rule);
575 - }
576 -
577 - if (!replicaInfo.master().isPresent()) {
578 - log.warn("Failed to removeFlowRule: No master for {}", deviceId);
579 - // TODO: revisit if this should be null (="no-op") or Exception
580 - return null;
581 - }
582 -
583 - log.trace("Forwarding removeFlowRule to {}, which is the primary (master) for device {}",
584 - replicaInfo.master().orNull(), deviceId);
585 -
586 - return Futures.get(clusterCommunicator.sendAndReceive(
587 - rule,
588 - REMOVE_FLOW_ENTRY,
589 - SERIALIZER::encode,
590 - SERIALIZER::decode,
591 - replicaInfo.master().get()),
592 - FLOW_RULE_STORE_TIMEOUT_MILLIS,
593 - TimeUnit.MILLISECONDS,
594 - RuntimeException.class);
595 - }
596 -
597 - private FlowRuleEvent removeFlowRuleInternal(FlowEntry rule) {
598 - final DeviceId deviceId = rule.deviceId();
599 - // This is where one could mark a rule as removed and still keep it in the store.
600 - final boolean removed = flowTable.remove(deviceId, rule); //flowEntries.remove(deviceId, rule);
601 - FlowRuleBatchEntry entry =
602 - new FlowRuleBatchEntry(FlowRuleOperation.REMOVE, rule);
603 - updateBackup(deviceId, Sets.newHashSet(entry));
604 - if (removed) {
605 - return new FlowRuleEvent(RULE_REMOVED, rule);
606 - } else {
607 - return null;
608 - }
609 -
610 - }
611 -
612 - @Override
613 - public void batchOperationComplete(FlowRuleBatchEvent event) {
614 - //FIXME: need a per device pending response
615 -
616 - NodeId nodeId = pendingResponses.remove(event.subject().batchId());
617 - if (nodeId == null) {
618 - notifyDelegate(event);
619 - } else {
620 - // TODO check unicast return value
621 - clusterCommunicator.unicast(event, REMOTE_APPLY_COMPLETED, SERIALIZER::encode, nodeId);
622 - //error log: log.warn("Failed to respond to peer for batch operation result");
623 - }
624 - }
625 -
626 - private void loadFromBackup(final DeviceId did) {
627 - if (!backupEnabled) {
628 - return;
629 - }
630 - log.info("We are now the master for {}. Will load flow rules from backup", did);
631 - try {
632 - log.debug("Loading FlowRules for {} from backups", did);
633 - SMap<FlowId, ImmutableList<StoredFlowEntry>> backupFlowTable = smaps.get(did);
634 - for (Entry<FlowId, ImmutableList<StoredFlowEntry>> e
635 - : backupFlowTable.entrySet()) {
636 -
637 - log.trace("loading {}", e.getValue());
638 - for (StoredFlowEntry entry : e.getValue()) {
639 - flowTable.getFlowEntriesById(entry).remove(entry);
640 - flowTable.getFlowEntriesById(entry).add(entry);
641 -
642 -
643 - }
644 - }
645 - } catch (ExecutionException e) {
646 - log.error("Failed to load backup flowtable for {}", did, e);
647 - }
648 - }
649 -
650 - private void removeFromPrimary(final DeviceId did) {
651 - flowTable.clearDevice(did);
652 - }
653 -
654 -
655 - private final class OnStoreBatch implements ClusterMessageHandler {
656 - private final NodeId local;
657 -
658 - private OnStoreBatch(NodeId local) {
659 - this.local = local;
660 - }
661 -
662 - @Override
663 - public void handle(final ClusterMessage message) {
664 - FlowRuleBatchOperation operation = SERIALIZER.decode(message.payload());
665 - log.debug("received batch request {}", operation);
666 -
667 - final DeviceId deviceId = operation.deviceId();
668 - ReplicaInfo replicaInfo = replicaInfoManager.getReplicaInfoFor(deviceId);
669 - if (!local.equals(replicaInfo.master().orNull())) {
670 -
671 - Set<FlowRule> failures = new HashSet<>(operation.size());
672 - for (FlowRuleBatchEntry op : operation.getOperations()) {
673 - failures.add(op.target());
674 - }
675 - CompletedBatchOperation allFailed = new CompletedBatchOperation(false, failures, deviceId);
676 -                // This node is no longer the master; respond as if all operations failed.
677 -                // TODO: we might want to wrap the response in an envelope
678 -                // to distinguish a switch programming failure from a mastership handover;
679 -                // it makes sense in the latter case to retry immediately.
680 - message.respond(SERIALIZER.encode(allFailed));
681 - return;
682 - }
683 -
684 -
685 - pendingResponses.put(operation.id(), message.sender());
686 - storeBatchInternal(operation);
687 -
688 - }
689 - }
690 -
691 - private final class SMapLoader
692 - extends CacheLoader<DeviceId, SMap<FlowId, ImmutableList<StoredFlowEntry>>> {
693 -
694 - @Override
695 - public SMap<FlowId, ImmutableList<StoredFlowEntry>> load(DeviceId id)
696 - throws Exception {
697 - IMap<byte[], byte[]> map = theInstance.getMap("flowtable_" + id.toString());
698 - return new SMap<FlowId, ImmutableList<StoredFlowEntry>>(map, SERIALIZER);
699 - }
700 - }
701 -
702 - private final class InternalReplicaInfoEventListener
703 - implements ReplicaInfoEventListener {
704 -
705 - @Override
706 - public void event(ReplicaInfoEvent event) {
707 - final NodeId local = clusterService.getLocalNode().id();
708 - final DeviceId did = event.subject();
709 - final ReplicaInfo rInfo = event.replicaInfo();
710 -
711 - switch (event.type()) {
712 - case MASTER_CHANGED:
713 - if (local.equals(rInfo.master().orNull())) {
714 - // This node is the new master, populate local structure
715 - // from backup
716 - loadFromBackup(did);
717 - }
718 - //else {
719 - // This node is no longer the master holder,
720 - // clean local structure
721 - //removeFromPrimary(did);
722 - // TODO: probably should stop pending backup activities in
723 - // executors to avoid overwriting with old value
724 - //}
725 - break;
726 - default:
727 - break;
728 -
729 - }
730 - }
731 - }
732 -
733 - // Task to update FlowEntries in backup HZ store
734 - private final class UpdateBackup implements Runnable {
735 -
736 - private final DeviceId deviceId;
737 - private final Set<FlowRuleBatchEntry> ops;
738 -
739 -
740 - public UpdateBackup(DeviceId deviceId,
741 - Set<FlowRuleBatchEntry> ops) {
742 - this.deviceId = checkNotNull(deviceId);
743 - this.ops = checkNotNull(ops);
744 -
745 - }
746 -
747 - @Override
748 - public void run() {
749 - try {
750 - log.trace("update backup {} {}", deviceId, ops
751 - );
752 - final SMap<FlowId, ImmutableList<StoredFlowEntry>> backupFlowTable = smaps.get(deviceId);
753 -
754 -
755 - ops.stream().forEach(
756 - op -> {
757 - final FlowRule entry = op.target();
758 - final FlowId id = entry.id();
759 - ImmutableList<StoredFlowEntry> original = backupFlowTable.get(id);
760 - List<StoredFlowEntry> list = new ArrayList<>();
761 - if (original != null) {
762 - list.addAll(original);
763 - }
764 - list.remove(op.target());
765 - if (op.operator() == FlowRuleOperation.ADD) {
766 - list.add((StoredFlowEntry) entry);
767 - }
768 -
769 - ImmutableList<StoredFlowEntry> newValue = ImmutableList.copyOf(list);
770 - boolean success;
771 - if (original == null) {
772 - success = (backupFlowTable.putIfAbsent(id, newValue) == null);
773 - } else {
774 - success = backupFlowTable.replace(id, original, newValue);
775 - }
776 - if (!success) {
777 - log.error("Updating backup failed.");
778 - }
779 -
780 - }
781 - );
782 - } catch (ExecutionException e) {
783 - log.error("Failed to write to backups", e);
784 - }
785 -
786 - }
787 - }
788 -
789 - private class InternalFlowTable {
790 -
791 - /*
792 -        TODO: This needs to be cleaned up. Perhaps use the eventually consistent
793 -        map once it supports distribution across a sequence of instances.
794 - */
795 -
796 -
797 - private final ConcurrentMap<DeviceId, ConcurrentMap<FlowId, Set<StoredFlowEntry>>>
798 - flowEntries = new ConcurrentHashMap<>();
799 -
800 -
801 - private NewConcurrentHashMap<FlowId, Set<StoredFlowEntry>> lazyEmptyFlowTable() {
802 - return NewConcurrentHashMap.<FlowId, Set<StoredFlowEntry>>ifNeeded();
803 - }
804 -
805 - /**
806 -         * Returns the flow table for the specified device.
807 -         *
808 -         * @param deviceId identifier of the device
809 -         * @return map representing the flow table of the given device
810 - */
811 - private ConcurrentMap<FlowId, Set<StoredFlowEntry>> getFlowTable(DeviceId deviceId) {
812 - return createIfAbsentUnchecked(flowEntries,
813 - deviceId, lazyEmptyFlowTable());
814 - }
815 -
816 - private Set<StoredFlowEntry> getFlowEntriesInternal(DeviceId deviceId, FlowId flowId) {
817 - final ConcurrentMap<FlowId, Set<StoredFlowEntry>> flowTable = getFlowTable(deviceId);
818 - Set<StoredFlowEntry> r = flowTable.get(flowId);
819 - if (r == null) {
820 - final Set<StoredFlowEntry> concurrentlyAdded;
821 - r = new CopyOnWriteArraySet<>();
822 - concurrentlyAdded = flowTable.putIfAbsent(flowId, r);
823 - if (concurrentlyAdded != null) {
824 - return concurrentlyAdded;
825 - }
826 - }
827 - return r;
828 - }
829 -
830 - private StoredFlowEntry getFlowEntryInternal(FlowRule rule) {
831 - for (StoredFlowEntry f : getFlowEntriesInternal(rule.deviceId(), rule.id())) {
832 - if (f.equals(rule)) {
833 - return f;
834 - }
835 - }
836 - return null;
837 - }
838 -
839 - private Set<FlowEntry> getFlowEntriesInternal(DeviceId deviceId) {
840 - return getFlowTable(deviceId).values().stream()
841 - .flatMap((list -> list.stream())).collect(Collectors.toSet());
842 -
843 - }
844 -
845 -
846 - public StoredFlowEntry getFlowEntry(FlowRule rule) {
847 - return getFlowEntryInternal(rule);
848 - }
849 -
850 - public Set<FlowEntry> getFlowEntries(DeviceId deviceId) {
851 - return getFlowEntriesInternal(deviceId);
852 - }
853 -
854 - public Set<StoredFlowEntry> getFlowEntriesById(FlowEntry entry) {
855 - return getFlowEntriesInternal(entry.deviceId(), entry.id());
856 - }
857 -
858 - public void add(FlowEntry rule) {
859 - ((CopyOnWriteArraySet)
860 - getFlowEntriesInternal(rule.deviceId(), rule.id())).add(rule);
861 - }
862 -
863 - public boolean remove(DeviceId deviceId, FlowEntry rule) {
864 - return ((CopyOnWriteArraySet)
865 - getFlowEntriesInternal(deviceId, rule.id())).remove(rule);
866 - //return flowEntries.remove(deviceId, rule);
867 - }
868 -
869 - public void clearDevice(DeviceId did) {
870 - flowEntries.remove(did);
871 - }
872 - }
873 -
874 -
875 -}
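For reviewers tracing what is being dropped here: the InternalFlowTable above is a two-level concurrent map (device id, then flow id, then the set of entries sharing that id) whose buckets are created lazily. A minimal, self-contained sketch of the same pattern, assuming plain JDK types, String/long identifiers as hypothetical stand-ins for DeviceId/FlowId/StoredFlowEntry, and computeIfAbsent in place of the explicit putIfAbsent dance:

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.CopyOnWriteArraySet;

    // Sketch only: identifiers are simplified stand-ins, not the ONOS types.
    class FlowTableSketch {
        // deviceId -> flowId -> entries sharing that flow id
        private final ConcurrentMap<String, ConcurrentMap<Long, Set<String>>> table =
                new ConcurrentHashMap<>();

        private Set<String> bucket(String deviceId, long flowId) {
            // lazily create the per-device table and the per-flow bucket
            return table.computeIfAbsent(deviceId, d -> new ConcurrentHashMap<>())
                        .computeIfAbsent(flowId, f -> new CopyOnWriteArraySet<>());
        }

        void add(String deviceId, long flowId, String entry) {
            bucket(deviceId, flowId).add(entry);
        }

        boolean remove(String deviceId, long flowId, String entry) {
            return bucket(deviceId, flowId).remove(entry);
        }

        void clearDevice(String deviceId) {
            table.remove(deviceId);
        }
    }

The CopyOnWriteArraySet buckets trade write cost for lock-free iteration, which suits a read-heavy flow table.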
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.hz;
17 -
18 -import java.util.concurrent.Callable;
19 -import java.util.concurrent.ExecutionException;
20 -
21 -import com.google.common.base.Optional;
22 -import com.google.common.cache.ForwardingLoadingCache.SimpleForwardingLoadingCache;
23 -import com.google.common.cache.LoadingCache;
24 -
25 -/**
26 - * Wrapper around LoadingCache to handle negative hit scenario.
27 - * <p>
28 - * When the LoadingCache returns Absent,
29 - * this implementation invalidates the entry immediately to avoid
30 - * caching negative hits.
31 - *
32 - * @param <K> Cache key type
33 - * @param <V> Cache value type. (Optional{@literal <V>})
34 - */
35 -public class AbsentInvalidatingLoadingCache<K, V> extends
36 - SimpleForwardingLoadingCache<K, Optional<V>> {
37 -
38 - /**
39 - * Constructor.
40 - *
41 - * @param delegate actual {@link LoadingCache} to delegate loading.
42 - */
43 - public AbsentInvalidatingLoadingCache(LoadingCache<K, Optional<V>> delegate) {
44 - super(delegate);
45 - }
46 -
47 - @Override
48 - public Optional<V> get(K key) throws ExecutionException {
49 - Optional<V> v = super.get(key);
50 - if (!v.isPresent()) {
51 - invalidate(key);
52 - }
53 - return v;
54 - }
55 -
56 - @Override
57 - public Optional<V> getUnchecked(K key) {
58 - Optional<V> v = super.getUnchecked(key);
59 - if (!v.isPresent()) {
60 - invalidate(key);
61 - }
62 - return v;
63 - }
64 -
65 - @Override
66 - public Optional<V> apply(K key) {
67 - return getUnchecked(key);
68 - }
69 -
70 - @Override
71 - public Optional<V> getIfPresent(Object key) {
72 - Optional<V> v = super.getIfPresent(key);
73 - if (!v.isPresent()) {
74 - invalidate(key);
75 - }
76 - return v;
77 - }
78 -
79 - @Override
80 - public Optional<V> get(K key, Callable<? extends Optional<V>> valueLoader)
81 - throws ExecutionException {
82 -
83 - Optional<V> v = super.get(key, valueLoader);
84 - if (!v.isPresent()) {
85 - invalidate(key);
86 - }
87 - return v;
88 - }
89 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.hz;
17 -
18 -import com.google.common.base.Optional;
19 -import com.google.common.cache.LoadingCache;
20 -import com.hazelcast.core.EntryAdapter;
21 -import com.hazelcast.core.EntryEvent;
22 -import com.hazelcast.core.HazelcastInstance;
23 -import com.hazelcast.core.MapEvent;
24 -import com.hazelcast.core.Member;
25 -
26 -import org.apache.felix.scr.annotations.Activate;
27 -import org.apache.felix.scr.annotations.Component;
28 -import org.apache.felix.scr.annotations.Reference;
29 -import org.apache.felix.scr.annotations.ReferenceCardinality;
30 -import org.onosproject.event.Event;
31 -import org.onosproject.store.AbstractStore;
32 -import org.onosproject.store.StoreDelegate;
33 -import org.onosproject.store.serializers.KryoSerializer;
34 -import org.onosproject.store.serializers.StoreSerializer;
35 -import org.slf4j.Logger;
36 -
37 -import static com.google.common.base.Preconditions.checkNotNull;
38 -import static org.slf4j.LoggerFactory.getLogger;
39 -
40 -/**
41 - * Abstraction of a distributed store based on Hazelcast.
42 - */
43 -@Component
44 -public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDelegate<E>>
45 - extends AbstractStore<E, D> {
46 -
47 - protected final Logger log = getLogger(getClass());
48 -
49 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
50 - protected StoreService storeService;
51 -
52 - protected StoreSerializer serializer;
53 -
54 - protected HazelcastInstance theInstance;
55 -
56 - @Activate
57 - public void activate() {
58 - serializer = new KryoSerializer();
59 - theInstance = storeService.getHazelcastInstance();
60 - }
61 -
62 - /**
63 - * Serializes the specified object using the backing store service.
64 - *
65 - * @param obj object to be serialized
66 - * @return serialized object
67 - */
68 - protected byte[] serialize(Object obj) {
69 - return serializer.encode(obj);
70 - }
71 -
72 - /**
73 - * Deserializes the specified object using the backing store service.
74 - *
75 - * @param bytes bytes to be deserialized
76 - * @param <T> type of object
77 - * @return deserialized object
78 - */
79 - protected <T> T deserialize(byte[] bytes) {
80 - return serializer.decode(bytes);
81 - }
82 -
83 -
84 - /**
85 - * An IMap entry listener, which reflects each remote event to the cache.
86 - *
87 - * @param <K> IMap key type after deserialization
88 - * @param <V> IMap value type after deserialization
89 - */
90 - public class RemoteCacheEventHandler<K, V> extends EntryAdapter<byte[], byte[]> {
91 -
92 - private final Member localMember;
93 - private LoadingCache<K, Optional<V>> cache;
94 -
95 - /**
96 - * Constructor.
97 - *
98 - * @param cache cache to update
99 - */
100 - public RemoteCacheEventHandler(LoadingCache<K, Optional<V>> cache) {
101 - this.localMember = theInstance.getCluster().getLocalMember();
102 - this.cache = checkNotNull(cache);
103 - }
104 -
105 - @Override
106 - public void mapCleared(MapEvent event) {
107 - if (localMember.equals(event.getMember())) {
108 - // ignore locally triggered event
109 - return;
110 - }
111 - cache.invalidateAll();
112 - }
113 -
114 - @Override
115 - public void entryAdded(EntryEvent<byte[], byte[]> event) {
116 - if (localMember.equals(event.getMember())) {
117 - // ignore locally triggered event
118 - return;
119 - }
120 - K key = deserialize(event.getKey());
121 - V newVal = deserialize(event.getValue());
122 - Optional<V> newValue = Optional.of(newVal);
123 - cache.asMap().putIfAbsent(key, newValue);
124 - onAdd(key, newVal);
125 - }
126 -
127 - @Override
128 - public void entryUpdated(EntryEvent<byte[], byte[]> event) {
129 - if (localMember.equals(event.getMember())) {
130 - // ignore locally triggered event
131 - return;
132 - }
133 - K key = deserialize(event.getKey());
134 - V oldVal = deserialize(event.getOldValue());
135 - Optional<V> oldValue = Optional.fromNullable(oldVal);
136 - V newVal = deserialize(event.getValue());
137 - Optional<V> newValue = Optional.of(newVal);
138 - cache.asMap().replace(key, oldValue, newValue);
139 - onUpdate(key, oldVal, newVal);
140 - }
141 -
142 - @Override
143 - public void entryRemoved(EntryEvent<byte[], byte[]> event) {
144 - if (localMember.equals(event.getMember())) {
145 - // ignore locally triggered event
146 - return;
147 - }
148 - K key = deserialize(event.getKey());
149 - V val = deserialize(event.getOldValue());
150 - cache.invalidate(key);
151 - onRemove(key, val);
152 - }
153 -
154 - /**
155 - * Cache entry addition hook.
156 - *
157 - * @param key new key
158 - * @param newVal new value
159 - */
160 - protected void onAdd(K key, V newVal) {
161 - }
162 -
163 - /**
164 - * Cache entry update hook.
165 - *
166 -         * @param key updated key
167 - * @param oldValue old value
168 - * @param newVal new value
169 - */
170 - protected void onUpdate(K key, V oldValue, V newVal) {
171 - }
172 -
173 - /**
174 - * Cache entry remove hook.
175 - *
176 -         * @param key removed key
177 -         * @param val removed value
178 - */
179 - protected void onRemove(K key, V val) {
180 - }
181 - }
182 -
183 - /**
184 - * Distributed object remote event entry listener.
185 - *
186 - * @param <K> Entry key type after deserialization
187 - * @param <V> Entry value type after deserialization
188 - */
189 - public class RemoteEventHandler<K, V> extends EntryAdapter<byte[], byte[]> {
190 -
191 - private final Member localMember;
192 -
193 - public RemoteEventHandler() {
194 - this.localMember = theInstance.getCluster().getLocalMember();
195 - }
196 - @Override
197 - public void entryAdded(EntryEvent<byte[], byte[]> event) {
198 - if (localMember.equals(event.getMember())) {
199 - // ignore locally triggered event
200 - return;
201 - }
202 - K key = deserialize(event.getKey());
203 - V newVal = deserialize(event.getValue());
204 - onAdd(key, newVal);
205 - }
206 -
207 - @Override
208 - public void entryRemoved(EntryEvent<byte[], byte[]> event) {
209 - if (localMember.equals(event.getMember())) {
210 - // ignore locally triggered event
211 - return;
212 - }
213 - K key = deserialize(event.getKey());
214 - V val = deserialize(event.getValue());
215 - onRemove(key, val);
216 - }
217 -
218 - @Override
219 - public void entryUpdated(EntryEvent<byte[], byte[]> event) {
220 - if (localMember.equals(event.getMember())) {
221 - // ignore locally triggered event
222 - return;
223 - }
224 - K key = deserialize(event.getKey());
225 - V oldVal = deserialize(event.getOldValue());
226 - V newVal = deserialize(event.getValue());
227 - onUpdate(key, oldVal, newVal);
228 - }
229 -
230 - /**
231 - * Remote entry addition hook.
232 - *
233 - * @param key new key
234 - * @param newVal new value
235 - */
236 - protected void onAdd(K key, V newVal) {
237 - }
238 -
239 - /**
240 - * Remote entry update hook.
241 - *
242 -         * @param key updated key
243 - * @param oldValue old value
244 - * @param newVal new value
245 - */
246 - protected void onUpdate(K key, V oldValue, V newVal) {
247 - }
248 -
249 - /**
250 - * Remote entry remove hook.
251 - *
252 -         * @param key removed key
253 -         * @param val removed value
254 - */
255 - protected void onRemove(K key, V val) {
256 - }
257 - }
258 -
259 -}
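The RemoteCacheEventHandler and RemoteEventHandler removed above reduce to one guard: drop events that this node itself originated (its cache is already current) and mirror everyone else's into the local view. A hedged sketch of just that guard, with hypothetical stand-in types instead of the Hazelcast Member/EntryEvent API:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Sketch only: the String member ids and plain map stand in for Hazelcast types.
    class RemoteOnlyMirror<K, V> {
        private final String localMemberId;
        private final ConcurrentMap<K, V> localView = new ConcurrentHashMap<>();

        RemoteOnlyMirror(String localMemberId) {
            this.localMemberId = localMemberId;
        }

        void onEntryUpdated(String originMemberId, K key, V newValue) {
            if (localMemberId.equals(originMemberId)) {
                return; // locally triggered event; nothing to mirror
            }
            localView.put(key, newValue);
        }

        void onEntryRemoved(String originMemberId, K key) {
            if (localMemberId.equals(originMemberId)) {
                return;
            }
            localView.remove(key);
        }
    }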
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.hz;
17 -
18 -import static com.google.common.base.Preconditions.checkNotNull;
19 -
20 -import org.onosproject.store.serializers.StoreSerializer;
21 -
22 -import com.google.common.base.Optional;
23 -import com.google.common.cache.CacheLoader;
24 -import com.hazelcast.core.IMap;
25 -
26 -/**
27 - * CacheLoader that wraps each map value in an Optional
28 - * to handle negative hits on the underlying IMap.
29 - *
30 - * @param <K> IMap key type after deserialization
31 - * @param <V> IMap value type after deserialization
32 - */
33 -public final class OptionalCacheLoader<K, V> extends
34 - CacheLoader<K, Optional<V>> {
35 -
36 - private final StoreSerializer serializer;
37 - private IMap<byte[], byte[]> rawMap;
38 -
39 - /**
40 - * Constructor.
41 - *
42 - * @param serializer to use for serialization
43 - * @param rawMap underlying IMap
44 - */
45 - public OptionalCacheLoader(StoreSerializer serializer, IMap<byte[], byte[]> rawMap) {
46 - this.serializer = checkNotNull(serializer);
47 - this.rawMap = checkNotNull(rawMap);
48 - }
49 -
50 - @Override
51 - public Optional<V> load(K key) throws Exception {
52 - byte[] keyBytes = serializer.encode(key);
53 - byte[] valBytes = rawMap.get(keyBytes);
54 - if (valBytes == null) {
55 - return Optional.absent();
56 - }
57 - V dev = serializer.decode(valBytes);
58 - return Optional.of(dev);
59 - }
60 -}
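Taken together, the two classes above implement one idea: load through a cache, have the loader wrap misses in Optional.absent() instead of returning null, and invalidate absent entries right away so negative hits are never cached. A minimal sketch of that idea against an in-memory map, assuming only the Guava LoadingCache API (the ConcurrentHashMap here is a hypothetical stand-in for the Hazelcast IMap):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    import com.google.common.base.Optional;
    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    class NegativeHitFreeCache {
        private final Map<String, String> backing = new ConcurrentHashMap<>();

        private final LoadingCache<String, Optional<String>> cache = CacheBuilder.newBuilder()
                .build(new CacheLoader<String, Optional<String>>() {
                    @Override
                    public Optional<String> load(String key) {
                        // loader never returns null; misses become Optional.absent()
                        return Optional.fromNullable(backing.get(key));
                    }
                });

        Optional<String> get(String key) {
            Optional<String> value = cache.getUnchecked(key);
            if (!value.isPresent()) {
                cache.invalidate(key); // do not keep the negative hit around
            }
            return value;
        }

        void put(String key, String value) {
            backing.put(key, value);
            cache.invalidate(key); // let the next read pick up the fresh value
        }
    }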
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.hz;
17 -
18 -import static com.google.common.base.Preconditions.checkNotNull;
19 -
20 -import java.util.ArrayList;
21 -import java.util.Collection;
22 -import java.util.Collections;
23 -import java.util.HashMap;
24 -import java.util.HashSet;
25 -import java.util.IdentityHashMap;
26 -import java.util.Map;
27 -import java.util.Set;
28 -import java.util.concurrent.Future;
29 -import java.util.concurrent.TimeUnit;
30 -
31 -import org.apache.commons.lang3.tuple.Pair;
32 -import org.onosproject.store.serializers.StoreSerializer;
33 -
34 -import com.google.common.base.Function;
35 -import com.google.common.util.concurrent.Futures;
36 -import com.hazelcast.core.EntryEvent;
37 -import com.hazelcast.core.EntryListener;
38 -import com.hazelcast.core.EntryView;
39 -import com.hazelcast.core.ExecutionCallback;
40 -import com.hazelcast.core.IMap;
41 -import com.hazelcast.core.MapEvent;
42 -import com.hazelcast.map.EntryProcessor;
43 -import com.hazelcast.map.MapInterceptor;
44 -import com.hazelcast.mapreduce.JobTracker;
45 -import com.hazelcast.mapreduce.aggregation.Aggregation;
46 -import com.hazelcast.mapreduce.aggregation.Supplier;
47 -import com.hazelcast.monitor.LocalMapStats;
48 -import com.hazelcast.query.Predicate;
49 -
50 -/**
51 - * Wrapper around IMap&lt;byte[], byte[]&gt; which serializes/deserializes
52 - * key and value using StoreSerializer.
53 - *
54 - * @param <K> key type
55 - * @param <V> value type
56 - */
57 -public class SMap<K, V> implements IMap<K, V> {
58 -
59 - private final IMap<byte[], byte[]> m;
60 - private final StoreSerializer serializer;
61 -
62 - /**
63 - * Creates a SMap instance.
64 - *
65 - * @param baseMap base IMap to use
66 - * @param serializer serializer to use for both key and value
67 - */
68 - public SMap(IMap<byte[], byte[]> baseMap, StoreSerializer serializer) {
69 - this.m = checkNotNull(baseMap);
70 - this.serializer = checkNotNull(serializer);
71 - }
72 -
73 - @Override
74 - public int size() {
75 - return m.size();
76 - }
77 -
78 - @Override
79 - public boolean isEmpty() {
80 - return m.isEmpty();
81 - }
82 -
83 - @Override
84 - public void putAll(Map<? extends K, ? extends V> map) {
85 - Map<byte[], byte[]> sm = new IdentityHashMap<>(map.size());
86 - for (java.util.Map.Entry<? extends K, ? extends V> e : map.entrySet()) {
87 - sm.put(serializeKey(e.getKey()), serializeVal(e.getValue()));
88 - }
89 - m.putAll(sm);
90 - }
91 -
92 - @Deprecated
93 - @Override
94 - public Object getId() {
95 - return m.getId();
96 - }
97 -
98 - @Override
99 - public String getPartitionKey() {
100 - return m.getPartitionKey();
101 - }
102 -
103 - @Override
104 - public String getName() {
105 - return m.getName();
106 - }
107 -
108 - @Override
109 - public String getServiceName() {
110 - return m.getServiceName();
111 - }
112 -
113 - @Override
114 - public void destroy() {
115 - m.destroy();
116 - }
117 -
118 - @Override
119 - public boolean containsKey(Object key) {
120 - return m.containsKey(serializeKey(key));
121 - }
122 -
123 - @Override
124 - public boolean containsValue(Object value) {
125 - return m.containsValue(serializeVal(value));
126 - }
127 -
128 - @Override
129 - public V get(Object key) {
130 - return deserializeVal(m.get(serializeKey(key)));
131 - }
132 -
133 - @Override
134 - public V put(K key, V value) {
135 - return deserializeVal(m.put(serializeKey(key), serializeVal(value)));
136 - }
137 -
138 - @Override
139 - public V remove(Object key) {
140 - return deserializeVal(m.remove(serializeKey(key)));
141 - }
142 -
143 - @Override
144 - public boolean remove(Object key, Object value) {
145 - return m.remove(serializeKey(key), serializeVal(value));
146 - }
147 -
148 - @Override
149 - public void delete(Object key) {
150 - m.delete(serializeKey(key));
151 - }
152 -
153 - @Override
154 - public void flush() {
155 - m.flush();
156 - }
157 -
158 - @Override
159 - public Map<K, V> getAll(Set<K> keys) {
160 - Set<byte[]> sk = serializeKeySet(keys);
161 - Map<byte[], byte[]> bm = m.getAll(sk);
162 - Map<K, V> dsm = new HashMap<>(bm.size());
163 - for (java.util.Map.Entry<byte[], byte[]> e : bm.entrySet()) {
164 - dsm.put(deserializeKey(e.getKey()), deserializeVal(e.getValue()));
165 - }
166 - return dsm;
167 - }
168 -
169 - @Override
170 - public void loadAll(boolean replaceExistingValues) {
171 - m.loadAll(replaceExistingValues);
172 - }
173 -
174 - @Override
175 - public void loadAll(Set<K> keys, boolean replaceExistingValues) {
176 - Set<byte[]> sk = serializeKeySet(keys);
177 - m.loadAll(sk, replaceExistingValues);
178 - }
179 -
180 - @Override
181 - public void clear() {
182 - m.clear();
183 - }
184 -
185 - @Override
186 - public Future<V> getAsync(K key) {
187 - Future<byte[]> f = m.getAsync(serializeKey(key));
188 - return Futures.lazyTransform(f, new DeserializeVal());
189 - }
190 -
191 - @Override
192 - public Future<V> putAsync(K key, V value) {
193 - Future<byte[]> f = m.putAsync(serializeKey(key), serializeVal(value));
194 - return Futures.lazyTransform(f, new DeserializeVal());
195 - }
196 -
197 - @Override
198 - public Future<V> putAsync(K key, V value, long ttl, TimeUnit timeunit) {
199 - Future<byte[]> f = m.putAsync(serializeKey(key), serializeVal(value), ttl, timeunit);
200 - return Futures.lazyTransform(f, new DeserializeVal());
201 - }
202 -
203 - @Override
204 - public Future<V> removeAsync(K key) {
205 - Future<byte[]> f = m.removeAsync(serializeKey(key));
206 - return Futures.lazyTransform(f, new DeserializeVal());
207 - }
208 -
209 - @Override
210 - public boolean tryRemove(K key, long timeout, TimeUnit timeunit) {
211 - return m.tryRemove(serializeKey(key), timeout, timeunit);
212 - }
213 -
214 - @Override
215 - public boolean tryPut(K key, V value, long timeout, TimeUnit timeunit) {
216 - return m.tryPut(serializeKey(key), serializeVal(value), timeout, timeunit);
217 - }
218 -
219 - @Override
220 - public V put(K key, V value, long ttl, TimeUnit timeunit) {
221 - return deserializeVal(m.put(serializeKey(key), serializeVal(value), ttl, timeunit));
222 - }
223 -
224 - @Override
225 - public void putTransient(K key, V value, long ttl, TimeUnit timeunit) {
226 - m.putTransient(serializeKey(key), serializeVal(value), ttl, timeunit);
227 - }
228 -
229 - @Override
230 - public V putIfAbsent(K key, V value) {
231 - return deserializeVal(m.putIfAbsent(serializeKey(key), serializeVal(value)));
232 - }
233 -
234 - @Override
235 - public V putIfAbsent(K key, V value, long ttl, TimeUnit timeunit) {
236 - return deserializeVal(m.putIfAbsent(serializeKey(key), serializeVal(value), ttl, timeunit));
237 - }
238 -
239 - @Override
240 - public boolean replace(K key, V oldValue, V newValue) {
241 - return m.replace(serializeKey(key), serializeVal(oldValue), serializeVal(newValue));
242 - }
243 -
244 - @Override
245 - public V replace(K key, V value) {
246 - return deserializeVal(m.replace(serializeKey(key), serializeVal(value)));
247 - }
248 -
249 - @Override
250 - public void set(K key, V value) {
251 - m.set(serializeKey(key), serializeVal(value));
252 - }
253 -
254 - @Override
255 - public void set(K key, V value, long ttl, TimeUnit timeunit) {
256 - m.set(serializeKey(key), serializeVal(value), ttl, timeunit);
257 - }
258 -
259 - @Override
260 - public void lock(K key) {
261 - m.lock(serializeKey(key));
262 - }
263 -
264 - @Override
265 - public void lock(K key, long leaseTime, TimeUnit timeUnit) {
266 - m.lock(serializeKey(key), leaseTime, timeUnit);
267 - }
268 -
269 - @Override
270 - public boolean isLocked(K key) {
271 - return m.isLocked(serializeKey(key));
272 - }
273 -
274 - @Override
275 - public boolean tryLock(K key) {
276 - return m.tryLock(serializeKey(key));
277 - }
278 -
279 - @Override
280 - public boolean tryLock(K key, long time, TimeUnit timeunit)
281 - throws InterruptedException {
282 - return m.tryLock(serializeKey(key), time, timeunit);
283 - }
284 -
285 - @Override
286 - public void unlock(K key) {
287 - m.unlock(serializeKey(key));
288 - }
289 -
290 - @Override
291 - public void forceUnlock(K key) {
292 - m.forceUnlock(serializeKey(key));
293 - }
294 -
295 - @Override
296 - public String addLocalEntryListener(EntryListener<K, V> listener) {
297 - return m.addLocalEntryListener(new BaseEntryListener(listener));
298 - }
299 -
300 - /**
301 - * {@inheritDoc}
302 - *
303 - * @deprecated not implemented yet
304 - * @throws UnsupportedOperationException not implemented yet
305 - */
306 - @Deprecated
307 - @Override
308 - public String addLocalEntryListener(EntryListener<K, V> listener,
309 - Predicate<K, V> predicate, boolean includeValue) {
310 - throw new UnsupportedOperationException();
311 - }
312 -
313 - /**
314 - * {@inheritDoc}
315 - *
316 - * @deprecated not implemented yet
317 - * @throws UnsupportedOperationException not implemented yet
318 - */
319 - @Deprecated
320 - @Override
321 - public String addLocalEntryListener(EntryListener<K, V> listener,
322 - Predicate<K, V> predicate, K key, boolean includeValue) {
323 - throw new UnsupportedOperationException();
324 - }
325 -
326 - /**
327 - * {@inheritDoc}
328 - *
329 - * @deprecated not implemented yet
330 - * @throws UnsupportedOperationException not implemented yet
331 - */
332 - @Deprecated
333 - @Override
334 - public String addInterceptor(MapInterceptor interceptor) {
335 - throw new UnsupportedOperationException();
336 - }
337 -
338 - @Override
339 - public void removeInterceptor(String id) {
340 - m.removeInterceptor(id);
341 - }
342 -
343 - @Override
344 - public String addEntryListener(EntryListener<K, V> listener,
345 - boolean includeValue) {
346 - return m.addEntryListener(new BaseEntryListener(listener), includeValue);
347 - }
348 -
349 - @Override
350 - public boolean removeEntryListener(String id) {
351 - return m.removeEntryListener(id);
352 - }
353 -
354 - @Override
355 - public String addEntryListener(EntryListener<K, V> listener, K key,
356 - boolean includeValue) {
357 - return m.addEntryListener(new BaseEntryListener(listener),
358 - serializeKey(key), includeValue);
359 - }
360 -
361 - /**
362 - * {@inheritDoc}
363 - *
364 - * @deprecated not implemented yet
365 - * @throws UnsupportedOperationException not implemented yet
366 - */
367 - @Deprecated
368 - @Override
369 - public String addEntryListener(EntryListener<K, V> listener,
370 - Predicate<K, V> predicate, boolean includeValue) {
371 - throw new UnsupportedOperationException();
372 - }
373 -
374 - /**
375 - * {@inheritDoc}
376 - *
377 - * @deprecated not implemented yet
378 - * @throws UnsupportedOperationException not implemented yet
379 - */
380 - @Deprecated
381 - @Override
382 - public String addEntryListener(EntryListener<K, V> listener,
383 - Predicate<K, V> predicate, K key, boolean includeValue) {
384 - throw new UnsupportedOperationException();
385 - }
386 -
387 - /**
388 - * {@inheritDoc}
389 - *
390 - * @deprecated not implemented yet
391 - * @throws UnsupportedOperationException not implemented yet
392 - */
393 - @Deprecated
394 - @Override
395 - public EntryView<K, V> getEntryView(K key) {
396 - throw new UnsupportedOperationException();
397 - }
398 -
399 - @Override
400 - public boolean evict(K key) {
401 - return m.evict(serializeKey(key));
402 - }
403 -
404 - @Override
405 - public void evictAll() {
406 - m.evictAll();
407 - }
408 -
409 - @Override
410 - public Set<K> keySet() {
411 - return deserializeKeySet(m.keySet());
412 - }
413 -
414 - @Override
415 - public Collection<V> values() {
416 - return deserializeVal(m.values());
417 - }
418 -
419 - @Override
420 - public Set<java.util.Map.Entry<K, V>> entrySet() {
421 - return deserializeEntrySet(m.entrySet());
422 - }
423 -
424 - /**
425 - * {@inheritDoc}
426 - *
427 - * @deprecated not implemented yet
428 - * @throws UnsupportedOperationException not implemented yet
429 - */
430 - @Deprecated
431 - @SuppressWarnings("rawtypes")
432 - @Override
433 - public Set<K> keySet(Predicate predicate) {
434 - throw new UnsupportedOperationException();
435 - }
436 -
437 - /**
438 - * {@inheritDoc}
439 - *
440 - * @deprecated not implemented yet
441 - * @throws UnsupportedOperationException not implemented yet
442 - */
443 - @Deprecated
444 - @SuppressWarnings("rawtypes")
445 - @Override
446 - public Set<java.util.Map.Entry<K, V>> entrySet(Predicate predicate) {
447 - throw new UnsupportedOperationException();
448 - }
449 -
450 - /**
451 - * {@inheritDoc}
452 - *
453 - * @deprecated not implemented yet
454 - * @throws UnsupportedOperationException not implemented yet
455 - */
456 - @Deprecated
457 - @SuppressWarnings("rawtypes")
458 - @Override
459 - public Collection<V> values(Predicate predicate) {
460 - throw new UnsupportedOperationException();
461 - }
462 -
463 - @Override
464 - public Set<K> localKeySet() {
465 - return deserializeKeySet(m.localKeySet());
466 - }
467 -
468 - /**
469 - * {@inheritDoc}
470 - *
471 - * @deprecated not implemented yet
472 - * @throws UnsupportedOperationException not implemented yet
473 - */
474 - @Deprecated
475 - @SuppressWarnings("rawtypes")
476 - @Override
477 - public Set<K> localKeySet(Predicate predicate) {
478 - throw new UnsupportedOperationException();
479 - }
480 -
481 - /**
482 - * {@inheritDoc}
483 - *
484 - * @deprecated not implemented yet
485 - * @throws UnsupportedOperationException not implemented yet
486 - */
487 - @Deprecated
488 - @Override
489 - public void addIndex(String attribute, boolean ordered) {
490 - throw new UnsupportedOperationException();
491 - }
492 -
493 - @Override
494 - public LocalMapStats getLocalMapStats() {
495 - return m.getLocalMapStats();
496 - }
497 -
498 - /**
499 - * {@inheritDoc}
500 - *
501 - * @deprecated not implemented yet
502 - * @throws UnsupportedOperationException not implemented yet
503 - */
504 - @Deprecated
505 - @SuppressWarnings("rawtypes")
506 - @Override
507 - public Object executeOnKey(K key, EntryProcessor entryProcessor) {
508 - throw new UnsupportedOperationException();
509 - }
510 -
511 - /**
512 - * {@inheritDoc}
513 - *
514 - * @deprecated not implemented yet
515 - * @throws UnsupportedOperationException not implemented yet
516 - */
517 - @Deprecated
518 - @SuppressWarnings("rawtypes")
519 - @Override
520 - public Map<K, Object> executeOnKeys(Set<K> keys,
521 - EntryProcessor entryProcessor) {
522 - throw new UnsupportedOperationException();
523 - }
524 -
525 - /**
526 - * {@inheritDoc}
527 - *
528 - * @deprecated not implemented yet
529 - * @throws UnsupportedOperationException not implemented yet
530 - */
531 - @Deprecated
532 - @SuppressWarnings("rawtypes")
533 - @Override
534 - public void submitToKey(K key, EntryProcessor entryProcessor,
535 - ExecutionCallback callback) {
536 - throw new UnsupportedOperationException();
537 - }
538 -
539 - /**
540 - * {@inheritDoc}
541 - *
542 - * @deprecated not implemented yet
543 - * @throws UnsupportedOperationException not implemented yet
544 - */
545 - @Deprecated
546 - @SuppressWarnings("rawtypes")
547 - @Override
548 - public Future submitToKey(K key, EntryProcessor entryProcessor) {
549 - throw new UnsupportedOperationException();
550 - }
551 -
552 - /**
553 - * {@inheritDoc}
554 - *
555 - * @deprecated not implemented yet
556 - * @throws UnsupportedOperationException not implemented yet
557 - */
558 - @Deprecated
559 - @SuppressWarnings("rawtypes")
560 - @Override
561 - public Map<K, Object> executeOnEntries(EntryProcessor entryProcessor) {
562 - throw new UnsupportedOperationException();
563 - }
564 -
565 - /**
566 - * {@inheritDoc}
567 - *
568 - * @deprecated not implemented yet
569 - * @throws UnsupportedOperationException not implemented yet
570 - */
571 - @Deprecated
572 - @SuppressWarnings("rawtypes")
573 - @Override
574 - public Map<K, Object> executeOnEntries(EntryProcessor entryProcessor,
575 - Predicate predicate) {
576 - throw new UnsupportedOperationException();
577 - }
578 -
579 - /**
580 - * {@inheritDoc}
581 - *
582 - * @deprecated not implemented yet
583 - * @throws UnsupportedOperationException not implemented yet
584 - */
585 - @Deprecated
586 - @Override
587 - public <SuppliedValue, Result> Result aggregate(
588 - Supplier<K, V, SuppliedValue> supplier,
589 - Aggregation<K, SuppliedValue, Result> aggregation) {
590 -
591 - throw new UnsupportedOperationException();
592 - }
593 -
594 - /**
595 - * {@inheritDoc}
596 - *
597 - * @deprecated not implemented yet
598 - * @throws UnsupportedOperationException not implemented yet
599 - */
600 - @Deprecated
601 - @Override
602 - public <SuppliedValue, Result> Result aggregate(
603 - Supplier<K, V, SuppliedValue> supplier,
604 - Aggregation<K, SuppliedValue, Result> aggregation,
605 - JobTracker jobTracker) {
606 -
607 - throw new UnsupportedOperationException();
608 - }
609 -
610 - private byte[] serializeKey(Object key) {
611 - return serializer.encode(key);
612 - }
613 -
614 - private K deserializeKey(byte[] key) {
615 - return serializer.decode(key);
616 - }
617 -
618 - private byte[] serializeVal(Object val) {
619 - return serializer.encode(val);
620 - }
621 -
622 - private V deserializeVal(byte[] val) {
623 - if (val == null) {
624 - return null;
625 - }
626 - return serializer.decode(val.clone());
627 - }
628 -
629 - private Set<byte[]> serializeKeySet(Set<K> keys) {
630 - Set<byte[]> sk = Collections.newSetFromMap(new IdentityHashMap<byte[], Boolean>(keys.size()));
631 - for (K key : keys) {
632 - sk.add(serializeKey(key));
633 - }
634 - return sk;
635 - }
636 -
637 - private Set<K> deserializeKeySet(Set<byte[]> keys) {
638 - Set<K> dsk = new HashSet<>(keys.size());
639 - for (byte[] key : keys) {
640 - dsk.add(deserializeKey(key));
641 - }
642 - return dsk;
643 - }
644 -
645 - private Collection<V> deserializeVal(Collection<byte[]> vals) {
646 - Collection<V> dsl = new ArrayList<>(vals.size());
647 - for (byte[] val : vals) {
648 - dsl.add(deserializeVal(val));
649 - }
650 - return dsl;
651 - }
652 -
653 - private Set<java.util.Map.Entry<K, V>> deserializeEntrySet(
654 - Set<java.util.Map.Entry<byte[], byte[]>> entries) {
655 -
656 - Set<java.util.Map.Entry<K, V>> dse = new HashSet<>(entries.size());
657 - for (java.util.Map.Entry<byte[], byte[]> entry : entries) {
658 - dse.add(Pair.of(deserializeKey(entry.getKey()),
659 - deserializeVal(entry.getValue())));
660 - }
661 - return dse;
662 - }
663 -
664 - private final class BaseEntryListener
665 - implements EntryListener<byte[], byte[]> {
666 -
667 - private final EntryListener<K, V> listener;
668 -
669 - public BaseEntryListener(EntryListener<K, V> listener) {
670 - this.listener = listener;
671 - }
672 -
673 - @Override
674 - public void mapEvicted(MapEvent event) {
675 - listener.mapEvicted(event);
676 - }
677 -
678 - @Override
679 - public void mapCleared(MapEvent event) {
680 - listener.mapCleared(event);
681 - }
682 -
683 - @Override
684 - public void entryUpdated(EntryEvent<byte[], byte[]> event) {
685 - EntryEvent<K, V> evt = new EntryEvent<K, V>(
686 - event.getSource(),
687 - event.getMember(),
688 - event.getEventType().getType(),
689 - deserializeKey(event.getKey()),
690 - deserializeVal(event.getOldValue()),
691 - deserializeVal(event.getValue()));
692 -
693 - listener.entryUpdated(evt);
694 - }
695 -
696 - @Override
697 - public void entryRemoved(EntryEvent<byte[], byte[]> event) {
698 - EntryEvent<K, V> evt = new EntryEvent<K, V>(
699 - event.getSource(),
700 - event.getMember(),
701 - event.getEventType().getType(),
702 - deserializeKey(event.getKey()),
703 - deserializeVal(event.getOldValue()),
704 - null);
705 -
706 - listener.entryRemoved(evt);
707 - }
708 -
709 - @Override
710 - public void entryEvicted(EntryEvent<byte[], byte[]> event) {
711 - EntryEvent<K, V> evt = new EntryEvent<K, V>(
712 - event.getSource(),
713 - event.getMember(),
714 - event.getEventType().getType(),
715 - deserializeKey(event.getKey()),
716 - deserializeVal(event.getOldValue()),
717 - deserializeVal(event.getValue()));
718 -
719 - listener.entryEvicted(evt);
720 - }
721 -
722 - @Override
723 - public void entryAdded(EntryEvent<byte[], byte[]> event) {
724 - EntryEvent<K, V> evt = new EntryEvent<K, V>(
725 - event.getSource(),
726 - event.getMember(),
727 - event.getEventType().getType(),
728 - deserializeKey(event.getKey()),
729 - null,
730 - deserializeVal(event.getValue()));
731 -
732 - listener.entryAdded(evt);
733 - }
734 - }
735 -
736 - private final class DeserializeVal implements Function<byte[], V> {
737 - @Override
738 - public V apply(byte[] input) {
739 - return deserializeVal(input);
740 - }
741 - }
742 -
743 -}
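SMap above, and SQueue and STxMap below, are all the same decorator: a typed facade over a byte[]-based Hazelcast structure that encodes on the way in and decodes on the way out. A self-contained sketch of the map flavour, using a hypothetical Serializer interface in place of StoreSerializer and Base64 strings on the key side (the real code can key by raw byte[] because IMap compares keys by content, which a plain java.util.Map would not):

    import java.util.Base64;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Sketch only: Serializer is a hypothetical stand-in for StoreSerializer.
    interface Serializer {
        byte[] encode(Object obj);
        <T> T decode(byte[] bytes);
    }

    class SerializedMapSketch<K, V> {
        private final Map<String, byte[]> backing = new ConcurrentHashMap<>();
        private final Serializer serializer;

        SerializedMapSketch(Serializer serializer) {
            this.serializer = serializer;
        }

        private String key(K key) {
            // Base64 gives the encoded key bytes content-based equality
            return Base64.getEncoder().encodeToString(serializer.encode(key));
        }

        V put(K key, V value) {
            return decodeOrNull(backing.put(key(key), serializer.encode(value)));
        }

        V get(K key) {
            return decodeOrNull(backing.get(key(key)));
        }

        V remove(K key) {
            return decodeOrNull(backing.remove(key(key)));
        }

        private V decodeOrNull(byte[] bytes) {
            return bytes == null ? null : serializer.<V>decode(bytes);
        }
    }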
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.hz;
17 -
18 -import com.google.common.base.Function;
19 -import com.google.common.collect.FluentIterable;
20 -import com.hazelcast.core.IQueue;
21 -import com.hazelcast.core.ItemEvent;
22 -import com.hazelcast.core.ItemListener;
23 -import com.hazelcast.monitor.LocalQueueStats;
24 -
25 -import org.onosproject.store.serializers.StoreSerializer;
26 -
27 -import java.util.Collection;
28 -import java.util.Iterator;
29 -import java.util.concurrent.TimeUnit;
30 -
31 -import static com.google.common.base.Preconditions.checkNotNull;
32 -
33 -/**
34 - * Wrapper around IQueue&lt;byte[]&gt; which serializes/deserializes
35 - * each element using StoreSerializer.
36 - *
37 - * @param <T> element type
38 - */
39 -public class SQueue<T> implements IQueue<T> {
40 -
41 - private final IQueue<byte[]> q;
42 - private final StoreSerializer serializer;
43 -
44 - /**
45 - * Creates a SQueue instance.
46 - *
47 - * @param baseQueue base IQueue to use
48 - * @param serializer serializer to use for both key and value
49 - */
50 - public SQueue(IQueue<byte[]> baseQueue, StoreSerializer serializer) {
51 - this.q = checkNotNull(baseQueue);
52 - this.serializer = checkNotNull(serializer);
53 - }
54 -
55 - private byte[] serialize(Object key) {
56 - return serializer.encode(key);
57 - }
58 -
59 - private T deserialize(byte[] key) {
60 - return serializer.decode(key);
61 - }
62 -
63 - @Override
64 - public boolean add(T t) {
65 - return q.add(serialize(t));
66 - }
67 -
68 - @Override
69 - public boolean offer(T t) {
70 - return q.offer(serialize(t));
71 - }
72 -
73 - @Override
74 - public void put(T t) throws InterruptedException {
75 - q.put(serialize(t));
76 - }
77 -
78 - @Override
79 - public boolean offer(T t, long l, TimeUnit timeUnit) throws InterruptedException {
80 - return q.offer(serialize(t), l, timeUnit);
81 - }
82 -
83 - @Override
84 - public T take() throws InterruptedException {
85 - return deserialize(q.take());
86 - }
87 -
88 - @Override
89 - public T poll(long l, TimeUnit timeUnit) throws InterruptedException {
90 - return deserialize(q.poll(l, timeUnit));
91 - }
92 -
93 - @Override
94 - public int remainingCapacity() {
95 - return q.remainingCapacity();
96 - }
97 -
98 - @Override
99 - public boolean remove(Object o) {
100 - return q.remove(serialize(o));
101 - }
102 -
103 - @Override
104 - public boolean contains(Object o) {
105 - return q.contains(serialize(o));
106 - }
107 -
108 - @Deprecated // not implemented yet
109 - @Override
110 - public int drainTo(Collection<? super T> collection) {
111 - throw new UnsupportedOperationException();
112 - }
113 -
114 - @Deprecated // not implemented yet
115 - @Override
116 - public int drainTo(Collection<? super T> collection, int i) {
117 - throw new UnsupportedOperationException();
118 - }
119 -
120 - @Override
121 - public T remove() {
122 - return deserialize(q.remove());
123 - }
124 -
125 - @Override
126 - public T poll() {
127 - return deserialize(q.poll());
128 - }
129 -
130 - @Override
131 - public T element() {
132 - return deserialize(q.element());
133 - }
134 -
135 - @Override
136 - public T peek() {
137 - return deserialize(q.peek());
138 - }
139 -
140 - @Override
141 - public int size() {
142 - return q.size();
143 - }
144 -
145 - @Override
146 - public boolean isEmpty() {
147 - return q.isEmpty();
148 - }
149 -
150 - @Override
151 - public Iterator<T> iterator() {
152 - return FluentIterable.from(q)
153 - .transform(new DeserializeVal())
154 - .iterator();
155 - }
156 -
157 - @Deprecated // not implemented yet
158 - @Override
159 - public Object[] toArray() {
160 - throw new UnsupportedOperationException();
161 - }
162 -
163 - @Deprecated // not implemented yet
164 - @Override
165 - public <T1> T1[] toArray(T1[] t1s) {
166 - throw new UnsupportedOperationException();
167 - }
168 -
169 - @Deprecated // not implemented yet
170 - @Override
171 - public boolean containsAll(Collection<?> collection) {
172 - throw new UnsupportedOperationException();
173 - }
174 -
175 - @Deprecated // not implemented yet
176 - @Override
177 - public boolean addAll(Collection<? extends T> collection) {
178 - throw new UnsupportedOperationException();
179 - }
180 -
181 - @Deprecated // not implemented yet
182 - @Override
183 - public boolean removeAll(Collection<?> collection) {
184 - throw new UnsupportedOperationException();
185 - }
186 -
187 - @Deprecated // not implemented yet
188 - @Override
189 - public boolean retainAll(Collection<?> collection) {
190 - throw new UnsupportedOperationException();
191 - }
192 -
193 - @Override
194 - public void clear() {
195 - q.clear();
196 - }
197 -
198 - @Override
199 - public LocalQueueStats getLocalQueueStats() {
200 - return q.getLocalQueueStats();
201 - }
202 -
203 -
204 - @Override
205 - public String addItemListener(ItemListener<T> itemListener, boolean withValue) {
206 - ItemListener<byte[]> il = new ItemListener<byte[]>() {
207 - @Override
208 - public void itemAdded(ItemEvent<byte[]> item) {
209 - itemListener.itemAdded(new ItemEvent<T>(getName(item),
210 - item.getEventType(),
211 - deserialize(item.getItem()),
212 - item.getMember()));
213 - }
214 -
215 - @Override
216 - public void itemRemoved(ItemEvent<byte[]> item) {
217 - itemListener.itemRemoved(new ItemEvent<T>(getName(item),
218 - item.getEventType(),
219 - deserialize(item.getItem()),
220 - item.getMember()));
221 - }
222 -
223 - private String getName(ItemEvent<byte[]> item) {
224 - return (item.getSource() instanceof String) ?
225 - (String) item.getSource() : item.getSource().toString();
226 -
227 - }
228 - };
229 - return q.addItemListener(il, withValue);
230 - }
231 -
232 -
233 - @Override
234 - public boolean removeItemListener(String registrationId) {
235 - return q.removeItemListener(registrationId);
236 - }
237 -
238 - @Deprecated
239 - @Override
240 - public Object getId() {
241 - return q.getId();
242 - }
243 -
244 - @Override
245 - public String getPartitionKey() {
246 - return q.getPartitionKey();
247 - }
248 -
249 - @Override
250 - public String getName() {
251 - return q.getName();
252 - }
253 -
254 - @Override
255 - public String getServiceName() {
256 - return q.getServiceName();
257 - }
258 -
259 - @Override
260 - public void destroy() {
261 - q.destroy();
262 - }
263 -
264 - private final class DeserializeVal implements Function<byte[], T> {
265 - @Override
266 - public T apply(byte[] input) {
267 - return deserialize(input);
268 - }
269 - }
270 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.hz;
17 -
18 -import static com.google.common.base.Preconditions.checkNotNull;
19 -
20 -import java.util.ArrayList;
21 -import java.util.Collection;
22 -import java.util.HashSet;
23 -import java.util.Set;
24 -import java.util.concurrent.TimeUnit;
25 -
26 -import org.onosproject.store.serializers.StoreSerializer;
27 -
28 -import com.hazelcast.core.TransactionalMap;
29 -import com.hazelcast.query.Predicate;
30 -
31 -/**
32 - * Wrapper around TransactionalMap&lt;byte[], byte[]&gt; which serializes/deserializes
33 - * key and value using StoreSerializer.
34 - *
35 - * @param <K> key type
36 - * @param <V> value type
37 - */
38 -public class STxMap<K, V> implements TransactionalMap<K, V> {
39 -
40 - private final TransactionalMap<byte[], byte[]> m;
41 - private final StoreSerializer serializer;
42 -
43 - /**
44 - * Creates a STxMap instance.
45 - *
46 - * @param baseMap base IMap to use
47 - * @param serializer serializer to use for both key and value
48 - */
49 - public STxMap(TransactionalMap<byte[], byte[]> baseMap, StoreSerializer serializer) {
50 - this.m = checkNotNull(baseMap);
51 - this.serializer = checkNotNull(serializer);
52 - }
53 -
54 - @Override
55 - public int size() {
56 - return m.size();
57 - }
58 -
59 - @Override
60 - public boolean isEmpty() {
61 - return m.isEmpty();
62 - }
63 -
64 - @Deprecated
65 - @Override
66 - public Object getId() {
67 - return m.getId();
68 - }
69 -
70 - @Override
71 - public String getPartitionKey() {
72 - return m.getPartitionKey();
73 - }
74 -
75 - @Override
76 - public String getName() {
77 - return m.getName();
78 - }
79 -
80 - @Override
81 - public String getServiceName() {
82 - return m.getServiceName();
83 - }
84 -
85 - @Override
86 - public void destroy() {
87 - m.destroy();
88 - }
89 -
90 - @Override
91 - public boolean containsKey(Object key) {
92 - return m.containsKey(serializeKey(key));
93 - }
94 -
95 - @Override
96 - public V get(Object key) {
97 - return deserializeVal(m.get(serializeKey(key)));
98 - }
99 -
100 - @Override
101 - public V getForUpdate(Object key) {
102 - return deserializeVal(m.getForUpdate(serializeKey(key)));
103 - }
104 -
105 - @Override
106 - public V put(K key, V value) {
107 - return deserializeVal(m.put(serializeKey(key), serializeVal(value)));
108 - }
109 -
110 - @Override
111 - public V remove(Object key) {
112 - return deserializeVal(m.remove(serializeKey(key)));
113 - }
114 -
115 - @Override
116 - public boolean remove(Object key, Object value) {
117 - return m.remove(serializeKey(key), serializeVal(value));
118 - }
119 -
120 - @Override
121 - public void delete(Object key) {
122 - m.delete(serializeKey(key));
123 - }
124 -
125 - @Override
126 - public V put(K key, V value, long ttl, TimeUnit timeunit) {
127 - return deserializeVal(m.put(serializeKey(key), serializeVal(value), ttl, timeunit));
128 - }
129 -
130 - @Override
131 - public V putIfAbsent(K key, V value) {
132 - return deserializeVal(m.putIfAbsent(serializeKey(key), serializeVal(value)));
133 - }
134 -
135 - @Override
136 - public boolean replace(K key, V oldValue, V newValue) {
137 - return m.replace(serializeKey(key), serializeVal(oldValue), serializeVal(newValue));
138 - }
139 -
140 - @Override
141 - public V replace(K key, V value) {
142 - return deserializeVal(m.replace(serializeKey(key), serializeVal(value)));
143 - }
144 -
145 - @Override
146 - public void set(K key, V value) {
147 - m.set(serializeKey(key), serializeVal(value));
148 - }
149 -
150 -
151 - @Override
152 - public Set<K> keySet() {
153 - return deserializeKeySet(m.keySet());
154 - }
155 -
156 - @Override
157 - public Collection<V> values() {
158 - return deserializeVals(m.values());
159 - }
160 -
161 - @Deprecated // marking method not implemented
162 - @SuppressWarnings("rawtypes")
163 - @Override
164 - public Set<K> keySet(Predicate predicate) {
165 - throw new UnsupportedOperationException();
166 - }
167 -
168 - @Deprecated // marking method not implemented
169 - @SuppressWarnings("rawtypes")
170 - @Override
171 - public Collection<V> values(Predicate predicate) {
172 - throw new UnsupportedOperationException();
173 - }
174 -
175 - private byte[] serializeKey(Object key) {
176 - return serializer.encode(key);
177 - }
178 -
179 - private K deserializeKey(byte[] key) {
180 - return serializer.decode(key);
181 - }
182 -
183 - private byte[] serializeVal(Object val) {
184 - return serializer.encode(val);
185 - }
186 -
187 - private V deserializeVal(byte[] val) {
188 - if (val == null) {
189 - return null;
190 - }
191 - return serializer.decode(val.clone());
192 - }
193 -
194 - private Set<K> deserializeKeySet(Set<byte[]> keys) {
195 - Set<K> dsk = new HashSet<>(keys.size());
196 - for (byte[] key : keys) {
197 - dsk.add(deserializeKey(key));
198 - }
199 - return dsk;
200 - }
201 -
202 - private Collection<V> deserializeVals(Collection<byte[]> vals) {
203 - Collection<V> dsl = new ArrayList<>(vals.size());
204 - for (byte[] val : vals) {
205 - dsl.add(deserializeVal(val));
206 - }
207 - return dsl;
208 - }
209 -}
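
STxMap (like the SMap used by the mastership store further down) is just a typed view over a raw byte[] TransactionalMap, with StoreSerializer doing the encoding on the way in and the decoding on the way out. A minimal usage sketch, assuming Hazelcast 3.x, a HazelcastInstance named instance, and a StoreSerializer named serializer; the map name and key/value types are illustrative:

    import com.hazelcast.core.HazelcastInstance;
    import com.hazelcast.core.TransactionalMap;
    import com.hazelcast.transaction.TransactionContext;

    // Sketch only: instance and serializer are assumed to be wired by the surrounding store.
    TransactionContext tx = instance.newTransactionContext();
    tx.beginTransaction();
    try {
        TransactionalMap<byte[], byte[]> raw = tx.getMap("demoAllocations"); // raw byte[] map
        STxMap<String, Long> view = new STxMap<>(raw, serializer);           // typed, serializing view
        view.putIfAbsent("link-1", 42L);                                     // encoded via StoreSerializer
        tx.commitTransaction();
    } catch (RuntimeException e) {
        tx.rollbackTransaction();
        throw e;
    }
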
1 -/*
2 - * Copyright 2014-2015 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.hz;
17 -
18 -import com.google.common.io.ByteStreams;
19 -import com.google.common.io.Files;
20 -import com.hazelcast.config.Config;
21 -import com.hazelcast.config.FileSystemXmlConfig;
22 -import com.hazelcast.core.Hazelcast;
23 -import com.hazelcast.core.HazelcastInstance;
24 -
25 -import org.apache.felix.scr.annotations.Activate;
26 -import org.apache.felix.scr.annotations.Component;
27 -import org.apache.felix.scr.annotations.Deactivate;
28 -import org.apache.felix.scr.annotations.Service;
29 -import org.onosproject.store.cluster.impl.ClusterDefinitionManager;
30 -import org.slf4j.Logger;
31 -import org.slf4j.LoggerFactory;
32 -
33 -import java.io.File;
34 -import java.io.FileNotFoundException;
35 -import java.io.IOException;
36 -import java.io.InputStream;
37 -
38 -/**
39 - * Auxiliary bootstrap of distributed store.
40 - */
41 -@Component(immediate = false, enabled = false)
42 -@Service
43 -public class StoreManager implements StoreService {
44 -
45 - protected static final String HAZELCAST_XML_FILE = "etc/hazelcast.xml";
46 -
47 - private final Logger log = LoggerFactory.getLogger(getClass());
48 -
49 - protected HazelcastInstance instance;
50 -
51 - @Activate
52 - public void activate() {
53 - try {
54 - File hazelcastFile = new File(HAZELCAST_XML_FILE);
55 - if (!hazelcastFile.exists()) {
56 - createDefaultHazelcastFile(hazelcastFile);
57 - }
58 -
59 - Config config = new FileSystemXmlConfig(HAZELCAST_XML_FILE);
60 -
61 - instance = Hazelcast.newHazelcastInstance(config);
62 - log.info("Started");
63 - } catch (FileNotFoundException e) {
64 - log.error("Unable to configure Hazelcast", e);
65 - }
66 - }
67 -
68 - private void createDefaultHazelcastFile(File hazelcastFile) {
69 - String ip = ClusterDefinitionManager.getSiteLocalAddress();
70 - String ipPrefix = ip.replaceFirst("\\.[0-9]*$", ".*");
71 - InputStream his = getClass().getResourceAsStream("/hazelcast.xml");
72 - try {
73 - String hzCfg = new String(ByteStreams.toByteArray(his), "UTF-8");
74 - hzCfg = hzCfg.replaceFirst("@NAME", ip);
75 - hzCfg = hzCfg.replaceFirst("@PREFIX", ipPrefix);
76 - Files.write(hzCfg.getBytes("UTF-8"), hazelcastFile);
77 - } catch (IOException e) {
78 - log.error("Unable to write default hazelcast file", e);
79 - }
80 - }
81 -
82 - @Deactivate
83 - public void deactivate() {
84 - instance.shutdown();
85 - log.info("Stopped");
86 - }
87 -
88 - @Override
89 - public HazelcastInstance getHazelcastInstance() {
90 - return instance;
91 - }
92 -
93 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.hz;
17 -
18 -import com.hazelcast.core.HazelcastInstance;
19 -
20 -/**
21 - * Bootstrap service to get a handle on a shared Hazelcast instance.
22 - */
23 -public interface StoreService {
24 -
25 - /**
26 - * Returns the shared Hazelcast instance for use as a distributed store
27 - * backing.
28 - *
29 - * @return shared Hazelcast instance
30 - */
31 - HazelcastInstance getHazelcastInstance();
32 -
33 -}
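
Stores in this change obtain the shared instance from this service and then create their named maps from it; a hedged sketch of that consumer pattern, with illustrative field and map names and the SCR annotations used elsewhere in this patch:

    import org.apache.felix.scr.annotations.Reference;
    import org.apache.felix.scr.annotations.ReferenceCardinality;
    import com.hazelcast.core.HazelcastInstance;
    import com.hazelcast.core.IMap;

    // Illustrative consumer of StoreService; "nodeRoles" mirrors the map name used
    // by the mastership store below.
    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected StoreService storeService;

    protected HazelcastInstance theInstance;

    public void activate() {
        theInstance = storeService.getHazelcastInstance();
        IMap<byte[], byte[]> raw = theInstance.getMap("nodeRoles");
        // the stores then wrap raw in SMap/STxMap for typed access
    }
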
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -
17 -/**
18 - * Common abstractions and facilities for implementing distributed store
19 - * using Hazelcast.
20 - */
21 -package org.onosproject.store.hz;
1 -/*
2 - * Copyright 2014-2015 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.mastership.impl;
17 -
18 -import static org.onosproject.mastership.MastershipEvent.Type.MASTER_CHANGED;
19 -import static org.onosproject.mastership.MastershipEvent.Type.BACKUPS_CHANGED;
20 -import static org.apache.commons.lang3.concurrent.ConcurrentUtils.putIfAbsent;
21 -
22 -import java.util.ArrayList;
23 -import java.util.HashSet;
24 -import java.util.List;
25 -import java.util.Map;
26 -import java.util.Map.Entry;
27 -import java.util.Set;
28 -import java.util.concurrent.CompletableFuture;
29 -
30 -import org.apache.felix.scr.annotations.Activate;
31 -import org.apache.felix.scr.annotations.Component;
32 -import org.apache.felix.scr.annotations.Deactivate;
33 -import org.apache.felix.scr.annotations.Reference;
34 -import org.apache.felix.scr.annotations.ReferenceCardinality;
35 -import org.apache.felix.scr.annotations.Service;
36 -import org.onosproject.cluster.ClusterService;
37 -import org.onosproject.cluster.NodeId;
38 -import org.onosproject.cluster.RoleInfo;
39 -import org.onosproject.mastership.MastershipEvent;
40 -import org.onosproject.mastership.MastershipStore;
41 -import org.onosproject.mastership.MastershipStoreDelegate;
42 -import org.onosproject.mastership.MastershipTerm;
43 -import org.onosproject.net.DeviceId;
44 -import org.onosproject.net.MastershipRole;
45 -import org.onosproject.store.hz.AbstractHazelcastStore;
46 -import org.onosproject.store.hz.SMap;
47 -import org.onosproject.store.serializers.KryoNamespaces;
48 -import org.onosproject.store.serializers.KryoSerializer;
49 -import org.onlab.util.KryoNamespace;
50 -
51 -import com.google.common.base.Objects;
52 -import com.hazelcast.config.Config;
53 -import com.hazelcast.config.MapConfig;
54 -import com.hazelcast.core.EntryEvent;
55 -import com.hazelcast.core.EntryListener;
56 -import com.hazelcast.core.MapEvent;
57 -
58 -import static org.onosproject.net.MastershipRole.*;
59 -
60 -/**
61 - * Distributed implementation of the mastership store. The store is
62 - * responsible for the master selection process.
63 - */
64 -@Component(immediate = true, enabled = false)
65 -@Service
66 -public class DistributedMastershipStore
67 - extends AbstractHazelcastStore<MastershipEvent, MastershipStoreDelegate>
68 - implements MastershipStore {
69 -
70 - //term number representing that master has never been chosen yet
71 - private static final Integer NOTHING = 0;
72 - //initial term/TTL value
73 - private static final Integer INIT = 1;
74 -
75 - //device to node roles
76 - private static final String NODE_ROLES_MAP_NAME = "nodeRoles";
77 - protected SMap<DeviceId, RoleValue> roleMap;
78 - //devices to terms
79 - private static final String TERMS_MAP_NAME = "terms";
80 - protected SMap<DeviceId, Integer> terms;
81 -
82 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
83 - protected ClusterService clusterService;
84 -
85 - private String listenerId;
86 -
87 - @Override
88 - @Activate
89 - public void activate() {
90 - super.activate();
91 -
92 - this.serializer = new KryoSerializer() {
93 - @Override
94 - protected void setupKryoPool() {
95 - serializerPool = KryoNamespace.newBuilder()
96 - .register(KryoNamespaces.API)
97 - .nextId(KryoNamespaces.BEGIN_USER_CUSTOM_ID)
98 - .register(new RoleValueSerializer(), RoleValue.class)
99 - .build();
100 - }
101 - };
102 -
103 - final Config config = theInstance.getConfig();
104 -
105 - MapConfig nodeRolesCfg = config.getMapConfig(NODE_ROLES_MAP_NAME);
106 - nodeRolesCfg.setAsyncBackupCount(MapConfig.MAX_BACKUP_COUNT - nodeRolesCfg.getBackupCount());
107 -
108 - MapConfig termsCfg = config.getMapConfig(TERMS_MAP_NAME);
109 - termsCfg.setAsyncBackupCount(MapConfig.MAX_BACKUP_COUNT - termsCfg.getBackupCount());
110 -
111 - roleMap = new SMap<>(theInstance.<byte[], byte[]>getMap(NODE_ROLES_MAP_NAME), this.serializer);
112 - listenerId = roleMap.addEntryListener((new RemoteMasterShipEventHandler()), true);
113 - terms = new SMap<>(theInstance.<byte[], byte[]>getMap(TERMS_MAP_NAME), this.serializer);
114 -
115 - log.info("Started");
116 - }
117 -
118 - @Deactivate
119 - public void deactivate() {
120 - roleMap.removeEntryListener(listenerId);
121 - log.info("Stopped");
122 - }
123 -
124 - @Override
125 - public MastershipRole getRole(NodeId nodeId, DeviceId deviceId) {
126 - final RoleValue roleInfo = roleMap.get(deviceId);
127 - if (roleInfo != null) {
128 - return roleInfo.getRole(nodeId);
129 - }
130 - return NONE;
131 - }
132 -
133 - @Override
134 - public CompletableFuture<MastershipEvent> setMaster(NodeId newMaster, DeviceId deviceId) {
135 -
136 - roleMap.lock(deviceId);
137 - try {
138 - final RoleValue rv = getRoleValue(deviceId);
139 - final MastershipRole currentRole = rv.getRole(newMaster);
140 - switch (currentRole) {
141 - case MASTER:
142 - //reinforce mastership
143 - // RoleInfo integrity check
144 - boolean modified = rv.reassign(newMaster, STANDBY, NONE);
145 - if (modified) {
146 - roleMap.put(deviceId, rv);
147 - // should never reach here.
148 - log.warn("{} was in both MASTER and STANDBY for {}", newMaster, deviceId);
149 - // trigger BACKUPS_CHANGED?
150 - }
151 - return CompletableFuture.completedFuture(null);
152 - case STANDBY:
153 - case NONE:
154 - final NodeId currentMaster = rv.get(MASTER);
155 - if (currentMaster != null) {
156 - // place current master in STANDBY
157 - rv.reassign(currentMaster, NONE, STANDBY);
158 - rv.replace(currentMaster, newMaster, MASTER);
159 - } else {
160 - //no master before so just add.
161 - rv.add(MASTER, newMaster);
162 - }
163 - // remove newMaster from STANDBY
164 - rv.reassign(newMaster, STANDBY, NONE);
165 - updateTerm(deviceId);
166 - roleMap.put(deviceId, rv);
167 - return CompletableFuture.completedFuture(
168 - new MastershipEvent(MASTER_CHANGED, deviceId, rv.roleInfo()));
169 - default:
170 - log.warn("unknown Mastership Role {}", currentRole);
171 - return CompletableFuture.completedFuture(null);
172 - }
173 - } finally {
174 - roleMap.unlock(deviceId);
175 - }
176 - }
177 -
178 - @Override
179 - public NodeId getMaster(DeviceId deviceId) {
180 - return getNode(MASTER, deviceId);
181 - }
182 -
183 -
184 - @Override
185 - public RoleInfo getNodes(DeviceId deviceId) {
186 - RoleValue rv = roleMap.get(deviceId);
187 - if (rv != null) {
188 - return rv.roleInfo();
189 - } else {
190 - return new RoleInfo();
191 - }
192 - }
193 -
194 - @Override
195 - public Set<DeviceId> getDevices(NodeId nodeId) {
196 - Set<DeviceId> devices = new HashSet<>();
197 -
198 - for (Map.Entry<DeviceId, RoleValue> el : roleMap.entrySet()) {
199 - if (nodeId.equals(el.getValue().get(MASTER))) {
200 - devices.add(el.getKey());
201 - }
202 - }
203 -
204 - return devices;
205 - }
206 -
207 - @Override
208 - public CompletableFuture<MastershipRole> requestRole(DeviceId deviceId) {
209 -
210 - // if no master => become master
211 - // if there already exists a master:
212 - // if I was the master return MASTER
213 - // else put myself in STANDBY and return STANDBY
214 -
215 - final NodeId local = clusterService.getLocalNode().id();
216 - boolean modified = false;
217 - roleMap.lock(deviceId);
218 - try {
219 - final RoleValue rv = getRoleValue(deviceId);
220 - if (rv.get(MASTER) == null) {
221 - // there's no master, so become one
222 - // move out from STANDBY
223 - rv.reassign(local, STANDBY, NONE);
224 - rv.add(MASTER, local);
225 -
226 - updateTerm(deviceId);
227 - roleMap.put(deviceId, rv);
228 - return CompletableFuture.completedFuture(MASTER);
229 - }
230 - final MastershipRole currentRole = rv.getRole(local);
231 - switch (currentRole) {
232 - case MASTER:
233 - // RoleInfo integrity check
234 - modified = rv.reassign(local, STANDBY, NONE);
235 - if (modified) {
236 - log.warn("{} was in both MASTER and STANDBY for {}", local, deviceId);
237 - // should never reach here,
238 - // but heal if we happened to be there
239 - roleMap.put(deviceId, rv);
240 - // trigger BACKUPS_CHANGED?
241 - }
242 - return CompletableFuture.completedFuture(currentRole);
243 - case STANDBY:
244 - // RoleInfo integrity check
245 - modified = rv.reassign(local, NONE, STANDBY);
246 - if (modified) {
247 - log.warn("{} was in both NONE and STANDBY for {}", local, deviceId);
248 - // should never reach here,
249 - // but heal if we happened to be there
250 - roleMap.put(deviceId, rv);
251 - // trigger BACKUPS_CHANGED?
252 - }
253 - return CompletableFuture.completedFuture(currentRole);
254 - case NONE:
255 - rv.reassign(local, NONE, STANDBY);
256 - roleMap.put(deviceId, rv);
257 - // TODO: notifyDelegate BACKUPS_CHANGED
258 - return CompletableFuture.completedFuture(STANDBY);
259 - default:
260 - log.warn("unknown Mastership Role {}", currentRole);
261 - }
262 - return CompletableFuture.completedFuture(currentRole);
263 - } finally {
264 - roleMap.unlock(deviceId);
265 - }
266 - }
267 -
268 - @Override
269 - public MastershipTerm getTermFor(DeviceId deviceId) {
270 - // term information and role must be read atomically
271 - // acquiring write lock for the device
272 - roleMap.lock(deviceId);
273 - try {
274 - RoleValue rv = getRoleValue(deviceId);
275 - final Integer term = terms.get(deviceId);
276 - final NodeId master = rv.get(MASTER);
277 - if (term == null) {
278 - return MastershipTerm.of(null, NOTHING);
279 - }
280 - return MastershipTerm.of(master, term);
281 - } finally {
282 - roleMap.unlock(deviceId);
283 - }
284 - }
285 -
286 - @Override
287 - public CompletableFuture<MastershipEvent> setStandby(NodeId nodeId, DeviceId deviceId) {
288 - // if nodeId was MASTER, rotate STANDBY
289 - // if nodeId was STANDBY no-op
290 - // if nodeId was NONE, add to STANDBY
291 -
292 - roleMap.lock(deviceId);
293 - try {
294 - final RoleValue rv = getRoleValue(deviceId);
295 - final MastershipRole currentRole = getRole(nodeId, deviceId);
296 - switch (currentRole) {
297 - case MASTER:
298 - NodeId newMaster = reelect(nodeId, deviceId, rv);
299 - rv.reassign(nodeId, NONE, STANDBY);
300 - updateTerm(deviceId);
301 - if (newMaster != null) {
302 - roleMap.put(deviceId, rv);
303 - return CompletableFuture.completedFuture(
304 - new MastershipEvent(MASTER_CHANGED, deviceId, rv.roleInfo()));
305 - } else {
306 - // no master candidate
307 - roleMap.put(deviceId, rv);
308 - // TBD: Should there be new event type for no MASTER?
309 - return CompletableFuture.completedFuture(
310 - new MastershipEvent(MASTER_CHANGED, deviceId, rv.roleInfo()));
311 - }
312 - case STANDBY:
313 - return CompletableFuture.completedFuture(null);
314 - case NONE:
315 - rv.reassign(nodeId, NONE, STANDBY);
316 - roleMap.put(deviceId, rv);
317 - return CompletableFuture.completedFuture(
318 - new MastershipEvent(BACKUPS_CHANGED, deviceId, rv.roleInfo()));
319 - default:
320 - log.warn("unknown Mastership Role {}", currentRole);
321 - }
322 - return CompletableFuture.completedFuture(null);
323 - } finally {
324 - roleMap.unlock(deviceId);
325 - }
326 - }
327 -
328 - @Override
329 - public CompletableFuture<MastershipEvent> relinquishRole(NodeId nodeId, DeviceId deviceId) {
330 - // relinquishRole basically sets the node's role to NONE
331 -
332 - // If nodeId was master, reelect the next candidate and remove nodeId
333 - // else remove it from STANDBY
334 -
335 - roleMap.lock(deviceId);
336 - try {
337 - final RoleValue rv = getRoleValue(deviceId);
338 - final MastershipRole currentRole = rv.getRole(nodeId);
339 - switch (currentRole) {
340 - case MASTER:
341 - NodeId newMaster = reelect(nodeId, deviceId, rv);
342 - if (newMaster != null) {
343 - updateTerm(deviceId);
344 - roleMap.put(deviceId, rv);
345 - return CompletableFuture.completedFuture(
346 - new MastershipEvent(MASTER_CHANGED, deviceId, rv.roleInfo()));
347 - } else {
348 - // No master candidate - no more backups, device is likely
349 - // fully disconnected
350 - roleMap.put(deviceId, rv);
351 - // Should there be new event type?
352 - return CompletableFuture.completedFuture(null);
353 - }
354 - case STANDBY:
355 - //fall through to reinforce relinquishment
356 - case NONE:
357 - boolean modified = rv.reassign(nodeId, STANDBY, NONE);
358 - if (modified) {
359 - roleMap.put(deviceId, rv);
360 - return CompletableFuture.completedFuture(
361 - new MastershipEvent(BACKUPS_CHANGED, deviceId, rv.roleInfo()));
362 - }
363 - return CompletableFuture.completedFuture(null);
364 - default:
365 - log.warn("unknown Mastership Role {}", currentRole);
366 - }
367 - return CompletableFuture.completedFuture(null);
368 - } finally {
369 - roleMap.unlock(deviceId);
370 - }
371 - }
372 -
373 - @Override
374 - public void relinquishAllRole(NodeId nodeId) {
375 -
376 - List<MastershipEvent> events = new ArrayList<>();
377 - for (Entry<DeviceId, RoleValue> entry : roleMap.entrySet()) {
378 - final DeviceId deviceId = entry.getKey();
379 - final RoleValue roleValue = entry.getValue();
380 -
381 - if (roleValue.contains(MASTER, nodeId) ||
382 - roleValue.contains(STANDBY, nodeId)) {
383 -
384 - relinquishRole(nodeId, deviceId).whenComplete((event, error) -> {
385 - if (event != null) {
386 - events.add(event);
387 - }
388 - });
389 - }
390 - }
391 - notifyDelegate(events);
392 - }
393 -
394 - // TODO: Consider moving this to RoleValue method
395 - //helper to fetch a new master candidate for a given device.
396 - private NodeId reelect(
397 - NodeId current, DeviceId deviceId, RoleValue rv) {
398 -
399 - //if this were a queue it'd be neater.
400 - NodeId candidate = null;
401 - for (NodeId n : rv.nodesOfRole(STANDBY)) {
402 - if (!current.equals(n)) {
403 - candidate = n;
404 - break;
405 - }
406 - }
407 -
408 - if (candidate == null) {
409 - log.info("{} giving up and going to NONE for {}", current, deviceId);
410 - rv.remove(MASTER, current);
411 - // master did change, but there is no master candidate.
412 - return null;
413 - } else {
414 - log.info("{} trying to pass mastership for {} to {}", current, deviceId, candidate);
415 - rv.replace(current, candidate, MASTER);
416 - rv.reassign(candidate, STANDBY, NONE);
417 - return candidate;
418 - }
419 - }
420 -
421 - //return the RoleValue structure for a device, or create one
422 - private RoleValue getRoleValue(DeviceId deviceId) {
423 - RoleValue value = roleMap.get(deviceId);
424 - if (value == null) {
425 - value = new RoleValue();
426 - RoleValue concurrentlyAdded = roleMap.putIfAbsent(deviceId, value);
427 - if (concurrentlyAdded != null) {
428 - return concurrentlyAdded;
429 - }
430 - }
431 - return value;
432 - }
433 -
434 - //get first applicable node out of store-unique structure.
435 - private NodeId getNode(MastershipRole role, DeviceId deviceId) {
436 - RoleValue value = roleMap.get(deviceId);
437 - if (value != null) {
438 - return value.get(role);
439 - }
440 - return null;
441 - }
442 -
443 - //adds or updates term information.
444 - // must be guarded by roleMap.lock(deviceId)
445 - private void updateTerm(DeviceId deviceId) {
446 - Integer term = terms.get(deviceId);
447 - if (term == null) {
448 - term = terms.putIfAbsent(deviceId, INIT);
449 - if (term == null) {
450 - // initial term set successfully
451 - return;
452 - }
453 - // concurrent initialization detected,
454 - // fall through to try incrementing
455 - }
456 - Integer nextTerm = term + 1;
457 - boolean success = terms.replace(deviceId, term, nextTerm);
458 - while (!success) {
459 - term = terms.get(deviceId);
460 - if (term == null) {
461 - // something is very wrong, but write something to avoid
462 - // infinite loop.
463 - log.warn("Term info for {} disappeared.", deviceId);
464 - term = putIfAbsent(terms, deviceId, nextTerm);
465 - }
466 - nextTerm = term + 1;
467 - success = terms.replace(deviceId, term, nextTerm);
468 - }
469 - }
470 -
471 - private class RemoteMasterShipEventHandler implements EntryListener<DeviceId, RoleValue> {
472 -
473 - @Override
474 - public void entryAdded(EntryEvent<DeviceId, RoleValue> event) {
475 - entryUpdated(event);
476 - }
477 -
478 - @Override
479 - public void entryRemoved(EntryEvent<DeviceId, RoleValue> event) {
480 - }
481 -
482 - @Override
483 - public void entryUpdated(EntryEvent<DeviceId, RoleValue> event) {
484 - // compare old and current RoleValues. If master is different,
485 - // emit MASTER_CHANGED. else, emit BACKUPS_CHANGED.
486 - RoleValue oldValue = event.getOldValue();
487 - RoleValue newValue = event.getValue();
488 -
489 - // There will be no oldValue at the very first instance of an EntryEvent.
490 - // Technically, the progression is: null event -> null master -> some master;
491 - // We say a null master and a null oldValue are the same condition.
492 - NodeId oldMaster = null;
493 - if (oldValue != null) {
494 - oldMaster = oldValue.get(MASTER);
495 - }
496 - NodeId newMaster = newValue.get(MASTER);
497 -
498 - if (!Objects.equal(oldMaster, newMaster)) {
499 - notifyDelegate(new MastershipEvent(
500 - MASTER_CHANGED, event.getKey(), event.getValue().roleInfo()));
501 - } else {
502 - notifyDelegate(new MastershipEvent(
503 - BACKUPS_CHANGED, event.getKey(), event.getValue().roleInfo()));
504 - }
505 - }
506 -
507 - @Override
508 - public void entryEvicted(EntryEvent<DeviceId, RoleValue> event) {
509 - }
510 -
511 - @Override
512 - public void mapEvicted(MapEvent event) {
513 - }
514 -
515 - @Override
516 - public void mapCleared(MapEvent event) {
517 - }
518 - }
519 -
520 -}
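
The subtlest piece above is updateTerm(): the first writer initializes the term with putIfAbsent, and later writers bump it with a replace(key, expected, next) compare-and-set loop, all guarded by roleMap.lock(deviceId). A sketch of the same optimistic pattern restated against a plain ConcurrentMap; the class name, method name, and String device ids are illustrative:

    import java.util.concurrent.ConcurrentMap;

    final class TermSketch {
        // Mirrors updateTerm() above: the first writer sets the initial term,
        // later writers increment via compare-and-set and retry on contention.
        static void bumpTerm(ConcurrentMap<String, Integer> terms, String deviceId) {
            if (terms.putIfAbsent(deviceId, 1) == null) {
                return;                                       // initial term written
            }
            for (;;) {
                Integer current = terms.get(deviceId);
                if (current == null) {
                    // entry vanished concurrently; try to re-initialize
                    if (terms.putIfAbsent(deviceId, 1) == null) {
                        return;
                    }
                    continue;
                }
                if (terms.replace(deviceId, current, current + 1)) {
                    return;                                   // CAS succeeded
                }
                // lost the race: re-read and retry
            }
        }
    }
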
...@@ -27,7 +27,7 @@ import com.esotericsoftware.kryo.io.Input; ...@@ -27,7 +27,7 @@ import com.esotericsoftware.kryo.io.Input;
27 import com.esotericsoftware.kryo.io.Output; 27 import com.esotericsoftware.kryo.io.Output;
28 28
29 /** 29 /**
30 - * Serializer for RoleValues used by {@link DistributedMastershipStore}. 30 + * Serializer for RoleValues used by {@link org.onosproject.mastership.MastershipStore}.
31 */ 31 */
32 public class RoleValueSerializer extends Serializer<RoleValue> { 32 public class RoleValueSerializer extends Serializer<RoleValue> {
33 33
......
1 -/*
2 - * Copyright 2014-2015 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.resource.impl;
17 -
18 -import java.util.ArrayList;
19 -import java.util.Collection;
20 -import java.util.Collections;
21 -import java.util.HashMap;
22 -import java.util.HashSet;
23 -import java.util.List;
24 -import java.util.Map;
25 -import java.util.Set;
26 -
27 -import org.apache.felix.scr.annotations.Activate;
28 -import org.apache.felix.scr.annotations.Component;
29 -import org.apache.felix.scr.annotations.Deactivate;
30 -import org.apache.felix.scr.annotations.Reference;
31 -import org.apache.felix.scr.annotations.ReferenceCardinality;
32 -import org.apache.felix.scr.annotations.Service;
33 -import org.onlab.util.Bandwidth;
34 -import org.onlab.util.PositionalParameterStringFormatter;
35 -import org.onosproject.net.AnnotationKeys;
36 -import org.onosproject.net.Link;
37 -import org.onosproject.net.LinkKey;
38 -import org.onosproject.net.intent.IntentId;
39 -import org.onosproject.net.link.LinkService;
40 -import org.onosproject.net.resource.BandwidthResource;
41 -import org.onosproject.net.resource.BandwidthResourceAllocation;
42 -import org.onosproject.net.resource.LambdaResource;
43 -import org.onosproject.net.resource.LambdaResourceAllocation;
44 -import org.onosproject.net.resource.LinkResourceAllocations;
45 -import org.onosproject.net.resource.LinkResourceEvent;
46 -import org.onosproject.net.resource.LinkResourceStore;
47 -import org.onosproject.net.resource.MplsLabel;
48 -import org.onosproject.net.resource.MplsLabelResourceAllocation;
49 -import org.onosproject.net.resource.ResourceAllocation;
50 -import org.onosproject.net.resource.ResourceAllocationException;
51 -import org.onosproject.net.resource.ResourceType;
52 -import org.onosproject.store.StoreDelegate;
53 -import org.onosproject.store.hz.AbstractHazelcastStore;
54 -import org.onosproject.store.hz.STxMap;
55 -import org.slf4j.Logger;
56 -
57 -import com.google.common.collect.ImmutableList;
58 -import com.google.common.collect.ImmutableSet;
59 -import com.google.common.collect.Sets;
60 -import com.hazelcast.config.Config;
61 -import com.hazelcast.config.MapConfig;
62 -import com.hazelcast.core.TransactionalMap;
63 -import com.hazelcast.transaction.TransactionContext;
64 -import com.hazelcast.transaction.TransactionException;
65 -import com.hazelcast.transaction.TransactionOptions;
66 -import com.hazelcast.transaction.TransactionOptions.TransactionType;
67 -
68 -import static com.google.common.base.Preconditions.checkNotNull;
69 -import static com.google.common.base.Preconditions.checkState;
70 -import static org.slf4j.LoggerFactory.getLogger;
71 -
72 -/**
73 - * Manages link resources using Hazelcast.
74 - */
75 -@Component(immediate = true, enabled = false)
76 -@Service
77 -public class HazelcastLinkResourceStore
78 - extends AbstractHazelcastStore<LinkResourceEvent, StoreDelegate<LinkResourceEvent>>
79 - implements LinkResourceStore {
80 -
81 -
82 - private final Logger log = getLogger(getClass());
83 -
84 - private static final BandwidthResource DEFAULT_BANDWIDTH = new BandwidthResource(Bandwidth.mbps(1_000));
85 -
86 - private static final BandwidthResource EMPTY_BW = new BandwidthResource(Bandwidth.bps(0));
87 -
88 - // table to store current allocations
89 - /** LinkKey -> List<LinkResourceAllocations>. */
90 - private static final String LINK_RESOURCE_ALLOCATIONS = "LinkResourceAllocations";
91 -
92 - /** IntentId -> LinkResourceAllocations. */
93 - private static final String INTENT_ALLOCATIONS = "IntentAllocations";
94 -
95 -
96 - // TODO make this configurable
97 - // number of retries to attempt on allocation failure, due to
98 - // concurrent update
99 - private static int maxAllocateRetries = 5;
100 -
101 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
102 - protected LinkService linkService;
103 -
104 - // Link annotation key name to use as bandwidth
105 - private String bandwidthAnnotation = AnnotationKeys.BANDWIDTH;
106 - // Link annotation key name to use as max lambda
107 - private String wavesAnnotation = AnnotationKeys.OPTICAL_WAVES;
108 -
109 - // Max MPLS labels: 2^20 – 1
110 - private int maxMplsLabel = 0xFFFFF;
111 -
112 - @Override
113 - @Activate
114 - public void activate() {
115 - super.activate();
116 -
117 - final Config config = theInstance.getConfig();
118 -
119 - MapConfig linkCfg = config.getMapConfig(LINK_RESOURCE_ALLOCATIONS);
120 - linkCfg.setAsyncBackupCount(MapConfig.MAX_BACKUP_COUNT - linkCfg.getBackupCount());
121 -
122 - MapConfig intentCfg = config.getMapConfig(INTENT_ALLOCATIONS);
123 - intentCfg.setAsyncBackupCount(MapConfig.MAX_BACKUP_COUNT - intentCfg.getBackupCount());
124 -
125 - log.info("Started");
126 - }
127 -
128 - @Deactivate
129 - public void deactivate() {
130 - log.info("Stopped");
131 - }
132 -
133 - private STxMap<IntentId, LinkResourceAllocations> getIntentAllocs(TransactionContext tx) {
134 - TransactionalMap<byte[], byte[]> raw = tx.getMap(INTENT_ALLOCATIONS);
135 - return new STxMap<>(raw, serializer);
136 - }
137 -
138 - private STxMap<LinkKey, List<LinkResourceAllocations>> getLinkAllocs(TransactionContext tx) {
139 - TransactionalMap<byte[], byte[]> raw = tx.getMap(LINK_RESOURCE_ALLOCATIONS);
140 - return new STxMap<>(raw, serializer);
141 - }
142 -
143 - private Set<? extends ResourceAllocation> getResourceCapacity(ResourceType type, Link link) {
144 - if (type == ResourceType.BANDWIDTH) {
145 - return ImmutableSet.of(getBandwidthResourceCapacity(link));
146 - }
147 - if (type == ResourceType.LAMBDA) {
148 - return getLambdaResourceCapacity(link);
149 - }
150 - if (type == ResourceType.MPLS_LABEL) {
151 - return getMplsResourceCapacity();
152 - }
153 - return null;
154 - }
155 -
156 - private Set<LambdaResourceAllocation> getLambdaResourceCapacity(Link link) {
157 - Set<LambdaResourceAllocation> allocations = new HashSet<>();
158 - try {
159 - final int waves = Integer.parseInt(link.annotations().value(wavesAnnotation));
160 - for (int i = 1; i <= waves; i++) {
161 - allocations.add(new LambdaResourceAllocation(LambdaResource.valueOf(i)));
162 - }
163 - } catch (NumberFormatException e) {
164 - log.debug("No {} annotation on link {}", wavesAnnotation, link);
165 - }
166 - return allocations;
167 - }
168 -
169 - private BandwidthResourceAllocation getBandwidthResourceCapacity(Link link) {
170 -
171 - // if a Link annotation exists, use it
172 - // if all else fails, use DEFAULT_BANDWIDTH
173 -
174 - BandwidthResource bandwidth = null;
175 - String strBw = link.annotations().value(bandwidthAnnotation);
176 - if (strBw != null) {
177 - try {
178 - bandwidth = new BandwidthResource(Bandwidth.mbps(Double.parseDouble(strBw)));
179 - } catch (NumberFormatException e) {
180 - // do nothing
181 - bandwidth = null;
182 - }
183 - }
184 -
185 - if (bandwidth == null) {
186 - // fall back, use fixed default
187 - bandwidth = DEFAULT_BANDWIDTH;
188 - }
189 - return new BandwidthResourceAllocation(bandwidth);
190 - }
191 -
192 - private Set<MplsLabelResourceAllocation> getMplsResourceCapacity() {
193 - Set<MplsLabelResourceAllocation> allocations = new HashSet<>();
194 - //Ignoring reserved labels of 0 through 15
195 - for (int i = 16; i <= maxMplsLabel; i++) {
196 - allocations.add(new MplsLabelResourceAllocation(MplsLabel
197 - .valueOf(i)));
198 -
199 - }
200 - return allocations;
201 - }
202 -
203 - private Map<ResourceType, Set<? extends ResourceAllocation>> getResourceCapacity(Link link) {
204 - Map<ResourceType, Set<? extends ResourceAllocation>> caps = new HashMap<>();
205 - for (ResourceType type : ResourceType.values()) {
206 - Set<? extends ResourceAllocation> cap = getResourceCapacity(type, link);
207 - if (cap != null) {
208 - caps.put(type, cap);
209 - }
210 - }
211 - return caps;
212 - }
213 -
214 - @Override
215 - public Set<ResourceAllocation> getFreeResources(Link link) {
216 - TransactionOptions opt = new TransactionOptions();
217 - // read-only and will never be committed, thus does not need durability
218 - opt.setTransactionType(TransactionType.LOCAL);
219 - TransactionContext tx = theInstance.newTransactionContext(opt);
220 - tx.beginTransaction();
221 - try {
222 - Map<ResourceType, Set<? extends ResourceAllocation>> freeResources = getFreeResourcesEx(tx, link);
223 - Set<ResourceAllocation> allFree = new HashSet<>();
224 - for (Set<? extends ResourceAllocation> r : freeResources.values()) {
225 - allFree.addAll(r);
226 - }
227 - return allFree;
228 - } finally {
229 - tx.rollbackTransaction();
230 - }
231 -
232 - }
233 -
234 - private Map<ResourceType, Set<? extends ResourceAllocation>> getFreeResourcesEx(TransactionContext tx, Link link) {
235 - // returns capacity - allocated
236 -
237 - checkNotNull(link);
238 - Map<ResourceType, Set<? extends ResourceAllocation>> free = new HashMap<>();
239 - final Map<ResourceType, Set<? extends ResourceAllocation>> caps = getResourceCapacity(link);
240 - final Iterable<LinkResourceAllocations> allocations = getAllocations(tx, link);
241 -
242 - for (ResourceType type : ResourceType.values()) {
243 - // there should be a class/category of resources
244 - switch (type) {
245 - case BANDWIDTH:
246 - {
247 - Set<? extends ResourceAllocation> bw = caps.get(ResourceType.BANDWIDTH);
248 - if (bw == null || bw.isEmpty()) {
249 - bw = Sets.newHashSet(new BandwidthResourceAllocation(EMPTY_BW));
250 - }
251 -
252 - BandwidthResourceAllocation cap = (BandwidthResourceAllocation) bw.iterator().next();
253 - double freeBw = cap.bandwidth().toDouble();
254 -
255 - // enumerate current allocations, subtracting resources
256 - for (LinkResourceAllocations alloc : allocations) {
257 - Set<ResourceAllocation> types = alloc.getResourceAllocation(link);
258 - for (ResourceAllocation a : types) {
259 - if (a instanceof BandwidthResourceAllocation) {
260 - BandwidthResourceAllocation bwA = (BandwidthResourceAllocation) a;
261 - freeBw -= bwA.bandwidth().toDouble();
262 - }
263 - }
264 - }
265 -
266 - free.put(type, Sets.newHashSet(
267 - new BandwidthResourceAllocation(new BandwidthResource(Bandwidth.bps(freeBw)))));
268 - break;
269 - }
270 -
271 - case LAMBDA:
272 - {
273 - Set<? extends ResourceAllocation> lmd = caps.get(type);
274 - if (lmd == null || lmd.isEmpty()) {
275 - // nothing left
276 - break;
277 - }
278 - Set<LambdaResourceAllocation> freeL = new HashSet<>();
279 - for (ResourceAllocation r : lmd) {
280 - if (r instanceof LambdaResourceAllocation) {
281 - freeL.add((LambdaResourceAllocation) r);
282 - }
283 - }
284 -
285 - // enumerate current allocations, removing resources
286 - for (LinkResourceAllocations alloc : allocations) {
287 - Set<ResourceAllocation> types = alloc.getResourceAllocation(link);
288 - for (ResourceAllocation a : types) {
289 - if (a instanceof LambdaResourceAllocation) {
290 - freeL.remove(a);
291 - }
292 - }
293 - }
294 -
295 - free.put(type, freeL);
296 - break;
297 - }
298 -
299 - case MPLS_LABEL:
300 - Set<? extends ResourceAllocation> mpls = caps.get(type);
301 - if (mpls == null || mpls.isEmpty()) {
302 - // nothing left
303 - break;
304 - }
305 - Set<MplsLabelResourceAllocation> freeLabel = new HashSet<>();
306 - for (ResourceAllocation r : mpls) {
307 - if (r instanceof MplsLabelResourceAllocation) {
308 - freeLabel.add((MplsLabelResourceAllocation) r);
309 - }
310 - }
311 -
312 - // enumerate current allocations, removing resources
313 - for (LinkResourceAllocations alloc : allocations) {
314 - Set<ResourceAllocation> types = alloc
315 - .getResourceAllocation(link);
316 - for (ResourceAllocation a : types) {
317 - if (a instanceof MplsLabelResourceAllocation) {
318 - freeLabel.remove(a);
319 - }
320 - }
321 - }
322 -
323 - free.put(type, freeLabel);
324 - break;
325 -
326 - default:
327 - break;
328 - }
329 - }
330 - return free;
331 - }
332 -
333 - @Override
334 - public void allocateResources(LinkResourceAllocations allocations) {
335 - checkNotNull(allocations);
336 -
337 - for (int i = 0; i < maxAllocateRetries; ++i) {
338 - TransactionContext tx = theInstance.newTransactionContext();
339 - tx.beginTransaction();
340 - try {
341 -
342 - STxMap<IntentId, LinkResourceAllocations> intentAllocs = getIntentAllocs(tx);
343 - // should this be conditional write?
344 - intentAllocs.put(allocations.intentId(), allocations);
345 -
346 - for (Link link : allocations.links()) {
347 - allocateLinkResource(tx, link, allocations);
348 - }
349 -
350 - tx.commitTransaction();
351 - return;
352 - } catch (TransactionException e) {
353 - log.debug("Failed to commit allocations for {}. [retry={}]",
354 - allocations.intentId(), i);
355 - log.trace(" details {} ", allocations, e);
356 - continue;
357 - } catch (Exception e) {
358 - log.error("Exception thrown, rolling back", e);
359 - tx.rollbackTransaction();
360 - throw e;
361 - }
362 - }
363 - }
364 -
365 - private void allocateLinkResource(TransactionContext tx, Link link,
366 - LinkResourceAllocations allocations) {
367 -
368 - // requested resources
369 - Set<ResourceAllocation> reqs = allocations.getResourceAllocation(link);
370 -
371 - Map<ResourceType, Set<? extends ResourceAllocation>> available = getFreeResourcesEx(tx, link);
372 - for (ResourceAllocation req : reqs) {
373 - Set<? extends ResourceAllocation> avail = available.get(req.type());
374 - if (req instanceof BandwidthResourceAllocation) {
375 - // check if allocation should be accepted
376 - if (avail.isEmpty()) {
377 - checkState(!avail.isEmpty(),
378 - "There's no Bandwidth resource on %s?",
379 - link);
380 - }
381 - BandwidthResourceAllocation bw = (BandwidthResourceAllocation) avail.iterator().next();
382 - double bwLeft = bw.bandwidth().toDouble();
383 - BandwidthResourceAllocation bwReq = ((BandwidthResourceAllocation) req);
384 - bwLeft -= bwReq.bandwidth().toDouble();
385 - if (bwLeft < 0) {
386 - throw new ResourceAllocationException(
387 - PositionalParameterStringFormatter.format(
388 - "Unable to allocate bandwidth for link {} "
389 - + " requested amount is {} current allocation is {}",
390 - link,
391 - bwReq.bandwidth().toDouble(),
392 - bw));
393 - }
394 - } else if (req instanceof LambdaResourceAllocation) {
395 - LambdaResourceAllocation lambdaAllocation = (LambdaResourceAllocation) req;
396 - // check if allocation should be accepted
397 - if (!avail.contains(req)) {
398 - // requested lambda was not available
399 - throw new ResourceAllocationException(
400 - PositionalParameterStringFormatter.format(
401 - "Unable to allocate lambda for link {} lambda is {}",
402 - link,
403 - lambdaAllocation.lambda().toInt()));
404 - }
405 - } else if (req instanceof MplsLabelResourceAllocation) {
406 - MplsLabelResourceAllocation mplsAllocation = (MplsLabelResourceAllocation) req;
407 - if (!avail.contains(req)) {
408 - throw new ResourceAllocationException(
409 - PositionalParameterStringFormatter
410 - .format("Unable to allocate MPLS label for link "
411 - + "{} MPLS label is {}",
412 - link,
413 - mplsAllocation
414 - .mplsLabel()
415 - .toString()));
416 - }
417 - }
418 - }
419 - // all requests allocatable => add allocation
420 - final LinkKey linkKey = LinkKey.linkKey(link);
421 - STxMap<LinkKey, List<LinkResourceAllocations>> linkAllocs = getLinkAllocs(tx);
422 - List<LinkResourceAllocations> before = linkAllocs.get(linkKey);
423 - if (before == null) {
424 - List<LinkResourceAllocations> after = new ArrayList<>();
425 - after.add(allocations);
426 - before = linkAllocs.putIfAbsent(linkKey, after);
427 - if (before != null) {
428 - // concurrent allocation detected, retry transaction
429 - throw new TransactionException("Concurrent Allocation, retry");
430 - }
431 - } else {
432 - List<LinkResourceAllocations> after = new ArrayList<>(before.size() + 1);
433 - after.addAll(before);
434 - after.add(allocations);
435 - linkAllocs.replace(linkKey, before, after);
436 - }
437 - }
438 -
439 - @Override
440 - public LinkResourceEvent releaseResources(LinkResourceAllocations allocations) {
441 - checkNotNull(allocations);
442 -
443 - final IntentId intendId = allocations.intentId();
444 - final Collection<Link> links = allocations.links();
445 -
446 - boolean success = false;
447 - do {
448 - // Note: might want to break it down into smaller tx units
449 - // to lower the chance of collisions.
450 - TransactionContext tx = theInstance.newTransactionContext();
451 - tx.beginTransaction();
452 - try {
453 - STxMap<IntentId, LinkResourceAllocations> intentAllocs = getIntentAllocs(tx);
454 - intentAllocs.remove(intendId);
455 -
456 - STxMap<LinkKey, List<LinkResourceAllocations>> linkAllocs = getLinkAllocs(tx);
457 -
458 - for (Link link : links) {
459 - final LinkKey linkId = LinkKey.linkKey(link);
460 -
461 - List<LinkResourceAllocations> before = linkAllocs.get(linkId);
462 - if (before == null || before.isEmpty()) {
463 - // something is wrong, but it is already freed
464 - log.warn("There was no resource left to release on {}", linkId);
465 - continue;
466 - }
467 - List<LinkResourceAllocations> after = new ArrayList<>(before);
468 - after.remove(allocations);
469 - linkAllocs.replace(linkId, before, after);
470 - }
471 -
472 - tx.commitTransaction();
473 - success = true;
474 - } catch (TransactionException e) {
475 - log.debug("Transaction failed, retrying");
476 - } catch (Exception e) {
477 - log.error("Exception thrown during releaseResource {}",
478 - allocations, e);
479 - tx.rollbackTransaction();
480 - throw e;
481 - }
482 - } while (!success);
483 -
484 - // Issue events to force recompilation of intents.
485 - final List<LinkResourceAllocations> releasedResources =
486 - ImmutableList.of(allocations);
487 - return new LinkResourceEvent(
488 - LinkResourceEvent.Type.ADDITIONAL_RESOURCES_AVAILABLE,
489 - releasedResources);
490 - }
491 -
492 - @Override
493 - public LinkResourceAllocations getAllocations(IntentId intentId) {
494 - checkNotNull(intentId);
495 - TransactionOptions opt = new TransactionOptions();
496 - // read-only and will never be committed, thus does not need durability
497 - opt.setTransactionType(TransactionType.LOCAL);
498 - TransactionContext tx = theInstance.newTransactionContext(opt);
499 - tx.beginTransaction();
500 - try {
501 - STxMap<IntentId, LinkResourceAllocations> intentAllocs = getIntentAllocs(tx);
502 - return intentAllocs.get(intentId);
503 - } finally {
504 - tx.rollbackTransaction();
505 - }
506 - }
507 -
508 - @Override
509 - public List<LinkResourceAllocations> getAllocations(Link link) {
510 - checkNotNull(link);
511 - final LinkKey key = LinkKey.linkKey(link);
512 -
513 - TransactionOptions opt = new TransactionOptions();
514 - // read-only and will never be committed, thus does not need durability
515 - opt.setTransactionType(TransactionType.LOCAL);
516 - TransactionContext tx = theInstance.newTransactionContext(opt);
517 - tx.beginTransaction();
518 - List<LinkResourceAllocations> res = null;
519 - try {
520 - STxMap<LinkKey, List<LinkResourceAllocations>> linkAllocs = getLinkAllocs(tx);
521 - res = linkAllocs.get(key);
522 - } finally {
523 - tx.rollbackTransaction();
524 - }
525 -
526 - if (res == null) {
527 - // try to add empty list
528 - TransactionContext tx2 = theInstance.newTransactionContext();
529 - tx2.beginTransaction();
530 - try {
531 - res = getLinkAllocs(tx2).putIfAbsent(key, new ArrayList<>());
532 - tx2.commitTransaction();
533 - if (res == null) {
534 - return Collections.emptyList();
535 - } else {
536 - return res;
537 - }
538 - } catch (TransactionException e) {
539 - // concurrently added?
540 - return getAllocations(link);
541 - } catch (Exception e) {
542 - tx.rollbackTransaction();
543 - }
544 - }
545 - return res;
546 - }
547 -
548 - private Iterable<LinkResourceAllocations> getAllocations(TransactionContext tx,
549 - Link link) {
550 - checkNotNull(tx);
551 - checkNotNull(link);
552 - final LinkKey key = LinkKey.linkKey(link);
553 -
554 - STxMap<LinkKey, List<LinkResourceAllocations>> linkAllocs = getLinkAllocs(tx);
555 - List<LinkResourceAllocations> res = null;
556 - res = linkAllocs.get(key);
557 - if (res == null) {
558 - res = linkAllocs.putIfAbsent(key, new ArrayList<>());
559 - if (res == null) {
560 - return Collections.emptyList();
561 - } else {
562 - return res;
563 - }
564 - }
565 - return res;
566 - }
567 -
568 - @Override
569 - public Iterable<LinkResourceAllocations> getAllocations() {
570 - TransactionContext tx = theInstance.newTransactionContext();
571 - tx.beginTransaction();
572 - try {
573 - STxMap<IntentId, LinkResourceAllocations> intentAllocs = getIntentAllocs(tx);
574 - return intentAllocs.values();
575 - } finally {
576 - tx.rollbackTransaction();
577 - }
578 - }
579 -}
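
allocateResources() and releaseResources() above share one skeleton: do all map reads and writes inside a Hazelcast transaction and, when the commit collides with a concurrent writer (TransactionException), retry the whole unit a bounded number of times. A condensed sketch of that skeleton, assuming Hazelcast 3.x and an injected HazelcastInstance named instance; runTransactionally, work, and maxRetries are illustrative names:

    import java.util.function.Consumer;
    import com.hazelcast.core.HazelcastInstance;
    import com.hazelcast.transaction.TransactionContext;
    import com.hazelcast.transaction.TransactionException;

    // Condensed retry skeleton; the real store performs its map operations where work.accept(tx) runs.
    static void runTransactionally(HazelcastInstance instance, int maxRetries,
                                   Consumer<TransactionContext> work) {
        for (int attempt = 0; attempt < maxRetries; attempt++) {
            TransactionContext tx = instance.newTransactionContext();
            tx.beginTransaction();
            try {
                work.accept(tx);               // e.g. tx.getMap(...).putIfAbsent(...)
                tx.commitTransaction();
                return;
            } catch (TransactionException e) {
                // commit conflicted with a concurrent writer; retry with a fresh transaction
            } catch (RuntimeException e) {
                tx.rollbackTransaction();      // unexpected failure: roll back and propagate
                throw e;
            }
        }
    }

Read-only paths (getFreeResources, getAllocations) instead open a TransactionType.LOCAL context and always roll it back, since they never commit and do not need durability.
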
1 -<?xml version="1.0" encoding="UTF-8"?>
2 -
3 -<!--
4 - ~ Copyright 2015 Open Networking Laboratory
5 - ~
6 - ~ Licensed under the Apache License, Version 2.0 (the "License");
7 - ~ you may not use this file except in compliance with the License.
8 - ~ You may obtain a copy of the License at
9 - ~
10 - ~ http://www.apache.org/licenses/LICENSE-2.0
11 - ~
12 - ~ Unless required by applicable law or agreed to in writing, software
13 - ~ distributed under the License is distributed on an "AS IS" BASIS,
14 - ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 - ~ See the License for the specific language governing permissions and
16 - ~ limitations under the License.
17 - -->
18 -
19 -<!--
20 - The default Hazelcast configuration. This is used when:
21 -
22 - - no hazelcast.xml is present
23 -
24 --->
25 -<hazelcast xsi:schemaLocation="http://www.hazelcast.com/schema/config hazelcast-config-3.3.xsd"
26 - xmlns="http://www.hazelcast.com/schema/config"
27 - xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
28 - <group>
29 - <name>@NAME</name>
30 - <password>rocks</password>
31 - </group>
32 - <management-center enabled="false">http://localhost:8080/mancenter</management-center>
33 - <properties>
34 - <property name="hazelcast.max.no.heartbeat.seconds">30</property>
35 - <property name="hazelcast.merge.first.run.delay.seconds">30</property>
36 - <property name="hazelcast.merge.next.run.delay.seconds">30</property>
37 - </properties>
38 - <network>
39 - <port auto-increment="true" port-count="100">5701</port>
40 - <outbound-ports>
41 - <!--
42 - Allowed port range when connecting to other nodes.
43 - 0 or * means use system provided port.
44 - -->
45 - <ports>0</ports>
46 - </outbound-ports>
47 - <join>
48 - <multicast enabled="true">
49 - <multicast-group>224.2.2.3</multicast-group>
50 - <multicast-port>54327</multicast-port>
51 - </multicast>
52 - <tcp-ip enabled="false">
53 - <interface>127.0.0.1</interface>
54 - </tcp-ip>
55 - </join>
56 - <interfaces enabled="true">
57 - <interface>@PREFIX</interface>
58 - </interfaces>
59 - <ssl enabled="false"/>
60 - <socket-interceptor enabled="false"/>
61 - <symmetric-encryption enabled="false">
62 - <!--
63 - encryption algorithm such as
64 - DES/ECB/PKCS5Padding,
65 - PBEWithMD5AndDES,
66 - AES/CBC/PKCS5Padding,
67 - Blowfish,
68 - DESede
69 - -->
70 - <algorithm>PBEWithMD5AndDES</algorithm>
71 - <!-- salt value to use when generating the secret key -->
72 - <salt>thesalt</salt>
73 - <!-- pass phrase to use when generating the secret key -->
74 - <password>thepass</password>
75 - <!-- iteration count to use when generating the secret key -->
76 - <iteration-count>19</iteration-count>
77 - </symmetric-encryption>
78 - </network>
79 - <partition-group enabled="false"/>
80 - <executor-service name="default">
81 - <pool-size>16</pool-size>
82 - <!--Queue capacity. 0 means Integer.MAX_VALUE.-->
83 - <queue-capacity>0</queue-capacity>
84 - </executor-service>
85 - <queue name="default">
86 - <!--
87 - Maximum size of the queue. When a JVM's local queue size reaches the maximum,
88 - all put/offer operations will get blocked until the queue size
89 - of the JVM goes down below the maximum.
90 - Any integer between 0 and Integer.MAX_VALUE. 0 means
91 - Integer.MAX_VALUE. Default is 0.
92 - -->
93 - <max-size>0</max-size>
94 - <!--
95 - Number of backups. If 1 is set as the backup-count for example,
96 - then all entries of the map will be copied to another JVM for
97 - fail-safety. 0 means no backup.
98 - -->
99 - <backup-count>1</backup-count>
100 -
101 - <!--
102 - Number of async backups. 0 means no backup.
103 - -->
104 - <async-backup-count>0</async-backup-count>
105 -
106 - <empty-queue-ttl>-1</empty-queue-ttl>
107 - </queue>
108 - <map name="default">
109 - <!--
110 - Data type that will be used for storing recordMap.
111 - Possible values:
112 - BINARY (default): keys and values will be stored as binary data
113 - OBJECT : values will be stored in their object forms
114 - OFFHEAP : values will be stored in non-heap region of JVM
115 - -->
116 - <in-memory-format>BINARY</in-memory-format>
117 -
118 - <!--
119 - Number of backups. If 1 is set as the backup-count for example,
120 - then all entries of the map will be copied to another JVM for
121 - fail-safety. 0 means no backup.
122 - -->
123 - <backup-count>1</backup-count>
124 - <!--
125 - Number of async backups. 0 means no backup.
126 - -->
127 - <async-backup-count>0</async-backup-count>
128 - <!--
129 - Maximum number of seconds for each entry to stay in the map. Entries that are
130 - older than <time-to-live-seconds> and not updated for <time-to-live-seconds>
131 - will get automatically evicted from the map.
132 - Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0.
133 - -->
134 - <time-to-live-seconds>0</time-to-live-seconds>
135 - <!--
136 - Maximum number of seconds for each entry to stay idle in the map. Entries that are
137 - idle(not touched) for more than <max-idle-seconds> will get
138 - automatically evicted from the map. Entry is touched if get, put or containsKey is called.
139 - Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0.
140 - -->
141 - <max-idle-seconds>0</max-idle-seconds>
142 - <!--
143 - Valid values are:
144 - NONE (no eviction),
145 - LRU (Least Recently Used),
146 - LFU (Least Frequently Used).
147 - NONE is the default.
148 - -->
149 - <eviction-policy>NONE</eviction-policy>
150 - <!--
151 - Maximum size of the map. When max size is reached,
152 - map is evicted based on the policy defined.
153 - Any integer between 0 and Integer.MAX_VALUE. 0 means
154 - Integer.MAX_VALUE. Default is 0.
155 - -->
156 - <max-size policy="PER_NODE">0</max-size>
157 - <!--
158 - When max. size is reached, specified percentage of
159 - the map will be evicted. Any integer between 0 and 100.
160 - If 25 is set for example, 25% of the entries will
161 - get evicted.
162 - -->
163 - <eviction-percentage>25</eviction-percentage>
164 - <!--
165 - Minimum time in milliseconds which should pass before checking
166 - if a partition of this map is evictable or not.
167 - Default value is 100 millis.
168 - -->
169 - <min-eviction-check-millis>100</min-eviction-check-millis>
170 - <!--
171 - While recovering from split-brain (network partitioning),
172 - map entries in the small cluster will merge into the bigger cluster
173 - based on the policy set here. When an entry merges into the
174 - cluster, there might be an existing entry with the same key already.
175 - Values of these entries might be different for that same key.
176 - Which value should be set for the key? Conflict is resolved by
177 - the policy set here. Default policy is PutIfAbsentMapMergePolicy
178 -
179 - There are built-in merge policies such as
180 - com.hazelcast.map.merge.PassThroughMergePolicy; entry will be added if there is no existing entry for the key.
181 - com.hazelcast.map.merge.PutIfAbsentMapMergePolicy; entry will be added if the merging entry doesn't exist in the cluster.
182 - com.hazelcast.map.merge.HigherHitsMapMergePolicy; entry with the higher hits wins.
183 - com.hazelcast.map.merge.LatestUpdateMapMergePolicy; entry with the latest update wins.
184 - -->
185 - <merge-policy>com.hazelcast.map.merge.PutIfAbsentMapMergePolicy</merge-policy>
186 - </map>
187 -
188 - <multimap name="default">
189 - <backup-count>1</backup-count>
190 - <value-collection-type>SET</value-collection-type>
191 - </multimap>
192 -
198 - <list name="default">
199 - <backup-count>1</backup-count>
200 - </list>
201 -
202 - <set name="default">
203 - <backup-count>1</backup-count>
204 - </set>
205 -
206 - <jobtracker name="default">
207 - <max-thread-size>0</max-thread-size>
208 - <!-- Queue size 0 means number of partitions * 2 -->
209 - <queue-size>0</queue-size>
210 - <retry-count>0</retry-count>
211 - <chunk-size>1000</chunk-size>
212 - <communicate-stats>true</communicate-stats>
213 - <topology-changed-strategy>CANCEL_RUNNING_OPERATION</topology-changed-strategy>
214 - </jobtracker>
215 -
216 - <semaphore name="default">
217 - <initial-permits>0</initial-permits>
218 - <backup-count>1</backup-count>
219 - <async-backup-count>0</async-backup-count>
220 - </semaphore>
221 -
222 - <serialization>
223 - <portable-version>0</portable-version>
224 - </serialization>
225 -
226 - <services enable-defaults="true"/>
227 -
228 -</hazelcast>
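
The operative part of this default file is the join/interfaces section: multicast discovery on 224.2.2.3:54327, TCP/IP join disabled, and membership restricted to the @PREFIX interface pattern that StoreManager substitutes at startup. For reference, a sketch of the same settings through the programmatic Config API (Hazelcast 3.x), where "onos" and "192.168.1.*" stand in for the @NAME and @PREFIX placeholders:

    import com.hazelcast.config.Config;
    import com.hazelcast.config.JoinConfig;

    Config config = new Config();
    config.getGroupConfig().setName("onos");                 // @NAME placeholder
    JoinConfig join = config.getNetworkConfig().getJoin();
    join.getMulticastConfig()
            .setEnabled(true)
            .setMulticastGroup("224.2.2.3")
            .setMulticastPort(54327);
    join.getTcpIpConfig().setEnabled(false);                 // matches <tcp-ip enabled="false">
    config.getNetworkConfig().getInterfaces()
            .setEnabled(true)
            .addInterface("192.168.1.*");                    // @PREFIX placeholder
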
1 -/*
2 - * Copyright 2014-2015 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.hz;
17 -
18 -import static com.google.common.base.Preconditions.checkArgument;
19 -import static com.google.common.base.Preconditions.checkState;
20 -
21 -import java.io.FileNotFoundException;
22 -import java.util.UUID;
23 -
24 -import com.hazelcast.config.Config;
25 -import com.hazelcast.config.FileSystemXmlConfig;
26 -import com.hazelcast.core.HazelcastInstance;
27 -import com.hazelcast.test.TestHazelcastInstanceFactory;
28 -
29 -/**
30 - * Dummy StoreManager that uses a specified Hazelcast instance.
31 - */
32 -public class TestStoreManager extends StoreManager {
33 -
34 - private TestHazelcastInstanceFactory factory;
35 -
36 - /**
37 - * Gets the Hazelcast Config for testing.
38 - *
39 - * @return Hazelcast Configuration for testing
40 - */
41 - public static Config getTestConfig() {
42 - Config config;
43 - try {
44 - config = new FileSystemXmlConfig(HAZELCAST_XML_FILE);
45 - } catch (FileNotFoundException e) {
46 - // falling back to default
47 - config = new Config();
48 - }
49 - // avoid accidentally joining other cluster
50 - config.getGroupConfig().setName(UUID.randomUUID().toString());
51 - // quickly form single node cluster
52 - config.getNetworkConfig().getJoin()
53 - .getTcpIpConfig()
54 - .setEnabled(true).setConnectionTimeoutSeconds(0);
55 - config.getNetworkConfig().getJoin()
56 - .getMulticastConfig()
57 - .setEnabled(false);
58 - return config;
59 - }
60 -
61 - /**
62 - * Creates an instance of dummy Hazelcast instance for testing.
63 - *
64 - * @return HazelcastInstance
65 - */
66 - public HazelcastInstance initSingleInstance() {
67 - return initInstances(1)[0];
68 - }
69 -
70 - /**
71 - * Creates some instances of dummy Hazelcast instances for testing.
72 - *
73 - * @param count number of instances to create
74 - * @return array of HazelcastInstances
75 - */
76 - public HazelcastInstance[] initInstances(int count) {
77 - checkArgument(count > 0, "Cluster size must be > 0");
78 - factory = new TestHazelcastInstanceFactory(count);
79 - return factory.newInstances(getTestConfig());
80 - }
81 -
82 - /**
83 - * Sets the Hazelcast instance to return on #getHazelcastInstance().
84 - *
85 - * @param instance Hazelcast instance to return on #getHazelcastInstance()
86 - */
87 - public void setHazelcastInstance(HazelcastInstance instance) {
88 - this.instance = instance;
89 - }
90 -
91 - @Override
92 - public void activate() {
93 - // Hazelcast setup removed from original code.
94 - checkState(this.instance != null, "HazelcastInstance needs to be set");
95 - }
96 -
97 - @Override
98 - public void deactivate() {
99 - // Hazelcast instance shutdown removed from original code.
100 - factory.shutdownAll();
101 - }
102 -}
...@@ -15,49 +15,11 @@ ...@@ -15,49 +15,11 @@
15 */ 15 */
16 package org.onosproject.store.mastership.impl; 16 package org.onosproject.store.mastership.impl;
17 17
18 -import java.util.Map;
19 -import java.util.Set;
20 -import java.util.concurrent.CountDownLatch;
21 -import java.util.concurrent.TimeUnit;
22 -
23 -import org.junit.After;
24 -import org.junit.AfterClass;
25 -import org.junit.Before;
26 -import org.junit.BeforeClass;
27 -import org.junit.Ignore;
28 -import org.junit.Test;
29 -import org.onlab.junit.TestTools;
30 -import org.onlab.packet.IpAddress;
31 -import org.onosproject.cluster.ClusterServiceAdapter;
32 -import org.onosproject.cluster.ControllerNode;
33 -import org.onosproject.cluster.DefaultControllerNode;
34 -import org.onosproject.cluster.NodeId;
35 -import org.onosproject.mastership.MastershipEvent;
36 -import org.onosproject.mastership.MastershipEvent.Type;
37 -import org.onosproject.mastership.MastershipStoreDelegate;
38 -import org.onosproject.mastership.MastershipTerm;
39 -import org.onosproject.net.DeviceId;
40 -import org.onosproject.net.MastershipRole;
41 -import org.onosproject.store.hz.StoreManager;
42 -import org.onosproject.store.hz.StoreService;
43 -import org.onosproject.store.hz.TestStoreManager;
44 -import org.onosproject.store.serializers.KryoSerializer;
45 -
46 -import com.google.common.collect.Sets;
47 -import com.google.common.util.concurrent.Futures;
48 -
49 -import static org.junit.Assert.assertEquals;
50 -import static org.junit.Assert.assertNull;
51 -import static org.junit.Assert.assertTrue;
52 -import static org.onosproject.net.MastershipRole.MASTER;
53 -import static org.onosproject.net.MastershipRole.NONE;
54 -import static org.onosproject.net.MastershipRole.STANDBY;
55 -
56 /** 18 /**
57 * Test of the Hazelcast-based distributed MastershipStore implementation. 19 * Test of the Hazelcast-based distributed MastershipStore implementation.
58 */ 20 */
59 public class DistributedMastershipStoreTest { 21 public class DistributedMastershipStoreTest {
60 - 22 +/*
61 private static final DeviceId DID1 = DeviceId.deviceId("of:01"); 23 private static final DeviceId DID1 = DeviceId.deviceId("of:01");
62 private static final DeviceId DID2 = DeviceId.deviceId("of:02"); 24 private static final DeviceId DID2 = DeviceId.deviceId("of:02");
63 private static final DeviceId DID3 = DeviceId.deviceId("of:03"); 25 private static final DeviceId DID3 = DeviceId.deviceId("of:03");
...@@ -320,5 +282,5 @@ public class DistributedMastershipStoreTest { ...@@ -320,5 +282,5 @@ public class DistributedMastershipStoreTest {
320 } 282 }
321 283
322 } 284 }
323 - 285 +*/
324 } 286 }
......
...@@ -15,50 +15,11 @@ ...@@ -15,50 +15,11 @@
15 */ 15 */
16 package org.onosproject.store.resource.impl; 16 package org.onosproject.store.resource.impl;
17 17
18 -import java.util.HashSet;
19 -import java.util.Set;
20 -
21 -import org.junit.After;
22 -import org.junit.Before;
23 -import org.junit.Test;
24 -import org.onlab.util.Bandwidth;
25 -import org.onosproject.net.AnnotationKeys;
26 -import org.onosproject.net.Annotations;
27 -import org.onosproject.net.ConnectPoint;
28 -import org.onosproject.net.DefaultAnnotations;
29 -import org.onosproject.net.DefaultLink;
30 -import org.onosproject.net.Link;
31 -import org.onosproject.net.intent.IntentId;
32 -import org.onosproject.net.provider.ProviderId;
33 -import org.onosproject.net.resource.BandwidthResource;
34 -import org.onosproject.net.resource.BandwidthResourceAllocation;
35 -import org.onosproject.net.resource.DefaultLinkResourceAllocations;
36 -import org.onosproject.net.resource.DefaultLinkResourceRequest;
37 -import org.onosproject.net.resource.LambdaResource;
38 -import org.onosproject.net.resource.LambdaResourceAllocation;
39 -import org.onosproject.net.resource.LinkResourceAllocations;
40 -import org.onosproject.net.resource.LinkResourceRequest;
41 -import org.onosproject.net.resource.LinkResourceStore;
42 -import org.onosproject.net.resource.ResourceAllocation;
43 -import org.onosproject.net.resource.ResourceAllocationException;
44 -import org.onosproject.net.resource.ResourceType;
45 -import org.onosproject.store.hz.StoreService;
46 -import org.onosproject.store.hz.TestStoreManager;
47 -
48 -import com.google.common.collect.ImmutableMap;
49 -import com.google.common.collect.ImmutableSet;
50 -import static org.junit.Assert.assertEquals;
51 -import static org.junit.Assert.assertFalse;
52 -import static org.junit.Assert.assertNotNull;
53 -import static org.onosproject.net.DeviceId.deviceId;
54 -import static org.onosproject.net.Link.Type.DIRECT;
55 -import static org.onosproject.net.PortNumber.portNumber;
56 -
57 /** 18 /**
58 * Test of the simple LinkResourceStore implementation. 19 * Test of the simple LinkResourceStore implementation.
59 */ 20 */
60 public class HazelcastLinkResourceStoreTest { 21 public class HazelcastLinkResourceStoreTest {
61 - 22 +/*
62 private LinkResourceStore store; 23 private LinkResourceStore store;
63 private HazelcastLinkResourceStore storeImpl; 24 private HazelcastLinkResourceStore storeImpl;
64 private Link link1; 25 private Link link1;
...@@ -74,7 +35,7 @@ public class HazelcastLinkResourceStoreTest { ...@@ -74,7 +35,7 @@ public class HazelcastLinkResourceStoreTest {
74 * @param dev2 destination device 35 * @param dev2 destination device
75 * @param port2 destination port 36 * @param port2 destination port
76 * @return created {@link Link} object 37 * @return created {@link Link} object
77 - */ 38 + * /
78 private Link newLink(String dev1, int port1, String dev2, int port2) { 39 private Link newLink(String dev1, int port1, String dev2, int port2) {
79 Annotations annotations = DefaultAnnotations.builder() 40 Annotations annotations = DefaultAnnotations.builder()
80 .set(AnnotationKeys.OPTICAL_WAVES, "80") 41 .set(AnnotationKeys.OPTICAL_WAVES, "80")
...@@ -112,9 +73,6 @@ public class HazelcastLinkResourceStoreTest { ...@@ -112,9 +73,6 @@ public class HazelcastLinkResourceStoreTest {
112 storeMgr.deactivate(); 73 storeMgr.deactivate();
113 } 74 }
114 75
115 - /**
116 - * Tests constructor and activate method.
117 - */
118 @Test 76 @Test
119 public void testConstructorAndActivate() { 77 public void testConstructorAndActivate() {
120 final Iterable<LinkResourceAllocations> allAllocations = store.getAllocations(); 78 final Iterable<LinkResourceAllocations> allAllocations = store.getAllocations();
...@@ -130,13 +88,6 @@ public class HazelcastLinkResourceStoreTest { ...@@ -130,13 +88,6 @@ public class HazelcastLinkResourceStoreTest {
130 assertNotNull(res); 88 assertNotNull(res);
131 } 89 }
132 90
133 - /**
134 - * Picks up and returns one of bandwidth allocations from a given set.
135 - *
136 - * @param resources the set of {@link ResourceAllocation}s
137 - * @return {@link BandwidthResourceAllocation} object if found, null
138 - * otherwise
139 - */
140 private BandwidthResourceAllocation getBandwidthObj(Set<ResourceAllocation> resources) { 91 private BandwidthResourceAllocation getBandwidthObj(Set<ResourceAllocation> resources) {
141 for (ResourceAllocation res : resources) { 92 for (ResourceAllocation res : resources) {
142 if (res.type() == ResourceType.BANDWIDTH) { 93 if (res.type() == ResourceType.BANDWIDTH) {
...@@ -146,12 +97,6 @@ public class HazelcastLinkResourceStoreTest { ...@@ -146,12 +97,6 @@ public class HazelcastLinkResourceStoreTest {
146 return null; 97 return null;
147 } 98 }
148 99
149 - /**
150 - * Returns all lambda allocations from a given set.
151 - *
152 - * @param resources the set of {@link ResourceAllocation}s
153 - * @return a set of {@link LambdaResourceAllocation} objects
154 - */
155 private Set<LambdaResourceAllocation> getLambdaObjs(Set<ResourceAllocation> resources) { 100 private Set<LambdaResourceAllocation> getLambdaObjs(Set<ResourceAllocation> resources) {
156 Set<LambdaResourceAllocation> lambdaResources = new HashSet<>(); 101 Set<LambdaResourceAllocation> lambdaResources = new HashSet<>();
157 for (ResourceAllocation res : resources) { 102 for (ResourceAllocation res : resources) {
...@@ -162,9 +107,6 @@ public class HazelcastLinkResourceStoreTest { ...@@ -162,9 +107,6 @@ public class HazelcastLinkResourceStoreTest {
162 return lambdaResources; 107 return lambdaResources;
163 } 108 }
164 109
165 - /**
166 - * Tests initial free bandwidth for a link.
167 - */
168 @Test 110 @Test
169 public void testInitialBandwidth() { 111 public void testInitialBandwidth() {
170 final Set<ResourceAllocation> freeRes = store.getFreeResources(link1); 112 final Set<ResourceAllocation> freeRes = store.getFreeResources(link1);
...@@ -176,9 +118,6 @@ public class HazelcastLinkResourceStoreTest { ...@@ -176,9 +118,6 @@ public class HazelcastLinkResourceStoreTest {
176 assertEquals(new BandwidthResource(Bandwidth.mbps(1000.0)), alloc.bandwidth()); 118 assertEquals(new BandwidthResource(Bandwidth.mbps(1000.0)), alloc.bandwidth());
177 } 119 }
178 120
179 - /**
180 - * Tests initial free lambda for a link.
181 - */
182 @Test 121 @Test
183 public void testInitialLambdas() { 122 public void testInitialLambdas() {
184 final Set<ResourceAllocation> freeRes = store.getFreeResources(link3); 123 final Set<ResourceAllocation> freeRes = store.getFreeResources(link3);
...@@ -198,9 +137,6 @@ public class HazelcastLinkResourceStoreTest { ...@@ -198,9 +137,6 @@ public class HazelcastLinkResourceStoreTest {
198 137
199 } 138 }
200 139
201 - /**
202 - * Tests a successful bandwidth allocation.
203 - */
204 @Test 140 @Test
205 public void testSuccessfulBandwidthAllocation() { 141 public void testSuccessfulBandwidthAllocation() {
206 final Link link = newLink("of:1", 1, "of:2", 2); 142 final Link link = newLink("of:1", 1, "of:2", 2);
...@@ -219,9 +155,6 @@ public class HazelcastLinkResourceStoreTest { ...@@ -219,9 +155,6 @@ public class HazelcastLinkResourceStoreTest {
219 store.allocateResources(allocations); 155 store.allocateResources(allocations);
220 } 156 }
221 157
222 - /**
223 - * Tests an unsuccessful bandwidth allocation.
224 - */
225 @Test 158 @Test
226 public void testUnsuccessfulBandwidthAllocation() { 159 public void testUnsuccessfulBandwidthAllocation() {
227 final Link link = newLink("of:1", 1, "of:2", 2); 160 final Link link = newLink("of:1", 1, "of:2", 2);
...@@ -247,9 +180,6 @@ public class HazelcastLinkResourceStoreTest { ...@@ -247,9 +180,6 @@ public class HazelcastLinkResourceStoreTest {
247 assertEquals(true, gotException); 180 assertEquals(true, gotException);
248 } 181 }
249 182
250 - /**
251 - * Tests a successful lambda allocation.
252 - */
253 @Test 183 @Test
254 public void testSuccessfulLambdaAllocation() { 184 public void testSuccessfulLambdaAllocation() {
255 final Link link = newLink("of:1", 1, "of:2", 2); 185 final Link link = newLink("of:1", 1, "of:2", 2);
...@@ -268,9 +198,6 @@ public class HazelcastLinkResourceStoreTest { ...@@ -268,9 +198,6 @@ public class HazelcastLinkResourceStoreTest {
268 store.allocateResources(allocations); 198 store.allocateResources(allocations);
269 } 199 }
270 200
271 - /**
272 - * Tests an unsuccessful lambda allocation.
273 - */
274 @Test 201 @Test
275 public void testUnsuccessfulLambdaAllocation() { 202 public void testUnsuccessfulLambdaAllocation() {
276 final Link link = newLink("of:1", 1, "of:2", 2); 203 final Link link = newLink("of:1", 1, "of:2", 2);
...@@ -296,4 +223,5 @@ public class HazelcastLinkResourceStoreTest { ...@@ -296,4 +223,5 @@ public class HazelcastLinkResourceStoreTest {
296 } 223 }
297 assertEquals(true, gotException); 224 assertEquals(true, gotException);
298 } 225 }
226 + */
299 } 227 }
......
...@@ -37,7 +37,6 @@ ...@@ -37,7 +37,6 @@
37 37
38 <bundle>mvn:joda-time/joda-time/2.5</bundle> 38 <bundle>mvn:joda-time/joda-time/2.5</bundle>
39 39
40 - <bundle>mvn:com.hazelcast/hazelcast/3.4</bundle>
41 <bundle>mvn:io.dropwizard.metrics/metrics-core/3.1.0</bundle> 40 <bundle>mvn:io.dropwizard.metrics/metrics-core/3.1.0</bundle>
42 <bundle>mvn:io.dropwizard.metrics/metrics-json/3.1.0</bundle> 41 <bundle>mvn:io.dropwizard.metrics/metrics-json/3.1.0</bundle>
43 <bundle>mvn:com.eclipsesource.minimal-json/minimal-json/0.9.1</bundle> 42 <bundle>mvn:com.eclipsesource.minimal-json/minimal-json/0.9.1</bundle>
......
...@@ -58,21 +58,15 @@ ...@@ -58,21 +58,15 @@
58 <scope>test</scope> 58 <scope>test</scope>
59 </dependency> 59 </dependency>
60 <dependency> 60 <dependency>
61 - <groupId>org.easymock</groupId> 61 + <groupId>org.easymock</groupId>
62 - <artifactId>easymock</artifactId> 62 + <artifactId>easymock</artifactId>
63 - <scope>test</scope> 63 + <scope>test</scope>
64 - </dependency>
65 - <dependency>
66 - <groupId>org.onosproject</groupId>
67 - <artifactId>onos-api</artifactId>
68 - <classifier>tests</classifier>
69 - <scope>test</scope>
70 </dependency> 64 </dependency>
71 <dependency> 65 <dependency>
72 - <groupId>com.hazelcast</groupId> 66 + <groupId>org.onosproject</groupId>
73 - <artifactId>hazelcast</artifactId> 67 + <artifactId>onos-api</artifactId>
74 - <classifier>tests</classifier> 68 + <classifier>tests</classifier>
75 - <scope>test</scope> 69 + <scope>test</scope>
76 </dependency> 70 </dependency>
77 </dependencies> 71 </dependencies>
78 72
......
...@@ -287,18 +287,6 @@ ...@@ -287,18 +287,6 @@
287 </dependency> 287 </dependency>
288 288
289 <dependency> 289 <dependency>
290 - <groupId>com.hazelcast</groupId>
291 - <artifactId>hazelcast</artifactId>
292 - <version>3.4</version>
293 - </dependency>
294 - <dependency>
295 - <groupId>com.hazelcast</groupId>
296 - <artifactId>hazelcast</artifactId>
297 - <version>3.4</version>
298 - <classifier>tests</classifier>
299 - <scope>test</scope>
300 - </dependency>
301 - <dependency>
302 <groupId>com.eclipsesource.minimal-json</groupId> 290 <groupId>com.eclipsesource.minimal-json</groupId>
303 <artifactId>minimal-json</artifactId> 291 <artifactId>minimal-json</artifactId>
304 <version>0.9.1</version> 292 <version>0.9.1</version>
......
...@@ -17,7 +17,7 @@ export STAGE=$(dirname $KARAF_ROOT) ...@@ -17,7 +17,7 @@ export STAGE=$(dirname $KARAF_ROOT)
17 # Validates the specified IP regular expression against existing adapters. 17 # Validates the specified IP regular expression against existing adapters.
18 # Excludes local-loopback. 18 # Excludes local-loopback.
19 function validateIp { 19 function validateIp {
20 - ifconfig | awk '{ print $2}' | grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}" | grep -v "127\.0\.0\.1" | grep $1 20 + ifconfig | awk '{ print $2}' | grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}" | grep $1
21 } 21 }
22 22
23 # Clean the previous Karaf directory if requested and if it exists. 23 # Clean the previous Karaf directory if requested and if it exists.
...@@ -26,6 +26,7 @@ if [ "$1" = "clean" ]; then ...@@ -26,6 +26,7 @@ if [ "$1" = "clean" ]; then
26 [ -d $KARAF_ROOT ] && rm -fr $KARAF_ROOT $STAGE/apps $STAGE/config 26 [ -d $KARAF_ROOT ] && rm -fr $KARAF_ROOT $STAGE/apps $STAGE/config
27 fi 27 fi
28 28
29 +ONOS_IP=${ONOS_IP:-127.0.0.1}
29 IP="${1:-$ONOS_IP}" 30 IP="${1:-$ONOS_IP}"
30 31
31 # If IP was not given, nor configured attempt to use ONOS_NIC env. variable 32 # If IP was not given, nor configured attempt to use ONOS_NIC env. variable
...@@ -104,11 +105,6 @@ cat > $STAGE/config/tablets.json <<EOF ...@@ -104,11 +105,6 @@ cat > $STAGE/config/tablets.json <<EOF
104 "partitions": { "p1": [ { "ip": "$IP", "id": "$IP", "tcpPort": 9876 }]}} 105 "partitions": { "p1": [ { "ip": "$IP", "id": "$IP", "tcpPort": 9876 }]}}
105 EOF 106 EOF
106 107
107 -echo "Setting up hazelcast.xml for subnet $SUBNET.*..."
108 -cp $ONOS_ROOT/tools/package/etc/hazelcast.xml $KARAF_ROOT/etc/hazelcast.xml
109 -perl -pi.old -e "s/192.168.56/$SUBNET/" $KARAF_ROOT/etc/hazelcast.xml
110 -perl -pi.old -e "s/ <name>onos</ <name>$IP</" $KARAF_ROOT/etc/hazelcast.xml
111 -
112 echo "Staging builtin apps..." 108 echo "Staging builtin apps..."
113 rm -fr $STAGE/apps 109 rm -fr $STAGE/apps
114 onos-stage-apps $STAGE/apps $KARAF_ROOT/system 110 onos-stage-apps $STAGE/apps $KARAF_ROOT/system
......
1 #!/bin/bash 1 #!/bin/bash
2 -# ----------------------------------------------------------------------------- 2 +echo "This command has been deprecated as this step is no longer required."
3 -# Configures ONOS to multicast on the specified IP prefix/subnet.
4 -# -----------------------------------------------------------------------------
5 -
6 -[ $# -lt 2 ] && echo "usage: $(basename $0) name ipPrefix" && exit 1
7 -
8 -name=$1
9 -ipPrefix=$2
10 -
11 -hzXml=$(dirname $0)/../apache-karaf-*/etc/hazelcast.xml
12 -
13 -perl -pi.bak -e "s/^ <interface>[^<]*/ <interface>$ipPrefix/g" $hzXml
14 -perl -pi -e "s/ <name>[^<]*/ <name>$name/g" $hzXml
...\ No newline at end of file ...\ No newline at end of file
......
1 -<?xml version="1.0" encoding="UTF-8"?>
2 -
3 -<!--
4 - ~ Copyright (c) 2008-2013, Hazelcast, Inc. All Rights Reserved.
5 - ~ Copyright 2014 Open Networking Laboratory
6 - ~
7 - ~ Licensed under the Apache License, Version 2.0 (the "License");
8 - ~ you may not use this file except in compliance with the License.
9 - ~ You may obtain a copy of the License at
10 - ~
11 - ~ http://www.apache.org/licenses/LICENSE-2.0
12 - ~
13 - ~ Unless required by applicable law or agreed to in writing, software
14 - ~ distributed under the License is distributed on an "AS IS" BASIS,
15 - ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 - ~ See the License for the specific language governing permissions and
17 - ~ limitations under the License.
18 - -->
19 -
20 -<!--
21 - The default Hazelcast configuration. This is used when:
22 -
23 - - no hazelcast.xml if present
24 -
25 --->
26 -<hazelcast xsi:schemaLocation="http://www.hazelcast.com/schema/config hazelcast-config-3.3.xsd"
27 - xmlns="http://www.hazelcast.com/schema/config"
28 - xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
29 - <group>
30 - <name>onos</name>
31 - <password>rocks</password>
32 - </group>
33 - <management-center enabled="false">http://localhost:8080/mancenter</management-center>
34 - <properties>
35 - <property name="hazelcast.max.no.heartbeat.seconds">30</property>
36 - <property name="hazelcast.merge.first.run.delay.seconds">30</property>
37 - <property name="hazelcast.merge.next.run.delay.seconds">30</property>
38 - </properties>
39 - <network>
40 - <port auto-increment="true" port-count="100">5701</port>
41 - <outbound-ports>
42 - <!--
43 - Allowed port range when connecting to other nodes.
44 - 0 or * means use system provided port.
45 - -->
46 - <ports>0</ports>
47 - </outbound-ports>
48 - <join>
49 - <multicast enabled="true">
50 - <multicast-group>224.2.2.3</multicast-group>
51 - <multicast-port>54327</multicast-port>
52 - </multicast>
53 - <tcp-ip enabled="false">
54 - <interface>127.0.0.1</interface>
55 - </tcp-ip>
56 - </join>
57 - <interfaces enabled="true">
58 - <interface>192.168.56.*</interface>
59 - </interfaces>
60 - <ssl enabled="false"/>
61 - <socket-interceptor enabled="false"/>
62 - <symmetric-encryption enabled="false">
63 - <!--
64 - encryption algorithm such as
65 - DES/ECB/PKCS5Padding,
66 - PBEWithMD5AndDES,
67 - AES/CBC/PKCS5Padding,
68 - Blowfish,
69 - DESede
70 - -->
71 - <algorithm>PBEWithMD5AndDES</algorithm>
72 - <!-- salt value to use when generating the secret key -->
73 - <salt>thesalt</salt>
74 - <!-- pass phrase to use when generating the secret key -->
75 - <password>thepass</password>
76 - <!-- iteration count to use when generating the secret key -->
77 - <iteration-count>19</iteration-count>
78 - </symmetric-encryption>
79 - </network>
80 - <partition-group enabled="false"/>
81 - <executor-service name="default">
82 - <pool-size>16</pool-size>
83 - <!--Queue capacity. 0 means Integer.MAX_VALUE.-->
84 - <queue-capacity>0</queue-capacity>
85 - </executor-service>
86 - <queue name="default">
87 - <!--
88 - Maximum size of the queue. When a JVM's local queue size reaches the maximum,
89 - all put/offer operations will get blocked until the queue size
90 - of the JVM goes down below the maximum.
91 - Any integer between 0 and Integer.MAX_VALUE. 0 means
92 - Integer.MAX_VALUE. Default is 0.
93 - -->
94 - <max-size>0</max-size>
95 - <!--
96 - Number of backups. If 1 is set as the backup-count for example,
97 - then all entries of the map will be copied to another JVM for
98 - fail-safety. 0 means no backup.
99 - -->
100 - <backup-count>1</backup-count>
101 -
102 - <!--
103 - Number of async backups. 0 means no backup.
104 - -->
105 - <async-backup-count>0</async-backup-count>
106 -
107 - <empty-queue-ttl>-1</empty-queue-ttl>
108 - </queue>
109 - <map name="default">
110 - <!--
111 - Data type that will be used for storing recordMap.
112 - Possible values:
113 - BINARY (default): keys and values will be stored as binary data
114 - OBJECT : values will be stored in their object forms
115 - OFFHEAP : values will be stored in non-heap region of JVM
116 - -->
117 - <in-memory-format>BINARY</in-memory-format>
118 -
119 - <!--
120 - Number of backups. If 1 is set as the backup-count for example,
121 - then all entries of the map will be copied to another JVM for
122 - fail-safety. 0 means no backup.
123 - -->
124 - <backup-count>1</backup-count>
125 - <!--
126 - Number of async backups. 0 means no backup.
127 - -->
128 - <async-backup-count>0</async-backup-count>
129 - <!--
130 - Maximum number of seconds for each entry to stay in the map. Entries that are
131 - older than <time-to-live-seconds> and not updated for <time-to-live-seconds>
132 - will get automatically evicted from the map.
133 - Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0.
134 - -->
135 - <time-to-live-seconds>0</time-to-live-seconds>
136 - <!--
137 - Maximum number of seconds for each entry to stay idle in the map. Entries that are
138 - idle(not touched) for more than <max-idle-seconds> will get
139 - automatically evicted from the map. Entry is touched if get, put or containsKey is called.
140 - Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0.
141 - -->
142 - <max-idle-seconds>0</max-idle-seconds>
143 - <!--
144 - Valid values are:
145 - NONE (no eviction),
146 - LRU (Least Recently Used),
147 - LFU (Least Frequently Used).
148 - NONE is the default.
149 - -->
150 - <eviction-policy>NONE</eviction-policy>
151 - <!--
152 - Maximum size of the map. When max size is reached,
153 - map is evicted based on the policy defined.
154 - Any integer between 0 and Integer.MAX_VALUE. 0 means
155 - Integer.MAX_VALUE. Default is 0.
156 - -->
157 - <max-size policy="PER_NODE">0</max-size>
158 - <!--
159 - When max. size is reached, specified percentage of
160 - the map will be evicted. Any integer between 0 and 100.
161 - If 25 is set for example, 25% of the entries will
162 - get evicted.
163 - -->
164 - <eviction-percentage>25</eviction-percentage>
165 - <!--
166 - Minimum time in milliseconds which should pass before checking
167 - if a partition of this map is evictable or not.
168 - Default value is 100 millis.
169 - -->
170 - <min-eviction-check-millis>100</min-eviction-check-millis>
171 - <!--
172 - While recovering from split-brain (network partitioning),
173 - map entries in the small cluster will merge into the bigger cluster
174 - based on the policy set here. When an entry merge into the
175 - cluster, there might an existing entry with the same key already.
176 - Values of these entries might be different for that same key.
177 - Which value should be set for the key? Conflict is resolved by
178 - the policy set here. Default policy is PutIfAbsentMapMergePolicy
179 -
180 - There are built-in merge policies such as
181 - com.hazelcast.map.merge.PassThroughMergePolicy; entry will be added if there is no existing entry for the key.
182 - com.hazelcast.map.merge.PutIfAbsentMapMergePolicy ; entry will be added if the merging entry doesn't exist in the cluster.
183 - com.hazelcast.map.merge.HigherHitsMapMergePolicy ; entry with the higher hits wins.
184 - com.hazelcast.map.merge.LatestUpdateMapMergePolicy ; entry with the latest update wins.
185 - -->
186 - <merge-policy>com.hazelcast.map.merge.PutIfAbsentMapMergePolicy</merge-policy>
187 - </map>
188 -
189 - <multimap name="default">
190 - <backup-count>1</backup-count>
191 - <value-collection-type>SET</value-collection-type>
192 - </multimap>
193 -
194 - <multimap name="default">
195 - <backup-count>1</backup-count>
196 - <value-collection-type>SET</value-collection-type>
197 - </multimap>
198 -
199 - <list name="default">
200 - <backup-count>1</backup-count>
201 - </list>
202 -
203 - <set name="default">
204 - <backup-count>1</backup-count>
205 - </set>
206 -
207 - <jobtracker name="default">
208 - <max-thread-size>0</max-thread-size>
209 - <!-- Queue size 0 means number of partitions * 2 -->
210 - <queue-size>0</queue-size>
211 - <retry-count>0</retry-count>
212 - <chunk-size>1000</chunk-size>
213 - <communicate-stats>true</communicate-stats>
214 - <topology-changed-strategy>CANCEL_RUNNING_OPERATION</topology-changed-strategy>
215 - </jobtracker>
216 -
217 - <semaphore name="default">
218 - <initial-permits>0</initial-permits>
219 - <backup-count>1</backup-count>
220 - <async-backup-count>0</async-backup-count>
221 - </semaphore>
222 -
223 - <serialization>
224 - <portable-version>0</portable-version>
225 - </serialization>
226 -
227 - <services enable-defaults="true"/>
228 -
229 -</hazelcast>
...@@ -26,11 +26,6 @@ echo "]}" >> $CDEF_FILE ...@@ -26,11 +26,6 @@ echo "]}" >> $CDEF_FILE
26 scp -q $CDEF_FILE $remote:$ONOS_INSTALL_DIR/config/cluster.json 26 scp -q $CDEF_FILE $remote:$ONOS_INSTALL_DIR/config/cluster.json
27 27
28 ssh $remote " 28 ssh $remote "
29 - sudo perl -pi.bak -e \"s/ <interface>.*</ <interface>${ONOS_NIC:-192.168.56.*}</g\" \
30 - $ONOS_INSTALL_DIR/$KARAF_DIST/etc/hazelcast.xml
31 - sudo perl -pi -e \"s/ <name>onos</ <name>${ONOS_CELL:-onos}</g\" \
32 - $ONOS_INSTALL_DIR/$KARAF_DIST/etc/hazelcast.xml
33 -
34 echo \"onos.ip = \$(sudo ifconfig | grep $ONOS_NIC | cut -d: -f2 | cut -d\\ -f1)\" \ 29 echo \"onos.ip = \$(sudo ifconfig | grep $ONOS_NIC | cut -d: -f2 | cut -d\\ -f1)\" \
35 >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/system.properties 30 >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/system.properties
36 31
...@@ -38,10 +33,6 @@ ssh $remote " ...@@ -38,10 +33,6 @@ ssh $remote "
38 echo "log4j.logger.net.kuujo.copycat= INFO" \ 33 echo "log4j.logger.net.kuujo.copycat= INFO" \
39 >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/org.ops4j.pax.logging.cfg 34 >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/org.ops4j.pax.logging.cfg
40 35
41 - # Suppress Hazelcast multicast joiner warning
42 - echo "log4j.logger.com.hazelcast.cluster.impl.MulticastService= ERROR" \
43 - >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/org.ops4j.pax.logging.cfg
44 -
45 # Patch the Apache Karaf distribution file to load ONOS boot features 36 # Patch the Apache Karaf distribution file to load ONOS boot features
46 perl -pi.old -e \"s|^(featuresBoot=.*,management)(,webconsole,.*)|\1,$ONOS_BOOT_FEATURES|\" \ 37 perl -pi.old -e \"s|^(featuresBoot=.*,management)(,webconsole,.*)|\1,$ONOS_BOOT_FEATURES|\" \
47 $ONOS_INSTALL_DIR/$KARAF_DIST/etc/org.apache.karaf.features.cfg 38 $ONOS_INSTALL_DIR/$KARAF_DIST/etc/org.apache.karaf.features.cfg
......