Committed by Ray Milkey
Added bootstrap code to auto-generate cluster.json, tablets.json and hazelcast.xml using the site-local address.
Change-Id: I3210aadc63403022b4aac3bc3591736801240b50
Showing 6 changed files with 337 additions and 19 deletions
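Note: the bootstrap below writes a single-node default when no cluster.json exists, using the detected site-local address. Assuming the JSON field names mirror the NodeInfo accessors (getId, getIp, getTcpPort) and the ClusterDefinition.from(nodes, ipPrefix) arguments used in the diff, and a purely hypothetical address of 192.168.1.10, the generated cluster.json would look roughly like:

    {
        "nodes": [
            { "id": "192.168.1.10", "ip": "192.168.1.10", "tcpPort": 9876 }
        ],
        "ipPrefix": "192.168.1.*"
    }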
@@ -71,15 +71,15 @@ public class ApplicationManager implements ApplicationService, ApplicationAdminService
| 71 | 71 | ||
| 72 | @Activate | 72 | @Activate |
| 73 | public void activate() { | 73 | public void activate() { |
| 74 | - store.setDelegate(delegate); | ||
| 75 | eventDispatcher.addSink(ApplicationEvent.class, listenerRegistry); | 74 | eventDispatcher.addSink(ApplicationEvent.class, listenerRegistry); |
| 75 | + store.setDelegate(delegate); | ||
| 76 | log.info("Started"); | 76 | log.info("Started"); |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | @Deactivate | 79 | @Deactivate |
| 80 | public void deactivate() { | 80 | public void deactivate() { |
| 81 | - store.unsetDelegate(delegate); | ||
| 82 | eventDispatcher.removeSink(ApplicationEvent.class); | 81 | eventDispatcher.removeSink(ApplicationEvent.class); |
| 82 | + store.unsetDelegate(delegate); | ||
| 83 | log.info("Stopped"); | 83 | log.info("Stopped"); |
| 84 | } | 84 | } |
| 85 | 85 | ... | ... |
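Reviewer note on the reordering above: the event sink is now registered before the store delegate is set (and the delegate is unset before the sink is removed), presumably so that any ApplicationEvent the store pushes through its delegate always finds a registered sink; the old order left a window where delegate callbacks could fire with no sink attached.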
@@ -27,6 +27,8 @@ import org.onosproject.core.CoreService;
| 27 | import org.onosproject.core.IdBlockStore; | 27 | import org.onosproject.core.IdBlockStore; |
| 28 | import org.onosproject.core.IdGenerator; | 28 | import org.onosproject.core.IdGenerator; |
| 29 | import org.onosproject.core.Version; | 29 | import org.onosproject.core.Version; |
| 30 | +import org.slf4j.Logger; | ||
| 31 | +import org.slf4j.LoggerFactory; | ||
| 30 | 32 | ||
| 31 | import java.io.File; | 33 | import java.io.File; |
| 32 | import java.util.List; | 34 | import java.util.List; |
@@ -41,6 +43,8 @@ import static com.google.common.base.Preconditions.checkNotNull;
| 41 | @Service | 43 | @Service |
| 42 | public class CoreManager implements CoreService { | 44 | public class CoreManager implements CoreService { |
| 43 | 45 | ||
| 46 | + private final Logger log = LoggerFactory.getLogger(getClass()); | ||
| 47 | + | ||
| 44 | private static final File VERSION_FILE = new File("../VERSION"); | 48 | private static final File VERSION_FILE = new File("../VERSION"); |
| 45 | private static Version version = Version.version("1.2.0-SNAPSHOT"); | 49 | private static Version version = Version.version("1.2.0-SNAPSHOT"); |
| 46 | 50 | ... | ... |
@@ -57,6 +57,8 @@ import java.util.stream.Collectors;
| 57 | 57 | ||
| 58 | import static com.google.common.base.Preconditions.checkArgument; | 58 | import static com.google.common.base.Preconditions.checkArgument; |
| 59 | import static com.google.common.base.Preconditions.checkNotNull; | 59 | import static com.google.common.base.Preconditions.checkNotNull; |
| 60 | +import static java.net.NetworkInterface.getNetworkInterfaces; | ||
| 61 | +import static java.util.Collections.list; | ||
| 60 | import static org.onlab.util.Tools.groupedThreads; | 62 | import static org.onlab.util.Tools.groupedThreads; |
| 61 | import static org.slf4j.LoggerFactory.getLogger; | 63 | import static org.slf4j.LoggerFactory.getLogger; |
| 62 | 64 | ||
@@ -70,7 +72,7 @@ public class DistributedClusterStore
| 70 | extends AbstractStore<ClusterEvent, ClusterStoreDelegate> | 72 | extends AbstractStore<ClusterEvent, ClusterStoreDelegate> |
| 71 | implements ClusterStore { | 73 | implements ClusterStore { |
| 72 | 74 | ||
| 73 | - private final Logger log = getLogger(DistributedClusterStore.class); | 75 | + private static final Logger log = getLogger(DistributedClusterStore.class); |
| 74 | 76 | ||
| 75 | // TODO: make these configurable. | 77 | // TODO: make these configurable. |
| 76 | private static final int HEARTBEAT_FD_PORT = 2419; | 78 | private static final int HEARTBEAT_FD_PORT = 2419; |
@@ -81,14 +83,16 @@ public class DistributedClusterStore
| 81 | private static final String CLUSTER_DEFINITION_FILE = "cluster.json"; | 83 | private static final String CLUSTER_DEFINITION_FILE = "cluster.json"; |
| 82 | private static final String HEARTBEAT_MESSAGE = "onos-cluster-heartbeat"; | 84 | private static final String HEARTBEAT_MESSAGE = "onos-cluster-heartbeat"; |
| 83 | 85 | ||
| 86 | + public static final int DEFAULT_PORT = 9876; | ||
| 87 | + | ||
| 84 | private static final KryoSerializer SERIALIZER = new KryoSerializer() { | 88 | private static final KryoSerializer SERIALIZER = new KryoSerializer() { |
| 85 | @Override | 89 | @Override |
| 86 | protected void setupKryoPool() { | 90 | protected void setupKryoPool() { |
| 87 | serializerPool = KryoNamespace.newBuilder() | 91 | serializerPool = KryoNamespace.newBuilder() |
| 88 | - .register(KryoNamespaces.API) | 92 | + .register(KryoNamespaces.API) |
| 89 | - .register(HeartbeatMessage.class) | 93 | + .register(HeartbeatMessage.class) |
| 90 | - .build() | 94 | + .build() |
| 91 | - .populate(1); | 95 | + .populate(1); |
| 92 | } | 96 | } |
| 93 | }; | 97 | }; |
| 94 | 98 | ||
@@ -112,18 +116,22 @@ public class DistributedClusterStore
| 112 | 116 | ||
| 113 | @Activate | 117 | @Activate |
| 114 | public void activate() { | 118 | public void activate() { |
| 115 | - File clusterDefinitionFile = new File(CONFIG_DIR, | 119 | + File clusterDefinitionFile = new File(CONFIG_DIR, CLUSTER_DEFINITION_FILE); |
| 116 | - CLUSTER_DEFINITION_FILE); | 120 | + ClusterDefinitionStore clusterDefinitionStore = |
| 121 | + new ClusterDefinitionStore(clusterDefinitionFile.getPath()); | ||
| 122 | + | ||
| 123 | + if (!clusterDefinitionFile.exists()) { | ||
| 124 | + createDefaultClusterDefinition(clusterDefinitionStore); | ||
| 125 | + } | ||
| 117 | 126 | ||
| 118 | try { | 127 | try { |
| 119 | - clusterDefinition = new ClusterDefinitionStore( | 128 | + clusterDefinition = clusterDefinitionStore.read(); |
| 120 | - clusterDefinitionFile.getPath()).read(); | ||
| 121 | seedNodes = ImmutableSet | 129 | seedNodes = ImmutableSet |
| 122 | .copyOf(clusterDefinition.getNodes()) | 130 | .copyOf(clusterDefinition.getNodes()) |
| 123 | .stream() | 131 | .stream() |
| 124 | - .map(nodeInfo -> new DefaultControllerNode(new NodeId( | 132 | + .map(nodeInfo -> new DefaultControllerNode(new NodeId(nodeInfo.getId()), |
| 125 | - nodeInfo.getId()), IpAddress.valueOf(nodeInfo | 133 | + IpAddress.valueOf(nodeInfo.getIp()), |
| 126 | - .getIp()), nodeInfo.getTcpPort())) | 134 | + nodeInfo.getTcpPort())) |
| 127 | .collect(Collectors.toSet()); | 135 | .collect(Collectors.toSet()); |
| 128 | } catch (IOException e) { | 136 | } catch (IOException e) { |
| 129 | throw new IllegalStateException( | 137 | throw new IllegalStateException( |
@@ -148,16 +156,51 @@ public class DistributedClusterStore
| 148 | + " failure detector communication channel.", e); | 156 | + " failure detector communication channel.", e); |
| 149 | } | 157 | } |
| 150 | messagingService.registerHandler(HEARTBEAT_MESSAGE, | 158 | messagingService.registerHandler(HEARTBEAT_MESSAGE, |
| 151 | - new HeartbeatMessageHandler(), heartBeatMessageHandler); | 159 | + new HeartbeatMessageHandler(), heartBeatMessageHandler); |
| 152 | 160 | ||
| 153 | failureDetector = new PhiAccrualFailureDetector(); | 161 | failureDetector = new PhiAccrualFailureDetector(); |
| 154 | 162 | ||
| 155 | heartBeatSender.scheduleWithFixedDelay(this::heartbeat, 0, | 163 | heartBeatSender.scheduleWithFixedDelay(this::heartbeat, 0, |
| 156 | - HEARTBEAT_INTERVAL_MS, TimeUnit.MILLISECONDS); | 164 | + HEARTBEAT_INTERVAL_MS, TimeUnit.MILLISECONDS); |
| 157 | 165 | ||
| 158 | log.info("Started"); | 166 | log.info("Started"); |
| 159 | } | 167 | } |
| 160 | 168 | ||
| 169 | + private void createDefaultClusterDefinition(ClusterDefinitionStore store) { | ||
| 170 | + // Assumes IPv4 is returned. | ||
| 171 | + String ip = DistributedClusterStore.getSiteLocalAddress(); | ||
| 172 | + String ipPrefix = ip.replaceFirst("\\.[0-9]*$", ".*"); | ||
| 173 | + NodeInfo node = NodeInfo.from(ip, ip, DEFAULT_PORT); | ||
| 174 | + try { | ||
| 175 | + store.write(ClusterDefinition.from(ImmutableSet.of(node), ipPrefix)); | ||
| 176 | + } catch (IOException e) { | ||
| 177 | + log.warn("Unable to write default cluster definition", e); | ||
| 178 | + } | ||
| 179 | + } | ||
| 180 | + | ||
| 181 | + /** | ||
| 182 | + * Returns the site local address if one can be found, loopback otherwise. | ||
| 183 | + * | ||
| 184 | + * @return site-local address in string form | ||
| 185 | + */ | ||
| 186 | + public static String getSiteLocalAddress() { | ||
| 187 | + try { | ||
| 188 | + for (NetworkInterface nif : list(getNetworkInterfaces())) { | ||
| 189 | + for (InetAddress address : list(nif.getInetAddresses())) { | ||
| 190 | + if (address.getAddress()[0] == (byte) 0xC0) { | ||
| 191 | + return address.toString().substring(1); | ||
| 192 | + } | ||
| 193 | + } | ||
| 194 | + } | ||
| 195 | + return InetAddress.getLoopbackAddress().toString().substring(1); | ||
| 196 | + | ||
| 197 | + } catch (SocketException e) { | ||
| 198 | + log.error("Unable to get network interfaces", e); | ||
| 199 | + } | ||
| 200 | + | ||
| 201 | + return null; | ||
| 202 | + } | ||
| 203 | + | ||
| 161 | @Deactivate | 204 | @Deactivate |
| 162 | public void deactivate() { | 205 | public void deactivate() { |
| 163 | try { | 206 | try { |
@@ -300,7 +343,7 @@ public class DistributedClusterStore
| 300 | NetworkInterface.getNetworkInterfaces(); | 343 | NetworkInterface.getNetworkInterfaces(); |
| 301 | while (interfaces.hasMoreElements()) { | 344 | while (interfaces.hasMoreElements()) { |
| 302 | NetworkInterface iface = interfaces.nextElement(); | 345 | NetworkInterface iface = interfaces.nextElement(); |
| 303 | - Enumeration<InetAddress> inetAddresses = iface.getInetAddresses(); | 346 | + Enumeration<InetAddress> inetAddresses = iface.getInetAddresses(); |
| 304 | while (inetAddresses.hasMoreElements()) { | 347 | while (inetAddresses.hasMoreElements()) { |
| 305 | IpAddress ip = IpAddress.valueOf(inetAddresses.nextElement()); | 348 | IpAddress ip = IpAddress.valueOf(inetAddresses.nextElement()); |
| 306 | if (AddressUtil.matchInterface(ip.toString(), clusterDefinition.getIpPrefix())) { | 349 | if (AddressUtil.matchInterface(ip.toString(), clusterDefinition.getIpPrefix())) { | ... | ... |
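A side note on getSiteLocalAddress() above: the (byte) 0xC0 test matches any address whose first octet is 192, not just the RFC 1918 192.168/16 range, and the method can return null after a SocketException, which its callers would turn into a NullPointerException in replaceFirst(). A minimal alternative sketch (not part of this commit) that uses the JDK's own site-local check and always falls back to loopback:

    import java.net.InetAddress;
    import java.net.NetworkInterface;
    import java.net.SocketException;
    import java.util.Collections;

    public final class SiteLocalAddress {
        private SiteLocalAddress() {}

        // Returns an IPv4 site-local address if one is found, loopback otherwise; never null.
        public static String siteLocalOrLoopback() {
            try {
                for (NetworkInterface nif : Collections.list(NetworkInterface.getNetworkInterfaces())) {
                    for (InetAddress addr : Collections.list(nif.getInetAddresses())) {
                        // isSiteLocalAddress() covers 10/8, 172.16/12 and 192.168/16.
                        if (addr.getAddress().length == 4 && addr.isSiteLocalAddress()) {
                            return addr.getHostAddress();
                        }
                    }
                }
            } catch (SocketException e) {
                // Fall through to loopback rather than propagating or returning null.
            }
            return InetAddress.getLoopbackAddress().getHostAddress();
        }
    }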
@@ -16,6 +16,8 @@
| 16 | 16 | ||
| 17 | package org.onosproject.store.consistent.impl; | 17 | package org.onosproject.store.consistent.impl; |
| 18 | 18 | ||
| 19 | +import com.google.common.collect.ImmutableMap; | ||
| 20 | +import com.google.common.collect.ImmutableSet; | ||
| 19 | import com.google.common.collect.Lists; | 21 | import com.google.common.collect.Lists; |
| 20 | import com.google.common.collect.Sets; | 22 | import com.google.common.collect.Sets; |
| 21 | 23 | ||
@@ -40,6 +42,7 @@ import org.apache.felix.scr.annotations.Reference;
| 40 | import org.apache.felix.scr.annotations.ReferenceCardinality; | 42 | import org.apache.felix.scr.annotations.ReferenceCardinality; |
| 41 | import org.apache.felix.scr.annotations.Service; | 43 | import org.apache.felix.scr.annotations.Service; |
| 42 | import org.onosproject.cluster.ClusterService; | 44 | import org.onosproject.cluster.ClusterService; |
| 45 | +import org.onosproject.store.cluster.impl.DistributedClusterStore; | ||
| 43 | import org.onosproject.store.cluster.impl.NodeInfo; | 46 | import org.onosproject.store.cluster.impl.NodeInfo; |
| 44 | import org.onosproject.store.cluster.messaging.ClusterCommunicationService; | 47 | import org.onosproject.store.cluster.messaging.ClusterCommunicationService; |
| 45 | import org.onosproject.store.ecmap.EventuallyConsistentMapBuilderImpl; | 48 | import org.onosproject.store.ecmap.EventuallyConsistentMapBuilderImpl; |
@@ -102,8 +105,11 @@ public class DatabaseManager implements StorageService, StorageAdminService {
| 102 | 105 | ||
| 103 | Map<String, Set<NodeInfo>> partitionMap; | 106 | Map<String, Set<NodeInfo>> partitionMap; |
| 104 | try { | 107 | try { |
| 105 | - DatabaseDefinitionStore databaseDef = new DatabaseDefinitionStore(file); | 108 | + DatabaseDefinitionStore databaseDefStore = new DatabaseDefinitionStore(file); |
| 106 | - partitionMap = databaseDef.read().getPartitions(); | 109 | + if (!file.exists()) { |
| 110 | + createDefaultDatabaseDefinition(databaseDefStore); | ||
| 111 | + } | ||
| 112 | + partitionMap = databaseDefStore.read().getPartitions(); | ||
| 107 | } catch (IOException e) { | 113 | } catch (IOException e) { |
| 108 | throw new IllegalStateException("Failed to load database config", e); | 114 | throw new IllegalStateException("Failed to load database config", e); |
| 109 | } | 115 | } |
@@ -180,6 +186,18 @@ public class DatabaseManager implements StorageService, StorageAdminService {
| 180 | log.info("Started"); | 186 | log.info("Started"); |
| 181 | } | 187 | } |
| 182 | 188 | ||
| 189 | + private void createDefaultDatabaseDefinition(DatabaseDefinitionStore store) { | ||
| 190 | + // Assumes IPv4 is returned. | ||
| 191 | + String ip = DistributedClusterStore.getSiteLocalAddress(); | ||
| 192 | + NodeInfo node = NodeInfo.from(ip, ip, DistributedClusterStore.DEFAULT_PORT); | ||
| 193 | + try { | ||
| 194 | + store.write(DatabaseDefinition.from(ImmutableMap.of("p1", ImmutableSet.of(node)), | ||
| 195 | + ImmutableSet.of(node))); | ||
| 196 | + } catch (IOException e) { | ||
| 197 | + log.warn("Unable to write default cluster definition", e); | ||
| 198 | + } | ||
| 199 | + } | ||
| 200 | + | ||
| 183 | @Deactivate | 201 | @Deactivate |
| 184 | public void deactivate() { | 202 | public void deactivate() { |
| 185 | CompletableFuture.allOf(inMemoryDatabase.close(), partitionedDatabase.close()) | 203 | CompletableFuture.allOf(inMemoryDatabase.close(), partitionedDatabase.close()) | ... | ... |
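For reference, the default database definition above maps a single partition "p1" onto the lone local node. Assuming the serialized field names follow the DatabaseDefinition.from(partitions, nodes) arguments, the auto-generated tablets.json mentioned in the commit message would look roughly like (same hypothetical address):

    {
        "partitions": {
            "p1": [ { "id": "192.168.1.10", "ip": "192.168.1.10", "tcpPort": 9876 } ]
        },
        "nodes": [
            { "id": "192.168.1.10", "ip": "192.168.1.10", "tcpPort": 9876 }
        ]
    }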
@@ -15,6 +15,8 @@
| 15 | */ | 15 | */ |
| 16 | package org.onosproject.store.hz; | 16 | package org.onosproject.store.hz; |
| 17 | 17 | ||
| 18 | +import com.google.common.io.ByteStreams; | ||
| 19 | +import com.google.common.io.Files; | ||
| 18 | import com.hazelcast.config.Config; | 20 | import com.hazelcast.config.Config; |
| 19 | import com.hazelcast.config.FileSystemXmlConfig; | 21 | import com.hazelcast.config.FileSystemXmlConfig; |
| 20 | import com.hazelcast.core.Hazelcast; | 22 | import com.hazelcast.core.Hazelcast; |
@@ -24,10 +26,14 @@ import org.apache.felix.scr.annotations.Activate;
| 24 | import org.apache.felix.scr.annotations.Component; | 26 | import org.apache.felix.scr.annotations.Component; |
| 25 | import org.apache.felix.scr.annotations.Deactivate; | 27 | import org.apache.felix.scr.annotations.Deactivate; |
| 26 | import org.apache.felix.scr.annotations.Service; | 28 | import org.apache.felix.scr.annotations.Service; |
| 29 | +import org.onosproject.store.cluster.impl.DistributedClusterStore; | ||
| 27 | import org.slf4j.Logger; | 30 | import org.slf4j.Logger; |
| 28 | import org.slf4j.LoggerFactory; | 31 | import org.slf4j.LoggerFactory; |
| 29 | 32 | ||
| 33 | +import java.io.File; | ||
| 30 | import java.io.FileNotFoundException; | 34 | import java.io.FileNotFoundException; |
| 35 | +import java.io.IOException; | ||
| 36 | +import java.io.InputStream; | ||
| 31 | 37 | ||
| 32 | /** | 38 | /** |
| 33 | * Auxiliary bootstrap of distributed store. | 39 | * Auxiliary bootstrap of distributed store. |
@@ -45,6 +51,11 @@ public class StoreManager implements StoreService {
| 45 | @Activate | 51 | @Activate |
| 46 | public void activate() { | 52 | public void activate() { |
| 47 | try { | 53 | try { |
| 54 | + File hazelcastFile = new File(HAZELCAST_XML_FILE); | ||
| 55 | + if (!hazelcastFile.exists()) { | ||
| 56 | + createDefaultHazelcastFile(hazelcastFile); | ||
| 57 | + } | ||
| 58 | + | ||
| 48 | Config config = new FileSystemXmlConfig(HAZELCAST_XML_FILE); | 59 | Config config = new FileSystemXmlConfig(HAZELCAST_XML_FILE); |
| 49 | 60 | ||
| 50 | instance = Hazelcast.newHazelcastInstance(config); | 61 | instance = Hazelcast.newHazelcastInstance(config); |
@@ -54,6 +65,20 @@ public class StoreManager implements StoreService {
| 54 | } | 65 | } |
| 55 | } | 66 | } |
| 56 | 67 | ||
| 68 | + private void createDefaultHazelcastFile(File hazelcastFile) { | ||
| 69 | + String ip = DistributedClusterStore.getSiteLocalAddress(); | ||
| 70 | + String ipPrefix = ip.replaceFirst("\\.[0-9]*$", ".*"); | ||
| 71 | + InputStream his = getClass().getResourceAsStream("/hazelcast.xml"); | ||
| 72 | + try { | ||
| 73 | + String hzCfg = new String(ByteStreams.toByteArray(his), "UTF-8"); | ||
| 74 | + hzCfg = hzCfg.replaceFirst("@NAME", ip); | ||
| 75 | + hzCfg = hzCfg.replaceFirst("@PREFIX", ipPrefix); | ||
| 76 | + Files.write(hzCfg.getBytes("UTF-8"), hazelcastFile); | ||
| 77 | + } catch (IOException e) { | ||
| 78 | + log.error("Unable to write default hazelcast file", e); | ||
| 79 | + } | ||
| 80 | + } | ||
| 81 | + | ||
| 57 | @Deactivate | 82 | @Deactivate |
| 58 | public void deactivate() { | 83 | public void deactivate() { |
| 59 | instance.shutdown(); | 84 | instance.shutdown(); | ... | ... |
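createDefaultHazelcastFile() stamps the detected address into the @NAME and @PREFIX placeholders of the bundled template (the new hazelcast.xml below). With the hypothetical address 192.168.1.10, the written file's group name and interface filter would come out as:

    <group>
        <name>192.168.1.10</name>
        <password>rocks</password>
    </group>
    ...
    <interfaces enabled="true">
        <interface>192.168.1.*</interface>
    </interfaces>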
| 1 | +<?xml version="1.0" encoding="UTF-8"?> | ||
| 2 | + | ||
| 3 | +<!-- | ||
| 4 | + ~ Copyright 2015 Open Networking Laboratory | ||
| 5 | + ~ | ||
| 6 | + ~ Licensed under the Apache License, Version 2.0 (the "License"); | ||
| 7 | + ~ you may not use this file except in compliance with the License. | ||
| 8 | + ~ You may obtain a copy of the License at | ||
| 9 | + ~ | ||
| 10 | + ~ http://www.apache.org/licenses/LICENSE-2.0 | ||
| 11 | + ~ | ||
| 12 | + ~ Unless required by applicable law or agreed to in writing, software | ||
| 13 | + ~ distributed under the License is distributed on an "AS IS" BASIS, | ||
| 14 | + ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| 15 | + ~ See the License for the specific language governing permissions and | ||
| 16 | + ~ limitations under the License. | ||
| 17 | + --> | ||
| 18 | + | ||
| 19 | +<!-- | ||
| 20 | + The default Hazelcast configuration. This is used when: | ||
| 21 | + | ||
| 22 | + - no hazelcast.xml is present | ||
| 23 | + | ||
| 24 | +--> | ||
| 25 | +<hazelcast xsi:schemaLocation="http://www.hazelcast.com/schema/config hazelcast-config-3.3.xsd" | ||
| 26 | + xmlns="http://www.hazelcast.com/schema/config" | ||
| 27 | + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> | ||
| 28 | + <group> | ||
| 29 | + <name>@NAME</name> | ||
| 30 | + <password>rocks</password> | ||
| 31 | + </group> | ||
| 32 | + <management-center enabled="false">http://localhost:8080/mancenter</management-center> | ||
| 33 | + <properties> | ||
| 34 | + <property name="hazelcast.max.no.heartbeat.seconds">30</property> | ||
| 35 | + <property name="hazelcast.merge.first.run.delay.seconds">30</property> | ||
| 36 | + <property name="hazelcast.merge.next.run.delay.seconds">30</property> | ||
| 37 | + </properties> | ||
| 38 | + <network> | ||
| 39 | + <port auto-increment="true" port-count="100">5701</port> | ||
| 40 | + <outbound-ports> | ||
| 41 | + <!-- | ||
| 42 | + Allowed port range when connecting to other nodes. | ||
| 43 | + 0 or * means use system provided port. | ||
| 44 | + --> | ||
| 45 | + <ports>0</ports> | ||
| 46 | + </outbound-ports> | ||
| 47 | + <join> | ||
| 48 | + <multicast enabled="true"> | ||
| 49 | + <multicast-group>224.2.2.3</multicast-group> | ||
| 50 | + <multicast-port>54327</multicast-port> | ||
| 51 | + </multicast> | ||
| 52 | + <tcp-ip enabled="false"> | ||
| 53 | + <interface>127.0.0.1</interface> | ||
| 54 | + </tcp-ip> | ||
| 55 | + </join> | ||
| 56 | + <interfaces enabled="true"> | ||
| 57 | + <interface>@PREFIX</interface> | ||
| 58 | + </interfaces> | ||
| 59 | + <ssl enabled="false"/> | ||
| 60 | + <socket-interceptor enabled="false"/> | ||
| 61 | + <symmetric-encryption enabled="false"> | ||
| 62 | + <!-- | ||
| 63 | + encryption algorithm such as | ||
| 64 | + DES/ECB/PKCS5Padding, | ||
| 65 | + PBEWithMD5AndDES, | ||
| 66 | + AES/CBC/PKCS5Padding, | ||
| 67 | + Blowfish, | ||
| 68 | + DESede | ||
| 69 | + --> | ||
| 70 | + <algorithm>PBEWithMD5AndDES</algorithm> | ||
| 71 | + <!-- salt value to use when generating the secret key --> | ||
| 72 | + <salt>thesalt</salt> | ||
| 73 | + <!-- pass phrase to use when generating the secret key --> | ||
| 74 | + <password>thepass</password> | ||
| 75 | + <!-- iteration count to use when generating the secret key --> | ||
| 76 | + <iteration-count>19</iteration-count> | ||
| 77 | + </symmetric-encryption> | ||
| 78 | + </network> | ||
| 79 | + <partition-group enabled="false"/> | ||
| 80 | + <executor-service name="default"> | ||
| 81 | + <pool-size>16</pool-size> | ||
| 82 | + <!--Queue capacity. 0 means Integer.MAX_VALUE.--> | ||
| 83 | + <queue-capacity>0</queue-capacity> | ||
| 84 | + </executor-service> | ||
| 85 | + <queue name="default"> | ||
| 86 | + <!-- | ||
| 87 | + Maximum size of the queue. When a JVM's local queue size reaches the maximum, | ||
| 88 | + all put/offer operations will get blocked until the queue size | ||
| 89 | + of the JVM goes down below the maximum. | ||
| 90 | + Any integer between 0 and Integer.MAX_VALUE. 0 means | ||
| 91 | + Integer.MAX_VALUE. Default is 0. | ||
| 92 | + --> | ||
| 93 | + <max-size>0</max-size> | ||
| 94 | + <!-- | ||
| 95 | + Number of backups. If 1 is set as the backup-count for example, | ||
| 96 | + then all entries of the map will be copied to another JVM for | ||
| 97 | + fail-safety. 0 means no backup. | ||
| 98 | + --> | ||
| 99 | + <backup-count>1</backup-count> | ||
| 100 | + | ||
| 101 | + <!-- | ||
| 102 | + Number of async backups. 0 means no backup. | ||
| 103 | + --> | ||
| 104 | + <async-backup-count>0</async-backup-count> | ||
| 105 | + | ||
| 106 | + <empty-queue-ttl>-1</empty-queue-ttl> | ||
| 107 | + </queue> | ||
| 108 | + <map name="default"> | ||
| 109 | + <!-- | ||
| 110 | + Data type that will be used for storing recordMap. | ||
| 111 | + Possible values: | ||
| 112 | + BINARY (default): keys and values will be stored as binary data | ||
| 113 | + OBJECT : values will be stored in their object forms | ||
| 114 | + OFFHEAP : values will be stored in non-heap region of JVM | ||
| 115 | + --> | ||
| 116 | + <in-memory-format>BINARY</in-memory-format> | ||
| 117 | + | ||
| 118 | + <!-- | ||
| 119 | + Number of backups. If 1 is set as the backup-count for example, | ||
| 120 | + then all entries of the map will be copied to another JVM for | ||
| 121 | + fail-safety. 0 means no backup. | ||
| 122 | + --> | ||
| 123 | + <backup-count>1</backup-count> | ||
| 124 | + <!-- | ||
| 125 | + Number of async backups. 0 means no backup. | ||
| 126 | + --> | ||
| 127 | + <async-backup-count>0</async-backup-count> | ||
| 128 | + <!-- | ||
| 129 | + Maximum number of seconds for each entry to stay in the map. Entries that are | ||
| 130 | + older than <time-to-live-seconds> and not updated for <time-to-live-seconds> | ||
| 131 | + will get automatically evicted from the map. | ||
| 132 | + Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0. | ||
| 133 | + --> | ||
| 134 | + <time-to-live-seconds>0</time-to-live-seconds> | ||
| 135 | + <!-- | ||
| 136 | + Maximum number of seconds for each entry to stay idle in the map. Entries that are | ||
| 137 | + idle(not touched) for more than <max-idle-seconds> will get | ||
| 138 | + automatically evicted from the map. Entry is touched if get, put or containsKey is called. | ||
| 139 | + Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0. | ||
| 140 | + --> | ||
| 141 | + <max-idle-seconds>0</max-idle-seconds> | ||
| 142 | + <!-- | ||
| 143 | + Valid values are: | ||
| 144 | + NONE (no eviction), | ||
| 145 | + LRU (Least Recently Used), | ||
| 146 | + LFU (Least Frequently Used). | ||
| 147 | + NONE is the default. | ||
| 148 | + --> | ||
| 149 | + <eviction-policy>NONE</eviction-policy> | ||
| 150 | + <!-- | ||
| 151 | + Maximum size of the map. When max size is reached, | ||
| 152 | + map is evicted based on the policy defined. | ||
| 153 | + Any integer between 0 and Integer.MAX_VALUE. 0 means | ||
| 154 | + Integer.MAX_VALUE. Default is 0. | ||
| 155 | + --> | ||
| 156 | + <max-size policy="PER_NODE">0</max-size> | ||
| 157 | + <!-- | ||
| 158 | + When max. size is reached, specified percentage of | ||
| 159 | + the map will be evicted. Any integer between 0 and 100. | ||
| 160 | + If 25 is set for example, 25% of the entries will | ||
| 161 | + get evicted. | ||
| 162 | + --> | ||
| 163 | + <eviction-percentage>25</eviction-percentage> | ||
| 164 | + <!-- | ||
| 165 | + Minimum time in milliseconds which should pass before checking | ||
| 166 | + if a partition of this map is evictable or not. | ||
| 167 | + Default value is 100 millis. | ||
| 168 | + --> | ||
| 169 | + <min-eviction-check-millis>100</min-eviction-check-millis> | ||
| 170 | + <!-- | ||
| 171 | + While recovering from split-brain (network partitioning), | ||
| 172 | + map entries in the small cluster will merge into the bigger cluster | ||
| 173 | + based on the policy set here. When an entry merges into the | ||
| 174 | + cluster, there might be an existing entry with the same key already. | ||
| 175 | + Values of these entries might be different for that same key. | ||
| 176 | + Which value should be set for the key? Conflict is resolved by | ||
| 177 | + the policy set here. Default policy is PutIfAbsentMapMergePolicy | ||
| 178 | + | ||
| 179 | + There are built-in merge policies such as | ||
| 180 | + com.hazelcast.map.merge.PassThroughMergePolicy; entry will be added if there is no existing entry for the key. | ||
| 181 | + com.hazelcast.map.merge.PutIfAbsentMapMergePolicy ; entry will be added if the merging entry doesn't exist in the cluster. | ||
| 182 | + com.hazelcast.map.merge.HigherHitsMapMergePolicy ; entry with the higher hits wins. | ||
| 183 | + com.hazelcast.map.merge.LatestUpdateMapMergePolicy ; entry with the latest update wins. | ||
| 184 | + --> | ||
| 185 | + <merge-policy>com.hazelcast.map.merge.PutIfAbsentMapMergePolicy</merge-policy> | ||
| 186 | + </map> | ||
| 187 | + | ||
| 188 | + <multimap name="default"> | ||
| 189 | + <backup-count>1</backup-count> | ||
| 190 | + <value-collection-type>SET</value-collection-type> | ||
| 191 | + </multimap> | ||
| 192 | + | ||
| 193 | + <multimap name="default"> | ||
| 194 | + <backup-count>1</backup-count> | ||
| 195 | + <value-collection-type>SET</value-collection-type> | ||
| 196 | + </multimap> | ||
| 197 | + | ||
| 198 | + <list name="default"> | ||
| 199 | + <backup-count>1</backup-count> | ||
| 200 | + </list> | ||
| 201 | + | ||
| 202 | + <set name="default"> | ||
| 203 | + <backup-count>1</backup-count> | ||
| 204 | + </set> | ||
| 205 | + | ||
| 206 | + <jobtracker name="default"> | ||
| 207 | + <max-thread-size>0</max-thread-size> | ||
| 208 | + <!-- Queue size 0 means number of partitions * 2 --> | ||
| 209 | + <queue-size>0</queue-size> | ||
| 210 | + <retry-count>0</retry-count> | ||
| 211 | + <chunk-size>1000</chunk-size> | ||
| 212 | + <communicate-stats>true</communicate-stats> | ||
| 213 | + <topology-changed-strategy>CANCEL_RUNNING_OPERATION</topology-changed-strategy> | ||
| 214 | + </jobtracker> | ||
| 215 | + | ||
| 216 | + <semaphore name="default"> | ||
| 217 | + <initial-permits>0</initial-permits> | ||
| 218 | + <backup-count>1</backup-count> | ||
| 219 | + <async-backup-count>0</async-backup-count> | ||
| 220 | + </semaphore> | ||
| 221 | + | ||
| 222 | + <serialization> | ||
| 223 | + <portable-version>0</portable-version> | ||
| 224 | + </serialization> | ||
| 225 | + | ||
| 226 | + <services enable-defaults="true"/> | ||
| 227 | + | ||
| 228 | +</hazelcast> |