Madan Jampani
Committed by Jonathan Hart

WIP: Partitioned Database based on Raft.

Removed the implementation based on previous Copycat API.

Change-Id: I6b9d67e943e17095f585ae2a2cb6304c248cd686
Showing 35 changed files with 1364 additions and 3517 deletions
...@@ -39,7 +39,6 @@ import org.onosproject.store.serializers.KryoNamespaces; ...@@ -39,7 +39,6 @@ import org.onosproject.store.serializers.KryoNamespaces;
39 import org.onosproject.store.serializers.KryoSerializer; 39 import org.onosproject.store.serializers.KryoSerializer;
40 import org.onosproject.store.service.Lock; 40 import org.onosproject.store.service.Lock;
41 import org.onosproject.store.service.LockService; 41 import org.onosproject.store.service.LockService;
42 -import org.onosproject.store.service.impl.DistributedLockManager;
43 import org.slf4j.Logger; 42 import org.slf4j.Logger;
44 43
45 import java.util.Map; 44 import java.util.Map;
...@@ -62,9 +61,7 @@ public class LeadershipManager implements LeadershipService { ...@@ -62,9 +61,7 @@ public class LeadershipManager implements LeadershipService {
62 61
63 private final Logger log = getLogger(getClass()); 62 private final Logger log = getLogger(getClass());
64 63
65 - // TODO: Remove this dependency 64 + private static final int TERM_DURATION_MS = 2000;
66 - private static final int TERM_DURATION_MS =
67 - DistributedLockManager.DEAD_LOCK_TIMEOUT_MS;
68 65
69 // Time to wait before retrying leadership after 66 // Time to wait before retrying leadership after
70 // an unexpected error. 67 // an unexpected error.
......
1 +package org.onosproject.store.consistent.impl;
2 +
3 +
4 +import net.kuujo.copycat.cluster.ClusterConfig;
5 +import net.kuujo.copycat.cluster.internal.coordinator.ClusterCoordinator;
6 +import net.kuujo.copycat.cluster.internal.coordinator.CoordinatorConfig;
7 +import net.kuujo.copycat.cluster.internal.coordinator.DefaultClusterCoordinator;
8 +import net.kuujo.copycat.resource.Resource;
9 +
10 +/**
11 + * Database.
12 + */
13 +public interface Database extends DatabaseProxy<String, byte[]>, Resource<Database> {
14 +
15 + /**
16 + * Creates a new database with the default cluster configuration.<p>
17 + *
18 + * The database will be constructed with the default cluster configuration. The default cluster configuration
19 + * searches for two resources on the classpath - {@code cluster} and {cluster-defaults} - in that order. Configuration
20 + * options specified in {@code cluster.conf} will override those in {cluster-defaults.conf}.<p>
21 + *
22 + * Additionally, the database will be constructed with an database configuration that searches the classpath for
23 + * three configuration files - {@code {name}}, {@code database}, {@code database-defaults}, {@code resource}, and
24 + * {@code resource-defaults} - in that order. The first resource is a configuration resource with the same name
25 + * as the map resource. If the resource is namespaced - e.g. `databases.my-database.conf` - then resource
26 + * configurations will be loaded according to namespaces as well; for example, `databases.conf`.
27 + *
28 + * @param name The database name.
29 + * @return The database.
30 + */
31 + static Database create(String name) {
32 + return create(name, new ClusterConfig(), new DatabaseConfig());
33 + }
34 +
35 + /**
36 + * Creates a new database.<p>
37 + *
38 + * The database will be constructed with an database configuration that searches the classpath for
39 + * three configuration files - {@code {name}}, {@code database}, {@code database-defaults}, {@code resource}, and
40 + * {@code resource-defaults} - in that order. The first resource is a configuration resource with the same name
41 + * as the database resource. If the resource is namespaced - e.g. `databases.my-database.conf` - then resource
42 + * configurations will be loaded according to namespaces as well; for example, `databases.conf`.
43 + *
44 + * @param name The database name.
45 + * @param cluster The cluster configuration.
46 + * @return The database.
47 + */
48 + static Database create(String name, ClusterConfig cluster) {
49 + return create(name, cluster, new DatabaseConfig());
50 + }
51 +
52 + /**
53 + * Creates a new database.
54 + *
55 + * @param name The database name.
56 + * @param cluster The cluster configuration.
57 + * @param config The database configuration.
58 +
59 + * @return The database.
60 + */
61 + static Database create(String name, ClusterConfig cluster, DatabaseConfig config) {
62 + ClusterCoordinator coordinator =
63 + new DefaultClusterCoordinator(new CoordinatorConfig().withName(name).withClusterConfig(cluster));
64 + return coordinator.<Database>getResource(name, config.resolve(cluster))
65 + .addStartupTask(() -> coordinator.open().thenApply(v -> null))
66 + .addShutdownTask(coordinator::close);
67 + }
68 +
69 +}
1 +package org.onosproject.store.consistent.impl;
2 +
3 +import com.typesafe.config.ConfigValueFactory;
4 +import net.kuujo.copycat.cluster.ClusterConfig;
5 +import net.kuujo.copycat.cluster.internal.coordinator.CoordinatedResourceConfig;
6 +import net.kuujo.copycat.protocol.Consistency;
7 +import net.kuujo.copycat.resource.ResourceConfig;
8 +import net.kuujo.copycat.state.StateLogConfig;
9 +import net.kuujo.copycat.util.internal.Assert;
10 +
11 +import java.util.Map;
12 +
13 +/**
14 + * Database configuration.
15 + *
16 + */
17 +public class DatabaseConfig extends ResourceConfig<DatabaseConfig> {
18 + private static final String DATABASE_CONSISTENCY = "consistency";
19 +
20 + private static final String DEFAULT_CONFIGURATION = "database-defaults";
21 + private static final String CONFIGURATION = "database";
22 +
23 + public DatabaseConfig() {
24 + super(CONFIGURATION, DEFAULT_CONFIGURATION);
25 + }
26 +
27 + public DatabaseConfig(Map<String, Object> config) {
28 + super(config, CONFIGURATION, DEFAULT_CONFIGURATION);
29 + }
30 +
31 + public DatabaseConfig(String resource) {
32 + super(resource, CONFIGURATION, DEFAULT_CONFIGURATION);
33 + }
34 +
35 + protected DatabaseConfig(DatabaseConfig config) {
36 + super(config);
37 + }
38 +
39 + @Override
40 + public DatabaseConfig copy() {
41 + return new DatabaseConfig(this);
42 + }
43 +
44 + /**
45 + * Sets the database read consistency.
46 + *
47 + * @param consistency The database read consistency.
48 + * @throws java.lang.NullPointerException If the consistency is {@code null}
49 + */
50 + public void setConsistency(String consistency) {
51 + this.config = config.withValue(DATABASE_CONSISTENCY,
52 + ConfigValueFactory.fromAnyRef(
53 + Consistency.parse(Assert.isNotNull(consistency, "consistency")).toString()));
54 + }
55 +
56 + /**
57 + * Sets the database read consistency.
58 + *
59 + * @param consistency The database read consistency.
60 + * @throws java.lang.NullPointerException If the consistency is {@code null}
61 + */
62 + public void setConsistency(Consistency consistency) {
63 + this.config = config.withValue(DATABASE_CONSISTENCY,
64 + ConfigValueFactory.fromAnyRef(
65 + Assert.isNotNull(consistency, "consistency").toString()));
66 + }
67 +
68 + /**
69 + * Returns the database read consistency.
70 + *
71 + * @return The database read consistency.
72 + */
73 + public Consistency getConsistency() {
74 + return Consistency.parse(config.getString(DATABASE_CONSISTENCY));
75 + }
76 +
77 + /**
78 + * Sets the database read consistency, returning the configuration for method chaining.
79 + *
80 + * @param consistency The database read consistency.
81 + * @return The database configuration.
82 + * @throws java.lang.NullPointerException If the consistency is {@code null}
83 + */
84 + public DatabaseConfig withConsistency(String consistency) {
85 + setConsistency(consistency);
86 + return this;
87 + }
88 +
89 + /**
90 + * Sets the database read consistency, returning the configuration for method chaining.
91 + *
92 + * @param consistency The database read consistency.
93 + * @return The database configuration.
94 + * @throws java.lang.NullPointerException If the consistency is {@code null}
95 + */
96 + public DatabaseConfig withConsistency(Consistency consistency) {
97 + setConsistency(consistency);
98 + return this;
99 + }
100 +
101 + @Override
102 + public CoordinatedResourceConfig resolve(ClusterConfig cluster) {
103 + return new StateLogConfig(toMap())
104 + .resolve(cluster)
105 + .withResourceType(DefaultDatabase.class);
106 + }
107 +
108 +}
1 +package org.onosproject.store.consistent.impl;
2 +
3 +import java.util.Collection;
4 +import java.util.List;
5 +import java.util.Map;
6 +import java.util.Set;
7 +import java.util.concurrent.CompletableFuture;
8 +
9 +/**
10 + * Database proxy.
11 + */
12 +public interface DatabaseProxy<K, V> {
13 +
14 + /**
15 + * Gets the table size.
16 + *
17 + * @param tableName table name
18 + * @return A completable future to be completed with the result once complete.
19 + */
20 + CompletableFuture<Integer> size(String tableName);
21 +
22 + /**
23 + * Checks whether the table is empty.
24 + *
25 + * @param tableName table name
26 + * @return A completable future to be completed with the result once complete.
27 + */
28 + CompletableFuture<Boolean> isEmpty(String tableName);
29 +
30 + /**
31 + * Checks whether the table contains a key.
32 + *
33 + * @param tableName table name
34 + * @param key The key to check.
35 + * @return A completable future to be completed with the result once complete.
36 + */
37 + CompletableFuture<Boolean> containsKey(String tableName, K key);
38 +
39 + /**
40 + * Checks whether the table contains a value.
41 + *
42 + * @param tableName table name
43 + * @param value The value to check.
44 + * @return A completable future to be completed with the result once complete.
45 + */
46 + CompletableFuture<Boolean> containsValue(String tableName, V value);
47 +
48 + /**
49 + * Gets a value from the table.
50 + *
51 + * @param tableName table name
52 + * @param key The key to get.
53 + * @return A completable future to be completed with the result once complete.
54 + */
55 + CompletableFuture<Versioned<V>> get(String tableName, K key);
56 +
57 + /**
58 + * Puts a value in the table.
59 + *
60 + * @param tableName table name
61 + * @param key The key to set.
62 + * @param value The value to set.
63 + * @return A completable future to be completed with the result once complete.
64 + */
65 + CompletableFuture<Versioned<V>> put(String tableName, K key, V value);
66 +
67 + /**
68 + * Removes a value from the table.
69 + *
70 + * @param tableName table name
71 + * @param key The key to remove.
72 + * @return A completable future to be completed with the result once complete.
73 + */
74 + CompletableFuture<Versioned<V>> remove(String tableName, K key);
75 +
76 + /**
77 + * Clears the table.
78 + *
79 + * @param tableName table name
80 + * @return A completable future to be completed with the result once complete.
81 + */
82 + CompletableFuture<Void> clear(String tableName);
83 +
84 + /**
85 + * Gets a set of keys in the table.
86 + *
87 + * @param tableName table name
88 + * @return A completable future to be completed with the result once complete.
89 + */
90 + CompletableFuture<Set<K>> keySet(String tableName);
91 +
92 + /**
93 + * Gets a collection of values in the table.
94 + *
95 + * @param tableName table name
96 + * @return A completable future to be completed with the result once complete.
97 + */
98 + CompletableFuture<Collection<Versioned<V>>> values(String tableName);
99 +
100 + /**
101 + * Gets a set of entries in the table.
102 + *
103 + * @param tableName table name
104 + * @return A completable future to be completed with the result once complete.
105 + */
106 + CompletableFuture<Set<Map.Entry<K, Versioned<V>>>> entrySet(String tableName);
107 +
108 + /**
109 + * Puts a value in the table if the given key does not exist.
110 + *
111 + * @param tableName table name
112 + * @param key The key to set.
113 + * @param value The value to set if the given key does not exist.
114 + * @return A completable future to be completed with the result once complete.
115 + */
116 + CompletableFuture<Versioned<V>> putIfAbsent(String tableName, K key, V value);
117 +
118 + /**
119 + * Removes a key and if the existing value for that key matches the specified value.
120 + *
121 + * @param tableName table name
122 + * @param key The key to remove.
123 + * @param value The value to remove.
124 + * @return A completable future to be completed with the result once complete.
125 + */
126 + CompletableFuture<Boolean> remove(String tableName, K key, V value);
127 +
128 + /**
129 + * Removes a key and if the existing version for that key matches the specified version.
130 + *
131 + * @param tableName table name
132 + * @param key The key to remove.
133 + * @param version The expected version.
134 + * @return A completable future to be completed with the result once complete.
135 + */
136 + CompletableFuture<Boolean> remove(String tableName, K key, long version);
137 +
138 + /**
139 + * Replaces the entry for the specified key only if currently mapped to the specified value.
140 + *
141 + * @param tableName table name
142 + * @param key The key to replace.
143 + * @param oldValue The value to replace.
144 + * @param newValue The value with which to replace the given key and value.
145 + * @return A completable future to be completed with the result once complete.
146 + */
147 + CompletableFuture<Boolean> replace(String tableName, K key, V oldValue, V newValue);
148 +
149 + /**
150 + * Replaces the entry for the specified key only if currently mapped to the specified version.
151 + *
152 + * @param tableName table name
153 + * @param key The key to update
154 + * @param oldVersion existing version in the map for this replace to succeed.
155 + * @param newValue The value with which to replace the given key and version.
156 + * @return A completable future to be completed with the result once complete.
157 + */
158 + CompletableFuture<Boolean> replace(String tableName, K key, long oldVersion, V newValue);
159 +
160 + /**
161 + * Perform a atomic batch update operation i.e. either all operations in batch succeed or
162 + * none do and no state changes are made.
163 + *
164 + * @param updates list of updates to apply atomically.
165 + * @return A completable future to be completed with the result once complete.
166 + */
167 + CompletableFuture<Boolean> atomicBatchUpdate(List<UpdateOperation<K, V>> updates);
168 +}
1 +package org.onosproject.store.consistent.impl;
2 +
3 +import java.util.Collection;
4 +import java.util.List;
5 +import java.util.Map.Entry;
6 +import java.util.Set;
7 +
8 +import net.kuujo.copycat.state.Command;
9 +import net.kuujo.copycat.state.Initializer;
10 +import net.kuujo.copycat.state.Query;
11 +import net.kuujo.copycat.state.StateContext;
12 +
13 +/**
14 + * Database state.
15 + *
16 + */
17 +public interface DatabaseState<K, V> {
18 +
19 + /**
20 + * Initializes the database state.
21 + *
22 + * @param context The map state context.
23 + */
24 + @Initializer
25 + public void init(StateContext<DatabaseState<K, V>> context);
26 +
27 + @Query
28 + int size(String tableName);
29 +
30 + @Query
31 + boolean isEmpty(String tableName);
32 +
33 + @Query
34 + boolean containsKey(String tableName, K key);
35 +
36 + @Query
37 + boolean containsValue(String tableName, V value);
38 +
39 + @Query
40 + Versioned<V> get(String tableName, K key);
41 +
42 + @Command
43 + Versioned<V> put(String tableName, K key, V value);
44 +
45 + @Command
46 + Versioned<V> remove(String tableName, K key);
47 +
48 + @Command
49 + void clear(String tableName);
50 +
51 + @Query
52 + Set<K> keySet(String tableName);
53 +
54 + @Query
55 + Collection<Versioned<V>> values(String tableName);
56 +
57 + @Query
58 + Set<Entry<K, Versioned<V>>> entrySet(String tableName);
59 +
60 + @Command
61 + Versioned<V> putIfAbsent(String tableName, K key, V value);
62 +
63 + @Command
64 + boolean remove(String tableName, K key, V value);
65 +
66 + @Command
67 + boolean remove(String tableName, K key, long version);
68 +
69 + @Command
70 + boolean replace(String tableName, K key, V oldValue, V newValue);
71 +
72 + @Command
73 + boolean replace(String tableName, K key, long oldVersion, V newValue);
74 +
75 + @Command
76 + boolean batchUpdate(List<UpdateOperation<K, V>> updates);
77 +}
1 +package org.onosproject.store.consistent.impl;
2 +
3 +import net.kuujo.copycat.resource.internal.ResourceContext;
4 +import net.kuujo.copycat.state.StateMachine;
5 +import net.kuujo.copycat.resource.internal.AbstractResource;
6 +import net.kuujo.copycat.state.internal.DefaultStateMachine;
7 +import net.kuujo.copycat.util.concurrent.Futures;
8 +
9 +import java.util.Collection;
10 +import java.util.List;
11 +import java.util.Map;
12 +import java.util.Set;
13 +import java.util.concurrent.CompletableFuture;
14 +import java.util.function.Supplier;
15 +
16 +/**
17 + * Default database.
18 + */
19 +public class DefaultDatabase extends AbstractResource<Database> implements Database {
20 + private final StateMachine<DatabaseState<String, byte[]>> stateMachine;
21 + private DatabaseProxy<String, byte[]> proxy;
22 +
23 + @SuppressWarnings("unchecked")
24 + public DefaultDatabase(ResourceContext context) {
25 + super(context);
26 + this.stateMachine = new DefaultStateMachine(context, DatabaseState.class, DefaultDatabaseState.class);
27 + }
28 +
29 + /**
30 + * If the database is closed, returning a failed CompletableFuture. Otherwise, calls the given supplier to
31 + * return the completed future result.
32 + *
33 + * @param supplier The supplier to call if the database is open.
34 + * @param <T> The future result type.
35 + * @return A completable future that if this database is closed is immediately failed.
36 + */
37 + protected <T> CompletableFuture<T> checkOpen(Supplier<CompletableFuture<T>> supplier) {
38 + if (proxy == null) {
39 + return Futures.exceptionalFuture(new IllegalStateException("Database closed"));
40 + }
41 + return supplier.get();
42 + }
43 +
44 + @Override
45 + public CompletableFuture<Integer> size(String tableName) {
46 + return checkOpen(() -> proxy.size(tableName));
47 + }
48 +
49 + @Override
50 + public CompletableFuture<Boolean> isEmpty(String tableName) {
51 + return checkOpen(() -> proxy.isEmpty(tableName));
52 + }
53 +
54 + @Override
55 + public CompletableFuture<Boolean> containsKey(String tableName, String key) {
56 + return checkOpen(() -> proxy.containsKey(tableName, key));
57 + }
58 +
59 + @Override
60 + public CompletableFuture<Boolean> containsValue(String tableName, byte[] value) {
61 + return checkOpen(() -> proxy.containsValue(tableName, value));
62 + }
63 +
64 + @Override
65 + public CompletableFuture<Versioned<byte[]>> get(String tableName, String key) {
66 + return checkOpen(() -> proxy.get(tableName, key));
67 + }
68 +
69 + @Override
70 + public CompletableFuture<Versioned<byte[]>> put(String tableName, String key, byte[] value) {
71 + return checkOpen(() -> proxy.put(tableName, key, value));
72 + }
73 +
74 + @Override
75 + public CompletableFuture<Versioned<byte[]>> remove(String tableName, String key) {
76 + return checkOpen(() -> proxy.remove(tableName, key));
77 + }
78 +
79 + @Override
80 + public CompletableFuture<Void> clear(String tableName) {
81 + return checkOpen(() -> proxy.clear(tableName));
82 + }
83 +
84 + @Override
85 + public CompletableFuture<Set<String>> keySet(String tableName) {
86 + return checkOpen(() -> proxy.keySet(tableName));
87 + }
88 +
89 + @Override
90 + public CompletableFuture<Collection<Versioned<byte[]>>> values(String tableName) {
91 + return checkOpen(() -> proxy.values(tableName));
92 + }
93 +
94 + @Override
95 + public CompletableFuture<Set<Map.Entry<String, Versioned<byte[]>>>> entrySet(String tableName) {
96 + return checkOpen(() -> proxy.entrySet(tableName));
97 + }
98 +
99 + @Override
100 + public CompletableFuture<Versioned<byte[]>> putIfAbsent(String tableName, String key, byte[] value) {
101 + return checkOpen(() -> proxy.putIfAbsent(tableName, key, value));
102 + }
103 +
104 + @Override
105 + public CompletableFuture<Boolean> remove(String tableName, String key, byte[] value) {
106 + return checkOpen(() -> proxy.remove(tableName, key, value));
107 + }
108 +
109 + @Override
110 + public CompletableFuture<Boolean> remove(String tableName, String key, long version) {
111 + return checkOpen(() -> proxy.remove(tableName, key, version));
112 + }
113 +
114 + @Override
115 + public CompletableFuture<Boolean> replace(String tableName, String key, byte[] oldValue, byte[] newValue) {
116 + return checkOpen(() -> proxy.replace(tableName, key, oldValue, newValue));
117 + }
118 +
119 + @Override
120 + public CompletableFuture<Boolean> replace(String tableName, String key, long oldVersion, byte[] newValue) {
121 + return checkOpen(() -> proxy.replace(tableName, key, oldVersion, newValue));
122 + }
123 +
124 + @Override
125 + public CompletableFuture<Boolean> atomicBatchUpdate(List<UpdateOperation<String, byte[]>> updates) {
126 + return checkOpen(() -> proxy.atomicBatchUpdate(updates));
127 + }
128 +
129 + @Override
130 + @SuppressWarnings("unchecked")
131 + public synchronized CompletableFuture<Database> open() {
132 + return runStartupTasks()
133 + .thenCompose(v -> stateMachine.open())
134 + .thenRun(() -> {
135 + this.proxy = stateMachine.createProxy(DatabaseProxy.class);
136 + })
137 + .thenApply(v -> null);
138 + }
139 +
140 + @Override
141 + public synchronized CompletableFuture<Void> close() {
142 + proxy = null;
143 + return stateMachine.close()
144 + .thenCompose(v -> runShutdownTasks());
145 + }
146 +}
package org.onosproject.store.consistent.impl;

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

import net.kuujo.copycat.state.Initializer;
import net.kuujo.copycat.state.StateContext;

/**
 * Default database state.
 *
 * @param <K> key type
 * @param <V> value type
 */
public class DefaultDatabaseState<K, V> implements DatabaseState<K, V> {

    // Monotonically increasing version counter shared by all tables.
    private Long nextVersion;
    // Per-table map of key to versioned value.
    private Map<String, Map<K, Versioned<V>>> tables;

    @Initializer
    @Override
    public void init(StateContext<DatabaseState<K, V>> context) {
        tables = context.get("tables");
        if (tables == null) {
            tables = new HashMap<>();
            context.put("tables", tables);
        }
        nextVersion = context.get("nextVersion");
        if (nextVersion == null) {
            nextVersion = 0L; // autobox instead of deprecated new Long(0)
            context.put("nextVersion", nextVersion);
        }
    }

    /** Returns the map for {@code tableName}, creating it on first access. */
    private Map<K, Versioned<V>> getTableMap(String tableName) {
        return tables.computeIfAbsent(tableName, name -> new HashMap<>());
    }

    @Override
    public int size(String tableName) {
        return getTableMap(tableName).size();
    }

    @Override
    public boolean isEmpty(String tableName) {
        return getTableMap(tableName).isEmpty();
    }

    @Override
    public boolean containsKey(String tableName, K key) {
        return getTableMap(tableName).containsKey(key);
    }

    @Override
    public boolean containsValue(String tableName, V value) {
        return getTableMap(tableName).values().stream().anyMatch(v -> checkEquality(v.value(), value));
    }

    @Override
    public Versioned<V> get(String tableName, K key) {
        return getTableMap(tableName).get(key);
    }

    @Override
    public Versioned<V> put(String tableName, K key, V value) {
        return getTableMap(tableName).put(key, new Versioned<>(value, ++nextVersion));
    }

    @Override
    public Versioned<V> remove(String tableName, K key) {
        return getTableMap(tableName).remove(key);
    }

    @Override
    public void clear(String tableName) {
        getTableMap(tableName).clear();
    }

    @Override
    public Set<K> keySet(String tableName) {
        return getTableMap(tableName).keySet();
    }

    @Override
    public Collection<Versioned<V>> values(String tableName) {
        return getTableMap(tableName).values();
    }

    @Override
    public Set<Entry<K, Versioned<V>>> entrySet(String tableName) {
        return getTableMap(tableName).entrySet();
    }

    @Override
    public Versioned<V> putIfAbsent(String tableName, K key, V value) {
        Versioned<V> existingValue = getTableMap(tableName).get(key);
        return existingValue != null ? existingValue : put(tableName, key, value);
    }

    @Override
    public boolean remove(String tableName, K key, V value) {
        Versioned<V> existing = getTableMap(tableName).get(key);
        // BUG FIX: compare via checkEquality so byte[] values match by content
        // (consistent with containsValue); Object.equals is identity for arrays.
        if (existing != null && checkEquality(existing.value(), value)) {
            getTableMap(tableName).remove(key);
            return true;
        }
        return false;
    }

    @Override
    public boolean remove(String tableName, K key, long version) {
        Versioned<V> existing = getTableMap(tableName).get(key);
        if (existing != null && existing.version() == version) {
            remove(tableName, key);
            return true;
        }
        return false;
    }

    @Override
    public boolean replace(String tableName, K key, V oldValue, V newValue) {
        Versioned<V> existing = getTableMap(tableName).get(key);
        // BUG FIX: content-based comparison for byte[] values (see remove above).
        if (existing != null && checkEquality(existing.value(), oldValue)) {
            put(tableName, key, newValue);
            return true;
        }
        return false;
    }

    @Override
    public boolean replace(String tableName, K key, long oldVersion, V newValue) {
        Versioned<V> existing = getTableMap(tableName).get(key);
        if (existing != null && existing.version() == oldVersion) {
            put(tableName, key, newValue);
            return true;
        }
        return false;
    }

    @Override
    public boolean batchUpdate(List<UpdateOperation<K, V>> updates) {
        // Validate every update first; apply only if the whole batch can succeed.
        if (updates.stream().anyMatch(update -> !checkIfUpdateIsPossible(update))) {
            return false;
        } else {
            updates.stream().forEach(this::doUpdate);
            return true;
        }
    }

    /** Applies a single, pre-validated update. */
    private void doUpdate(UpdateOperation<K, V> update) {
        String tableName = update.tableName();
        K key = update.key();
        switch (update.type()) {
        case PUT:
            put(tableName, key, update.value());
            return;
        case REMOVE:
            remove(tableName, key);
            return;
        case PUT_IF_ABSENT:
            putIfAbsent(tableName, key, update.value());
            return;
        // BUG FIX: the version-match and value-match cases had their arguments
        // swapped (version-match passed currentValue and vice versa).
        case PUT_IF_VERSION_MATCH:
            replace(tableName, key, update.currentVersion(), update.value());
            return;
        case PUT_IF_VALUE_MATCH:
            replace(tableName, key, update.currentValue(), update.value());
            return;
        case REMOVE_IF_VERSION_MATCH:
            remove(tableName, key, update.currentVersion());
            return;
        case REMOVE_IF_VALUE_MATCH:
            remove(tableName, key, update.currentValue());
            return;
        default:
            throw new IllegalStateException("Unsupported type: " + update.type());
        }
    }

    /** Returns true if the given update's precondition holds against current state. */
    private boolean checkIfUpdateIsPossible(UpdateOperation<K, V> update) {
        Versioned<V> existingEntry = get(update.tableName(), update.key());
        switch (update.type()) {
        case PUT:
        case REMOVE:
            return true;
        case PUT_IF_ABSENT:
            return existingEntry == null;
        case PUT_IF_VERSION_MATCH:
            return existingEntry != null && existingEntry.version() == update.currentVersion();
        case PUT_IF_VALUE_MATCH:
            // BUG FIX: content-based comparison for byte[] values.
            return existingEntry != null && checkEquality(existingEntry.value(), update.currentValue());
        case REMOVE_IF_VERSION_MATCH:
            // Removing an absent entry trivially satisfies the precondition.
            return existingEntry == null || existingEntry.version() == update.currentVersion();
        case REMOVE_IF_VALUE_MATCH:
            return existingEntry == null || checkEquality(existingEntry.value(), update.currentValue());
        default:
            throw new IllegalStateException("Unsupported type: " + update.type());
        }
    }

    /** Equality that compares byte[] values by content rather than identity. */
    private boolean checkEquality(V value1, V value2) {
        if (value1 instanceof byte[]) {
            return Arrays.equals((byte[]) value1, (byte[]) value2);
        }
        return value1.equals(value2);
    }
}
1 +package org.onosproject.store.consistent.impl;
2 +
3 +import java.util.Collection;
4 +import java.util.List;
5 +import java.util.Map;
6 +import java.util.Map.Entry;
7 +import java.util.Set;
8 +import java.util.concurrent.CompletableFuture;
9 +import java.util.concurrent.CopyOnWriteArrayList;
10 +import java.util.concurrent.atomic.AtomicBoolean;
11 +import java.util.concurrent.atomic.AtomicInteger;
12 +import java.util.stream.Collectors;
13 +
14 +import com.google.common.collect.ImmutableMap;
15 +import com.google.common.collect.Lists;
16 +import com.google.common.collect.Maps;
17 +import com.google.common.collect.Sets;
18 +
19 +import net.kuujo.copycat.cluster.internal.coordinator.ClusterCoordinator;
20 +
21 +/**
22 + * A database that partitions the keys across one or more database partitions.
23 + */
24 +public class PartitionedDatabase implements DatabaseProxy<String, byte[]>, PartitionedDatabaseManager {
25 +
26 + private Partitioner<String> partitioner;
27 + private final ClusterCoordinator coordinator;
28 + private final Map<String, Database> partitions = Maps.newConcurrentMap();
29 +
30 + protected PartitionedDatabase(ClusterCoordinator coordinator) {
31 + this.coordinator = coordinator;
32 + }
33 +
34 + @Override
35 + public void registerPartition(String name, Database partition) {
36 + partitions.put(name, partition);
37 + }
38 +
39 + @Override
40 + public Map<String, Database> getRegisteredPartitions() {
41 + return ImmutableMap.copyOf(partitions);
42 + }
43 +
44 + @Override
45 + public CompletableFuture<Integer> size(String tableName) {
46 + AtomicInteger totalSize = new AtomicInteger(0);
47 + return CompletableFuture.allOf(partitions
48 + .values()
49 + .stream()
50 + .map(p -> p.size(tableName).thenApply(totalSize::addAndGet))
51 + .toArray(CompletableFuture[]::new))
52 + .thenApply(v -> totalSize.get());
53 + }
54 +
55 + @Override
56 + public CompletableFuture<Boolean> isEmpty(String tableName) {
57 + return size(tableName).thenApply(size -> size == 0);
58 + }
59 +
60 + @Override
61 + public CompletableFuture<Boolean> containsKey(String tableName, String key) {
62 + return partitioner.getPartition(tableName, key).containsKey(tableName, key);
63 + }
64 +
65 + @Override
66 + public CompletableFuture<Boolean> containsValue(String tableName, byte[] value) {
67 + AtomicBoolean containsValue = new AtomicBoolean(false);
68 + return CompletableFuture.allOf(partitions
69 + .values()
70 + .stream()
71 + .map(p -> p.containsValue(tableName, value).thenApply(v -> containsValue.compareAndSet(false, v)))
72 + .toArray(CompletableFuture[]::new))
73 + .thenApply(v -> containsValue.get());
74 + }
75 +
76 + @Override
77 + public CompletableFuture<Versioned<byte[]>> get(String tableName, String key) {
78 + return partitioner.getPartition(tableName, key).get(tableName, key);
79 + }
80 +
81 + @Override
82 + public CompletableFuture<Versioned<byte[]>> put(String tableName, String key, byte[] value) {
83 + return partitioner.getPartition(tableName, key).put(tableName, key, value);
84 + }
85 +
86 + @Override
87 + public CompletableFuture<Versioned<byte[]>> remove(String tableName, String key) {
88 + return partitioner.getPartition(tableName, key).remove(tableName, key);
89 + }
90 +
91 + @Override
92 + public CompletableFuture<Void> clear(String tableName) {
93 + return CompletableFuture.allOf(partitions
94 + .values()
95 + .stream()
96 + .map(p -> p.clear(tableName))
97 + .toArray(CompletableFuture[]::new));
98 + }
99 +
100 + @Override
101 + public CompletableFuture<Set<String>> keySet(String tableName) {
102 + Set<String> keySet = Sets.newConcurrentHashSet();
103 + return CompletableFuture.allOf(partitions
104 + .values()
105 + .stream()
106 + .map(p -> p.keySet(tableName).thenApply(keySet::addAll))
107 + .toArray(CompletableFuture[]::new))
108 + .thenApply(v -> keySet);
109 + }
110 +
111 + @Override
112 + public CompletableFuture<Collection<Versioned<byte[]>>> values(String tableName) {
113 + List<Versioned<byte[]>> values = new CopyOnWriteArrayList<>();
114 + return CompletableFuture.allOf(partitions
115 + .values()
116 + .stream()
117 + .map(p -> p.values(tableName))
118 + .toArray(CompletableFuture[]::new))
119 + .thenApply(v -> values);
120 + }
121 +
122 + @Override
123 + public CompletableFuture<Set<Entry<String, Versioned<byte[]>>>> entrySet(String tableName) {
124 + Set<Entry<String, Versioned<byte[]>>> entrySet = Sets.newConcurrentHashSet();
125 + return CompletableFuture.allOf(partitions
126 + .values()
127 + .stream()
128 + .map(p -> p.entrySet(tableName).thenApply(entrySet::addAll))
129 + .toArray(CompletableFuture[]::new))
130 + .thenApply(v -> entrySet);
131 + }
132 +
133 + @Override
134 + public CompletableFuture<Versioned<byte[]>> putIfAbsent(String tableName, String key, byte[] value) {
135 + return partitioner.getPartition(tableName, key).putIfAbsent(tableName, key, value);
136 + }
137 +
138 + @Override
139 + public CompletableFuture<Boolean> remove(String tableName, String key, byte[] value) {
140 + return partitioner.getPartition(tableName, key).remove(tableName, key, value);
141 + }
142 +
143 + @Override
144 + public CompletableFuture<Boolean> remove(String tableName, String key, long version) {
145 + return partitioner.getPartition(tableName, key).remove(tableName, key, version);
146 + }
147 +
148 + @Override
149 + public CompletableFuture<Boolean> replace(String tableName, String key, byte[] oldValue, byte[] newValue) {
150 + return partitioner.getPartition(tableName, key).replace(tableName, key, oldValue, newValue);
151 + }
152 +
153 + @Override
154 + public CompletableFuture<Boolean> replace(String tableName, String key, long oldVersion, byte[] newValue) {
155 + return partitioner.getPartition(tableName, key).replace(tableName, key, oldVersion, newValue);
156 + }
157 +
158 + @Override
159 + public CompletableFuture<Boolean> atomicBatchUpdate(List<UpdateOperation<String, byte[]>> updates) {
160 + Map<Database, List<UpdateOperation<String, byte[]>>> perPartitionUpdates = Maps.newHashMap();
161 + for (UpdateOperation<String, byte[]> update : updates) {
162 + Database partition = partitioner.getPartition(update.tableName(), update.key());
163 + List<UpdateOperation<String, byte[]>> partitionUpdates = perPartitionUpdates.get(partition);
164 + if (partitionUpdates == null) {
165 + partitionUpdates = Lists.newArrayList();
166 + perPartitionUpdates.put(partition, partitionUpdates);
167 + }
168 + partitionUpdates.add(update);
169 + }
170 + if (perPartitionUpdates.size() > 1) {
171 + // TODO
172 + throw new UnsupportedOperationException("Cross partition transactional updates are not supported.");
173 + } else {
174 + Entry<Database, List<UpdateOperation<String, byte[]>>> only =
175 + perPartitionUpdates.entrySet().iterator().next();
176 + return only.getKey().atomicBatchUpdate(only.getValue());
177 + }
178 + }
179 +
    @Override
    public void setPartitioner(Partitioner<String> partitioner) {
        // The partitioner decides which Database partition owns a given (table, key).
        this.partitioner = partitioner;
    }
184 +
185 + @Override
186 + public CompletableFuture<PartitionedDatabase> open() {
187 + return coordinator.open().thenCompose(c -> CompletableFuture.allOf(partitions
188 + .values()
189 + .stream()
190 + .map(Database::open)
191 + .collect(Collectors.toList())
192 + .toArray(new CompletableFuture[partitions.size()]))
193 + .thenApply(v -> this));
194 +
195 + }
196 +
197 + @Override
198 + public CompletableFuture<Void> close() {
199 + CompletableFuture<Void> closePartitions = CompletableFuture.allOf(partitions
200 + .values()
201 + .stream()
202 + .map(database -> database.close())
203 + .collect(Collectors.toList())
204 + .toArray(new CompletableFuture[partitions.size()]));
205 + CompletableFuture<Void> closeCoordinator = coordinator.close();
206 + return closePartitions.thenCompose(v -> closeCoordinator);
207 + }
208 +}
package org.onosproject.store.consistent.impl;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

/**
 * Partitioned database configuration: a named set of per-partition
 * {@link DatabaseConfig} instances.
 */
public class PartitionedDatabaseConfig {
    // Mutable backing map; exposed to callers only through an unmodifiable view.
    private final Map<String, DatabaseConfig> partitions = new HashMap<>();

    /**
     * Returns the configuration for all partitions.
     * @return unmodifiable partition name to configuration mapping.
     */
    public Map<String, DatabaseConfig> partitions() {
        return Collections.unmodifiableMap(partitions);
    }

    /**
     * Adds the specified partition name and configuration.
     * @param name partition name.
     * @param config partition config
     * @return this instance
     */
    public PartitionedDatabaseConfig withPartition(String name, DatabaseConfig config) {
        partitions.put(name, config);
        return this;
    }
}
package org.onosproject.store.consistent.impl;

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;

import net.kuujo.copycat.CopycatConfig;
import net.kuujo.copycat.cluster.ClusterConfig;
import net.kuujo.copycat.cluster.internal.coordinator.ClusterCoordinator;
import net.kuujo.copycat.cluster.internal.coordinator.DefaultClusterCoordinator;
import net.kuujo.copycat.util.concurrent.NamedThreadFactory;

/**
 * Manager facade for a database that is sharded across multiple Raft partitions.
 */
public interface PartitionedDatabaseManager {
    /**
     * Opens the database.
     *
     * @return A completable future to be completed with the result once complete.
     */
    CompletableFuture<PartitionedDatabase> open();

    /**
     * Closes the database.
     *
     * @return A completable future to be completed with the result once complete.
     */
    CompletableFuture<Void> close();

    /**
     * Sets the partitioner to use for mapping keys to partitions.
     *
     * @param partitioner partitioner
     */
    void setPartitioner(Partitioner<String> partitioner);

    /**
     * Registers a new partition.
     *
     * @param partitionName partition name.
     * @param partition partition.
     */
    void registerPartition(String partitionName, Database partition);

    /**
     * Returns all the registered database partitions.
     *
     * @return mapping of all registered database partitions.
     */
    Map<String, Database> getRegisteredPartitions();

    /**
     * Creates a new partitioned database.
     *
     * @param name The database name.
     * @param clusterConfig The cluster configuration.
     * @param partitionedDatabaseConfig The database configuration.
     * @return The database.
     */
    // NOTE: "public" removed — interface members are implicitly public
    // (redundant-modifier checkstyle rule); also fixed "partitionName ," spacing.
    static PartitionedDatabase create(
            String name,
            ClusterConfig clusterConfig,
            PartitionedDatabaseConfig partitionedDatabaseConfig) {
        // A single named, single-threaded coordinator executor is shared by all partitions.
        CopycatConfig copycatConfig = new CopycatConfig()
            .withName(name)
            .withClusterConfig(clusterConfig)
            .withDefaultExecutor(Executors.newSingleThreadExecutor(new NamedThreadFactory("copycat-coordinator-%d")));
        ClusterCoordinator coordinator = new DefaultClusterCoordinator(copycatConfig.resolve());
        PartitionedDatabase partitionedDatabase = new PartitionedDatabase(coordinator);
        // Each configured partition becomes a coordinator-managed Database resource
        // that inherits the serializer and executor from the top-level Copycat config.
        partitionedDatabaseConfig.partitions().forEach((partitionName, partitionConfig) ->
                partitionedDatabase.registerPartition(partitionName,
                        coordinator.getResource(partitionName, partitionConfig.resolve(clusterConfig)
                                .withDefaultSerializer(copycatConfig.getDefaultSerializer().copy())
                                .withDefaultExecutor(copycatConfig.getDefaultExecutor()))));
        partitionedDatabase.setPartitioner(
                new SimpleKeyHashPartitioner<>(partitionedDatabase.getRegisteredPartitions()));
        return partitionedDatabase;
    }
}
package org.onosproject.store.consistent.impl;

/**
 * Partitioner is responsible for mapping keys to individual database partitions.
 *
 * @param <K> key type.
 */
public interface Partitioner<K> {

    /**
     * Returns the database partition that owns the specified key.
     * Implementations must be deterministic: every node must map the same
     * (tableName, key) pair to the same partition.
     * @param tableName table name
     * @param key key
     * @return Database partition
     */
    Database getPartition(String tableName, K key);
}
package org.onosproject.store.consistent.impl;

import java.util.Collections;
import java.util.List;
import java.util.Map;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;

/**
 * A simple Partitioner that uses the key hashCode to map
 * key to a partition.
 * <p>
 * Partition names are sorted so that every node derives the same
 * key-to-partition mapping from the same partition map.
 *
 * @param <K> key type.
 */
public class SimpleKeyHashPartitioner<K> implements Partitioner<K> {

    private final Map<String, Database> partitionMap;
    private final List<String> sortedPartitionNames;

    public SimpleKeyHashPartitioner(Map<String, Database> partitionMap) {
        // Fail fast: with zero partitions getPartition would divide by zero.
        if (partitionMap.isEmpty()) {
            throw new IllegalArgumentException("partitionMap must not be empty");
        }
        this.partitionMap = ImmutableMap.copyOf(partitionMap);
        sortedPartitionNames = Lists.newArrayList(this.partitionMap.keySet());
        Collections.sort(sortedPartitionNames);
    }

    @Override
    public Database getPartition(String tableName, K key) {
        // BUG FIX: Math.floorMod instead of Math.abs(hash) % size.
        // Math.abs(Integer.MIN_VALUE) is negative, which would have produced a
        // negative list index; floorMod always yields a value in [0, size).
        int index = Math.floorMod(key.hashCode(), sortedPartitionNames.size());
        return partitionMap.get(sortedPartitionNames.get(index));
    }
}
1 +package org.onosproject.store.consistent.impl;
2 +
3 +import com.google.common.base.MoreObjects;
4 +
5 +/**
6 + * Database update operation.
7 + *
8 + * @param <K> key type.
9 + * @param <V> value type.
10 + */
11 +public class UpdateOperation<K, V> {
12 +
13 + /**
14 + * Type of database update operation.
15 + */
16 + public static enum Type {
17 + PUT,
18 + PUT_IF_ABSENT,
19 + PUT_IF_VERSION_MATCH,
20 + PUT_IF_VALUE_MATCH,
21 + REMOVE,
22 + REMOVE_IF_VERSION_MATCH,
23 + REMOVE_IF_VALUE_MATCH,
24 + }
25 +
26 + private Type type;
27 + private String tableName;
28 + private K key;
29 + private V value;
30 + private V currentValue;
31 + private long currentVersion;
32 +
33 + /**
34 + * Returns the type of update operation.
35 + * @return type of update.
36 + */
37 + public Type type() {
38 + return type;
39 + }
40 +
41 + /**
42 + * Returns the tableName being updated.
43 + * @return table name.
44 + */
45 + public String tableName() {
46 + return tableName;
47 + }
48 +
49 + /**
50 + * Returns the item key being updated.
51 + * @return item key
52 + */
53 + public K key() {
54 + return key;
55 + }
56 +
57 + /**
58 + * Returns the new value.
59 + * @return item's target value.
60 + */
61 + public V value() {
62 + return value;
63 + }
64 +
65 + /**
66 + * Returns the expected current value in the database value for the key.
67 + * @return current value in database.
68 + */
69 + public V currentValue() {
70 + return currentValue;
71 + }
72 +
73 + /**
74 + * Returns the expected current version in the database for the key.
75 + * @return expected version.
76 + */
77 + public long currentVersion() {
78 + return currentVersion;
79 + }
80 +
81 + @Override
82 + public String toString() {
83 + return MoreObjects.toStringHelper(this)
84 + .add("type", type)
85 + .add("tableName", tableName)
86 + .add("key", key)
87 + .add("value", value)
88 + .add("currentValue", currentValue)
89 + .add("currentVersion", currentVersion)
90 + .toString();
91 + }
92 +
93 + /**
94 + * UpdatOperation builder.
95 + *
96 + * @param <K> key type.
97 + * @param <V> value type.
98 + */
99 + public static final class Builder<K, V> {
100 +
101 + private UpdateOperation<K, V> operation = new UpdateOperation<>();
102 +
103 + /**
104 + * Creates a new builder instance.
105 + * @param <K> key type.
106 + * @param <V> value type.
107 + *
108 + * @return builder.
109 + */
110 + public static <K, V> Builder<K, V> builder() {
111 + return new Builder<>();
112 + }
113 +
114 + private Builder() {
115 + }
116 +
117 + public UpdateOperation<K, V> build() {
118 + return operation;
119 + }
120 +
121 + public Builder<K, V> withType(Type type) {
122 + operation.type = type;
123 + return this;
124 + }
125 +
126 + public Builder<K, V> withTableName(String tableName) {
127 + operation.tableName = tableName;
128 + return this;
129 + }
130 +
131 + public Builder<K, V> withKey(K key) {
132 + operation.key = key;
133 + return this;
134 + }
135 +
136 + public Builder<K, V> withCurrentValue(V value) {
137 + operation.currentValue = value;
138 + return this;
139 + }
140 +
141 + public Builder<K, V> withValue(V value) {
142 + operation.value = value;
143 + return this;
144 + }
145 +
146 + public Builder<K, V> withCurrentVersion(long version) {
147 + operation.currentVersion = version;
148 + return this;
149 + }
150 + }
151 +}
1 +package org.onosproject.store.consistent.impl;
2 +
3 +import com.google.common.base.MoreObjects;
4 +
5 +/**
6 + * Versioned value.
7 + *
8 + * @param <V> value type.
9 + */
10 +public class Versioned<V> {
11 +
12 + private final V value;
13 + private final long version;
14 +
15 + /**
16 + * Constructs a new versioned value.
17 + * @param value value
18 + * @param version version
19 + */
20 + public Versioned(V value, long version) {
21 + this.value = value;
22 + this.version = version;
23 + }
24 +
25 + /**
26 + * Returns the value.
27 + *
28 + * @return value.
29 + */
30 + public V value() {
31 + return value;
32 + }
33 +
34 + /**
35 + * Returns the version.
36 + *
37 + * @return version
38 + */
39 + public long version() {
40 + return version;
41 + }
42 +
43 + @Override
44 + public String toString() {
45 + return MoreObjects.toStringHelper(this)
46 + .add("value", value)
47 + .add("version", version)
48 + .toString();
49 + }
50 +}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.service.impl;
17 -
18 -import static org.slf4j.LoggerFactory.getLogger;
19 -
20 -import java.util.Vector;
21 -
22 -import net.kuujo.copycat.cluster.TcpClusterConfig;
23 -import net.kuujo.copycat.cluster.TcpMember;
24 -import net.kuujo.copycat.event.LeaderElectEvent;
25 -import net.kuujo.copycat.internal.log.ConfigurationEntry;
26 -import net.kuujo.copycat.internal.log.CopycatEntry;
27 -import net.kuujo.copycat.internal.log.OperationEntry;
28 -import net.kuujo.copycat.internal.log.SnapshotEntry;
29 -import net.kuujo.copycat.protocol.PingRequest;
30 -import net.kuujo.copycat.protocol.PingResponse;
31 -import net.kuujo.copycat.protocol.PollRequest;
32 -import net.kuujo.copycat.protocol.PollResponse;
33 -import net.kuujo.copycat.protocol.Response.Status;
34 -import net.kuujo.copycat.protocol.SubmitRequest;
35 -import net.kuujo.copycat.protocol.SubmitResponse;
36 -import net.kuujo.copycat.protocol.SyncRequest;
37 -import net.kuujo.copycat.protocol.SyncResponse;
38 -import net.kuujo.copycat.spi.protocol.Protocol;
39 -import net.kuujo.copycat.spi.protocol.ProtocolClient;
40 -import net.kuujo.copycat.spi.protocol.ProtocolServer;
41 -
42 -import org.apache.felix.scr.annotations.Activate;
43 -import org.apache.felix.scr.annotations.Component;
44 -import org.apache.felix.scr.annotations.Deactivate;
45 -import org.apache.felix.scr.annotations.Reference;
46 -import org.apache.felix.scr.annotations.ReferenceCardinality;
47 -import org.apache.felix.scr.annotations.Service;
48 -import org.onosproject.cluster.ClusterService;
49 -import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
50 -import org.onosproject.store.cluster.messaging.MessageSubject;
51 -import org.onosproject.store.serializers.KryoNamespaces;
52 -import org.onosproject.store.serializers.KryoSerializer;
53 -import org.onosproject.store.serializers.StoreSerializer;
54 -import org.onosproject.store.service.impl.DatabaseStateMachine.State;
55 -import org.onosproject.store.service.impl.DatabaseStateMachine.TableMetadata;
56 -import org.onlab.util.KryoNamespace;
57 -import org.slf4j.Logger;
58 -
59 -/**
60 - * ONOS Cluster messaging based Copycat protocol.
61 - */
62 -@Component(immediate = false)
63 -@Service
64 -public class ClusterMessagingProtocol
65 - implements DatabaseProtocolService, Protocol<TcpMember> {
66 -
67 - private final Logger log = getLogger(getClass());
68 -
69 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
70 - protected ClusterService clusterService;
71 -
72 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
73 - protected ClusterCommunicationService clusterCommunicator;
74 -
75 - public static final MessageSubject COPYCAT_PING =
76 - new MessageSubject("copycat-raft-consensus-ping");
77 - public static final MessageSubject COPYCAT_SYNC =
78 - new MessageSubject("copycat-raft-consensus-sync");
79 - public static final MessageSubject COPYCAT_POLL =
80 - new MessageSubject("copycat-raft-consensus-poll");
81 - public static final MessageSubject COPYCAT_SUBMIT =
82 - new MessageSubject("copycat-raft-consensus-submit");
83 -
84 - static final int AFTER_COPYCAT = KryoNamespaces.BEGIN_USER_CUSTOM_ID + 50;
85 -
86 - static final KryoNamespace COPYCAT = KryoNamespace.newBuilder()
87 - .register(KryoNamespaces.API)
88 - .nextId(KryoNamespaces.BEGIN_USER_CUSTOM_ID)
89 - .register(PingRequest.class)
90 - .register(PingResponse.class)
91 - .register(PollRequest.class)
92 - .register(PollResponse.class)
93 - .register(SyncRequest.class)
94 - .register(SyncResponse.class)
95 - .register(SubmitRequest.class)
96 - .register(SubmitResponse.class)
97 - .register(Status.class)
98 - .register(ConfigurationEntry.class)
99 - .register(SnapshotEntry.class)
100 - .register(CopycatEntry.class)
101 - .register(OperationEntry.class)
102 - .register(TcpClusterConfig.class)
103 - .register(TcpMember.class)
104 - .register(LeaderElectEvent.class)
105 - .register(Vector.class)
106 - .build();
107 -
108 - // serializer used for CopyCat Protocol
109 - public static final StoreSerializer DB_SERIALIZER = new KryoSerializer() {
110 - @Override
111 - protected void setupKryoPool() {
112 - serializerPool = KryoNamespace.newBuilder()
113 - .register(COPYCAT)
114 - .nextId(AFTER_COPYCAT)
115 - // for snapshot
116 - .register(State.class)
117 - .register(TableMetadata.class)
118 - // TODO: Move this out to API?
119 - .register(TableModificationEvent.class)
120 - .register(TableModificationEvent.Type.class)
121 - .build();
122 - }
123 - };
124 -
125 - @Activate
126 - public void activate() {
127 - log.info("Started");
128 - }
129 -
130 - @Deactivate
131 - public void deactivate() {
132 - log.info("Stopped");
133 - }
134 -
135 - @Override
136 - public ProtocolServer createServer(TcpMember member) {
137 - return new ClusterMessagingProtocolServer(clusterCommunicator);
138 - }
139 -
140 - @Override
141 - public ProtocolClient createClient(TcpMember member) {
142 - return new ClusterMessagingProtocolClient(clusterService,
143 - clusterCommunicator,
144 - clusterService.getLocalNode(),
145 - member);
146 - }
147 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.service.impl;
17 -
18 -import static com.google.common.base.Verify.verifyNotNull;
19 -import static org.onosproject.store.service.impl.ClusterMessagingProtocol.DB_SERIALIZER;
20 -import static org.onlab.util.Tools.namedThreads;
21 -import static org.slf4j.LoggerFactory.getLogger;
22 -import static java.util.concurrent.Executors.newCachedThreadPool;
23 -
24 -import java.io.IOException;
25 -import java.time.Duration;
26 -import java.util.concurrent.CompletableFuture;
27 -import java.util.concurrent.ExecutionException;
28 -import java.util.concurrent.ExecutorService;
29 -import java.util.concurrent.TimeUnit;
30 -import java.util.concurrent.TimeoutException;
31 -import java.util.concurrent.atomic.AtomicBoolean;
32 -
33 -import net.kuujo.copycat.cluster.TcpMember;
34 -import net.kuujo.copycat.protocol.PingRequest;
35 -import net.kuujo.copycat.protocol.PingResponse;
36 -import net.kuujo.copycat.protocol.PollRequest;
37 -import net.kuujo.copycat.protocol.PollResponse;
38 -import net.kuujo.copycat.protocol.SubmitRequest;
39 -import net.kuujo.copycat.protocol.SubmitResponse;
40 -import net.kuujo.copycat.protocol.SyncRequest;
41 -import net.kuujo.copycat.protocol.SyncResponse;
42 -import net.kuujo.copycat.spi.protocol.ProtocolClient;
43 -
44 -import org.onosproject.cluster.ClusterService;
45 -import org.onosproject.cluster.ControllerNode;
46 -import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
47 -import org.onosproject.store.cluster.messaging.ClusterMessage;
48 -import org.onosproject.store.cluster.messaging.MessageSubject;
49 -import org.slf4j.Logger;
50 -
51 -/**
52 - * ONOS Cluster messaging based Copycat protocol client.
53 - */
54 -public class ClusterMessagingProtocolClient implements ProtocolClient {
55 -
56 - private final Logger log = getLogger(getClass());
57 -
58 - public static final Duration RETRY_INTERVAL = Duration.ofMillis(2000);
59 -
60 - private final ClusterService clusterService;
61 - private final ClusterCommunicationService clusterCommunicator;
62 - private final ControllerNode localNode;
63 - private final TcpMember remoteMember;
64 -
65 - private ControllerNode remoteNode;
66 - private final AtomicBoolean connectionOK = new AtomicBoolean(true);
67 -
68 - private ExecutorService pool;
69 -
70 - public ClusterMessagingProtocolClient(
71 - ClusterService clusterService,
72 - ClusterCommunicationService clusterCommunicator,
73 - ControllerNode localNode,
74 - TcpMember remoteMember) {
75 -
76 - this.clusterService = clusterService;
77 - this.clusterCommunicator = clusterCommunicator;
78 - this.localNode = localNode;
79 - this.remoteMember = remoteMember;
80 - }
81 -
82 - @Override
83 - public CompletableFuture<PingResponse> ping(PingRequest request) {
84 - return requestReply(request);
85 - }
86 -
87 - @Override
88 - public CompletableFuture<SyncResponse> sync(SyncRequest request) {
89 - return requestReply(request);
90 - }
91 -
92 - @Override
93 - public CompletableFuture<PollResponse> poll(PollRequest request) {
94 - return requestReply(request);
95 - }
96 -
97 - @Override
98 - public CompletableFuture<SubmitResponse> submit(SubmitRequest request) {
99 - return requestReply(request);
100 - }
101 -
102 - @Override
103 - public synchronized CompletableFuture<Void> connect() {
104 - if (pool == null || pool.isShutdown()) {
105 - // TODO include remote name?
106 - pool = newCachedThreadPool(namedThreads("onos-copycat-netty-messaging-client-%d"));
107 - }
108 - return CompletableFuture.completedFuture(null);
109 - }
110 -
111 - @Override
112 - public synchronized CompletableFuture<Void> close() {
113 - if (pool != null) {
114 - pool.shutdownNow();
115 - pool = null;
116 - }
117 - return CompletableFuture.completedFuture(null);
118 - }
119 -
120 - private <I> MessageSubject messageType(I input) {
121 - Class<?> clazz = input.getClass();
122 - if (clazz.equals(PollRequest.class)) {
123 - return ClusterMessagingProtocol.COPYCAT_POLL;
124 - } else if (clazz.equals(SyncRequest.class)) {
125 - return ClusterMessagingProtocol.COPYCAT_SYNC;
126 - } else if (clazz.equals(SubmitRequest.class)) {
127 - return ClusterMessagingProtocol.COPYCAT_SUBMIT;
128 - } else if (clazz.equals(PingRequest.class)) {
129 - return ClusterMessagingProtocol.COPYCAT_PING;
130 - } else {
131 - throw new IllegalArgumentException("Unknown class " + clazz.getName());
132 - }
133 - }
134 -
135 - private <I, O> CompletableFuture<O> requestReply(I request) {
136 - CompletableFuture<O> future = new CompletableFuture<>();
137 - if (pool == null) {
138 - log.info("Attempted to use closed client, connecting now. {}", request);
139 - connect();
140 - }
141 - pool.submit(new RPCTask<I, O>(request, future));
142 - return future;
143 - }
144 -
145 - private ControllerNode getControllerNode(TcpMember remoteMember) {
146 - final String host = remoteMember.host();
147 - final int port = remoteMember.port();
148 - for (ControllerNode node : clusterService.getNodes()) {
149 - if (node.ip().toString().equals(host) && node.tcpPort() == port) {
150 - return node;
151 - }
152 - }
153 - return null;
154 - }
155 -
156 - private class RPCTask<I, O> implements Runnable {
157 -
158 - private final I request;
159 - private final ClusterMessage message;
160 - private final CompletableFuture<O> future;
161 -
162 - public RPCTask(I request, CompletableFuture<O> future) {
163 - this.request = request;
164 - this.message =
165 - new ClusterMessage(
166 - localNode.id(),
167 - messageType(request),
168 - verifyNotNull(DB_SERIALIZER.encode(request)));
169 - this.future = future;
170 - }
171 -
172 - @Override
173 - public void run() {
174 - try {
175 - if (remoteNode == null) {
176 - remoteNode = getControllerNode(remoteMember);
177 - if (remoteNode == null) {
178 - throw new IOException("Remote node is offline!");
179 - }
180 - }
181 - byte[] response = clusterCommunicator
182 - .sendAndReceive(message, remoteNode.id())
183 - .get(RETRY_INTERVAL.toMillis(), TimeUnit.MILLISECONDS);
184 - if (!connectionOK.getAndSet(true)) {
185 - log.info("Connectivity to {} restored", remoteNode);
186 - }
187 - future.complete(verifyNotNull(DB_SERIALIZER.decode(response)));
188 -
189 - } catch (IOException | TimeoutException e) {
190 - if (connectionOK.getAndSet(false)) {
191 - log.warn("Detected connectivity issues with {}. Reason: {}", remoteNode, e.getMessage());
192 - }
193 - log.debug("RPCTask for {} failed.", request, e);
194 - future.completeExceptionally(e);
195 - } catch (ExecutionException e) {
196 - log.warn("RPCTask execution for {} failed: {}", request, e.getMessage());
197 - log.debug("RPCTask execution for {} failed.", request, e);
198 - future.completeExceptionally(e);
199 - } catch (InterruptedException e) {
200 - log.warn("RPCTask for {} was interrupted: {}", request, e.getMessage());
201 - log.debug("RPCTask for {} was interrupted.", request, e);
202 - future.completeExceptionally(e);
203 - Thread.currentThread().interrupt();
204 - } catch (Exception e) {
205 - log.warn("RPCTask for {} terribly failed.", request, e);
206 - future.completeExceptionally(e);
207 - }
208 - }
209 - }
210 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.service.impl;
17 -
18 -import static java.util.concurrent.Executors.newCachedThreadPool;
19 -import static org.onlab.util.Tools.namedThreads;
20 -import static org.slf4j.LoggerFactory.getLogger;
21 -import static org.onosproject.store.service.impl.ClusterMessagingProtocol.*;
22 -import static org.onosproject.store.service.impl.ClusterMessagingProtocol.DB_SERIALIZER;
23 -
24 -import java.util.concurrent.CompletableFuture;
25 -import java.util.concurrent.ExecutorService;
26 -import java.util.function.BiConsumer;
27 -
28 -import net.kuujo.copycat.protocol.PingRequest;
29 -import net.kuujo.copycat.protocol.PollRequest;
30 -import net.kuujo.copycat.protocol.RequestHandler;
31 -import net.kuujo.copycat.protocol.SubmitRequest;
32 -import net.kuujo.copycat.protocol.SyncRequest;
33 -import net.kuujo.copycat.spi.protocol.ProtocolServer;
34 -
35 -import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
36 -import org.onosproject.store.cluster.messaging.ClusterMessage;
37 -import org.onosproject.store.cluster.messaging.ClusterMessageHandler;
38 -import org.slf4j.Logger;
39 -
40 -/**
41 - * ONOS Cluster messaging based Copycat protocol server.
42 - */
43 -public class ClusterMessagingProtocolServer implements ProtocolServer {
44 -
45 - private final Logger log = getLogger(getClass());
46 -
47 - private final ClusterCommunicationService clusterCommunicator;
48 -
49 - private volatile RequestHandler handler;
50 -
51 - private ExecutorService pool;
52 -
53 - public ClusterMessagingProtocolServer(ClusterCommunicationService clusterCommunicator) {
54 - this.clusterCommunicator = clusterCommunicator;
55 - }
56 -
57 - @Override
58 - public void requestHandler(RequestHandler handler) {
59 - this.handler = handler;
60 - }
61 -
62 - @Override
63 - public CompletableFuture<Void> listen() {
64 - if (pool == null || pool.isShutdown()) {
65 - pool = newCachedThreadPool(namedThreads("onos-copycat-netty-messaging-server-%d"));
66 - }
67 -
68 - clusterCommunicator.addSubscriber(COPYCAT_PING, new PingHandler());
69 - clusterCommunicator.addSubscriber(COPYCAT_SYNC, new SyncHandler());
70 - clusterCommunicator.addSubscriber(COPYCAT_POLL, new PollHandler());
71 - clusterCommunicator.addSubscriber(COPYCAT_SUBMIT, new SubmitHandler());
72 - return CompletableFuture.completedFuture(null);
73 - }
74 -
75 - @Override
76 - public CompletableFuture<Void> close() {
77 - clusterCommunicator.removeSubscriber(COPYCAT_PING);
78 - clusterCommunicator.removeSubscriber(COPYCAT_SYNC);
79 - clusterCommunicator.removeSubscriber(COPYCAT_POLL);
80 - clusterCommunicator.removeSubscriber(COPYCAT_SUBMIT);
81 - if (pool != null) {
82 - pool.shutdownNow();
83 - pool = null;
84 - }
85 - return CompletableFuture.completedFuture(null);
86 - }
87 -
88 - private final class PingHandler extends CopycatMessageHandler<PingRequest> {
89 -
90 - @Override
91 - public void raftHandle(PingRequest request, ClusterMessage message) {
92 - pool.submit(new Runnable() {
93 -
94 - @Override
95 - public void run() {
96 - currentHandler().ping(request)
97 - .whenComplete(new PostExecutionTask<>(message));
98 - }
99 - });
100 - }
101 - }
102 -
103 - private final class SyncHandler extends CopycatMessageHandler<SyncRequest> {
104 -
105 - @Override
106 - public void raftHandle(SyncRequest request, ClusterMessage message) {
107 - pool.submit(new Runnable() {
108 -
109 - @Override
110 - public void run() {
111 - currentHandler().sync(request)
112 - .whenComplete(new PostExecutionTask<>(message));
113 - }
114 - });
115 - }
116 - }
117 -
118 - private final class PollHandler extends CopycatMessageHandler<PollRequest> {
119 -
120 - @Override
121 - public void raftHandle(PollRequest request, ClusterMessage message) {
122 - pool.submit(new Runnable() {
123 -
124 - @Override
125 - public void run() {
126 - currentHandler().poll(request)
127 - .whenComplete(new PostExecutionTask<>(message));
128 - }
129 - });
130 - }
131 - }
132 -
133 - private final class SubmitHandler extends CopycatMessageHandler<SubmitRequest> {
134 -
135 - @Override
136 - public void raftHandle(SubmitRequest request, ClusterMessage message) {
137 - pool.submit(new Runnable() {
138 -
139 - @Override
140 - public void run() {
141 - currentHandler().submit(request)
142 - .whenComplete(new PostExecutionTask<>(message));
143 - }
144 - });
145 - }
146 - }
147 -
148 - private abstract class CopycatMessageHandler<T> implements ClusterMessageHandler {
149 -
150 - public abstract void raftHandle(T request, ClusterMessage message);
151 -
152 - @Override
153 - public void handle(ClusterMessage message) {
154 - T request = DB_SERIALIZER.decode(message.payload());
155 - raftHandle(request, message);
156 - }
157 -
158 - RequestHandler currentHandler() {
159 - RequestHandler currentHandler = handler;
160 - if (currentHandler == null) {
161 - // there is a slight window of time during state transition,
162 - // where handler becomes null
163 - long sleepMs = 1;
164 - for (int i = 0; i < 10; ++i) {
165 - currentHandler = handler;
166 - if (currentHandler != null) {
167 - break;
168 - }
169 - try {
170 - sleepMs <<= 1;
171 - Thread.sleep(sleepMs);
172 - } catch (InterruptedException e) {
173 - log.error("Interrupted", e);
174 - return handler;
175 - }
176 - }
177 - if (currentHandler == null) {
178 - log.error("There was no handler registered!");
179 - return handler;
180 - }
181 - }
182 - return currentHandler;
183 - }
184 -
185 - final class PostExecutionTask<R> implements BiConsumer<R, Throwable> {
186 -
187 - private final ClusterMessage message;
188 -
189 - public PostExecutionTask(ClusterMessage message) {
190 - this.message = message;
191 - }
192 -
193 - @Override
194 - public void accept(R response, Throwable error) {
195 - if (error != null) {
196 - log.error("Processing {} failed.", message.subject(), error);
197 - } else {
198 - try {
199 - log.trace("responding to {}", message.subject());
200 - message.respond(DB_SERIALIZER.encode(response));
201 - } catch (Exception e) {
202 - log.error("Failed responding with {}", response.getClass().getName(), e);
203 - }
204 - }
205 - }
206 - }
207 - }
208 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.service.impl;
17 -
18 -import static com.google.common.base.Preconditions.checkNotNull;
19 -import static org.slf4j.LoggerFactory.getLogger;
20 -
21 -import java.util.Arrays;
22 -import java.util.List;
23 -import java.util.Map;
24 -import java.util.Set;
25 -import java.util.UUID;
26 -import java.util.concurrent.CompletableFuture;
27 -import java.util.concurrent.ExecutionException;
28 -import java.util.concurrent.TimeUnit;
29 -import java.util.concurrent.TimeoutException;
30 -
31 -import net.kuujo.copycat.cluster.Member;
32 -import net.kuujo.copycat.cluster.TcpMember;
33 -import net.kuujo.copycat.event.LeaderElectEvent;
34 -import net.kuujo.copycat.protocol.Response.Status;
35 -import net.kuujo.copycat.protocol.SubmitRequest;
36 -import net.kuujo.copycat.protocol.SubmitResponse;
37 -import net.kuujo.copycat.spi.protocol.ProtocolClient;
38 -
39 -import org.onosproject.store.cluster.messaging.ClusterMessage;
40 -import org.onosproject.store.cluster.messaging.ClusterMessageHandler;
41 -import org.onosproject.store.service.BatchReadRequest;
42 -import org.onosproject.store.service.BatchWriteRequest;
43 -import org.onosproject.store.service.DatabaseException;
44 -import org.onosproject.store.service.ReadResult;
45 -import org.onosproject.store.service.VersionedValue;
46 -import org.onosproject.store.service.WriteResult;
47 -import org.slf4j.Logger;
48 -
49 -/**
50 - * Client for interacting with the Copycat Raft cluster.
51 - */
52 -public class DatabaseClient implements ClusterMessageHandler {
53 -
54 - private static final int RETRIES = 5;
55 -
56 - private static final int TIMEOUT_MS = 2000;
57 -
58 - private final Logger log = getLogger(getClass());
59 -
60 - private final DatabaseProtocolService protocol;
61 - private volatile ProtocolClient client = null;
62 - private volatile Member currentLeader = null;
63 - private volatile long currentLeaderTerm = 0;
64 -
65 - public DatabaseClient(DatabaseProtocolService protocol) {
66 - this.protocol = checkNotNull(protocol);
67 - }
68 -
69 - @Override
70 - public void handle(ClusterMessage message) {
71 - LeaderElectEvent event =
72 - ClusterMessagingProtocol.DB_SERIALIZER.decode(message.payload());
73 - TcpMember newLeader = event.leader();
74 - long newLeaderTerm = event.term();
75 - if (newLeader != null && !newLeader.equals(currentLeader) && newLeaderTerm > currentLeaderTerm) {
76 - log.info("New leader detected. Leader: {}, term: {}", newLeader, newLeaderTerm);
77 - ProtocolClient prevClient = client;
78 - ProtocolClient newClient = protocol.createClient(newLeader);
79 - newClient.connect();
80 - client = newClient;
81 - currentLeader = newLeader;
82 - currentLeaderTerm = newLeaderTerm;
83 -
84 - if (prevClient != null) {
85 - prevClient.close();
86 - }
87 - }
88 - }
89 -
90 - private String nextRequestId() {
91 - return UUID.randomUUID().toString();
92 - }
93 -
94 - public void waitForLeader() {
95 - if (currentLeader != null) {
96 - return;
97 - }
98 -
99 - log.info("No leader in cluster, waiting for election.");
100 -
101 - try {
102 - while (currentLeader == null) {
103 - Thread.sleep(200);
104 - }
105 - return;
106 - } catch (InterruptedException e) {
107 - log.error("Interrupted while waiting for Leader", e);
108 - Thread.currentThread().interrupt();
109 - }
110 - }
111 -
112 - private <T> T submit(String operationName, Object... args) {
113 - waitForLeader();
114 - if (currentLeader == null) {
115 - throw new DatabaseException("Raft cluster does not have a leader.");
116 - }
117 -
118 - SubmitRequest request =
119 - new SubmitRequest(nextRequestId(), operationName, Arrays.asList(args));
120 -
121 - CompletableFuture<SubmitResponse> submitResponse = client.submit(request);
122 -
123 - log.debug("Sent {} to {}", request, currentLeader);
124 -
125 - try {
126 - final SubmitResponse response = submitResponse.get(TIMEOUT_MS, TimeUnit.MILLISECONDS);
127 - if (response.status() != Status.OK) {
128 - throw new DatabaseException(response.error());
129 - }
130 - return (T) response.result();
131 - } catch (ExecutionException | InterruptedException e) {
132 - throw new DatabaseException(e);
133 - } catch (TimeoutException e) {
134 - throw new DatabaseException.Timeout(e);
135 - }
136 - }
137 -
138 - public boolean createTable(String tableName) {
139 - return submit("createTable", tableName);
140 - }
141 -
142 - public boolean createTable(String tableName, int ttlMillis) {
143 - return submit("createTable", tableName, ttlMillis);
144 - }
145 -
146 - public void dropTable(String tableName) {
147 - submit("dropTable", tableName);
148 - }
149 -
150 - public void dropAllTables() {
151 - submit("dropAllTables");
152 - }
153 -
154 - public Set<String> listTables() {
155 - return submit("listTables");
156 - }
157 -
158 - public List<ReadResult> batchRead(BatchReadRequest batchRequest) {
159 - return submit("read", batchRequest);
160 - }
161 -
162 - public List<WriteResult> batchWrite(BatchWriteRequest batchRequest) {
163 - return submit("write", batchRequest);
164 - }
165 -
166 - public Map<String, VersionedValue> getAll(String tableName) {
167 - return submit("getAll", tableName);
168 - }
169 -
170 - Member getCurrentLeader() {
171 - return currentLeader;
172 - }
173 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -
17 -package org.onosproject.store.service.impl;
18 -
19 -import com.google.common.base.MoreObjects;
20 -import net.jodah.expiringmap.ExpiringMap;
21 -import net.jodah.expiringmap.ExpiringMap.ExpirationListener;
22 -import net.jodah.expiringmap.ExpiringMap.ExpirationPolicy;
23 -import net.kuujo.copycat.cluster.Member;
24 -import net.kuujo.copycat.event.EventHandler;
25 -import net.kuujo.copycat.event.LeaderElectEvent;
26 -import org.onosproject.cluster.ControllerNode;
27 -import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
28 -import org.onosproject.store.cluster.messaging.ClusterMessage;
29 -import org.onosproject.store.service.DatabaseService;
30 -import org.onosproject.store.service.VersionedValue;
31 -import org.onosproject.store.service.impl.DatabaseStateMachine.State;
32 -import org.onosproject.store.service.impl.DatabaseStateMachine.TableMetadata;
33 -import org.slf4j.Logger;
34 -import org.slf4j.LoggerFactory;
35 -
36 -import java.util.HashMap;
37 -import java.util.Map;
38 -import java.util.Objects;
39 -import java.util.concurrent.ExecutorService;
40 -import java.util.concurrent.Executors;
41 -import java.util.concurrent.TimeUnit;
42 -import java.util.concurrent.atomic.AtomicBoolean;
43 -
44 -import static org.onlab.util.Tools.namedThreads;
45 -
46 -/**
47 - * Plugs into the database update stream and track the TTL of entries added to
48 - * the database. For tables with pre-configured finite TTL, this class has
49 - * mechanisms for expiring (deleting) old, expired entries from the database.
50 - */
51 -public class DatabaseEntryExpirationTracker implements
52 - DatabaseUpdateEventListener, EventHandler<LeaderElectEvent> {
53 -
54 - private static final ExecutorService THREAD_POOL =
55 - Executors.newCachedThreadPool(namedThreads("onos-db-stale-entry-expirer-%d"));
56 -
57 - private final Logger log = LoggerFactory.getLogger(getClass());
58 -
59 - private final DatabaseService databaseService;
60 - private final ClusterCommunicationService clusterCommunicator;
61 -
62 - private final Member localMember;
63 - private final ControllerNode localNode;
64 - private final AtomicBoolean isLocalMemberLeader = new AtomicBoolean(false);
65 -
66 - private final Map<String, Map<DatabaseRow, Long>> tableEntryExpirationMap = new HashMap<>();
67 -
68 - private final ExpirationListener<DatabaseRow, Long> expirationObserver = new ExpirationObserver();
69 -
70 - DatabaseEntryExpirationTracker(
71 - Member localMember,
72 - ControllerNode localNode,
73 - ClusterCommunicationService clusterCommunicator,
74 - DatabaseService databaseService) {
75 - this.localMember = localMember;
76 - this.localNode = localNode;
77 - this.clusterCommunicator = clusterCommunicator;
78 - this.databaseService = databaseService;
79 - }
80 -
81 - @Override
82 - public void tableModified(TableModificationEvent event) {
83 - log.debug("{}: Received {}", localNode.id(), event);
84 -
85 - if (!tableEntryExpirationMap.containsKey(event.tableName())) {
86 - return;
87 - }
88 -
89 - Map<DatabaseRow, Long> map = tableEntryExpirationMap.get(event.tableName());
90 - DatabaseRow row = new DatabaseRow(event.tableName(), event.key());
91 - Long eventVersion = event.value().version();
92 -
93 - switch (event.type()) {
94 - case ROW_DELETED:
95 - map.remove(row, eventVersion);
96 - if (isLocalMemberLeader.get()) {
97 - log.debug("Broadcasting {} to the entire cluster", event);
98 - clusterCommunicator.broadcastIncludeSelf(new ClusterMessage(
99 - localNode.id(), DatabaseStateMachine.DATABASE_UPDATE_EVENTS,
100 - ClusterMessagingProtocol.DB_SERIALIZER.encode(event)));
101 - }
102 - break;
103 - case ROW_ADDED:
104 - case ROW_UPDATED:
105 - // To account for potential reordering of notifications,
106 - // check to make sure we are replacing an old version with a new version
107 - Long currentVersion = map.get(row);
108 - if (currentVersion == null || currentVersion < eventVersion) {
109 - map.put(row, eventVersion);
110 - }
111 - break;
112 - default:
113 - break;
114 - }
115 - }
116 -
117 - @Override
118 - public void tableCreated(TableMetadata metadata) {
119 - log.debug("Received a table created event {}", metadata);
120 - if (metadata.expireOldEntries()) {
121 - tableEntryExpirationMap.put(metadata.tableName(), ExpiringMap.builder()
122 - .expiration(metadata.ttlMillis(), TimeUnit.MILLISECONDS)
123 - .expirationListener(expirationObserver)
124 - .expirationPolicy(ExpirationPolicy.CREATED).build());
125 - }
126 - }
127 -
128 - @Override
129 - public void tableDeleted(String tableName) {
130 - log.debug("Received a table deleted event for table ({})", tableName);
131 - tableEntryExpirationMap.remove(tableName);
132 - }
133 -
134 - private class ExpirationObserver implements
135 - ExpirationListener<DatabaseRow, Long> {
136 - @Override
137 - public void expired(DatabaseRow row, Long version) {
138 - THREAD_POOL.submit(new ExpirationTask(row, version));
139 - }
140 - }
141 -
142 - private class ExpirationTask implements Runnable {
143 -
144 - private final DatabaseRow row;
145 - private final Long version;
146 -
147 - public ExpirationTask(DatabaseRow row, Long version) {
148 - this.row = row;
149 - this.version = version;
150 - }
151 -
152 - @Override
153 - public void run() {
154 - log.trace("Received an expiration event for {}, version: {}", row, version);
155 - Map<DatabaseRow, Long> map = tableEntryExpirationMap.get(row.tableName);
156 - try {
157 - if (isLocalMemberLeader.get()) {
158 - if (!databaseService.removeIfVersionMatches(row.tableName,
159 - row.key, version)) {
160 - log.info("Entry in database was updated right before its expiration.");
161 - } else {
162 - log.debug("Successfully expired old entry with key ({}) from table ({})",
163 - row.key, row.tableName);
164 - }
165 - } else {
166 - // Only the current leader will expire keys from database.
167 - // Everyone else function as standby just in case they need to take over
168 - if (map != null) {
169 - map.putIfAbsent(row, version);
170 - }
171 - }
172 -
173 - } catch (Exception e) {
174 - log.warn("Failed to delete entry from the database after ttl "
175 - + "expiration. Operation will be retried.", e);
176 - map.putIfAbsent(row, version);
177 - }
178 - }
179 - }
180 -
181 - @Override
182 - public void handle(LeaderElectEvent event) {
183 - isLocalMemberLeader.set(localMember.equals(event.leader()));
184 - if (isLocalMemberLeader.get()) {
185 - log.info("{} is now the leader of Raft cluster", localNode.id());
186 - }
187 - }
188 -
189 - /**
190 - * Wrapper class for a database row identifier.
191 - */
192 - private class DatabaseRow {
193 -
194 - String tableName;
195 - String key;
196 -
197 - public DatabaseRow(String tableName, String key) {
198 - this.tableName = tableName;
199 - this.key = key;
200 - }
201 -
202 - @Override
203 - public String toString() {
204 - return MoreObjects.toStringHelper(getClass())
205 - .add("tableName", tableName)
206 - .add("key", key)
207 - .toString();
208 - }
209 -
210 - @Override
211 - public boolean equals(Object obj) {
212 - if (this == obj) {
213 - return true;
214 - }
215 - if (!(obj instanceof DatabaseRow)) {
216 - return false;
217 - }
218 - DatabaseRow that = (DatabaseRow) obj;
219 -
220 - return Objects.equals(this.tableName, that.tableName)
221 - && Objects.equals(this.key, that.key);
222 - }
223 -
224 - @Override
225 - public int hashCode() {
226 - return Objects.hash(tableName, key);
227 - }
228 - }
229 -
230 - @Override
231 - public void snapshotInstalled(State state) {
232 - if (!tableEntryExpirationMap.isEmpty()) {
233 - return;
234 - }
235 - log.debug("Received a snapshot installed notification");
236 - for (String tableName : state.getTableNames()) {
237 -
238 - TableMetadata metadata = state.getTableMetadata(tableName);
239 - if (!metadata.expireOldEntries()) {
240 - continue;
241 - }
242 -
243 - Map<DatabaseRow, Long> tableExpirationMap = ExpiringMap.builder()
244 - .expiration(metadata.ttlMillis(), TimeUnit.MILLISECONDS)
245 - .expirationListener(expirationObserver)
246 - .expirationPolicy(ExpirationPolicy.CREATED).build();
247 - for (Map.Entry<String, VersionedValue> entry : state.getTable(tableName).entrySet()) {
248 - tableExpirationMap.put(new DatabaseRow(tableName, entry.getKey()), entry.getValue().version());
249 - }
250 -
251 - tableEntryExpirationMap.put(tableName, tableExpirationMap);
252 - }
253 - }
254 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.service.impl;
17 -
18 -import com.google.common.collect.ImmutableList;
19 -import net.kuujo.copycat.Copycat;
20 -import net.kuujo.copycat.CopycatConfig;
21 -import net.kuujo.copycat.cluster.ClusterConfig;
22 -import net.kuujo.copycat.cluster.Member;
23 -import net.kuujo.copycat.cluster.TcpCluster;
24 -import net.kuujo.copycat.cluster.TcpClusterConfig;
25 -import net.kuujo.copycat.cluster.TcpMember;
26 -import net.kuujo.copycat.event.EventHandler;
27 -import net.kuujo.copycat.event.LeaderElectEvent;
28 -import net.kuujo.copycat.log.Log;
29 -import org.apache.felix.scr.annotations.Activate;
30 -import org.apache.felix.scr.annotations.Component;
31 -import org.apache.felix.scr.annotations.Deactivate;
32 -import org.apache.felix.scr.annotations.Reference;
33 -import org.apache.felix.scr.annotations.ReferenceCardinality;
34 -import org.apache.felix.scr.annotations.Service;
35 -import org.onlab.packet.IpAddress;
36 -import org.onosproject.cluster.ClusterEvent;
37 -import org.onosproject.cluster.ClusterEventListener;
38 -import org.onosproject.cluster.ClusterService;
39 -import org.onosproject.cluster.ControllerNode;
40 -import org.onosproject.cluster.DefaultControllerNode;
41 -import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
42 -import org.onosproject.store.cluster.messaging.ClusterMessage;
43 -import org.onosproject.store.cluster.messaging.MessageSubject;
44 -import org.onosproject.store.service.BatchReadRequest;
45 -import org.onosproject.store.service.BatchReadResult;
46 -import org.onosproject.store.service.BatchWriteRequest;
47 -import org.onosproject.store.service.BatchWriteResult;
48 -import org.onosproject.store.service.DatabaseAdminService;
49 -import org.onosproject.store.service.DatabaseException;
50 -import org.onosproject.store.service.DatabaseService;
51 -import org.onosproject.store.service.ReadResult;
52 -import org.onosproject.store.service.ReadStatus;
53 -import org.onosproject.store.service.VersionedValue;
54 -import org.onosproject.store.service.WriteResult;
55 -import org.onosproject.store.service.WriteStatus;
56 -import org.slf4j.Logger;
57 -
58 -import java.io.File;
59 -import java.io.IOException;
60 -import java.util.Collection;
61 -import java.util.Collections;
62 -import java.util.HashSet;
63 -import java.util.Map;
64 -import java.util.Optional;
65 -import java.util.Set;
66 -import java.util.concurrent.CountDownLatch;
67 -import java.util.concurrent.ExecutionException;
68 -import java.util.concurrent.ScheduledExecutorService;
69 -import java.util.concurrent.TimeUnit;
70 -
71 -import static java.util.concurrent.Executors.newSingleThreadScheduledExecutor;
72 -import static org.onlab.util.Tools.namedThreads;
73 -import static org.slf4j.LoggerFactory.getLogger;
74 -
75 -/**
76 - * Strongly consistent and durable state management service based on
77 - * Copycat implementation of Raft consensus protocol.
78 - */
79 -@Component(immediate = false)
80 -@Service
81 -public class DatabaseManager implements DatabaseService, DatabaseAdminService {
82 -
83 - private static final int RETRY_MS = 500;
84 -
85 - private static final int ACTIVATE_MAX_RETRIES = 100;
86 -
87 - private final Logger log = getLogger(getClass());
88 -
89 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
90 - protected ClusterService clusterService;
91 -
92 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
93 - protected ClusterCommunicationService clusterCommunicator;
94 -
95 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
96 - protected DatabaseProtocolService copycatMessagingProtocol;
97 -
98 - public static final String LOG_FILE_PREFIX = "raft/onos-copy-cat-log_";
99 -
100 - // Current working dir seems to be /opt/onos/apache-karaf-3.0.2
101 - // TODO: Set the path to /opt/onos/config
102 - private static final String CONFIG_DIR = "../config";
103 -
104 - private static final String DEFAULT_MEMBER_FILE = "tablets.json";
105 -
106 - private static final String DEFAULT_TABLET = "default";
107 -
108 - // TODO: make this configurable
109 - // initial member configuration file path
110 - private String initialMemberConfig = DEFAULT_MEMBER_FILE;
111 -
112 - public static final MessageSubject RAFT_LEADER_ELECTION_EVENT =
113 - new MessageSubject("raft-leader-election-event");
114 -
115 - private Copycat copycat;
116 - private DatabaseClient client;
117 -
118 - // guarded by synchronized block
119 - private ClusterConfig<TcpMember> clusterConfig;
120 -
121 - private CountDownLatch clusterEventLatch;
122 - private ClusterEventListener clusterEventListener;
123 -
124 - private Map<String, Set<DefaultControllerNode>> tabletMembers;
125 -
126 - private boolean autoAddMember = false;
127 -
128 - private ScheduledExecutorService executor;
129 -
130 - private volatile LeaderElectEvent myLeaderEvent = null;
131 -
132 - // TODO make this configurable
133 - private int maxLogSizeBytes = 128 * (1024 * 1024);
134 -
135 - // TODO make this configurable
136 - private long electionTimeoutMs = 5000; // CopyCat default: 2000
137 -
138 - @Activate
139 - public void activate() throws InterruptedException, ExecutionException {
140 -
141 - // KARAF_DATA
142 - // http://karaf.apache.org/manual/latest/users-guide/start-stop.html
143 - final String dataDir = System.getProperty("karaf.data", "./data");
144 -
145 - // load tablet configuration
146 - File file = new File(CONFIG_DIR, initialMemberConfig);
147 - log.info("Loading config: {}", file.getAbsolutePath());
148 - TabletDefinitionStore tabletDef = new TabletDefinitionStore(file);
149 - try {
150 - tabletMembers = tabletDef.read();
151 - } catch (IOException e) {
152 - log.error("Failed to load tablet config {}", file);
153 - throw new IllegalStateException("Failed to load tablet config", e);
154 - }
155 -
156 - // load default tablet configuration and start copycat
157 - clusterConfig = new TcpClusterConfig();
158 - Set<DefaultControllerNode> defaultMembers = tabletMembers.get(DEFAULT_TABLET);
159 - if (defaultMembers == null || defaultMembers.isEmpty()) {
160 - log.error("No members found in [{}] tablet configuration.",
161 - DEFAULT_TABLET);
162 - throw new IllegalStateException("No member found in tablet configuration");
163 -
164 - }
165 -
166 - final ControllerNode localNode = clusterService.getLocalNode();
167 - for (ControllerNode member : defaultMembers) {
168 - final TcpMember tcpMember = new TcpMember(member.ip().toString(),
169 - member.tcpPort());
170 - if (localNode.equals(member)) {
171 - clusterConfig.setLocalMember(tcpMember);
172 - } else {
173 - clusterConfig.addRemoteMember(tcpMember);
174 - }
175 - }
176 -
177 - if (clusterConfig.getLocalMember() != null) {
178 -
179 - // Wait for a minimum viable Raft cluster to boot up.
180 - waitForClusterQuorum();
181 -
182 - final TcpCluster cluster;
183 - synchronized (clusterConfig) {
184 - // Create the cluster.
185 - cluster = new TcpCluster(clusterConfig);
186 - }
187 - log.info("Starting cluster: {}", cluster);
188 -
189 - DatabaseEntryExpirationTracker expirationTracker =
190 - new DatabaseEntryExpirationTracker(
191 - clusterConfig.getLocalMember(),
192 - clusterService.getLocalNode(),
193 - clusterCommunicator,
194 - this);
195 -
196 - DatabaseStateMachine stateMachine = new DatabaseStateMachine();
197 - stateMachine.addEventListener(expirationTracker);
198 - Log consensusLog = new MapDBLog(dataDir + "/" + LOG_FILE_PREFIX + localNode.id(),
199 - ClusterMessagingProtocol.DB_SERIALIZER);
200 -
201 - CopycatConfig ccConfig = new CopycatConfig();
202 - ccConfig.setMaxLogSize(maxLogSizeBytes);
203 - ccConfig.setElectionTimeout(electionTimeoutMs);
204 -
205 - copycat = new Copycat(stateMachine, consensusLog, cluster, copycatMessagingProtocol, ccConfig);
206 - copycat.event(LeaderElectEvent.class).registerHandler(new RaftLeaderElectionMonitor());
207 - copycat.event(LeaderElectEvent.class).registerHandler(expirationTracker);
208 - }
209 -
210 - client = new DatabaseClient(copycatMessagingProtocol);
211 - clusterCommunicator.addSubscriber(RAFT_LEADER_ELECTION_EVENT, client);
212 -
213 - // Starts copycat if this node is a participant
214 - // of the Raft cluster.
215 - if (copycat != null) {
216 - copycat.start().get();
217 -
218 - executor =
219 - newSingleThreadScheduledExecutor(namedThreads("onos-db-heartbeat-%d"));
220 - executor.scheduleWithFixedDelay(new LeaderAdvertiser(), 5, 2, TimeUnit.SECONDS);
221 -
222 - }
223 -
224 - client.waitForLeader();
225 -
226 - // Try and list the tables to verify database manager is
227 - // in a state where it can serve requests.
228 - tryTableListing();
229 -
230 - log.info("Started.");
231 - }
232 -
233 - @Deactivate
234 - public void deactivate() {
235 - if (executor != null) {
236 - executor.shutdownNow();
237 - }
238 - clusterService.removeListener(clusterEventListener);
239 - // TODO: ClusterCommunicationService must support more than one
240 - // handler per message subject.
241 - clusterCommunicator.removeSubscriber(RAFT_LEADER_ELECTION_EVENT);
242 - if (copycat != null) {
243 - copycat.stop();
244 - }
245 - log.info("Stopped.");
246 - }
247 -
248 - private void waitForClusterQuorum() {
249 - // note: from this point beyond, clusterConfig requires synchronization
250 - clusterEventLatch = new CountDownLatch(1);
251 - clusterEventListener = new InternalClusterEventListener();
252 - clusterService.addListener(clusterEventListener);
253 -
254 - final int raftClusterSize = clusterConfig.getMembers().size();
255 - final int raftClusterQuorumSize = (int) (Math.floor(raftClusterSize / 2)) + 1;
256 - if (clusterService.getNodes().size() < raftClusterQuorumSize) {
257 - // current cluster size smaller then expected
258 - try {
259 - final int waitTimeSec = 120;
260 - log.info("Waiting for a maximum of {}s for raft cluster quorum to boot up...", waitTimeSec);
261 - if (!clusterEventLatch.await(waitTimeSec, TimeUnit.SECONDS)) {
262 - log.info("Starting with {}/{} nodes cluster",
263 - clusterService.getNodes().size(),
264 - raftClusterSize);
265 - }
266 - } catch (InterruptedException e) {
267 - log.info("Interrupted waiting for raft quorum.", e);
268 - }
269 - }
270 - }
271 -
272 - private void tryTableListing() throws InterruptedException {
273 - int retries = 0;
274 - do {
275 - try {
276 - listTables();
277 - return;
278 - } catch (DatabaseException.Timeout e) {
279 - log.debug("Failed to listTables. Will retry...", e);
280 - } catch (DatabaseException e) {
281 - log.debug("Failed to listTables. Will retry later...", e);
282 - Thread.sleep(RETRY_MS);
283 - }
284 - if (retries == ACTIVATE_MAX_RETRIES) {
285 - log.error("Failed to listTables after multiple attempts. Giving up.");
286 - // Exiting hoping things will be fixed by the time
287 - // others start using the service
288 - return;
289 - }
290 - retries++;
291 - } while (true);
292 - }
293 -
294 - @Override
295 - public boolean createTable(String name) {
296 - return client.createTable(name);
297 - }
298 -
299 - @Override
300 - public boolean createTable(String name, int ttlMillis) {
301 - return client.createTable(name, ttlMillis);
302 - }
303 -
304 - @Override
305 - public void dropTable(String name) {
306 - client.dropTable(name);
307 - }
308 -
309 - @Override
310 - public void dropAllTables() {
311 - client.dropAllTables();
312 - }
313 -
314 - @Override
315 - public Set<String> listTables() {
316 - return client.listTables();
317 - }
318 -
319 - @Override
320 - public VersionedValue get(String tableName, String key) {
321 - BatchReadRequest batchRequest = new BatchReadRequest.Builder().get(tableName, key).build();
322 - ReadResult readResult = batchRead(batchRequest).getAsList().get(0);
323 - if (readResult.status().equals(ReadStatus.OK)) {
324 - return readResult.value();
325 - }
326 - throw new DatabaseException("get failed due to status: " + readResult.status());
327 - }
328 -
329 - @Override
330 - public Map<String, VersionedValue> getAll(String tableName) {
331 - return client.getAll(tableName);
332 - }
333 -
334 -
335 - @Override
336 - public BatchReadResult batchRead(BatchReadRequest batchRequest) {
337 - return new BatchReadResult(client.batchRead(batchRequest));
338 - }
339 -
340 - @Override
341 - public BatchWriteResult batchWrite(BatchWriteRequest batchRequest) {
342 - return new BatchWriteResult(client.batchWrite(batchRequest));
343 - }
344 -
345 - @Override
346 - public VersionedValue put(String tableName, String key, byte[] value) {
347 - BatchWriteRequest batchRequest = new BatchWriteRequest.Builder().put(tableName, key, value).build();
348 - WriteResult writeResult = batchWrite(batchRequest).getAsList().get(0);
349 - if (writeResult.status().equals(WriteStatus.OK)) {
350 - return writeResult.previousValue();
351 - }
352 - throw new DatabaseException("put failed due to status: " + writeResult.status());
353 - }
354 -
355 - @Override
356 - public boolean putIfAbsent(String tableName, String key, byte[] value) {
357 - BatchWriteRequest batchRequest = new BatchWriteRequest.Builder()
358 - .putIfAbsent(tableName, key, value).build();
359 - WriteResult writeResult = batchWrite(batchRequest).getAsList().get(0);
360 - if (writeResult.status().equals(WriteStatus.OK)) {
361 - return true;
362 - } else if (writeResult.status().equals(WriteStatus.PRECONDITION_VIOLATION)) {
363 - return false;
364 - }
365 - throw new DatabaseException("putIfAbsent failed due to status: "
366 - + writeResult.status());
367 - }
368 -
369 - @Override
370 - public boolean putIfVersionMatches(String tableName, String key,
371 - byte[] value, long version) {
372 - BatchWriteRequest batchRequest =
373 - new BatchWriteRequest.Builder()
374 - .putIfVersionMatches(tableName, key, value, version).build();
375 - WriteResult writeResult = batchWrite(batchRequest).getAsList().get(0);
376 - if (writeResult.status().equals(WriteStatus.OK)) {
377 - return true;
378 - } else if (writeResult.status().equals(WriteStatus.PRECONDITION_VIOLATION)) {
379 - return false;
380 - }
381 - throw new DatabaseException("putIfVersionMatches failed due to status: "
382 - + writeResult.status());
383 - }
384 -
385 - @Override
386 - public boolean putIfValueMatches(String tableName, String key,
387 - byte[] oldValue, byte[] newValue) {
388 - BatchWriteRequest batchRequest = new BatchWriteRequest.Builder()
389 - .putIfValueMatches(tableName, key, oldValue, newValue).build();
390 - WriteResult writeResult = batchWrite(batchRequest).getAsList().get(0);
391 - if (writeResult.status().equals(WriteStatus.OK)) {
392 - return true;
393 - } else if (writeResult.status().equals(WriteStatus.PRECONDITION_VIOLATION)) {
394 - return false;
395 - }
396 - throw new DatabaseException("putIfValueMatches failed due to status: "
397 - + writeResult.status());
398 - }
399 -
400 - @Override
401 - public VersionedValue remove(String tableName, String key) {
402 - BatchWriteRequest batchRequest = new BatchWriteRequest.Builder()
403 - .remove(tableName, key).build();
404 - WriteResult writeResult = batchWrite(batchRequest).getAsList().get(0);
405 - if (writeResult.status().equals(WriteStatus.OK)) {
406 - return writeResult.previousValue();
407 - }
408 - throw new DatabaseException("remove failed due to status: "
409 - + writeResult.status());
410 - }
411 -
412 - @Override
413 - public boolean removeIfVersionMatches(String tableName, String key,
414 - long version) {
415 - BatchWriteRequest batchRequest = new BatchWriteRequest.Builder()
416 - .removeIfVersionMatches(tableName, key, version).build();
417 - WriteResult writeResult = batchWrite(batchRequest).getAsList().get(0);
418 - if (writeResult.status().equals(WriteStatus.OK)) {
419 - return true;
420 - } else if (writeResult.status().equals(WriteStatus.PRECONDITION_VIOLATION)) {
421 - return false;
422 - }
423 - throw new DatabaseException("removeIfVersionMatches failed due to status: "
424 - + writeResult.status());
425 - }
426 -
427 - @Override
428 - public boolean removeIfValueMatches(String tableName, String key,
429 - byte[] value) {
430 - BatchWriteRequest batchRequest = new BatchWriteRequest.Builder()
431 - .removeIfValueMatches(tableName, key, value).build();
432 - WriteResult writeResult = batchWrite(batchRequest).getAsList().get(0);
433 - if (writeResult.status().equals(WriteStatus.OK)) {
434 - return true;
435 - } else if (writeResult.status().equals(WriteStatus.PRECONDITION_VIOLATION)) {
436 - return false;
437 - }
438 - throw new DatabaseException("removeIfValueMatches failed due to status: "
439 - + writeResult.status());
440 - }
441 -
442 - @Override
443 - public void addMember(final ControllerNode node) {
444 - final TcpMember tcpMember = new TcpMember(node.ip().toString(),
445 - node.tcpPort());
446 - log.info("{} was added to the cluster", tcpMember);
447 - synchronized (clusterConfig) {
448 - clusterConfig.addRemoteMember(tcpMember);
449 - }
450 - }
451 -
452 - @Override
453 - public Optional<ControllerNode> leader() {
454 - if (copycat != null) {
455 - if (copycat.isLeader()) {
456 - return Optional.of(clusterService.getLocalNode());
457 - }
458 - Member leader = copycat.cluster().remoteMember(copycat.leader());
459 - return Optional.ofNullable(getNodeIdFromMember(leader));
460 - }
461 - return Optional.ofNullable(getNodeIdFromMember(client.getCurrentLeader()));
462 - }
463 -
    /**
     * Runnable that re-broadcasts this node's RAFT leadership claim while
     * {@code myLeaderEvent} is set (i.e. while this node believes it is the
     * current leader). Scheduling is not visible in this block — presumably
     * run periodically by an executor; confirm at the call site.
     */
    private final class LeaderAdvertiser implements Runnable {

        @Override
        public void run() {
            try {
                // Read the volatile-ish field once so the null check and the
                // broadcast use the same event instance.
                LeaderElectEvent event = myLeaderEvent;
                if (event != null) {
                    log.trace("Broadcasting RAFT_LEADER_ELECTION_EVENT: {}", event);
                    // This node just became the leader.
                    clusterCommunicator.broadcastIncludeSelf(
                            new ClusterMessage(
                                    clusterService.getLocalNode().id(),
                                    RAFT_LEADER_ELECTION_EVENT,
                                    ClusterMessagingProtocol.DB_SERIALIZER.encode(event)));
                }
            } catch (Exception e) {
                // Best-effort advertisement: swallow and log at debug so a
                // transient failure does not kill the scheduled task.
                log.debug("LeaderAdvertiser failed with exception", e);
            }
        }

    }
485 -
    /**
     * Reacts to copycat leader-election events: when the local member wins
     * the election, records the event in {@code myLeaderEvent} and broadcasts
     * it cluster-wide (including to self); otherwise clears the recorded event.
     */
    private final class RaftLeaderElectionMonitor implements EventHandler<LeaderElectEvent> {
        @Override
        public void handle(LeaderElectEvent event) {
            log.debug("Received LeaderElectEvent: {}", event);
            if (clusterConfig.getLocalMember() != null && event.leader().equals(clusterConfig.getLocalMember())) {
                log.debug("Broadcasting RAFT_LEADER_ELECTION_EVENT");
                // Remember the event so LeaderAdvertiser can keep re-broadcasting it.
                myLeaderEvent = event;
                // This node just became the leader.
                clusterCommunicator.broadcastIncludeSelf(
                        new ClusterMessage(
                                clusterService.getLocalNode().id(),
                                RAFT_LEADER_ELECTION_EVENT,
                                ClusterMessagingProtocol.DB_SERIALIZER.encode(event)));
            } else {
                if (myLeaderEvent != null) {
                    log.debug("This node is no longer the Leader");
                }
                // Not the leader (or local member unknown): stop advertising.
                myLeaderEvent = null;
            }
        }
    }
507 -
    /**
     * Keeps the copycat cluster configuration in sync with ONOS cluster
     * membership events. When {@code autoAddMember} is enabled, activated/added
     * nodes are added to the consensus ring and deactivated/removed nodes are
     * dropped — except initial tablet members, which are never auto-removed.
     * Every event also counts down {@code clusterEventLatch}.
     */
    private final class InternalClusterEventListener
            implements ClusterEventListener {

        @Override
        public void event(ClusterEvent event) {
            // TODO: Not every node should be part of the consensus ring.

            final ControllerNode node = event.subject();
            final TcpMember tcpMember = new TcpMember(node.ip().toString(),
                    node.tcpPort());

            switch (event.type()) {
            case INSTANCE_ACTIVATED:
            case INSTANCE_ADDED:
                if (autoAddMember) {
                    // check-then-add must be atomic w.r.t. other config writers
                    synchronized (clusterConfig) {
                        if (!clusterConfig.getMembers().contains(tcpMember)) {
                            log.info("{} was automatically added to the cluster", tcpMember);
                            clusterConfig.addRemoteMember(tcpMember);
                        }
                    }
                }
                break;
            case INSTANCE_DEACTIVATED:
            case INSTANCE_REMOVED:
                if (autoAddMember) {
                    Set<DefaultControllerNode> members
                            = tabletMembers.getOrDefault(DEFAULT_TABLET,
                                    Collections.emptySet());
                    // remove only if not the initial members
                    if (!members.contains(node)) {
                        synchronized (clusterConfig) {
                            if (clusterConfig.getMembers().contains(tcpMember)) {
                                log.info("{} was automatically removed from the cluster", tcpMember);
                                clusterConfig.removeRemoteMember(tcpMember);
                            }
                        }
                    }
                }
                break;
            default:
                break;
            }
            if (copycat != null) {
                log.debug("Current cluster: {}", copycat.cluster());
            }
            // Signals bootstrap code waiting for cluster membership changes.
            clusterEventLatch.countDown();
        }

    }
558 -
559 - @Override
560 - public void removeMember(final ControllerNode node) {
561 - final TcpMember tcpMember = new TcpMember(node.ip().toString(),
562 - node.tcpPort());
563 - log.info("{} was removed from the cluster", tcpMember);
564 - synchronized (clusterConfig) {
565 - clusterConfig.removeRemoteMember(tcpMember);
566 - }
567 - }
568 -
569 - @Override
570 - public Collection<ControllerNode> listMembers() {
571 - if (copycat == null) {
572 - return ImmutableList.of();
573 - }
574 - Set<ControllerNode> members = new HashSet<>();
575 - for (Member member : copycat.cluster().members()) {
576 - ControllerNode node = getNodeIdFromMember(member);
577 - if (node == null) {
578 - log.info("No Node found for {}", member);
579 - continue;
580 - }
581 - members.add(node);
582 - }
583 - return members;
584 - }
585 -
586 - private ControllerNode getNodeIdFromMember(Member member) {
587 - if (member instanceof TcpMember) {
588 - final TcpMember tcpMember = (TcpMember) member;
589 - // TODO assuming tcpMember#host to be IP address,
590 - // but if not lookup DNS, etc. first
591 - IpAddress ip = IpAddress.valueOf(tcpMember.host());
592 - int tcpPort = tcpMember.port();
593 - for (ControllerNode node : clusterService.getNodes()) {
594 - if (node.ip().equals(ip) &&
595 - node.tcpPort() == tcpPort) {
596 - return node;
597 - }
598 - }
599 - }
600 - return null;
601 - }
602 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.service.impl;
17 -
18 -import net.kuujo.copycat.cluster.TcpMember;
19 -import net.kuujo.copycat.spi.protocol.Protocol;
20 -
/**
 * Marker protocol interface used to connect the DatabaseManager with the
 * ClusterMessagingProtocol implementation.
 */
// TODO: Consider changing ClusterMessagingProtocol to non-Service class
public interface DatabaseProtocolService extends Protocol<TcpMember> {

}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.service.impl;
17 -
18 -import static org.onlab.util.Tools.namedThreads;
19 -import static org.slf4j.LoggerFactory.getLogger;
20 -import static org.onosproject.store.service.impl.ClusterMessagingProtocol.DB_SERIALIZER;
21 -
22 -import java.io.ByteArrayInputStream;
23 -import java.io.ByteArrayOutputStream;
24 -import java.util.ArrayList;
25 -import java.util.Arrays;
26 -import java.util.List;
27 -import java.util.Map;
28 -import java.util.Set;
29 -import java.util.concurrent.ExecutorService;
30 -import java.util.concurrent.Executors;
31 -import java.util.zip.DeflaterOutputStream;
32 -import java.util.zip.InflaterInputStream;
33 -
34 -import net.kuujo.copycat.Command;
35 -import net.kuujo.copycat.Query;
36 -import net.kuujo.copycat.StateMachine;
37 -
38 -import org.onosproject.store.cluster.messaging.MessageSubject;
39 -import org.onosproject.store.service.BatchReadRequest;
40 -import org.onosproject.store.service.BatchWriteRequest;
41 -import org.onosproject.store.service.ReadRequest;
42 -import org.onosproject.store.service.ReadResult;
43 -import org.onosproject.store.service.ReadStatus;
44 -import org.onosproject.store.service.VersionedValue;
45 -import org.onosproject.store.service.WriteRequest;
46 -import org.onosproject.store.service.WriteResult;
47 -import org.onosproject.store.service.WriteStatus;
48 -import org.slf4j.Logger;
49 -
50 -import com.google.common.base.MoreObjects;
51 -import com.google.common.collect.ImmutableMap;
52 -import com.google.common.collect.ImmutableSet;
53 -import com.google.common.collect.Lists;
54 -import com.google.common.collect.Maps;
55 -import com.google.common.collect.Sets;
56 -
/**
 * StateMachine whose transitions are coordinated/replicated
 * by Raft consensus.
 * Each Raft cluster member has a instance of this state machine that is
 * independently updated in lock step once there is consensus
 * on the next transition.
 * <p>
 * Thread-safety: transitions appear to assume a single copycat apply thread
 * (see the FIXME inside {@link #write}) — confirm before concurrent use.
 */
public class DatabaseStateMachine implements StateMachine {

    private final Logger log = getLogger(getClass());

    // Single-threaded executor so listener notifications are delivered
    // asynchronously, in submission order, off the state-transition path.
    private final ExecutorService updatesExecutor =
        Executors.newSingleThreadExecutor(namedThreads("onos-db-statemachine-updates"));

    // message subject for database update notifications.
    public static final MessageSubject DATABASE_UPDATE_EVENTS =
        new MessageSubject("database-update-events");

    // Identity-based set: a listener instance is registered at most once.
    private final Set<DatabaseUpdateEventListener> listeners = Sets.newIdentityHashSet();

    // durable internal state of the database.
    private State state = new State();

    // TODO make this configurable
    private boolean compressSnapshot = true;

    /**
     * Creates a table with no entry expiration.
     *
     * @param tableName name of the table to create
     * @return true if the table was created; false if it already existed
     */
    @Command
    public boolean createTable(String tableName) {
        TableMetadata metadata = new TableMetadata(tableName);
        return createTable(metadata);
    }

    /**
     * Creates a table whose entries expire after the given TTL.
     *
     * @param tableName name of the table to create
     * @param ttlMillis entry time-to-live in milliseconds
     * @return true if the table was created; false if it already existed
     */
    @Command
    public boolean createTable(String tableName, Integer ttlMillis) {
        TableMetadata metadata = new TableMetadata(tableName, ttlMillis);
        return createTable(metadata);
    }

    // Creates the table in local state and asynchronously notifies listeners.
    private boolean createTable(TableMetadata metadata) {
        Map<String, VersionedValue> existingTable = state.getTable(metadata.tableName());
        if (existingTable != null) {
            return false;
        }
        state.createTable(metadata);

        updatesExecutor.submit(new Runnable() {
            @Override
            public void run() {
                for (DatabaseUpdateEventListener listener : listeners) {
                    listener.tableCreated(metadata);
                }
            }
        });

        return true;
    }

    /**
     * Drops the named table and notifies listeners.
     *
     * @param tableName name of the table to drop
     * @return true if the table existed and was removed; false otherwise
     */
    @Command
    public boolean dropTable(String tableName) {
        if (state.removeTable(tableName)) {

            updatesExecutor.submit(new Runnable() {
                @Override
                public void run() {
                    for (DatabaseUpdateEventListener listener : listeners) {
                        listener.tableDeleted(tableName);
                    }
                }
            });

            return true;
        }
        return false;
    }

    /**
     * Drops every table and notifies listeners once per dropped table.
     *
     * @return always true
     */
    @Command
    public boolean dropAllTables() {
        Set<String> tableNames = state.getTableNames();
        state.removeAllTables();

        updatesExecutor.submit(new Runnable() {
            @Override
            public void run() {
                for (DatabaseUpdateEventListener listener : listeners) {
                    for (String tableName : tableNames) {
                        listener.tableDeleted(tableName);
                    }
                }
            }
        });

        return true;
    }

    /**
     * Returns the names of all existing tables.
     *
     * @return immutable set of table names
     */
    @Query
    public Set<String> listTables() {
        return ImmutableSet.copyOf(state.getTableNames());
    }

    /**
     * Executes a batch of reads. A request against a missing table yields a
     * NO_SUCH_TABLE result for that request; the rest of the batch proceeds.
     *
     * @param batchRequest batch of read requests
     * @return one ReadResult per request, in request order
     */
    @Query
    public List<ReadResult> read(BatchReadRequest batchRequest) {
        List<ReadResult> results = new ArrayList<>(batchRequest.batchSize());
        for (ReadRequest request : batchRequest.getAsList()) {
            Map<String, VersionedValue> table = state.getTable(request.tableName());
            if (table == null) {
                results.add(new ReadResult(ReadStatus.NO_SUCH_TABLE, request.tableName(), request.key(), null));
                continue;
            }
            // Copy so callers cannot observe later mutations of the live entry.
            VersionedValue value = VersionedValue.copy(table.get(request.key()));
            results.add(new ReadResult(ReadStatus.OK, request.tableName(), request.key(), value));
        }
        return results;
    }

    /**
     * Returns a snapshot of the full contents of a table.
     * NOTE(review): ImmutableMap.copyOf throws NullPointerException when the
     * table does not exist — verify callers expect that over an empty map.
     *
     * @param tableName name of the table
     * @return immutable snapshot of the table's entries
     */
    @Query
    public Map<String, VersionedValue> getAll(String tableName) {
        return ImmutableMap.copyOf(state.getTable(tableName));
    }


    // Evaluates a single write request's precondition against the current
    // value: OK, PRECONDITION_VIOLATION, or ABORTED for an unknown type.
    WriteStatus checkIfApplicable(WriteRequest request, VersionedValue value) {

        switch (request.type()) {
        case PUT:
            return WriteStatus.OK;

        case PUT_IF_ABSENT:
            if (value == null) {
                return WriteStatus.OK;
            }
            return WriteStatus.PRECONDITION_VIOLATION;

        case PUT_IF_VALUE:
        case REMOVE_IF_VALUE:
            if (value != null && Arrays.equals(value.value(), request.oldValue())) {
                return WriteStatus.OK;
            }
            return WriteStatus.PRECONDITION_VIOLATION;

        case PUT_IF_VERSION:
        case REMOVE_IF_VERSION:
            if (value != null && request.previousVersion() == value.version()) {
                return WriteStatus.OK;
            }
            return WriteStatus.PRECONDITION_VIOLATION;

        case REMOVE:
            return WriteStatus.OK;

        default:
            break;
        }
        log.error("Should never reach here {}", request);
        return WriteStatus.ABORTED;
    }

    /**
     * Executes a batch of writes all-or-nothing: if any request's precondition
     * fails, nothing is applied and previously-OK results are rewritten as
     * ABORTED.
     * <p>
     * NOTE(review): in the apply phase below, results are appended to the same
     * list that already holds the precondition-check results, so the returned
     * list contains two entries per request (precheck entries first). Callers
     * using get(0) observe the precheck entry; confirm whether the
     * duplication is intended.
     *
     * @param batchRequest batch of write requests
     * @return write results (see note above regarding list length)
     */
    @Command
    public List<WriteResult> write(BatchWriteRequest batchRequest) {

        // applicability check
        boolean abort = false;
        List<WriteResult> results = new ArrayList<>(batchRequest.batchSize());

        for (WriteRequest request : batchRequest.getAsList()) {
            Map<String, VersionedValue> table = state.getTable(request.tableName());
            if (table == null) {
                results.add(new WriteResult(WriteStatus.NO_SUCH_TABLE, null));
                abort = true;
                continue;
            }
            final VersionedValue value = table.get(request.key());
            WriteStatus result = checkIfApplicable(request, value);
            results.add(new WriteResult(result, value));
            if (result != WriteStatus.OK) {
                abort = true;
            }
        }

        if (abort) {
            // Rewrite OK entries so no request appears applied.
            for (int i = 0; i < results.size(); ++i) {
                if (results.get(i).status() == WriteStatus.OK) {
                    results.set(i, new WriteResult(WriteStatus.ABORTED, null));
                }
            }
            return results;
        }

        List<TableModificationEvent> tableModificationEvents = Lists.newLinkedList();

        // apply changes
        for (WriteRequest request : batchRequest.getAsList()) {
            Map<String, VersionedValue> table = state.getTable(request.tableName());

            TableModificationEvent tableModificationEvent = null;
            // FIXME: If this method could be called by multiple thread,
            // synchronization scope is wrong.
            // Whole function including applicability check needs to be protected.
            // Confirm copycat's thread safety requirement for StateMachine
            // TODO: If we need isolation, we need to block reads also
            synchronized (table) {
                switch (request.type()) {
                case PUT:
                case PUT_IF_ABSENT:
                case PUT_IF_VALUE:
                case PUT_IF_VERSION:
                    VersionedValue newValue = new VersionedValue(request.newValue(), state.nextVersion());
                    VersionedValue previousValue = table.put(request.key(), newValue);
                    WriteResult putResult = new WriteResult(WriteStatus.OK, previousValue);
                    results.add(putResult);
                    tableModificationEvent = (previousValue == null) ?
                        TableModificationEvent.rowAdded(request.tableName(), request.key(), newValue) :
                        TableModificationEvent.rowUpdated(request.tableName(), request.key(), newValue);
                    break;

                case REMOVE:
                case REMOVE_IF_VALUE:
                case REMOVE_IF_VERSION:
                    VersionedValue removedValue = table.remove(request.key());
                    WriteResult removeResult = new WriteResult(WriteStatus.OK, removedValue);
                    results.add(removeResult);
                    if (removedValue != null) {
                        tableModificationEvent =
                            TableModificationEvent.rowDeleted(request.tableName(), request.key(), removedValue);
                    }
                    break;

                default:
                    log.error("Invalid WriteRequest type {}", request.type());
                    break;
                }
            }

            if (tableModificationEvent != null) {
                tableModificationEvents.add(tableModificationEvent);
            }
        }

        // notify listeners of table mod events.

        updatesExecutor.submit(new Runnable() {
            @Override
            public void run() {
                for (DatabaseUpdateEventListener listener : listeners) {
                    for (TableModificationEvent tableModificationEvent : tableModificationEvents) {
                        log.trace("Publishing table modification event: {}", tableModificationEvent);
                        listener.tableModified(tableModificationEvent);
                    }
                }
            }
        });

        return results;
    }

    /**
     * Durable database state: per-table metadata, per-table key/value data,
     * and a monotonically increasing version counter shared by all tables.
     * Not internally synchronized — see the FIXME in write().
     */
    public static class State {

        private final Map<String, TableMetadata> tableMetadata = Maps.newHashMap();
        private final Map<String, Map<String, VersionedValue>> tableData = Maps.newHashMap();
        // Versions start at 1; nextVersion() post-increments.
        private long versionCounter = 1;

        // Returns the live (mutable) table map, or null if it doesn't exist.
        Map<String, VersionedValue> getTable(String tableName) {
            return tableData.get(tableName);
        }

        void createTable(TableMetadata metadata) {
            tableMetadata.put(metadata.tableName, metadata);
            tableData.put(metadata.tableName, Maps.newHashMap());
        }

        TableMetadata getTableMetadata(String tableName) {
            return tableMetadata.get(tableName);
        }

        // Returns the next globally-unique version and advances the counter.
        long nextVersion() {
            return versionCounter++;
        }

        Set<String> getTableNames() {
            return ImmutableSet.copyOf(tableMetadata.keySet());
        }


        boolean removeTable(String tableName) {
            if (!tableMetadata.containsKey(tableName)) {
                return false;
            }
            tableMetadata.remove(tableName);
            tableData.remove(tableName);
            return true;
        }

        void removeAllTables() {
            tableMetadata.clear();
            tableData.clear();
        }
    }

    /**
     * Immutable per-table configuration: name plus optional entry expiration
     * (TTL). A table created without a TTL never expires entries.
     */
    public static class TableMetadata {
        private final String tableName;
        private final boolean expireOldEntries;
        private final int ttlMillis;

        public TableMetadata(String tableName) {
            this.tableName = tableName;
            this.expireOldEntries = false;
            this.ttlMillis = Integer.MAX_VALUE;

        }

        public TableMetadata(String tableName, int ttlMillis) {
            this.tableName = tableName;
            this.expireOldEntries = true;
            this.ttlMillis = ttlMillis;
        }

        public String tableName() {
            return tableName;
        }

        public boolean expireOldEntries() {
            return expireOldEntries;
        }

        public int ttlMillis() {
            return ttlMillis;
        }

        @Override
        public String toString() {
            return MoreObjects.toStringHelper(getClass())
                    .add("tableName", tableName)
                    .add("expireOldEntries", expireOldEntries)
                    .add("ttlMillis", ttlMillis)
                    .toString();
        }
    }

    /**
     * Serializes the current state for a Raft snapshot, deflate-compressing
     * it when {@code compressSnapshot} is enabled.
     *
     * @return serialized (possibly compressed) state bytes
     * @throws SnapshotException if serialization fails
     */
    @Override
    public byte[] takeSnapshot() {
        try {
            if (compressSnapshot) {
                byte[] input = DB_SERIALIZER.encode(state);
                ByteArrayOutputStream comp = new ByteArrayOutputStream(input.length);
                DeflaterOutputStream compressor = new DeflaterOutputStream(comp);
                compressor.write(input, 0, input.length);
                // close() also flushes/finishes the deflater stream.
                compressor.close();
                return comp.toByteArray();
            } else {
                return DB_SERIALIZER.encode(state);
            }
        } catch (Exception e) {
            log.error("Failed to take snapshot", e);
            throw new SnapshotException(e);
        }
    }

    /**
     * Replaces the current state with a previously-taken snapshot and
     * asynchronously notifies listeners. The data must have been produced by
     * {@link #takeSnapshot} with the same compression setting.
     *
     * @param data serialized snapshot bytes
     * @throws SnapshotException if deserialization fails
     */
    @Override
    public void installSnapshot(byte[] data) {
        try {
            if (compressSnapshot) {
                ByteArrayInputStream in = new ByteArrayInputStream(data);
                InflaterInputStream decompressor = new InflaterInputStream(in);
                this.state = DB_SERIALIZER.decode(decompressor);
            } else {
                this.state = DB_SERIALIZER.decode(data);
            }

            updatesExecutor.submit(new Runnable() {
                @Override
                public void run() {
                    for (DatabaseUpdateEventListener listener : listeners) {
                        listener.snapshotInstalled(state);
                    }
                }
            });

        } catch (Exception e) {
            log.error("Failed to install from snapshot", e);
            throw new SnapshotException(e);
        }
    }

    /**
     * Adds specified DatabaseUpdateEventListener.
     * @param listener listener to add
     */
    public void addEventListener(DatabaseUpdateEventListener listener) {
        listeners.add(listener);
    }

    /**
     * Removes specified DatabaseUpdateEventListener.
     * @param listener listener to remove
     */
    public void removeEventListener(DatabaseUpdateEventListener listener) {
        listeners.remove(listener);
    }
}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -
17 -package org.onosproject.store.service.impl;
18 -
19 -import org.onosproject.store.service.impl.DatabaseStateMachine.TableMetadata;
20 -
21 -/**
22 - * Interface of database update event listeners.
23 - */
24 -public interface DatabaseUpdateEventListener {
25 -
26 - /**
27 - * Notifies listeners of a table modified event.
28 - * @param event table modification event.
29 - */
30 - public void tableModified(TableModificationEvent event);
31 -
32 - /**
33 - * Notifies listeners of a table created event.
34 - * @param metadata metadata for the created table.
35 - */
36 - public void tableCreated(TableMetadata metadata);
37 -
38 - /**
39 - * Notifies listeners of a table deleted event.
40 - * @param tableName name of the table deleted
41 - */
42 - public void tableDeleted(String tableName);
43 -
44 - /**
45 - * Notifies listeners of a snapshot installation event.
46 - * @param snapshotState installed snapshot state.
47 - */
48 - public void snapshotInstalled(DatabaseStateMachine.State snapshotState);
49 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.service.impl;
17 -
18 -import static com.google.common.base.Verify.verify;
19 -import static org.slf4j.LoggerFactory.getLogger;
20 -
21 -import java.nio.charset.StandardCharsets;
22 -import java.util.Arrays;
23 -import java.util.UUID;
24 -import java.util.concurrent.CompletableFuture;
25 -import java.util.concurrent.ExecutionException;
26 -import java.util.concurrent.TimeUnit;
27 -import java.util.concurrent.TimeoutException;
28 -import java.util.concurrent.atomic.AtomicBoolean;
29 -
30 -import org.joda.time.DateTime;
31 -import org.onosproject.cluster.ClusterService;
32 -import org.onosproject.store.service.DatabaseException;
33 -import org.onosproject.store.service.DatabaseService;
34 -import org.onosproject.store.service.Lock;
35 -import org.onosproject.store.service.VersionedValue;
36 -import org.slf4j.Logger;
37 -
38 -/**
39 - * A distributed lock implementation.
40 - */
41 -public class DistributedLock implements Lock {
42 -
43 - private final Logger log = getLogger(getClass());
44 -
45 - private final DistributedLockManager lockManager;
46 - private final DatabaseService databaseService;
47 - private final String path;
48 - private DateTime lockExpirationTime;
49 - private AtomicBoolean isLocked = new AtomicBoolean(false);
50 - private volatile long epoch = 0;
51 - private byte[] lockId;
52 -
53 - public DistributedLock(
54 - String path,
55 - DatabaseService databaseService,
56 - ClusterService clusterService,
57 - DistributedLockManager lockManager) {
58 -
59 - this.path = path;
60 - this.databaseService = databaseService;
61 - this.lockManager = lockManager;
62 - this.lockId =
63 - (UUID.randomUUID().toString() + "::" +
64 - clusterService.getLocalNode().id().toString()).
65 - getBytes(StandardCharsets.UTF_8);
66 - }
67 -
68 - @Override
69 - public String path() {
70 - return path;
71 - }
72 -
73 - @Override
74 - public void lock(int leaseDurationMillis) throws InterruptedException {
75 - try {
76 - lockAsync(leaseDurationMillis).get();
77 - } catch (ExecutionException e) {
78 - throw new DatabaseException(e);
79 - }
80 - }
81 -
82 - @Override
83 - public CompletableFuture<Void> lockAsync(int leaseDurationMillis) {
84 - try {
85 - if (isLocked() || tryLock(leaseDurationMillis)) {
86 - return CompletableFuture.<Void>completedFuture(null);
87 - }
88 - return lockManager.lockIfAvailable(this, leaseDurationMillis);
89 - } catch (DatabaseException e) {
90 - CompletableFuture<Void> lockFuture = new CompletableFuture<>();
91 - lockFuture.completeExceptionally(e);
92 - return lockFuture;
93 - }
94 - }
95 -
96 - @Override
97 - public boolean tryLock(int leaseDurationMillis) {
98 - if (databaseService.putIfAbsent(
99 - DistributedLockManager.ONOS_LOCK_TABLE_NAME,
100 - path,
101 - lockId)) {
102 - VersionedValue vv =
103 - databaseService.get(DistributedLockManager.ONOS_LOCK_TABLE_NAME, path);
104 - verify(Arrays.equals(vv.value(), lockId));
105 - epoch = vv.version();
106 - isLocked.set(true);
107 - lockExpirationTime = DateTime.now().plusMillis(leaseDurationMillis);
108 - return true;
109 - }
110 - return false;
111 - }
112 -
113 - @Override
114 - public boolean tryLock(
115 - int waitTimeMillis,
116 - int leaseDurationMillis) throws InterruptedException {
117 - if (isLocked() || tryLock(leaseDurationMillis)) {
118 - return true;
119 - }
120 -
121 - CompletableFuture<Void> future =
122 - lockManager.lockIfAvailable(this, waitTimeMillis, leaseDurationMillis);
123 - try {
124 - future.get(waitTimeMillis, TimeUnit.MILLISECONDS);
125 - return true;
126 - } catch (ExecutionException e) {
127 - throw new DatabaseException(e);
128 - } catch (TimeoutException e) {
129 - log.debug("Timed out waiting to acquire lock for {}", path);
130 - return false;
131 - }
132 - }
133 -
134 - @Override
135 - public boolean isLocked() {
136 - if (isLocked.get()) {
137 - // We rely on local information to check
138 - // if the lock expired.
139 - // This should should make this call
140 - // light weight, while still retaining the
141 - // safety guarantees.
142 - if (DateTime.now().isAfter(lockExpirationTime)) {
143 - isLocked.set(false);
144 - return false;
145 - } else {
146 - return true;
147 - }
148 - }
149 - return false;
150 - }
151 -
152 - @Override
153 - public long epoch() {
154 - return epoch;
155 - }
156 -
157 - @Override
158 - public void unlock() {
159 - if (!isLocked()) {
160 - return;
161 - } else {
162 - if (databaseService.removeIfValueMatches(DistributedLockManager.ONOS_LOCK_TABLE_NAME, path, lockId)) {
163 - isLocked.set(false);
164 - }
165 - }
166 - }
167 -
168 - @Override
169 - public boolean extendExpiration(int leaseDurationMillis) {
170 - if (!isLocked()) {
171 - log.warn("Ignoring request to extend expiration for lock {}."
172 - + " ExtendExpiration must be called for locks that are already acquired.", path);
173 - return false;
174 - }
175 -
176 - if (databaseService.putIfValueMatches(
177 - DistributedLockManager.ONOS_LOCK_TABLE_NAME,
178 - path,
179 - lockId,
180 - lockId)) {
181 - lockExpirationTime = DateTime.now().plusMillis(leaseDurationMillis);
182 - log.debug("Succeeded in extending lock {} expiration time to {}", lockExpirationTime);
183 - return true;
184 - } else {
185 - log.info("Failed to extend expiration for {}", path);
186 - return false;
187 - }
188 - }
189 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.service.impl;
17 -
18 -import static org.onlab.util.Tools.namedThreads;
19 -import static org.slf4j.LoggerFactory.getLogger;
20 -
21 -import java.util.Iterator;
22 -import java.util.List;
23 -import java.util.Set;
24 -import java.util.concurrent.CompletableFuture;
25 -import java.util.concurrent.ExecutorService;
26 -import java.util.concurrent.Executors;
27 -
28 -import org.apache.felix.scr.annotations.Activate;
29 -import org.apache.felix.scr.annotations.Component;
30 -import org.apache.felix.scr.annotations.Deactivate;
31 -import org.apache.felix.scr.annotations.Reference;
32 -import org.apache.felix.scr.annotations.ReferenceCardinality;
33 -import org.apache.felix.scr.annotations.Service;
34 -import org.joda.time.DateTime;
35 -import org.onosproject.cluster.ClusterService;
36 -import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
37 -import org.onosproject.store.cluster.messaging.ClusterMessage;
38 -import org.onosproject.store.cluster.messaging.ClusterMessageHandler;
39 -import org.onosproject.store.service.DatabaseAdminService;
40 -import org.onosproject.store.service.DatabaseException;
41 -import org.onosproject.store.service.DatabaseService;
42 -import org.onosproject.store.service.Lock;
43 -import org.onosproject.store.service.LockEventListener;
44 -import org.onosproject.store.service.LockService;
45 -import org.slf4j.Logger;
46 -
47 -import com.google.common.collect.LinkedListMultimap;
48 -import com.google.common.collect.ListMultimap;
49 -import com.google.common.collect.Multimaps;
50 -
51 -@Component(immediate = false)
52 -@Service
53 -public class DistributedLockManager implements LockService {
54 -
55 - private static final ExecutorService THREAD_POOL =
56 - Executors.newCachedThreadPool(namedThreads("onos-lock-manager-%d"));
57 -
58 - private final Logger log = getLogger(getClass());
59 -
60 - public static final String ONOS_LOCK_TABLE_NAME = "onos-locks";
61 -
62 - public static final int DEAD_LOCK_TIMEOUT_MS = 5000;
63 -
64 - private final ListMultimap<String, LockRequest> locksToAcquire =
65 - Multimaps.synchronizedListMultimap(LinkedListMultimap.<String, LockRequest>create());
66 -
67 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
68 - private ClusterCommunicationService clusterCommunicator;
69 -
70 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
71 - private DatabaseAdminService databaseAdminService;
72 -
73 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
74 - private DatabaseService databaseService;
75 -
76 - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
77 - private ClusterService clusterService;
78 -
79 - @Activate
80 - public void activate() {
81 - try {
82 - Set<String> tables = databaseAdminService.listTables();
83 -
84 - if (!tables.contains(ONOS_LOCK_TABLE_NAME)) {
85 - if (databaseAdminService.createTable(ONOS_LOCK_TABLE_NAME, DEAD_LOCK_TIMEOUT_MS)) {
86 - log.info("Created {} table.", ONOS_LOCK_TABLE_NAME);
87 - }
88 - }
89 - } catch (DatabaseException e) {
90 - log.error("DistributedLockManager#activate failed.", e);
91 - }
92 -
93 - clusterCommunicator.addSubscriber(
94 - DatabaseStateMachine.DATABASE_UPDATE_EVENTS,
95 - new LockEventMessageListener());
96 -
97 - log.info("Started");
98 - }
99 -
100 - @Deactivate
101 - public void deactivate() {
102 - clusterCommunicator.removeSubscriber(DatabaseStateMachine.DATABASE_UPDATE_EVENTS);
103 - locksToAcquire.clear();
104 - log.info("Stopped.");
105 - }
106 -
107 - @Override
108 - public Lock create(String path) {
109 - return new DistributedLock(path, databaseService, clusterService, this);
110 - }
111 -
112 - @Override
113 - public void addListener(LockEventListener listener) {
114 - throw new UnsupportedOperationException();
115 - }
116 -
117 - @Override
118 - public void removeListener(LockEventListener listener) {
119 - throw new UnsupportedOperationException();
120 - }
121 -
122 - /**
123 - * Attempts to acquire the lock as soon as it becomes available.
124 - * @param lock lock to acquire.
125 - * @param waitTimeMillis maximum time to wait before giving up.
126 - * @param leaseDurationMillis the duration for which to acquire the lock initially.
127 - * @return Future that can be blocked on until lock becomes available.
128 - */
129 - protected CompletableFuture<Void> lockIfAvailable(
130 - Lock lock,
131 - int waitTimeMillis,
132 - int leaseDurationMillis) {
133 - CompletableFuture<Void> future = new CompletableFuture<>();
134 - LockRequest request = new LockRequest(
135 - lock,
136 - leaseDurationMillis,
137 - DateTime.now().plusMillis(waitTimeMillis),
138 - future);
139 - locksToAcquire.put(lock.path(), request);
140 - return future;
141 - }
142 -
143 - /**
144 - * Attempts to acquire the lock as soon as it becomes available.
145 - * @param lock lock to acquire.
146 - * @param leaseDurationMillis the duration for which to acquire the lock initially.
147 - * @return Future lease expiration date.
148 - */
149 - protected CompletableFuture<Void> lockIfAvailable(
150 - Lock lock,
151 - int leaseDurationMillis) {
152 - CompletableFuture<Void> future = new CompletableFuture<>();
153 - LockRequest request = new LockRequest(
154 - lock,
155 - leaseDurationMillis,
156 - DateTime.now().plusYears(100),
157 - future);
158 - locksToAcquire.put(lock.path(), request);
159 - return future;
160 - }
161 -
162 - private class LockEventMessageListener implements ClusterMessageHandler {
163 - @Override
164 - public void handle(ClusterMessage message) {
165 - TableModificationEvent event = ClusterMessagingProtocol.DB_SERIALIZER
166 - .decode(message.payload());
167 - if (event.tableName().equals(ONOS_LOCK_TABLE_NAME) &&
168 - event.type().equals(TableModificationEvent.Type.ROW_DELETED)) {
169 - THREAD_POOL.submit(new RetryLockTask(event.key()));
170 - }
171 - }
172 - }
173 -
174 - private class RetryLockTask implements Runnable {
175 -
176 - private final String path;
177 -
178 - public RetryLockTask(String path) {
179 - this.path = path;
180 - }
181 -
182 - @Override
183 - public void run() {
184 - if (!locksToAcquire.containsKey(path)) {
185 - return;
186 - }
187 -
188 - List<LockRequest> existingRequests = locksToAcquire.get(path);
189 - if (existingRequests == null || existingRequests.isEmpty()) {
190 - return;
191 - }
192 - log.info("Path {} is now available for locking. There are {} outstanding "
193 - + "requests for it.",
194 - path, existingRequests.size());
195 -
196 - synchronized (existingRequests) {
197 - Iterator<LockRequest> existingRequestIterator = existingRequests.iterator();
198 - while (existingRequestIterator.hasNext()) {
199 - LockRequest request = existingRequestIterator.next();
200 - if (DateTime.now().isAfter(request.requestExpirationTime())) {
201 - // request expired.
202 - existingRequestIterator.remove();
203 - } else {
204 - if (request.lock().tryLock(request.leaseDurationMillis())) {
205 - request.future().complete(null);
206 - existingRequestIterator.remove();
207 - }
208 - }
209 - }
210 - }
211 - }
212 - }
213 -
214 - private class LockRequest {
215 -
216 - private final Lock lock;
217 - private final DateTime requestExpirationTime;
218 - private final int leaseDurationMillis;
219 - private final CompletableFuture<Void> future;
220 -
221 - public LockRequest(
222 - Lock lock,
223 - int leaseDurationMillis,
224 - DateTime requestExpirationTime,
225 - CompletableFuture<Void> future) {
226 -
227 - this.lock = lock;
228 - this.requestExpirationTime = requestExpirationTime;
229 - this.leaseDurationMillis = leaseDurationMillis;
230 - this.future = future;
231 - }
232 -
233 - public Lock lock() {
234 - return lock;
235 - }
236 -
237 - public DateTime requestExpirationTime() {
238 - return requestExpirationTime;
239 - }
240 -
241 - public int leaseDurationMillis() {
242 - return leaseDurationMillis;
243 - }
244 -
245 - public CompletableFuture<Void> future() {
246 - return future;
247 - }
248 - }
249 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.service.impl;
17 -
18 -import static com.google.common.base.Preconditions.checkArgument;
19 -import static com.google.common.base.Preconditions.checkState;
20 -import static com.google.common.base.Verify.verifyNotNull;
21 -import static org.slf4j.LoggerFactory.getLogger;
22 -
23 -import java.io.File;
24 -import java.io.IOException;
25 -import java.util.ArrayList;
26 -import java.util.Arrays;
27 -import java.util.Iterator;
28 -import java.util.List;
29 -import java.util.Map;
30 -import java.util.concurrent.ConcurrentNavigableMap;
31 -
32 -import net.kuujo.copycat.log.Entry;
33 -import net.kuujo.copycat.log.Log;
34 -import net.kuujo.copycat.log.LogIndexOutOfBoundsException;
35 -
36 -import org.mapdb.Atomic;
37 -import org.mapdb.BTreeMap;
38 -import org.mapdb.DB;
39 -import org.mapdb.DBMaker;
40 -import org.mapdb.Serializer;
41 -import org.mapdb.TxBlock;
42 -import org.mapdb.TxMaker;
43 -import org.onosproject.store.serializers.StoreSerializer;
44 -import org.slf4j.Logger;
45 -
46 -/**
47 - * MapDB based log implementation.
48 - */
49 -public class MapDBLog implements Log {
50 -
51 - private final Logger log = getLogger(getClass());
52 -
53 - private final File dbFile;
54 - private TxMaker txMaker;
55 - private final StoreSerializer serializer;
56 - private static final String LOG_NAME = "log";
57 - private static final String SIZE_FIELD_NAME = "size";
58 -
59 - private int cacheSize = 256;
60 -
61 - public MapDBLog(String dbFileName, StoreSerializer serializer) {
62 - this.dbFile = new File(dbFileName);
63 - this.serializer = serializer;
64 - }
65 -
66 - @Override
67 - public void open() throws IOException {
68 - txMaker = DBMaker
69 - .newFileDB(dbFile)
70 - .mmapFileEnableIfSupported()
71 - .cacheSize(cacheSize)
72 - .makeTxMaker();
73 - log.info("Raft log file: {}", dbFile.getCanonicalPath());
74 - }
75 -
76 - @Override
77 - public void close() throws IOException {
78 - assertIsOpen();
79 - txMaker.close();
80 - txMaker = null;
81 - }
82 -
83 - @Override
84 - public boolean isOpen() {
85 - return txMaker != null;
86 - }
87 -
88 - protected void assertIsOpen() {
89 - checkState(isOpen(), "The log is not currently open.");
90 - }
91 -
92 - @Override
93 - public long appendEntry(Entry entry) {
94 - checkArgument(entry != null, "expecting non-null entry");
95 - return appendEntries(entry).get(0);
96 - }
97 -
98 - @Override
99 - public List<Long> appendEntries(Entry... entries) {
100 - checkArgument(entries != null, "expecting non-null entries");
101 - return appendEntries(Arrays.asList(entries));
102 - }
103 -
104 - @Override
105 - public synchronized List<Long> appendEntries(List<Entry> entries) {
106 - assertIsOpen();
107 - checkArgument(entries != null, "expecting non-null entries");
108 - final List<Long> indices = new ArrayList<>(entries.size());
109 -
110 - txMaker.execute(new TxBlock() {
111 - @Override
112 - public void tx(DB db) {
113 - BTreeMap<Long, byte[]> log = getLogMap(db);
114 - Atomic.Long size = db.getAtomicLong(SIZE_FIELD_NAME);
115 - long nextIndex = log.isEmpty() ? 1 : log.lastKey() + 1;
116 - long addedBytes = 0;
117 - for (Entry entry : entries) {
118 - byte[] entryBytes = verifyNotNull(serializer.encode(entry),
119 - "Writing LogEntry %s failed", nextIndex);
120 - log.put(nextIndex, entryBytes);
121 - addedBytes += entryBytes.length;
122 - indices.add(nextIndex);
123 - nextIndex++;
124 - }
125 - size.addAndGet(addedBytes);
126 - }
127 - });
128 -
129 - return indices;
130 - }
131 -
132 - @Override
133 - public boolean containsEntry(long index) {
134 - assertIsOpen();
135 - DB db = txMaker.makeTx();
136 - try {
137 - BTreeMap<Long, byte[]> log = getLogMap(db);
138 - return log.containsKey(index);
139 - } finally {
140 - db.close();
141 - }
142 - }
143 -
144 - @Override
145 - public void delete() throws IOException {
146 - assertIsOpen();
147 - txMaker.execute(new TxBlock() {
148 - @Override
149 - public void tx(DB db) {
150 - BTreeMap<Long, byte[]> log = getLogMap(db);
151 - Atomic.Long size = db.getAtomicLong(SIZE_FIELD_NAME);
152 - log.clear();
153 - size.set(0);
154 - }
155 - });
156 - }
157 -
158 - @Override
159 - public <T extends Entry> T firstEntry() {
160 - assertIsOpen();
161 - DB db = txMaker.makeTx();
162 - try {
163 - BTreeMap<Long, byte[]> log = getLogMap(db);
164 - return log.isEmpty() ? null : verifyNotNull(decodeEntry(log.firstEntry().getValue()));
165 - } finally {
166 - db.close();
167 - }
168 - }
169 -
170 - @Override
171 - public long firstIndex() {
172 - assertIsOpen();
173 - DB db = txMaker.makeTx();
174 - try {
175 - BTreeMap<Long, byte[]> log = getLogMap(db);
176 - return log.isEmpty() ? 0 : log.firstKey();
177 - } finally {
178 - db.close();
179 - }
180 - }
181 -
182 - private <T extends Entry> T decodeEntry(final byte[] bytes) {
183 - if (bytes == null) {
184 - return null;
185 - }
186 - return serializer.decode(bytes.clone());
187 - }
188 -
189 - @Override
190 - public <T extends Entry> List<T> getEntries(long from, long to) {
191 - assertIsOpen();
192 - DB db = txMaker.makeTx();
193 - try {
194 - BTreeMap<Long, byte[]> log = getLogMap(db);
195 - if (log.isEmpty()) {
196 - throw new LogIndexOutOfBoundsException("Log is empty");
197 - } else if (from < log.firstKey()) {
198 - throw new LogIndexOutOfBoundsException("From index out of bounds.");
199 - } else if (to > log.lastKey()) {
200 - throw new LogIndexOutOfBoundsException("To index out of bounds.");
201 - }
202 - List<T> entries = new ArrayList<>((int) (to - from + 1));
203 - for (long i = from; i <= to; i++) {
204 - T entry = verifyNotNull(decodeEntry(log.get(i)), "LogEntry %s was null", i);
205 - entries.add(entry);
206 - }
207 - return entries;
208 - } finally {
209 - db.close();
210 - }
211 - }
212 -
213 - @Override
214 - public <T extends Entry> T getEntry(long index) {
215 - assertIsOpen();
216 - DB db = txMaker.makeTx();
217 - try {
218 - BTreeMap<Long, byte[]> log = getLogMap(db);
219 - byte[] entryBytes = log.get(index);
220 - return entryBytes == null ? null : verifyNotNull(decodeEntry(entryBytes),
221 - "LogEntry %s was null", index);
222 - } finally {
223 - db.close();
224 - }
225 - }
226 -
227 - @Override
228 - public boolean isEmpty() {
229 - assertIsOpen();
230 - DB db = txMaker.makeTx();
231 - try {
232 - BTreeMap<Long, byte[]> log = getLogMap(db);
233 - return log.isEmpty();
234 - } finally {
235 - db.close();
236 - }
237 - }
238 -
239 - @Override
240 - public <T extends Entry> T lastEntry() {
241 - assertIsOpen();
242 - DB db = txMaker.makeTx();
243 - try {
244 - BTreeMap<Long, byte[]> log = getLogMap(db);
245 - return log.isEmpty() ? null : verifyNotNull(decodeEntry(log.lastEntry().getValue()));
246 - } finally {
247 - db.close();
248 - }
249 - }
250 -
251 - @Override
252 - public long lastIndex() {
253 - assertIsOpen();
254 - DB db = txMaker.makeTx();
255 - try {
256 - BTreeMap<Long, byte[]> log = getLogMap(db);
257 - return log.isEmpty() ? 0 : log.lastKey();
258 - } finally {
259 - db.close();
260 - }
261 - }
262 -
263 - @Override
264 - public void removeAfter(long index) {
265 - assertIsOpen();
266 - txMaker.execute(new TxBlock() {
267 - @Override
268 - public void tx(DB db) {
269 - BTreeMap<Long, byte[]> log = getLogMap(db);
270 - Atomic.Long size = db.getAtomicLong(SIZE_FIELD_NAME);
271 - long removedBytes = 0;
272 - ConcurrentNavigableMap<Long, byte[]> tailMap = log.tailMap(index, false);
273 - Iterator<Map.Entry<Long, byte[]>> it = tailMap.entrySet().iterator();
274 - while (it.hasNext()) {
275 - Map.Entry<Long, byte[]> entry = it.next();
276 - removedBytes += entry.getValue().length;
277 - it.remove();
278 - }
279 - size.addAndGet(-removedBytes);
280 - }
281 - });
282 - }
283 -
284 - @Override
285 - public long size() {
286 - assertIsOpen();
287 - DB db = txMaker.makeTx();
288 - try {
289 - Atomic.Long size = db.getAtomicLong(SIZE_FIELD_NAME);
290 - return size.get();
291 - } finally {
292 - db.close();
293 - }
294 - }
295 -
296 - @Override
297 - public void sync() throws IOException {
298 - assertIsOpen();
299 - }
300 -
301 - @Override
302 - public void compact(long index, Entry entry) throws IOException {
303 -
304 - assertIsOpen();
305 - txMaker.execute(new TxBlock() {
306 - @Override
307 - public void tx(DB db) {
308 - BTreeMap<Long, byte[]> log = getLogMap(db);
309 - Atomic.Long size = db.getAtomicLong(SIZE_FIELD_NAME);
310 - ConcurrentNavigableMap<Long, byte[]> headMap = log.headMap(index);
311 - Iterator<Map.Entry<Long, byte[]>> it = headMap.entrySet().iterator();
312 -
313 - long deletedBytes = 0;
314 - while (it.hasNext()) {
315 - Map.Entry<Long, byte[]> e = it.next();
316 - deletedBytes += e.getValue().length;
317 - it.remove();
318 - }
319 - size.addAndGet(-deletedBytes);
320 - byte[] entryBytes = verifyNotNull(serializer.encode(entry));
321 - byte[] existingEntry = log.put(index, entryBytes);
322 - if (existingEntry != null) {
323 - size.addAndGet(entryBytes.length - existingEntry.length);
324 - } else {
325 - size.addAndGet(entryBytes.length);
326 - }
327 - db.compact();
328 - }
329 - });
330 - }
331 -
332 - private BTreeMap<Long, byte[]> getLogMap(DB db) {
333 - return db.createTreeMap(LOG_NAME)
334 - .valuesOutsideNodesEnable()
335 - .keySerializerWrap(Serializer.LONG)
336 - .valueSerializer(Serializer.BYTE_ARRAY)
337 - .makeOrGet();
338 - }
339 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.service.impl;
17 -
18 -import org.onosproject.store.service.DatabaseException;
19 -
20 -/**
21 - * Exception that indicates a problem with the state machine snapshotting.
22 - */
23 -@SuppressWarnings("serial")
24 -public class SnapshotException extends DatabaseException {
25 - public SnapshotException(Throwable t) {
26 - super(t);
27 - }
28 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.service.impl;
17 -
18 -import org.onosproject.store.service.VersionedValue;
19 -
20 -import com.google.common.base.MoreObjects;
21 -
22 -/**
23 - * A table modification event.
24 - */
25 -public final class TableModificationEvent {
26 -
27 - /**
28 - * Type of table modification event.
29 - */
30 - public enum Type {
31 - ROW_ADDED,
32 - ROW_DELETED,
33 - ROW_UPDATED
34 - }
35 -
36 - private final String tableName;
37 - private final String key;
38 - private final VersionedValue value;
39 - private final Type type;
40 -
41 - /**
42 - * Creates a new row deleted table modification event.
43 - * @param tableName table name.
44 - * @param key row key
45 - * @param value value associated with the key when it was deleted.
46 - * @return table modification event.
47 - */
48 - public static TableModificationEvent rowDeleted(String tableName, String key, VersionedValue value) {
49 - return new TableModificationEvent(tableName, key, value, Type.ROW_DELETED);
50 - }
51 -
52 - /**
53 - * Creates a new row added table modification event.
54 - * @param tableName table name.
55 - * @param key row key
56 - * @param value value associated with the key
57 - * @return table modification event.
58 - */
59 - public static TableModificationEvent rowAdded(String tableName, String key, VersionedValue value) {
60 - return new TableModificationEvent(tableName, key, value, Type.ROW_ADDED);
61 - }
62 -
63 - /**
64 - * Creates a new row updated table modification event.
65 - * @param tableName table name.
66 - * @param key row key
67 - * @param newValue value
68 - * @return table modification event.
69 - */
70 - public static TableModificationEvent rowUpdated(String tableName, String key, VersionedValue newValue) {
71 - return new TableModificationEvent(tableName, key, newValue, Type.ROW_UPDATED);
72 - }
73 -
74 - private TableModificationEvent(String tableName, String key, VersionedValue value, Type type) {
75 - this.tableName = tableName;
76 - this.key = key;
77 - this.value = value;
78 - this.type = type;
79 - }
80 -
81 - /**
82 - * Returns name of table this event is for.
83 - * @return table name
84 - */
85 - public String tableName() {
86 - return tableName;
87 - }
88 -
89 - /**
90 - * Returns the row key this event is for.
91 - * @return row key
92 - */
93 - public String key() {
94 - return key;
95 - }
96 -
97 - /**
98 - * Returns the value associated with the key. If the event for a deletion, this
99 - * method returns value that was deleted.
100 - * @return row value
101 - */
102 - public VersionedValue value() {
103 - return value;
104 - }
105 -
106 - /**
107 - * Returns the type of table modification event.
108 - * @return event type.
109 - */
110 - public Type type() {
111 - return type;
112 - }
113 -
114 - @Override
115 - public String toString() {
116 - return MoreObjects.toStringHelper(getClass())
117 - .add("type", type)
118 - .add("tableName", tableName)
119 - .add("key", key)
120 - .add("version", value.version())
121 - .toString();
122 - }
123 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.service.impl;
17 -
18 -import static com.google.common.base.Preconditions.checkArgument;
19 -import static com.google.common.base.Preconditions.checkNotNull;
20 -import static org.slf4j.LoggerFactory.getLogger;
21 -
22 -import java.io.File;
23 -import java.io.IOException;
24 -import java.util.HashMap;
25 -import java.util.HashSet;
26 -import java.util.Iterator;
27 -import java.util.Map;
28 -import java.util.Map.Entry;
29 -import java.util.Set;
30 -
31 -import org.onosproject.cluster.DefaultControllerNode;
32 -import org.onosproject.cluster.NodeId;
33 -import org.onlab.packet.IpAddress;
34 -import org.slf4j.Logger;
35 -
36 -import com.fasterxml.jackson.core.JsonEncoding;
37 -import com.fasterxml.jackson.core.JsonFactory;
38 -import com.fasterxml.jackson.databind.JsonNode;
39 -import com.fasterxml.jackson.databind.ObjectMapper;
40 -import com.fasterxml.jackson.databind.node.ArrayNode;
41 -import com.fasterxml.jackson.databind.node.ObjectNode;
42 -
43 -/**
44 - * Allows for reading and writing tablet definition as a JSON file.
45 - */
46 -public class TabletDefinitionStore {
47 -
48 - private final Logger log = getLogger(getClass());
49 -
50 - private final File file;
51 -
52 - /**
53 - * Creates a reader/writer of the tablet definition file.
54 - *
55 - * @param filePath location of the definition file
56 - */
57 - public TabletDefinitionStore(String filePath) {
58 - file = new File(filePath);
59 - }
60 -
61 - /**
62 - * Creates a reader/writer of the tablet definition file.
63 - *
64 - * @param filePath location of the definition file
65 - */
66 - public TabletDefinitionStore(File filePath) {
67 - file = checkNotNull(filePath);
68 - }
69 -
70 - /**
71 - * Returns the Map from tablet name to set of initial member nodes.
72 - *
73 - * @return Map from tablet name to set of initial member nodes
74 - * @throws IOException when I/O exception of some sort has occurred.
75 - */
76 - public Map<String, Set<DefaultControllerNode>> read() throws IOException {
77 -
78 - final Map<String, Set<DefaultControllerNode>> tablets = new HashMap<>();
79 -
80 - final ObjectMapper mapper = new ObjectMapper();
81 - final ObjectNode tabletNodes = (ObjectNode) mapper.readTree(file);
82 - final Iterator<Entry<String, JsonNode>> fields = tabletNodes.fields();
83 - while (fields.hasNext()) {
84 - final Entry<String, JsonNode> next = fields.next();
85 - final Set<DefaultControllerNode> nodes = new HashSet<>();
86 - final Iterator<JsonNode> elements = next.getValue().elements();
87 - while (elements.hasNext()) {
88 - ObjectNode nodeDef = (ObjectNode) elements.next();
89 - nodes.add(new DefaultControllerNode(new NodeId(nodeDef.get("id").asText()),
90 - IpAddress.valueOf(nodeDef.get("ip").asText()),
91 - nodeDef.get("tcpPort").asInt(9876)));
92 - }
93 -
94 - tablets.put(next.getKey(), nodes);
95 - }
96 - return tablets;
97 - }
98 -
99 - /**
100 - * Updates the Map from tablet name to set of member nodes.
101 - *
102 - * @param tabletName name of the tablet to update
103 - * @param nodes set of initial member nodes
104 - * @throws IOException when I/O exception of some sort has occurred.
105 - */
106 - public void write(String tabletName, Set<DefaultControllerNode> nodes) throws IOException {
107 - checkNotNull(tabletName);
108 - checkArgument(tabletName.isEmpty(), "Tablet name cannot be empty");
109 - // TODO should validate if tabletName is allowed in JSON
110 -
111 - // load current
112 - Map<String, Set<DefaultControllerNode>> config;
113 - try {
114 - config = read();
115 - } catch (IOException e) {
116 - log.info("Reading tablet config failed, assuming empty definition.");
117 - config = new HashMap<>();
118 - }
119 - // update with specified
120 - config.put(tabletName, nodes);
121 -
122 - // write back to file
123 - final ObjectMapper mapper = new ObjectMapper();
124 - final ObjectNode tabletNodes = mapper.createObjectNode();
125 - for (Entry<String, Set<DefaultControllerNode>> tablet : config.entrySet()) {
126 - ArrayNode nodeDefs = mapper.createArrayNode();
127 - tabletNodes.set(tablet.getKey(), nodeDefs);
128 -
129 - for (DefaultControllerNode node : tablet.getValue()) {
130 - ObjectNode nodeDef = mapper.createObjectNode();
131 - nodeDef.put("id", node.id().toString())
132 - .put("ip", node.ip().toString())
133 - .put("tcpPort", node.tcpPort());
134 - nodeDefs.add(nodeDef);
135 - }
136 - }
137 - mapper.writeTree(new JsonFactory().createGenerator(file, JsonEncoding.UTF8),
138 - tabletNodes);
139 - }
140 -
141 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.service.impl;
17 -
18 -import java.util.Collection;
19 -
20 -import net.kuujo.copycat.cluster.TcpClusterConfig;
21 -import net.kuujo.copycat.cluster.TcpMember;
22 -
23 -import com.esotericsoftware.kryo.Kryo;
24 -import com.esotericsoftware.kryo.Serializer;
25 -import com.esotericsoftware.kryo.io.Input;
26 -import com.esotericsoftware.kryo.io.Output;
27 -
28 -public class TcpClusterConfigSerializer extends Serializer<TcpClusterConfig> {
29 -
30 - @Override
31 - public void write(Kryo kryo, Output output, TcpClusterConfig object) {
32 - kryo.writeClassAndObject(output, object.getLocalMember());
33 - kryo.writeClassAndObject(output, object.getRemoteMembers());
34 - }
35 -
36 - @Override
37 - public TcpClusterConfig read(Kryo kryo, Input input,
38 - Class<TcpClusterConfig> type) {
39 - TcpMember localMember = (TcpMember) kryo.readClassAndObject(input);
40 - @SuppressWarnings("unchecked")
41 - Collection<TcpMember> remoteMembers = (Collection<TcpMember>) kryo.readClassAndObject(input);
42 - return new TcpClusterConfig(localMember, remoteMembers);
43 - }
44 -
45 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.service.impl;
17 -
18 -import net.kuujo.copycat.cluster.TcpMember;
19 -
20 -import com.esotericsoftware.kryo.Kryo;
21 -import com.esotericsoftware.kryo.Serializer;
22 -import com.esotericsoftware.kryo.io.Input;
23 -import com.esotericsoftware.kryo.io.Output;
24 -
25 -public class TcpMemberSerializer extends Serializer<TcpMember> {
26 -
27 - @Override
28 - public void write(Kryo kryo, Output output, TcpMember object) {
29 - output.writeString(object.host());
30 - output.writeInt(object.port());
31 - }
32 -
33 - @Override
34 - public TcpMember read(Kryo kryo, Input input, Class<TcpMember> type) {
35 - String host = input.readString();
36 - int port = input.readInt();
37 - return new TcpMember(host, port);
38 - }
39 -}
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -
17 -/**
18 - * Strongly consistent, fault-tolerant and durable state management
19 - * based on Raft consensus protocol.
20 - */
21 -package org.onosproject.store.service.impl;
1 -/*
2 - * Copyright 2014 Open Networking Laboratory
3 - *
4 - * Licensed under the Apache License, Version 2.0 (the "License");
5 - * you may not use this file except in compliance with the License.
6 - * You may obtain a copy of the License at
7 - *
8 - * http://www.apache.org/licenses/LICENSE-2.0
9 - *
10 - * Unless required by applicable law or agreed to in writing, software
11 - * distributed under the License is distributed on an "AS IS" BASIS,
12 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 - * See the License for the specific language governing permissions and
14 - * limitations under the License.
15 - */
16 -package org.onosproject.store.service.impl;
17 -
18 -import java.io.File;
19 -import java.io.IOException;
20 -import java.nio.file.Files;
21 -import java.util.List;
22 -
23 -import net.kuujo.copycat.internal.log.OperationEntry;
24 -import net.kuujo.copycat.log.Entry;
25 -import net.kuujo.copycat.log.Log;
26 -
27 -import org.junit.After;
28 -import org.junit.Assert;
29 -import org.junit.Before;
30 -import org.junit.Test;
31 -import org.onosproject.store.serializers.StoreSerializer;
32 -
33 -import com.google.common.testing.EqualsTester;
34 -
35 -/**
36 - * Test the MapDBLog implementation.
37 - */
38 -public class MapDBLogTest {
39 -
40 - private static final StoreSerializer SERIALIZER = ClusterMessagingProtocol.DB_SERIALIZER;
41 - private static final Entry TEST_ENTRY1 = new OperationEntry(1, "test1");
42 - private static final Entry TEST_ENTRY2 = new OperationEntry(2, "test12");
43 - private static final Entry TEST_ENTRY3 = new OperationEntry(3, "test123");
44 - private static final Entry TEST_ENTRY4 = new OperationEntry(4, "test1234");
45 -
46 - private static final Entry TEST_SNAPSHOT_ENTRY = new OperationEntry(5, "snapshot");
47 -
48 - private static final long TEST_ENTRY1_SIZE = SERIALIZER.encode(TEST_ENTRY1).length;
49 - private static final long TEST_ENTRY2_SIZE = SERIALIZER.encode(TEST_ENTRY2).length;
50 - private static final long TEST_ENTRY3_SIZE = SERIALIZER.encode(TEST_ENTRY3).length;
51 - private static final long TEST_ENTRY4_SIZE = SERIALIZER.encode(TEST_ENTRY4).length;
52 -
53 - private static final long TEST_SNAPSHOT_ENTRY_SIZE = SERIALIZER.encode(TEST_SNAPSHOT_ENTRY).length;
54 -
55 - private String dbFileName;
56 -
57 -
58 - @Before
59 - public void setUp() throws Exception {
60 - File logFile = File.createTempFile("mapdbTest", null);
61 - dbFileName = logFile.getAbsolutePath();
62 - }
63 -
64 - @After
65 - public void tearDown() throws Exception {
66 - Files.deleteIfExists(new File(dbFileName).toPath());
67 - Files.deleteIfExists(new File(dbFileName + ".t").toPath());
68 - Files.deleteIfExists(new File(dbFileName + ".p").toPath());
69 - }
70 -
71 - @Test(expected = IllegalStateException.class)
72 - public void testAssertOpen() {
73 - Log log = new MapDBLog(dbFileName, SERIALIZER);
74 - log.size();
75 - }
76 -
77 - @Test
78 - public void testAppendEntry() throws IOException {
79 - Log log = new MapDBLog(dbFileName, SERIALIZER);
80 - log.open();
81 - log.appendEntry(TEST_ENTRY1);
82 - OperationEntry first = log.firstEntry();
83 - OperationEntry last = log.lastEntry();
84 - new EqualsTester()
85 - .addEqualityGroup(first, last, TEST_ENTRY1)
86 - .testEquals();
87 - Assert.assertEquals(TEST_ENTRY1_SIZE, log.size());
88 - Assert.assertEquals(1, log.firstIndex());
89 - Assert.assertEquals(1, log.lastIndex());
90 - }
91 -
92 - @Test
93 - public void testAppendEntries() throws IOException {
94 - Log log = new MapDBLog(dbFileName, SERIALIZER);
95 - log.open();
96 - log.appendEntries(TEST_ENTRY1, TEST_ENTRY2, TEST_ENTRY3);
97 - OperationEntry first = log.firstEntry();
98 - OperationEntry last = log.lastEntry();
99 - new EqualsTester()
100 - .addEqualityGroup(first, TEST_ENTRY1)
101 - .addEqualityGroup(last, TEST_ENTRY3)
102 - .testEquals();
103 - Assert.assertEquals(TEST_ENTRY1_SIZE + TEST_ENTRY2_SIZE, TEST_ENTRY3_SIZE, log.size());
104 - Assert.assertEquals(1, log.firstIndex());
105 - Assert.assertEquals(3, log.lastIndex());
106 - Assert.assertTrue(log.containsEntry(1));
107 - Assert.assertTrue(log.containsEntry(2));
108 - }
109 -
110 - @Test
111 - public void testDelete() throws IOException {
112 - Log log = new MapDBLog(dbFileName, SERIALIZER);
113 - log.open();
114 - log.appendEntries(TEST_ENTRY1, TEST_ENTRY2);
115 - log.delete();
116 - Assert.assertEquals(0, log.size());
117 - Assert.assertTrue(log.isEmpty());
118 - Assert.assertEquals(0, log.firstIndex());
119 - Assert.assertNull(log.firstEntry());
120 - Assert.assertEquals(0, log.lastIndex());
121 - Assert.assertNull(log.lastEntry());
122 - }
123 -
124 - @Test
125 - public void testGetEntries() throws IOException {
126 - Log log = new MapDBLog(dbFileName, SERIALIZER);
127 - log.open();
128 - log.appendEntries(TEST_ENTRY1, TEST_ENTRY2, TEST_ENTRY3, TEST_ENTRY4);
129 - Assert.assertEquals(
130 - TEST_ENTRY1_SIZE +
131 - TEST_ENTRY2_SIZE +
132 - TEST_ENTRY3_SIZE +
133 - TEST_ENTRY4_SIZE, log.size());
134 -
135 - List<Entry> entries = log.getEntries(2, 3);
136 - new EqualsTester()
137 - .addEqualityGroup(log.getEntry(4), TEST_ENTRY4)
138 - .addEqualityGroup(entries.get(0), TEST_ENTRY2)
139 - .addEqualityGroup(entries.get(1), TEST_ENTRY3)
140 - .testEquals();
141 - }
142 -
143 - @Test
144 - public void testRemoveAfter() throws IOException {
145 - Log log = new MapDBLog(dbFileName, SERIALIZER);
146 - log.open();
147 - log.appendEntries(TEST_ENTRY1, TEST_ENTRY2, TEST_ENTRY3, TEST_ENTRY4);
148 - log.removeAfter(1);
149 - Assert.assertEquals(TEST_ENTRY1_SIZE, log.size());
150 - new EqualsTester()
151 - .addEqualityGroup(log.firstEntry(), log.lastEntry(), TEST_ENTRY1)
152 - .testEquals();
153 - }
154 -
155 - @Test
156 - public void testAddAfterRemove() throws IOException {
157 - Log log = new MapDBLog(dbFileName, SERIALIZER);
158 - log.open();
159 - log.appendEntries(TEST_ENTRY1, TEST_ENTRY2, TEST_ENTRY3, TEST_ENTRY4);
160 - log.removeAfter(1);
161 - log.appendEntry(TEST_ENTRY4);
162 - Assert.assertEquals(TEST_ENTRY1_SIZE + TEST_ENTRY4_SIZE, log.size());
163 - new EqualsTester()
164 - .addEqualityGroup(log.firstEntry(), TEST_ENTRY1)
165 - .addEqualityGroup(log.lastEntry(), TEST_ENTRY4)
166 - .addEqualityGroup(log.size(), TEST_ENTRY1_SIZE + TEST_ENTRY4_SIZE)
167 - .testEquals();
168 - }
169 -
170 - @Test
171 - public void testClose() throws IOException {
172 - Log log = new MapDBLog(dbFileName, SERIALIZER);
173 - Assert.assertFalse(log.isOpen());
174 - log.open();
175 - Assert.assertTrue(log.isOpen());
176 - log.close();
177 - Assert.assertFalse(log.isOpen());
178 - }
179 -
180 - @Test
181 - public void testReopen() throws IOException {
182 - Log log = new MapDBLog(dbFileName, SERIALIZER);
183 - log.open();
184 - log.appendEntries(TEST_ENTRY1, TEST_ENTRY2, TEST_ENTRY3, TEST_ENTRY4);
185 - log.close();
186 - log.open();
187 -
188 - new EqualsTester()
189 - .addEqualityGroup(log.firstEntry(), TEST_ENTRY1)
190 - .addEqualityGroup(log.getEntry(2), TEST_ENTRY2)
191 - .addEqualityGroup(log.lastEntry(), TEST_ENTRY4)
192 - .addEqualityGroup(log.size(),
193 - TEST_ENTRY1_SIZE +
194 - TEST_ENTRY2_SIZE +
195 - TEST_ENTRY3_SIZE +
196 - TEST_ENTRY4_SIZE)
197 - .testEquals();
198 - }
199 -
200 - @Test
201 - public void testCompact() throws IOException {
202 - Log log = new MapDBLog(dbFileName, SERIALIZER);
203 - log.open();
204 - log.appendEntries(TEST_ENTRY1, TEST_ENTRY2, TEST_ENTRY3, TEST_ENTRY4);
205 - log.compact(3, TEST_SNAPSHOT_ENTRY);
206 - new EqualsTester()
207 - .addEqualityGroup(log.firstEntry(), TEST_SNAPSHOT_ENTRY)
208 - .addEqualityGroup(log.lastEntry(), TEST_ENTRY4)
209 - .addEqualityGroup(log.size(),
210 - TEST_SNAPSHOT_ENTRY_SIZE +
211 - TEST_ENTRY4_SIZE)
212 - .testEquals();
213 - }
214 -}
...@@ -69,7 +69,7 @@ ...@@ -69,7 +69,7 @@
69 <properties> 69 <properties>
70 <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> 70 <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
71 <netty4.version>4.0.23.Final</netty4.version> 71 <netty4.version>4.0.23.Final</netty4.version>
72 - <copycat.version>0.3.0.onos</copycat.version> 72 + <copycat.version>0.5.0.onos-SNAPSHOT</copycat.version>
73 <openflowj.version>0.3.9.oe</openflowj.version> 73 <openflowj.version>0.3.9.oe</openflowj.version>
74 </properties> 74 </properties>
75 75
......
...@@ -31,6 +31,14 @@ ...@@ -31,6 +31,14 @@
31 31
32 <description>ONLab third-party dependencies</description> 32 <description>ONLab third-party dependencies</description>
33 33
34 + <!-- TODO: Needed for copycat snapshot. Remove before official release -->
35 + <repositories>
36 + <repository>
37 + <id>snapshots</id>
38 + <url>https://oss.sonatype.org/content/repositories/snapshots</url>
39 + </repository>
40 + </repositories>
41 +
34 <dependencies> 42 <dependencies>
35 <dependency> 43 <dependency>
36 <groupId>com.googlecode.concurrent-trees</groupId> 44 <groupId>com.googlecode.concurrent-trees</groupId>
...@@ -47,14 +55,14 @@ ...@@ -47,14 +55,14 @@
47 <dependency> 55 <dependency>
48 <!-- FIXME once fixes get merged to upstream --> 56 <!-- FIXME once fixes get merged to upstream -->
49 <groupId>org.onosproject</groupId> 57 <groupId>org.onosproject</groupId>
50 - <artifactId>copycat</artifactId> 58 + <artifactId>copycat-api</artifactId>
51 <version>${copycat.version}</version> 59 <version>${copycat.version}</version>
52 </dependency> 60 </dependency>
53 61
54 <dependency> 62 <dependency>
55 <!-- FIXME once fixes get merged to upstream --> 63 <!-- FIXME once fixes get merged to upstream -->
56 <groupId>org.onosproject</groupId> 64 <groupId>org.onosproject</groupId>
57 - <artifactId>copycat-tcp</artifactId> 65 + <artifactId>copycat-netty</artifactId>
58 <version>${copycat.version}</version> 66 <version>${copycat.version}</version>
59 </dependency> 67 </dependency>
60 68
......