Committed by Jonathan Hart
WIP: Partitioned Database based on Raft.
Removed the implementation based on the previous Copycat API.
Change-Id: I6b9d67e943e17095f585ae2a2cb6304c248cd686
Showing 35 changed files with 1364 additions and 2122 deletions
...
@@ -39,7 +39,6 @@ import org.onosproject.store.serializers.KryoNamespaces;
 import org.onosproject.store.serializers.KryoSerializer;
 import org.onosproject.store.service.Lock;
 import org.onosproject.store.service.LockService;
-import org.onosproject.store.service.impl.DistributedLockManager;
 import org.slf4j.Logger;

 import java.util.Map;
@@ -62,9 +61,7 @@ public class LeadershipManager implements LeadershipService {

     private final Logger log = getLogger(getClass());

-    // TODO: Remove this dependency
-    private static final int TERM_DURATION_MS =
-            DistributedLockManager.DEAD_LOCK_TIMEOUT_MS;
+    private static final int TERM_DURATION_MS = 2000;

     // Time to wait before retrying leadership after
     // an unexpected error.
...
core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Database.java
0 → 100644
1 | +package org.onosproject.store.consistent.impl; | ||
2 | + | ||
3 | + | ||
4 | +import net.kuujo.copycat.cluster.ClusterConfig; | ||
5 | +import net.kuujo.copycat.cluster.internal.coordinator.ClusterCoordinator; | ||
6 | +import net.kuujo.copycat.cluster.internal.coordinator.CoordinatorConfig; | ||
7 | +import net.kuujo.copycat.cluster.internal.coordinator.DefaultClusterCoordinator; | ||
8 | +import net.kuujo.copycat.resource.Resource; | ||
9 | + | ||
10 | +/** | ||
11 | + * Database. | ||
12 | + */ | ||
13 | +public interface Database extends DatabaseProxy<String, byte[]>, Resource<Database> { | ||
14 | + | ||
15 | + /** | ||
16 | + * Creates a new database with the default cluster configuration.<p> | ||
17 | + * | ||
18 | + * The database will be constructed with the default cluster configuration. The default cluster configuration | ||
19 | + * searches for two resources on the classpath - {@code cluster} and {@code cluster-defaults} - in that order. Configuration | ||
20 | + * options specified in {@code cluster.conf} will override those in {@code cluster-defaults.conf}.<p> | ||
21 | + * | ||
22 | + * Additionally, the database will be constructed with a database configuration that searches the classpath for | ||
23 | + * five configuration resources - {@code {name}}, {@code database}, {@code database-defaults}, {@code resource}, and | ||
24 | + * {@code resource-defaults} - in that order. The first resource is a configuration resource with the same name | ||
25 | + * as the database resource. If the resource is namespaced - e.g. `databases.my-database.conf` - then resource | ||
26 | + * configurations will be loaded according to namespaces as well; for example, `databases.conf`. | ||
27 | + * | ||
28 | + * @param name The database name. | ||
29 | + * @return The database. | ||
30 | + */ | ||
31 | + static Database create(String name) { | ||
32 | + return create(name, new ClusterConfig(), new DatabaseConfig()); | ||
33 | + } | ||
34 | + | ||
35 | + /** | ||
36 | + * Creates a new database.<p> | ||
37 | + * | ||
38 | + * The database will be constructed with a database configuration that searches the classpath for | ||
39 | + * five configuration resources - {@code {name}}, {@code database}, {@code database-defaults}, {@code resource}, and | ||
40 | + * {@code resource-defaults} - in that order. The first resource is a configuration resource with the same name | ||
41 | + * as the database resource. If the resource is namespaced - e.g. `databases.my-database.conf` - then resource | ||
42 | + * configurations will be loaded according to namespaces as well; for example, `databases.conf`. | ||
43 | + * | ||
44 | + * @param name The database name. | ||
45 | + * @param cluster The cluster configuration. | ||
46 | + * @return The database. | ||
47 | + */ | ||
48 | + static Database create(String name, ClusterConfig cluster) { | ||
49 | + return create(name, cluster, new DatabaseConfig()); | ||
50 | + } | ||
51 | + | ||
52 | + /** | ||
53 | + * Creates a new database. | ||
54 | + * | ||
55 | + * @param name The database name. | ||
56 | + * @param cluster The cluster configuration. | ||
57 | + * @param config The database configuration. | ||
58 | + * | ||
59 | + * @return The database. | ||
60 | + */ | ||
61 | + static Database create(String name, ClusterConfig cluster, DatabaseConfig config) { | ||
62 | + ClusterCoordinator coordinator = | ||
63 | + new DefaultClusterCoordinator(new CoordinatorConfig().withName(name).withClusterConfig(cluster)); | ||
64 | + return coordinator.<Database>getResource(name, config.resolve(cluster)) | ||
65 | + .addStartupTask(() -> coordinator.open().thenApply(v -> null)) | ||
66 | + .addShutdownTask(coordinator::close); | ||
67 | + } | ||
68 | + | ||
69 | +} |
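For orientation, a minimal usage sketch of this interface (not part of the change): the table name, key and value are made up, the cluster and database configurations are left at their defaults, imports and error handling are omitted.

    // Sketch only: a single Database backed by the default cluster.conf / database.conf (if present).
    Database db = Database.create("my-database");

    db.open()                                                     // start the underlying Copycat resource
      .thenCompose(v -> db.put("flows", "flow-1", new byte[] {0x01}))
      .thenCompose(v -> db.get("flows", "flow-1"))
      .thenAccept(entry -> System.out.println("stored version = " + entry.version()))
      .thenCompose(v -> db.close());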
core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseConfig.java
0 → 100644
1 | +package org.onosproject.store.consistent.impl; | ||
2 | + | ||
3 | +import com.typesafe.config.ConfigValueFactory; | ||
4 | +import net.kuujo.copycat.cluster.ClusterConfig; | ||
5 | +import net.kuujo.copycat.cluster.internal.coordinator.CoordinatedResourceConfig; | ||
6 | +import net.kuujo.copycat.protocol.Consistency; | ||
7 | +import net.kuujo.copycat.resource.ResourceConfig; | ||
8 | +import net.kuujo.copycat.state.StateLogConfig; | ||
9 | +import net.kuujo.copycat.util.internal.Assert; | ||
10 | + | ||
11 | +import java.util.Map; | ||
12 | + | ||
13 | +/** | ||
14 | + * Database configuration. | ||
15 | + * | ||
16 | + */ | ||
17 | +public class DatabaseConfig extends ResourceConfig<DatabaseConfig> { | ||
18 | + private static final String DATABASE_CONSISTENCY = "consistency"; | ||
19 | + | ||
20 | + private static final String DEFAULT_CONFIGURATION = "database-defaults"; | ||
21 | + private static final String CONFIGURATION = "database"; | ||
22 | + | ||
23 | + public DatabaseConfig() { | ||
24 | + super(CONFIGURATION, DEFAULT_CONFIGURATION); | ||
25 | + } | ||
26 | + | ||
27 | + public DatabaseConfig(Map<String, Object> config) { | ||
28 | + super(config, CONFIGURATION, DEFAULT_CONFIGURATION); | ||
29 | + } | ||
30 | + | ||
31 | + public DatabaseConfig(String resource) { | ||
32 | + super(resource, CONFIGURATION, DEFAULT_CONFIGURATION); | ||
33 | + } | ||
34 | + | ||
35 | + protected DatabaseConfig(DatabaseConfig config) { | ||
36 | + super(config); | ||
37 | + } | ||
38 | + | ||
39 | + @Override | ||
40 | + public DatabaseConfig copy() { | ||
41 | + return new DatabaseConfig(this); | ||
42 | + } | ||
43 | + | ||
44 | + /** | ||
45 | + * Sets the database read consistency. | ||
46 | + * | ||
47 | + * @param consistency The database read consistency. | ||
48 | + * @throws java.lang.NullPointerException If the consistency is {@code null} | ||
49 | + */ | ||
50 | + public void setConsistency(String consistency) { | ||
51 | + this.config = config.withValue(DATABASE_CONSISTENCY, | ||
52 | + ConfigValueFactory.fromAnyRef( | ||
53 | + Consistency.parse(Assert.isNotNull(consistency, "consistency")).toString())); | ||
54 | + } | ||
55 | + | ||
56 | + /** | ||
57 | + * Sets the database read consistency. | ||
58 | + * | ||
59 | + * @param consistency The database read consistency. | ||
60 | + * @throws java.lang.NullPointerException If the consistency is {@code null} | ||
61 | + */ | ||
62 | + public void setConsistency(Consistency consistency) { | ||
63 | + this.config = config.withValue(DATABASE_CONSISTENCY, | ||
64 | + ConfigValueFactory.fromAnyRef( | ||
65 | + Assert.isNotNull(consistency, "consistency").toString())); | ||
66 | + } | ||
67 | + | ||
68 | + /** | ||
69 | + * Returns the database read consistency. | ||
70 | + * | ||
71 | + * @return The database read consistency. | ||
72 | + */ | ||
73 | + public Consistency getConsistency() { | ||
74 | + return Consistency.parse(config.getString(DATABASE_CONSISTENCY)); | ||
75 | + } | ||
76 | + | ||
77 | + /** | ||
78 | + * Sets the database read consistency, returning the configuration for method chaining. | ||
79 | + * | ||
80 | + * @param consistency The database read consistency. | ||
81 | + * @return The database configuration. | ||
82 | + * @throws java.lang.NullPointerException If the consistency is {@code null} | ||
83 | + */ | ||
84 | + public DatabaseConfig withConsistency(String consistency) { | ||
85 | + setConsistency(consistency); | ||
86 | + return this; | ||
87 | + } | ||
88 | + | ||
89 | + /** | ||
90 | + * Sets the database read consistency, returning the configuration for method chaining. | ||
91 | + * | ||
92 | + * @param consistency The database read consistency. | ||
93 | + * @return The database configuration. | ||
94 | + * @throws java.lang.NullPointerException If the consistency is {@code null} | ||
95 | + */ | ||
96 | + public DatabaseConfig withConsistency(Consistency consistency) { | ||
97 | + setConsistency(consistency); | ||
98 | + return this; | ||
99 | + } | ||
100 | + | ||
101 | + @Override | ||
102 | + public CoordinatedResourceConfig resolve(ClusterConfig cluster) { | ||
103 | + return new StateLogConfig(toMap()) | ||
104 | + .resolve(cluster) | ||
105 | + .withResourceType(DefaultDatabase.class); | ||
106 | + } | ||
107 | + | ||
108 | +} |
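A short sketch of how a DatabaseConfig is meant to be consumed (illustrative only): the configuration is resolved against a ClusterConfig, which yields the coordinated resource configuration bound to DefaultDatabase. A read consistency can be set fluently with withConsistency(...) in the same way.

    // Sketch: defaults are read from database.conf / database-defaults.conf when present on the classpath.
    DatabaseConfig dbConfig = new DatabaseConfig();
    // Resolving against the cluster configuration binds the resource type to DefaultDatabase.
    CoordinatedResourceConfig resourceConfig = dbConfig.resolve(new ClusterConfig());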
core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseProxy.java
0 → 100644
1 | +package org.onosproject.store.consistent.impl; | ||
2 | + | ||
3 | +import java.util.Collection; | ||
4 | +import java.util.List; | ||
5 | +import java.util.Map; | ||
6 | +import java.util.Set; | ||
7 | +import java.util.concurrent.CompletableFuture; | ||
8 | + | ||
9 | +/** | ||
10 | + * Database proxy. | ||
11 | + */ | ||
12 | +public interface DatabaseProxy<K, V> { | ||
13 | + | ||
14 | + /** | ||
15 | + * Gets the table size. | ||
16 | + * | ||
17 | + * @param tableName table name | ||
18 | + * @return A completable future to be completed with the result once complete. | ||
19 | + */ | ||
20 | + CompletableFuture<Integer> size(String tableName); | ||
21 | + | ||
22 | + /** | ||
23 | + * Checks whether the table is empty. | ||
24 | + * | ||
25 | + * @param tableName table name | ||
26 | + * @return A completable future to be completed with the result once complete. | ||
27 | + */ | ||
28 | + CompletableFuture<Boolean> isEmpty(String tableName); | ||
29 | + | ||
30 | + /** | ||
31 | + * Checks whether the table contains a key. | ||
32 | + * | ||
33 | + * @param tableName table name | ||
34 | + * @param key The key to check. | ||
35 | + * @return A completable future to be completed with the result once complete. | ||
36 | + */ | ||
37 | + CompletableFuture<Boolean> containsKey(String tableName, K key); | ||
38 | + | ||
39 | + /** | ||
40 | + * Checks whether the table contains a value. | ||
41 | + * | ||
42 | + * @param tableName table name | ||
43 | + * @param value The value to check. | ||
44 | + * @return A completable future to be completed with the result once complete. | ||
45 | + */ | ||
46 | + CompletableFuture<Boolean> containsValue(String tableName, V value); | ||
47 | + | ||
48 | + /** | ||
49 | + * Gets a value from the table. | ||
50 | + * | ||
51 | + * @param tableName table name | ||
52 | + * @param key The key to get. | ||
53 | + * @return A completable future to be completed with the result once complete. | ||
54 | + */ | ||
55 | + CompletableFuture<Versioned<V>> get(String tableName, K key); | ||
56 | + | ||
57 | + /** | ||
58 | + * Puts a value in the table. | ||
59 | + * | ||
60 | + * @param tableName table name | ||
61 | + * @param key The key to set. | ||
62 | + * @param value The value to set. | ||
63 | + * @return A completable future to be completed with the result once complete. | ||
64 | + */ | ||
65 | + CompletableFuture<Versioned<V>> put(String tableName, K key, V value); | ||
66 | + | ||
67 | + /** | ||
68 | + * Removes a value from the table. | ||
69 | + * | ||
70 | + * @param tableName table name | ||
71 | + * @param key The key to remove. | ||
72 | + * @return A completable future to be completed with the result once complete. | ||
73 | + */ | ||
74 | + CompletableFuture<Versioned<V>> remove(String tableName, K key); | ||
75 | + | ||
76 | + /** | ||
77 | + * Clears the table. | ||
78 | + * | ||
79 | + * @param tableName table name | ||
80 | + * @return A completable future to be completed with the result once complete. | ||
81 | + */ | ||
82 | + CompletableFuture<Void> clear(String tableName); | ||
83 | + | ||
84 | + /** | ||
85 | + * Gets a set of keys in the table. | ||
86 | + * | ||
87 | + * @param tableName table name | ||
88 | + * @return A completable future to be completed with the result once complete. | ||
89 | + */ | ||
90 | + CompletableFuture<Set<K>> keySet(String tableName); | ||
91 | + | ||
92 | + /** | ||
93 | + * Gets a collection of values in the table. | ||
94 | + * | ||
95 | + * @param tableName table name | ||
96 | + * @return A completable future to be completed with the result once complete. | ||
97 | + */ | ||
98 | + CompletableFuture<Collection<Versioned<V>>> values(String tableName); | ||
99 | + | ||
100 | + /** | ||
101 | + * Gets a set of entries in the table. | ||
102 | + * | ||
103 | + * @param tableName table name | ||
104 | + * @return A completable future to be completed with the result once complete. | ||
105 | + */ | ||
106 | + CompletableFuture<Set<Map.Entry<K, Versioned<V>>>> entrySet(String tableName); | ||
107 | + | ||
108 | + /** | ||
109 | + * Puts a value in the table if the given key does not exist. | ||
110 | + * | ||
111 | + * @param tableName table name | ||
112 | + * @param key The key to set. | ||
113 | + * @param value The value to set if the given key does not exist. | ||
114 | + * @return A completable future to be completed with the result once complete. | ||
115 | + */ | ||
116 | + CompletableFuture<Versioned<V>> putIfAbsent(String tableName, K key, V value); | ||
117 | + | ||
118 | + /** | ||
119 | + * Removes the entry for the specified key only if its current value matches the specified value. | ||
120 | + * | ||
121 | + * @param tableName table name | ||
122 | + * @param key The key to remove. | ||
123 | + * @param value The value to remove. | ||
124 | + * @return A completable future to be completed with the result once complete. | ||
125 | + */ | ||
126 | + CompletableFuture<Boolean> remove(String tableName, K key, V value); | ||
127 | + | ||
128 | + /** | ||
129 | + * Removes the entry for the specified key only if its current version matches the specified version. | ||
130 | + * | ||
131 | + * @param tableName table name | ||
132 | + * @param key The key to remove. | ||
133 | + * @param version The expected version. | ||
134 | + * @return A completable future to be completed with the result once complete. | ||
135 | + */ | ||
136 | + CompletableFuture<Boolean> remove(String tableName, K key, long version); | ||
137 | + | ||
138 | + /** | ||
139 | + * Replaces the entry for the specified key only if currently mapped to the specified value. | ||
140 | + * | ||
141 | + * @param tableName table name | ||
142 | + * @param key The key to replace. | ||
143 | + * @param oldValue The value to replace. | ||
144 | + * @param newValue The value with which to replace the given key and value. | ||
145 | + * @return A completable future to be completed with the result once complete. | ||
146 | + */ | ||
147 | + CompletableFuture<Boolean> replace(String tableName, K key, V oldValue, V newValue); | ||
148 | + | ||
149 | + /** | ||
150 | + * Replaces the entry for the specified key only if currently mapped to the specified version. | ||
151 | + * | ||
152 | + * @param tableName table name | ||
153 | + * @param key The key to update | ||
154 | + * @param oldVersion The expected current version; the replace succeeds only if it matches. | ||
155 | + * @param newValue The value with which to replace the given key and version. | ||
156 | + * @return A completable future to be completed with the result once complete. | ||
157 | + */ | ||
158 | + CompletableFuture<Boolean> replace(String tableName, K key, long oldVersion, V newValue); | ||
159 | + | ||
160 | + /** | ||
161 | + * Performs an atomic batch update operation, i.e. either all operations in the batch succeed or | ||
162 | + * none do and no state changes are made. | ||
163 | + * | ||
164 | + * @param updates list of updates to apply atomically. | ||
165 | + * @return A completable future to be completed with the result once complete. | ||
166 | + */ | ||
167 | + CompletableFuture<Boolean> atomicBatchUpdate(List<UpdateOperation<K, V>> updates); | ||
168 | +} |
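The get/replace pair above is what makes optimistic concurrency possible. Below is a hedged sketch (the helper name, table and types are illustrative, not part of this change) of a compare-and-swap style update built only from calls in this interface.

    // Sketch: update one entry only if nobody else changed it since we read it.
    // A false result means the version moved on and the caller should re-read and retry.
    static CompletableFuture<Boolean> casUpdate(DatabaseProxy<String, byte[]> proxy,
                                                String table, String key, byte[] newValue) {
        return proxy.get(table, key)
                    .thenCompose(current -> current == null
                            // absent: insert only if still absent (putIfAbsent completes with null on success)
                            ? proxy.putIfAbsent(table, key, newValue).thenApply(previous -> previous == null)
                            // present: replace only if the version we read is still the current one
                            : proxy.replace(table, key, current.version(), newValue));
    }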
core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseState.java
0 → 100644
1 | +package org.onosproject.store.consistent.impl; | ||
2 | + | ||
3 | +import java.util.Collection; | ||
4 | +import java.util.List; | ||
5 | +import java.util.Map.Entry; | ||
6 | +import java.util.Set; | ||
7 | + | ||
8 | +import net.kuujo.copycat.state.Command; | ||
9 | +import net.kuujo.copycat.state.Initializer; | ||
10 | +import net.kuujo.copycat.state.Query; | ||
11 | +import net.kuujo.copycat.state.StateContext; | ||
12 | + | ||
13 | +/** | ||
14 | + * Database state. | ||
15 | + * | ||
16 | + */ | ||
17 | +public interface DatabaseState<K, V> { | ||
18 | + | ||
19 | + /** | ||
20 | + * Initializes the database state. | ||
21 | + * | ||
22 | + * @param context The map state context. | ||
23 | + */ | ||
24 | + @Initializer | ||
25 | + public void init(StateContext<DatabaseState<K, V>> context); | ||
26 | + | ||
27 | + @Query | ||
28 | + int size(String tableName); | ||
29 | + | ||
30 | + @Query | ||
31 | + boolean isEmpty(String tableName); | ||
32 | + | ||
33 | + @Query | ||
34 | + boolean containsKey(String tableName, K key); | ||
35 | + | ||
36 | + @Query | ||
37 | + boolean containsValue(String tableName, V value); | ||
38 | + | ||
39 | + @Query | ||
40 | + Versioned<V> get(String tableName, K key); | ||
41 | + | ||
42 | + @Command | ||
43 | + Versioned<V> put(String tableName, K key, V value); | ||
44 | + | ||
45 | + @Command | ||
46 | + Versioned<V> remove(String tableName, K key); | ||
47 | + | ||
48 | + @Command | ||
49 | + void clear(String tableName); | ||
50 | + | ||
51 | + @Query | ||
52 | + Set<K> keySet(String tableName); | ||
53 | + | ||
54 | + @Query | ||
55 | + Collection<Versioned<V>> values(String tableName); | ||
56 | + | ||
57 | + @Query | ||
58 | + Set<Entry<K, Versioned<V>>> entrySet(String tableName); | ||
59 | + | ||
60 | + @Command | ||
61 | + Versioned<V> putIfAbsent(String tableName, K key, V value); | ||
62 | + | ||
63 | + @Command | ||
64 | + boolean remove(String tableName, K key, V value); | ||
65 | + | ||
66 | + @Command | ||
67 | + boolean remove(String tableName, K key, long version); | ||
68 | + | ||
69 | + @Command | ||
70 | + boolean replace(String tableName, K key, V oldValue, V newValue); | ||
71 | + | ||
72 | + @Command | ||
73 | + boolean replace(String tableName, K key, long oldVersion, V newValue); | ||
74 | + | ||
75 | + @Command | ||
76 | + boolean batchUpdate(List<UpdateOperation<K, V>> updates); | ||
77 | +} |
core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDatabase.java
0 → 100644
1 | +package org.onosproject.store.consistent.impl; | ||
2 | + | ||
3 | +import net.kuujo.copycat.resource.internal.ResourceContext; | ||
4 | +import net.kuujo.copycat.state.StateMachine; | ||
5 | +import net.kuujo.copycat.resource.internal.AbstractResource; | ||
6 | +import net.kuujo.copycat.state.internal.DefaultStateMachine; | ||
7 | +import net.kuujo.copycat.util.concurrent.Futures; | ||
8 | + | ||
9 | +import java.util.Collection; | ||
10 | +import java.util.List; | ||
11 | +import java.util.Map; | ||
12 | +import java.util.Set; | ||
13 | +import java.util.concurrent.CompletableFuture; | ||
14 | +import java.util.function.Supplier; | ||
15 | + | ||
16 | +/** | ||
17 | + * Default database. | ||
18 | + */ | ||
19 | +public class DefaultDatabase extends AbstractResource<Database> implements Database { | ||
20 | + private final StateMachine<DatabaseState<String, byte[]>> stateMachine; | ||
21 | + private DatabaseProxy<String, byte[]> proxy; | ||
22 | + | ||
23 | + @SuppressWarnings("unchecked") | ||
24 | + public DefaultDatabase(ResourceContext context) { | ||
25 | + super(context); | ||
26 | + this.stateMachine = new DefaultStateMachine(context, DatabaseState.class, DefaultDatabaseState.class); | ||
27 | + } | ||
28 | + | ||
29 | + /** | ||
30 | + * If the database is closed, returns a failed CompletableFuture. Otherwise, calls the given supplier and | ||
31 | + * returns its future result. | ||
32 | + * | ||
33 | + * @param supplier The supplier to call if the database is open. | ||
34 | + * @param <T> The future result type. | ||
36 | + * @return A completable future that is immediately failed if this database is closed. | ||
36 | + */ | ||
37 | + protected <T> CompletableFuture<T> checkOpen(Supplier<CompletableFuture<T>> supplier) { | ||
38 | + if (proxy == null) { | ||
39 | + return Futures.exceptionalFuture(new IllegalStateException("Database closed")); | ||
40 | + } | ||
41 | + return supplier.get(); | ||
42 | + } | ||
43 | + | ||
44 | + @Override | ||
45 | + public CompletableFuture<Integer> size(String tableName) { | ||
46 | + return checkOpen(() -> proxy.size(tableName)); | ||
47 | + } | ||
48 | + | ||
49 | + @Override | ||
50 | + public CompletableFuture<Boolean> isEmpty(String tableName) { | ||
51 | + return checkOpen(() -> proxy.isEmpty(tableName)); | ||
52 | + } | ||
53 | + | ||
54 | + @Override | ||
55 | + public CompletableFuture<Boolean> containsKey(String tableName, String key) { | ||
56 | + return checkOpen(() -> proxy.containsKey(tableName, key)); | ||
57 | + } | ||
58 | + | ||
59 | + @Override | ||
60 | + public CompletableFuture<Boolean> containsValue(String tableName, byte[] value) { | ||
61 | + return checkOpen(() -> proxy.containsValue(tableName, value)); | ||
62 | + } | ||
63 | + | ||
64 | + @Override | ||
65 | + public CompletableFuture<Versioned<byte[]>> get(String tableName, String key) { | ||
66 | + return checkOpen(() -> proxy.get(tableName, key)); | ||
67 | + } | ||
68 | + | ||
69 | + @Override | ||
70 | + public CompletableFuture<Versioned<byte[]>> put(String tableName, String key, byte[] value) { | ||
71 | + return checkOpen(() -> proxy.put(tableName, key, value)); | ||
72 | + } | ||
73 | + | ||
74 | + @Override | ||
75 | + public CompletableFuture<Versioned<byte[]>> remove(String tableName, String key) { | ||
76 | + return checkOpen(() -> proxy.remove(tableName, key)); | ||
77 | + } | ||
78 | + | ||
79 | + @Override | ||
80 | + public CompletableFuture<Void> clear(String tableName) { | ||
81 | + return checkOpen(() -> proxy.clear(tableName)); | ||
82 | + } | ||
83 | + | ||
84 | + @Override | ||
85 | + public CompletableFuture<Set<String>> keySet(String tableName) { | ||
86 | + return checkOpen(() -> proxy.keySet(tableName)); | ||
87 | + } | ||
88 | + | ||
89 | + @Override | ||
90 | + public CompletableFuture<Collection<Versioned<byte[]>>> values(String tableName) { | ||
91 | + return checkOpen(() -> proxy.values(tableName)); | ||
92 | + } | ||
93 | + | ||
94 | + @Override | ||
95 | + public CompletableFuture<Set<Map.Entry<String, Versioned<byte[]>>>> entrySet(String tableName) { | ||
96 | + return checkOpen(() -> proxy.entrySet(tableName)); | ||
97 | + } | ||
98 | + | ||
99 | + @Override | ||
100 | + public CompletableFuture<Versioned<byte[]>> putIfAbsent(String tableName, String key, byte[] value) { | ||
101 | + return checkOpen(() -> proxy.putIfAbsent(tableName, key, value)); | ||
102 | + } | ||
103 | + | ||
104 | + @Override | ||
105 | + public CompletableFuture<Boolean> remove(String tableName, String key, byte[] value) { | ||
106 | + return checkOpen(() -> proxy.remove(tableName, key, value)); | ||
107 | + } | ||
108 | + | ||
109 | + @Override | ||
110 | + public CompletableFuture<Boolean> remove(String tableName, String key, long version) { | ||
111 | + return checkOpen(() -> proxy.remove(tableName, key, version)); | ||
112 | + } | ||
113 | + | ||
114 | + @Override | ||
115 | + public CompletableFuture<Boolean> replace(String tableName, String key, byte[] oldValue, byte[] newValue) { | ||
116 | + return checkOpen(() -> proxy.replace(tableName, key, oldValue, newValue)); | ||
117 | + } | ||
118 | + | ||
119 | + @Override | ||
120 | + public CompletableFuture<Boolean> replace(String tableName, String key, long oldVersion, byte[] newValue) { | ||
121 | + return checkOpen(() -> proxy.replace(tableName, key, oldVersion, newValue)); | ||
122 | + } | ||
123 | + | ||
124 | + @Override | ||
125 | + public CompletableFuture<Boolean> atomicBatchUpdate(List<UpdateOperation<String, byte[]>> updates) { | ||
126 | + return checkOpen(() -> proxy.atomicBatchUpdate(updates)); | ||
127 | + } | ||
128 | + | ||
129 | + @Override | ||
130 | + @SuppressWarnings("unchecked") | ||
131 | + public synchronized CompletableFuture<Database> open() { | ||
132 | + return runStartupTasks() | ||
133 | + .thenCompose(v -> stateMachine.open()) | ||
134 | + .thenRun(() -> { | ||
135 | + this.proxy = stateMachine.createProxy(DatabaseProxy.class); | ||
136 | + }) | ||
137 | + .thenApply(v -> this); | ||
138 | + } | ||
139 | + | ||
140 | + @Override | ||
141 | + public synchronized CompletableFuture<Void> close() { | ||
142 | + proxy = null; | ||
143 | + return stateMachine.close() | ||
144 | + .thenCompose(v -> runShutdownTasks()); | ||
145 | + } | ||
146 | +} |
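Because the state-machine proxy is only created inside open(), any call made before open() (or after close()) fails fast through checkOpen() rather than blocking. A small lifecycle sketch with made-up names:

    Database db = Database.create("my-database");

    // Before open(): checkOpen() fails the future immediately with IllegalStateException("Database closed").
    db.size("flows").whenComplete((size, error) -> System.out.println("before open: " + error));

    // After open(): calls are served through the DatabaseProxy created from the state machine.
    db.open()
      .thenCompose(v -> db.size("flows"))
      .thenAccept(size -> System.out.println("size = " + size))
      .thenCompose(v -> db.close());          // close() drops the proxy before closing the state machine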
core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DefaultDatabaseState.java
0 → 100644
1 | +package org.onosproject.store.consistent.impl; | ||
2 | + | ||
3 | +import java.util.Arrays; | ||
4 | +import java.util.Collection; | ||
5 | +import java.util.HashMap; | ||
6 | +import java.util.List; | ||
7 | +import java.util.Map; | ||
8 | +import java.util.Map.Entry; | ||
9 | +import java.util.Set; | ||
10 | + | ||
11 | +import net.kuujo.copycat.state.Initializer; | ||
12 | +import net.kuujo.copycat.state.StateContext; | ||
13 | + | ||
14 | +/** | ||
15 | + * Default database state. | ||
16 | + * | ||
17 | + * @param <K> key type | ||
18 | + * @param <V> value type | ||
19 | + */ | ||
20 | +public class DefaultDatabaseState<K, V> implements DatabaseState<K, V> { | ||
21 | + | ||
22 | + private Long nextVersion; | ||
23 | + private Map<String, Map<K, Versioned<V>>> tables; | ||
24 | + | ||
25 | + @Initializer | ||
26 | + @Override | ||
27 | + public void init(StateContext<DatabaseState<K, V>> context) { | ||
28 | + tables = context.get("tables"); | ||
29 | + if (tables == null) { | ||
30 | + tables = new HashMap<>(); | ||
31 | + context.put("tables", tables); | ||
32 | + } | ||
33 | + nextVersion = context.get("nextVersion"); | ||
34 | + if (nextVersion == null) { | ||
35 | + nextVersion = Long.valueOf(0); | ||
36 | + context.put("nextVersion", nextVersion); | ||
37 | + } | ||
38 | + } | ||
39 | + | ||
40 | + private Map<K, Versioned<V>> getTableMap(String tableName) { | ||
41 | + Map<K, Versioned<V>> table = tables.get(tableName); | ||
42 | + if (table == null) { | ||
43 | + table = new HashMap<>(); | ||
44 | + tables.put(tableName, table); | ||
45 | + } | ||
46 | + return table; | ||
47 | + } | ||
48 | + | ||
49 | + @Override | ||
50 | + public int size(String tableName) { | ||
51 | + return getTableMap(tableName).size(); | ||
52 | + } | ||
53 | + | ||
54 | + @Override | ||
55 | + public boolean isEmpty(String tableName) { | ||
56 | + return getTableMap(tableName).isEmpty(); | ||
57 | + } | ||
58 | + | ||
59 | + @Override | ||
60 | + public boolean containsKey(String tableName, K key) { | ||
61 | + return getTableMap(tableName).containsKey(key); | ||
62 | + } | ||
63 | + | ||
64 | + @Override | ||
65 | + public boolean containsValue(String tableName, V value) { | ||
66 | + return getTableMap(tableName).values().stream().anyMatch(v -> checkEquality(v.value(), value)); | ||
67 | + } | ||
68 | + | ||
69 | + @Override | ||
70 | + public Versioned<V> get(String tableName, K key) { | ||
71 | + return getTableMap(tableName).get(key); | ||
72 | + } | ||
73 | + | ||
74 | + @Override | ||
75 | + public Versioned<V> put(String tableName, K key, V value) { | ||
76 | + return getTableMap(tableName).put(key, new Versioned<>(value, ++nextVersion)); | ||
77 | + } | ||
78 | + | ||
79 | + @Override | ||
80 | + public Versioned<V> remove(String tableName, K key) { | ||
81 | + return getTableMap(tableName).remove(key); | ||
82 | + } | ||
83 | + | ||
84 | + @Override | ||
85 | + public void clear(String tableName) { | ||
86 | + getTableMap(tableName).clear(); | ||
87 | + } | ||
88 | + | ||
89 | + @Override | ||
90 | + public Set<K> keySet(String tableName) { | ||
91 | + return getTableMap(tableName).keySet(); | ||
92 | + } | ||
93 | + | ||
94 | + @Override | ||
95 | + public Collection<Versioned<V>> values(String tableName) { | ||
96 | + return getTableMap(tableName).values(); | ||
97 | + } | ||
98 | + | ||
99 | + @Override | ||
100 | + public Set<Entry<K, Versioned<V>>> entrySet(String tableName) { | ||
101 | + return getTableMap(tableName).entrySet(); | ||
102 | + } | ||
103 | + | ||
104 | + @Override | ||
105 | + public Versioned<V> putIfAbsent(String tableName, K key, V value) { | ||
106 | + Versioned<V> existingValue = getTableMap(tableName).get(key); | ||
107 | + return existingValue != null ? existingValue : put(tableName, key, value); | ||
108 | + } | ||
109 | + | ||
110 | + @Override | ||
111 | + public boolean remove(String tableName, K key, V value) { | ||
112 | + Versioned<V> existing = getTableMap(tableName).get(key); | ||
113 | + if (existing != null && existing.value().equals(value)) { | ||
114 | + getTableMap(tableName).remove(key); | ||
115 | + return true; | ||
116 | + } | ||
117 | + return false; | ||
118 | + } | ||
119 | + | ||
120 | + @Override | ||
121 | + public boolean remove(String tableName, K key, long version) { | ||
122 | + Versioned<V> existing = getTableMap(tableName).get(key); | ||
123 | + if (existing != null && existing.version() == version) { | ||
124 | + remove(tableName, key); | ||
125 | + return true; | ||
126 | + } | ||
127 | + return false; | ||
128 | + } | ||
129 | + | ||
130 | + @Override | ||
131 | + public boolean replace(String tableName, K key, V oldValue, V newValue) { | ||
132 | + Versioned<V> existing = getTableMap(tableName).get(key); | ||
133 | + if (existing != null && existing.value().equals(oldValue)) { | ||
134 | + put(tableName, key, newValue); | ||
135 | + return true; | ||
136 | + } | ||
137 | + return false; | ||
138 | + } | ||
139 | + | ||
140 | + @Override | ||
141 | + public boolean replace(String tableName, K key, long oldVersion, V newValue) { | ||
142 | + Versioned<V> existing = getTableMap(tableName).get(key); | ||
143 | + if (existing != null && existing.version() == oldVersion) { | ||
144 | + put(tableName, key, newValue); | ||
145 | + return true; | ||
146 | + } | ||
147 | + return false; | ||
148 | + } | ||
149 | + | ||
150 | + @Override | ||
151 | + public boolean batchUpdate(List<UpdateOperation<K, V>> updates) { | ||
152 | + if (updates.stream().anyMatch(update -> !checkIfUpdateIsPossible(update))) { | ||
153 | + return false; | ||
154 | + } else { | ||
155 | + updates.stream().forEach(this::doUpdate); | ||
156 | + return true; | ||
157 | + } | ||
158 | + } | ||
159 | + | ||
160 | + private void doUpdate(UpdateOperation<K, V> update) { | ||
161 | + String tableName = update.tableName(); | ||
162 | + K key = update.key(); | ||
163 | + switch (update.type()) { | ||
164 | + case PUT: | ||
165 | + put(tableName, key, update.value()); | ||
166 | + return; | ||
167 | + case REMOVE: | ||
168 | + remove(tableName, key); | ||
169 | + return; | ||
170 | + case PUT_IF_ABSENT: | ||
171 | + putIfAbsent(tableName, key, update.value()); | ||
172 | + return; | ||
173 | + case PUT_IF_VERSION_MATCH: | ||
174 | + replace(tableName, key, update.currentVersion(), update.value()); | ||
175 | + return; | ||
176 | + case PUT_IF_VALUE_MATCH: | ||
177 | + replace(tableName, key, update.currentValue(), update.value()); | ||
178 | + return; | ||
179 | + case REMOVE_IF_VERSION_MATCH: | ||
180 | + remove(tableName, key, update.currentVersion()); | ||
181 | + return; | ||
182 | + case REMOVE_IF_VALUE_MATCH: | ||
183 | + remove(tableName, key, update.currentValue()); | ||
184 | + return; | ||
185 | + default: | ||
186 | + throw new IllegalStateException("Unsupported type: " + update.type()); | ||
187 | + } | ||
188 | + } | ||
189 | + | ||
190 | + private boolean checkIfUpdateIsPossible(UpdateOperation<K, V> update) { | ||
191 | + Versioned<V> existingEntry = get(update.tableName(), update.key()); | ||
192 | + switch (update.type()) { | ||
193 | + case PUT: | ||
194 | + case REMOVE: | ||
195 | + return true; | ||
196 | + case PUT_IF_ABSENT: | ||
197 | + return existingEntry == null; | ||
198 | + case PUT_IF_VERSION_MATCH: | ||
199 | + return existingEntry != null && existingEntry.version() == update.currentVersion(); | ||
200 | + case PUT_IF_VALUE_MATCH: | ||
201 | + return existingEntry != null && existingEntry.value().equals(update.currentValue()); | ||
202 | + case REMOVE_IF_VERSION_MATCH: | ||
203 | + return existingEntry == null || existingEntry.version() == update.currentVersion(); | ||
204 | + case REMOVE_IF_VALUE_MATCH: | ||
205 | + return existingEntry == null || existingEntry.value().equals(update.currentValue()); | ||
206 | + default: | ||
207 | + throw new IllegalStateException("Unsupported type: " + update.type()); | ||
208 | + } | ||
209 | + } | ||
210 | + | ||
211 | + private boolean checkEquality(V value1, V value2) { | ||
212 | + if (value1 instanceof byte[]) { | ||
213 | + return Arrays.equals((byte[]) value1, (byte[]) value2); | ||
214 | + } | ||
215 | + return value1.equals(value2); | ||
216 | + } | ||
217 | +} |
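Note that nextVersion is a single counter shared by every table in this state machine, so versions are globally monotonic rather than per-key. A sketch of the resulting behaviour, assuming a freshly initialized state, an already opened db handle, and byte[] values valueA/valueB (all illustrative):

    db.put("flows", "f1", valueA)                          // stored as version 1
      .thenCompose(v -> db.put("hosts", "h1", valueB))     // stored as version 2: the counter spans tables
      .thenCompose(v -> db.get("flows", "f1"))
      .thenAccept(entry -> {
          // entry.version() is still 1; a later overwrite of "f1" would take whatever the counter is at that point
      });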
core/store/dist/src/main/java/org/onosproject/store/consistent/impl/PartitionedDatabase.java
0 → 100644
1 | +package org.onosproject.store.consistent.impl; | ||
2 | + | ||
3 | +import java.util.Collection; | ||
4 | +import java.util.List; | ||
5 | +import java.util.Map; | ||
6 | +import java.util.Map.Entry; | ||
7 | +import java.util.Set; | ||
8 | +import java.util.concurrent.CompletableFuture; | ||
9 | +import java.util.concurrent.CopyOnWriteArrayList; | ||
10 | +import java.util.concurrent.atomic.AtomicBoolean; | ||
11 | +import java.util.concurrent.atomic.AtomicInteger; | ||
12 | +import java.util.stream.Collectors; | ||
13 | + | ||
14 | +import com.google.common.collect.ImmutableMap; | ||
15 | +import com.google.common.collect.Lists; | ||
16 | +import com.google.common.collect.Maps; | ||
17 | +import com.google.common.collect.Sets; | ||
18 | + | ||
19 | +import net.kuujo.copycat.cluster.internal.coordinator.ClusterCoordinator; | ||
20 | + | ||
21 | +/** | ||
22 | + * A database that partitions the keys across one or more database partitions. | ||
23 | + */ | ||
24 | +public class PartitionedDatabase implements DatabaseProxy<String, byte[]>, PartitionedDatabaseManager { | ||
25 | + | ||
26 | + private Partitioner<String> partitioner; | ||
27 | + private final ClusterCoordinator coordinator; | ||
28 | + private final Map<String, Database> partitions = Maps.newConcurrentMap(); | ||
29 | + | ||
30 | + protected PartitionedDatabase(ClusterCoordinator coordinator) { | ||
31 | + this.coordinator = coordinator; | ||
32 | + } | ||
33 | + | ||
34 | + @Override | ||
35 | + public void registerPartition(String name, Database partition) { | ||
36 | + partitions.put(name, partition); | ||
37 | + } | ||
38 | + | ||
39 | + @Override | ||
40 | + public Map<String, Database> getRegisteredPartitions() { | ||
41 | + return ImmutableMap.copyOf(partitions); | ||
42 | + } | ||
43 | + | ||
44 | + @Override | ||
45 | + public CompletableFuture<Integer> size(String tableName) { | ||
46 | + AtomicInteger totalSize = new AtomicInteger(0); | ||
47 | + return CompletableFuture.allOf(partitions | ||
48 | + .values() | ||
49 | + .stream() | ||
50 | + .map(p -> p.size(tableName).thenApply(totalSize::addAndGet)) | ||
51 | + .toArray(CompletableFuture[]::new)) | ||
52 | + .thenApply(v -> totalSize.get()); | ||
53 | + } | ||
54 | + | ||
55 | + @Override | ||
56 | + public CompletableFuture<Boolean> isEmpty(String tableName) { | ||
57 | + return size(tableName).thenApply(size -> size == 0); | ||
58 | + } | ||
59 | + | ||
60 | + @Override | ||
61 | + public CompletableFuture<Boolean> containsKey(String tableName, String key) { | ||
62 | + return partitioner.getPartition(tableName, key).containsKey(tableName, key); | ||
63 | + } | ||
64 | + | ||
65 | + @Override | ||
66 | + public CompletableFuture<Boolean> containsValue(String tableName, byte[] value) { | ||
67 | + AtomicBoolean containsValue = new AtomicBoolean(false); | ||
68 | + return CompletableFuture.allOf(partitions | ||
69 | + .values() | ||
70 | + .stream() | ||
71 | + .map(p -> p.containsValue(tableName, value).thenApply(v -> containsValue.compareAndSet(false, v))) | ||
72 | + .toArray(CompletableFuture[]::new)) | ||
73 | + .thenApply(v -> containsValue.get()); | ||
74 | + } | ||
75 | + | ||
76 | + @Override | ||
77 | + public CompletableFuture<Versioned<byte[]>> get(String tableName, String key) { | ||
78 | + return partitioner.getPartition(tableName, key).get(tableName, key); | ||
79 | + } | ||
80 | + | ||
81 | + @Override | ||
82 | + public CompletableFuture<Versioned<byte[]>> put(String tableName, String key, byte[] value) { | ||
83 | + return partitioner.getPartition(tableName, key).put(tableName, key, value); | ||
84 | + } | ||
85 | + | ||
86 | + @Override | ||
87 | + public CompletableFuture<Versioned<byte[]>> remove(String tableName, String key) { | ||
88 | + return partitioner.getPartition(tableName, key).remove(tableName, key); | ||
89 | + } | ||
90 | + | ||
91 | + @Override | ||
92 | + public CompletableFuture<Void> clear(String tableName) { | ||
93 | + return CompletableFuture.allOf(partitions | ||
94 | + .values() | ||
95 | + .stream() | ||
96 | + .map(p -> p.clear(tableName)) | ||
97 | + .toArray(CompletableFuture[]::new)); | ||
98 | + } | ||
99 | + | ||
100 | + @Override | ||
101 | + public CompletableFuture<Set<String>> keySet(String tableName) { | ||
102 | + Set<String> keySet = Sets.newConcurrentHashSet(); | ||
103 | + return CompletableFuture.allOf(partitions | ||
104 | + .values() | ||
105 | + .stream() | ||
106 | + .map(p -> p.keySet(tableName).thenApply(keySet::addAll)) | ||
107 | + .toArray(CompletableFuture[]::new)) | ||
108 | + .thenApply(v -> keySet); | ||
109 | + } | ||
110 | + | ||
111 | + @Override | ||
112 | + public CompletableFuture<Collection<Versioned<byte[]>>> values(String tableName) { | ||
113 | + List<Versioned<byte[]>> values = new CopyOnWriteArrayList<>(); | ||
114 | + return CompletableFuture.allOf(partitions | ||
115 | + .values() | ||
116 | + .stream() | ||
117 | + .map(p -> p.values(tableName).thenApply(values::addAll)) | ||
118 | + .toArray(CompletableFuture[]::new)) | ||
119 | + .thenApply(v -> values); | ||
120 | + } | ||
121 | + | ||
122 | + @Override | ||
123 | + public CompletableFuture<Set<Entry<String, Versioned<byte[]>>>> entrySet(String tableName) { | ||
124 | + Set<Entry<String, Versioned<byte[]>>> entrySet = Sets.newConcurrentHashSet(); | ||
125 | + return CompletableFuture.allOf(partitions | ||
126 | + .values() | ||
127 | + .stream() | ||
128 | + .map(p -> p.entrySet(tableName).thenApply(entrySet::addAll)) | ||
129 | + .toArray(CompletableFuture[]::new)) | ||
130 | + .thenApply(v -> entrySet); | ||
131 | + } | ||
132 | + | ||
133 | + @Override | ||
134 | + public CompletableFuture<Versioned<byte[]>> putIfAbsent(String tableName, String key, byte[] value) { | ||
135 | + return partitioner.getPartition(tableName, key).putIfAbsent(tableName, key, value); | ||
136 | + } | ||
137 | + | ||
138 | + @Override | ||
139 | + public CompletableFuture<Boolean> remove(String tableName, String key, byte[] value) { | ||
140 | + return partitioner.getPartition(tableName, key).remove(tableName, key, value); | ||
141 | + } | ||
142 | + | ||
143 | + @Override | ||
144 | + public CompletableFuture<Boolean> remove(String tableName, String key, long version) { | ||
145 | + return partitioner.getPartition(tableName, key).remove(tableName, key, version); | ||
146 | + } | ||
147 | + | ||
148 | + @Override | ||
149 | + public CompletableFuture<Boolean> replace(String tableName, String key, byte[] oldValue, byte[] newValue) { | ||
150 | + return partitioner.getPartition(tableName, key).replace(tableName, key, oldValue, newValue); | ||
151 | + } | ||
152 | + | ||
153 | + @Override | ||
154 | + public CompletableFuture<Boolean> replace(String tableName, String key, long oldVersion, byte[] newValue) { | ||
155 | + return partitioner.getPartition(tableName, key).replace(tableName, key, oldVersion, newValue); | ||
156 | + } | ||
157 | + | ||
158 | + @Override | ||
159 | + public CompletableFuture<Boolean> atomicBatchUpdate(List<UpdateOperation<String, byte[]>> updates) { | ||
160 | + Map<Database, List<UpdateOperation<String, byte[]>>> perPartitionUpdates = Maps.newHashMap(); | ||
161 | + for (UpdateOperation<String, byte[]> update : updates) { | ||
162 | + Database partition = partitioner.getPartition(update.tableName(), update.key()); | ||
163 | + List<UpdateOperation<String, byte[]>> partitionUpdates = perPartitionUpdates.get(partition); | ||
164 | + if (partitionUpdates == null) { | ||
165 | + partitionUpdates = Lists.newArrayList(); | ||
166 | + perPartitionUpdates.put(partition, partitionUpdates); | ||
167 | + } | ||
168 | + partitionUpdates.add(update); | ||
169 | + } | ||
170 | + if (perPartitionUpdates.size() > 1) { | ||
171 | + // TODO | ||
172 | + throw new UnsupportedOperationException("Cross partition transactional updates are not supported."); | ||
173 | + } else { | ||
174 | + Entry<Database, List<UpdateOperation<String, byte[]>>> only = | ||
175 | + perPartitionUpdates.entrySet().iterator().next(); | ||
176 | + return only.getKey().atomicBatchUpdate(only.getValue()); | ||
177 | + } | ||
178 | + } | ||
179 | + | ||
180 | + @Override | ||
181 | + public void setPartitioner(Partitioner<String> partitioner) { | ||
182 | + this.partitioner = partitioner; | ||
183 | + } | ||
184 | + | ||
185 | + @Override | ||
186 | + public CompletableFuture<PartitionedDatabase> open() { | ||
187 | + return coordinator.open().thenCompose(c -> CompletableFuture.allOf(partitions | ||
188 | + .values() | ||
189 | + .stream() | ||
190 | + .map(Database::open) | ||
191 | + .collect(Collectors.toList()) | ||
192 | + .toArray(new CompletableFuture[partitions.size()])) | ||
193 | + .thenApply(v -> this)); | ||
194 | + | ||
195 | + } | ||
196 | + | ||
197 | + @Override | ||
198 | + public CompletableFuture<Void> close() { | ||
199 | + CompletableFuture<Void> closePartitions = CompletableFuture.allOf(partitions | ||
200 | + .values() | ||
201 | + .stream() | ||
202 | + .map(database -> database.close()) | ||
203 | + .collect(Collectors.toList()) | ||
204 | + .toArray(new CompletableFuture[partitions.size()])); | ||
205 | + CompletableFuture<Void> closeCoordinator = coordinator.close(); | ||
206 | + return closePartitions.thenCompose(v -> closeCoordinator); | ||
207 | + } | ||
208 | +} |
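To summarize the routing above (sketch only, names illustrative): single-key operations go to exactly one partition chosen by the partitioner, while table-wide operations fan out to every partition and merge the partial results.

    partitionedDatabase.get("flows", "flow-1");   // routed to one partition by the partitioner
    partitionedDatabase.size("flows");            // fans out; per-partition sizes are summed
    partitionedDatabase.keySet("flows");          // fans out; per-partition key sets are unioned
    // atomicBatchUpdate() currently accepts only batches whose keys all land in the same partition.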
core/store/dist/src/main/java/org/onosproject/store/consistent/impl/PartitionedDatabaseConfig.java
0 → 100644
1 | +package org.onosproject.store.consistent.impl; | ||
2 | + | ||
3 | +import java.util.Collections; | ||
4 | +import java.util.HashMap; | ||
5 | +import java.util.Map; | ||
6 | + | ||
7 | +/** | ||
8 | + * Partitioned database configuration. | ||
9 | + */ | ||
10 | +public class PartitionedDatabaseConfig { | ||
11 | + private final Map<String, DatabaseConfig> partitions = new HashMap<>(); | ||
12 | + | ||
13 | + /** | ||
14 | + * Returns the configuration for all partitions. | ||
15 | + * @return partition name to configuration mapping. | ||
16 | + */ | ||
17 | + public Map<String, DatabaseConfig> partitions() { | ||
18 | + return Collections.unmodifiableMap(partitions); | ||
19 | + } | ||
20 | + | ||
21 | + /** | ||
22 | + * Adds the specified partition name and configuration. | ||
23 | + * @param name partition name. | ||
24 | + * @param config partition config | ||
25 | + * @return this instance | ||
26 | + */ | ||
27 | + public PartitionedDatabaseConfig withPartition(String name, DatabaseConfig config) { | ||
28 | + partitions.put(name, config); | ||
29 | + return this; | ||
30 | + } | ||
31 | +} |
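A small sketch of assembling a multi-partition configuration (partition names are illustrative):

    PartitionedDatabaseConfig dbConfig = new PartitionedDatabaseConfig()
            .withPartition("p1", new DatabaseConfig())
            .withPartition("p2", new DatabaseConfig())
            .withPartition("p3", new DatabaseConfig());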
core/store/dist/src/main/java/org/onosproject/store/consistent/impl/PartitionedDatabaseManager.java
0 → 100644
1 | +package org.onosproject.store.consistent.impl; | ||
2 | + | ||
3 | +import java.util.Map; | ||
4 | +import java.util.concurrent.CompletableFuture; | ||
5 | +import java.util.concurrent.Executors; | ||
6 | + | ||
7 | +import net.kuujo.copycat.CopycatConfig; | ||
8 | +import net.kuujo.copycat.cluster.ClusterConfig; | ||
9 | +import net.kuujo.copycat.cluster.internal.coordinator.ClusterCoordinator; | ||
10 | +import net.kuujo.copycat.cluster.internal.coordinator.DefaultClusterCoordinator; | ||
11 | +import net.kuujo.copycat.util.concurrent.NamedThreadFactory; | ||
12 | + | ||
13 | +public interface PartitionedDatabaseManager { | ||
14 | + /** | ||
15 | + * Opens the database. | ||
16 | + * | ||
17 | + * @return A completable future to be completed with the result once complete. | ||
18 | + */ | ||
19 | + CompletableFuture<PartitionedDatabase> open(); | ||
20 | + | ||
21 | + /** | ||
22 | + * Closes the database. | ||
23 | + * | ||
24 | + * @return A completable future to be completed with the result once complete. | ||
25 | + */ | ||
26 | + CompletableFuture<Void> close(); | ||
27 | + | ||
28 | + /** | ||
29 | + * Sets the partitioner to use for mapping keys to partitions. | ||
30 | + * | ||
31 | + * @param partitioner partitioner | ||
32 | + */ | ||
33 | + void setPartitioner(Partitioner<String> partitioner); | ||
34 | + | ||
35 | + /** | ||
36 | + * Registers a new partition. | ||
37 | + * | ||
38 | + * @param partitionName partition name. | ||
39 | + * @param partition partition. | ||
40 | + */ | ||
41 | + void registerPartition(String partitionName, Database partition); | ||
42 | + | ||
43 | + /** | ||
44 | + * Returns all the registered database partitions. | ||
45 | + * | ||
46 | + * @return mapping of all registered database partitions. | ||
47 | + */ | ||
48 | + Map<String, Database> getRegisteredPartitions(); | ||
49 | + | ||
50 | + | ||
51 | + /** | ||
52 | + * Creates a new partitioned database. | ||
53 | + * | ||
54 | + * @param name The database name. | ||
55 | + * @param clusterConfig The cluster configuration. | ||
56 | + * @param partitionedDatabaseConfig The database configuration. | ||
57 | + * | ||
58 | + * @return The database. | ||
59 | + */ | ||
60 | + public static PartitionedDatabase create( | ||
61 | + String name, | ||
62 | + ClusterConfig clusterConfig, | ||
63 | + PartitionedDatabaseConfig partitionedDatabaseConfig) { | ||
64 | + CopycatConfig copycatConfig = new CopycatConfig() | ||
65 | + .withName(name) | ||
66 | + .withClusterConfig(clusterConfig) | ||
67 | + .withDefaultExecutor(Executors.newSingleThreadExecutor(new NamedThreadFactory("copycat-coordinator-%d"))); | ||
68 | + ClusterCoordinator coordinator = new DefaultClusterCoordinator(copycatConfig.resolve()); | ||
69 | + PartitionedDatabase partitionedDatabase = new PartitionedDatabase(coordinator); | ||
70 | + partitionedDatabaseConfig.partitions().forEach((partitionName, partitionConfig) -> | ||
71 | + partitionedDatabase.registerPartition(partitionName, | ||
72 | + coordinator.getResource(partitionName, partitionConfig.resolve(clusterConfig) | ||
73 | + .withDefaultSerializer(copycatConfig.getDefaultSerializer().copy()) | ||
74 | + .withDefaultExecutor(copycatConfig.getDefaultExecutor())))); | ||
75 | + partitionedDatabase.setPartitioner( | ||
76 | + new SimpleKeyHashPartitioner<>(partitionedDatabase.getRegisteredPartitions())); | ||
77 | + return partitionedDatabase; | ||
78 | + } | ||
79 | +} |
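Putting the pieces together, a hedged sketch of creating and opening a partitioned database. Here dbConfig is the PartitionedDatabaseConfig sketched earlier, serializedDevice is an assumed byte[] payload, and a real deployment would populate the ClusterConfig with the controller cluster members instead of using the defaults.

    // Sketch: create() wires one Copycat resource per configured partition and a
    // SimpleKeyHashPartitioner over them; open() then opens the coordinator and every partition.
    PartitionedDatabase database =
            PartitionedDatabaseManager.create("onos-store", new ClusterConfig(), dbConfig);

    database.open()
            .thenCompose(v -> database.put("devices", "of:0000000000000001", serializedDevice))
            .thenCompose(v -> database.close());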
core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Partitioner.java
0 → 100644
1 | +package org.onosproject.store.consistent.impl; | ||
2 | + | ||
3 | +/** | ||
4 | + * Partitioner is responsible for mapping keys to individual database partitions. | ||
5 | + * | ||
6 | + * @param <K> key type. | ||
7 | + */ | ||
8 | +public interface Partitioner<K> { | ||
9 | + | ||
10 | + /** | ||
11 | + * Returns the database partition. | ||
12 | + * @param tableName table name | ||
13 | + * @param key key | ||
14 | + * @return Database partition | ||
15 | + */ | ||
16 | + Database getPartition(String tableName, K key); | ||
17 | +} |
core/store/dist/src/main/java/org/onosproject/store/consistent/impl/SimpleKeyHashPartitioner.java
0 → 100644
1 | +package org.onosproject.store.consistent.impl; | ||
2 | + | ||
3 | +import java.util.Collections; | ||
4 | +import java.util.List; | ||
5 | +import java.util.Map; | ||
6 | + | ||
7 | +import com.google.common.collect.ImmutableMap; | ||
8 | +import com.google.common.collect.Lists; | ||
9 | + | ||
10 | +/** | ||
11 | + * A simple Partitioner that uses the key hashCode to map | ||
12 | + * key to a partition. | ||
13 | + * | ||
14 | + * @param <K> key type. | ||
15 | + */ | ||
16 | +public class SimpleKeyHashPartitioner<K> implements Partitioner<K> { | ||
17 | + | ||
18 | + private final Map<String, Database> partitionMap; | ||
19 | + private final List<String> sortedPartitionNames; | ||
20 | + | ||
21 | + public SimpleKeyHashPartitioner(Map<String, Database> partitionMap) { | ||
22 | + this.partitionMap = ImmutableMap.copyOf(partitionMap); | ||
23 | + sortedPartitionNames = Lists.newArrayList(this.partitionMap.keySet()); | ||
24 | + Collections.sort(sortedPartitionNames); | ||
25 | + } | ||
26 | + | ||
27 | + @Override | ||
28 | + public Database getPartition(String tableName, K key) { | ||
29 | + return partitionMap.get(sortedPartitionNames.get(Math.floorMod(key.hashCode(), partitionMap.size()))); | ||
30 | + } | ||
31 | +} |
core/store/dist/src/main/java/org/onosproject/store/consistent/impl/UpdateOperation.java
0 → 100644
1 | +package org.onosproject.store.consistent.impl; | ||
2 | + | ||
3 | +import com.google.common.base.MoreObjects; | ||
4 | + | ||
5 | +/** | ||
6 | + * Database update operation. | ||
7 | + * | ||
8 | + * @param <K> key type. | ||
9 | + * @param <V> value type. | ||
10 | + */ | ||
11 | +public class UpdateOperation<K, V> { | ||
12 | + | ||
13 | + /** | ||
14 | + * Type of database update operation. | ||
15 | + */ | ||
16 | + public static enum Type { | ||
17 | + PUT, | ||
18 | + PUT_IF_ABSENT, | ||
19 | + PUT_IF_VERSION_MATCH, | ||
20 | + PUT_IF_VALUE_MATCH, | ||
21 | + REMOVE, | ||
22 | + REMOVE_IF_VERSION_MATCH, | ||
23 | + REMOVE_IF_VALUE_MATCH, | ||
24 | + } | ||
25 | + | ||
26 | + private Type type; | ||
27 | + private String tableName; | ||
28 | + private K key; | ||
29 | + private V value; | ||
30 | + private V currentValue; | ||
31 | + private long currentVersion; | ||
32 | + | ||
33 | + /** | ||
34 | + * Returns the type of update operation. | ||
35 | + * @return type of update. | ||
36 | + */ | ||
37 | + public Type type() { | ||
38 | + return type; | ||
39 | + } | ||
40 | + | ||
41 | + /** | ||
42 | + * Returns the tableName being updated. | ||
43 | + * @return table name. | ||
44 | + */ | ||
45 | + public String tableName() { | ||
46 | + return tableName; | ||
47 | + } | ||
48 | + | ||
49 | + /** | ||
50 | + * Returns the item key being updated. | ||
51 | + * @return item key | ||
52 | + */ | ||
53 | + public K key() { | ||
54 | + return key; | ||
55 | + } | ||
56 | + | ||
57 | + /** | ||
58 | + * Returns the new value. | ||
59 | + * @return item's target value. | ||
60 | + */ | ||
61 | + public V value() { | ||
62 | + return value; | ||
63 | + } | ||
64 | + | ||
65 | + /** | ||
66 | + * Returns the expected current value in the database for the key. | ||
67 | + * @return current value in database. | ||
68 | + */ | ||
69 | + public V currentValue() { | ||
70 | + return currentValue; | ||
71 | + } | ||
72 | + | ||
73 | + /** | ||
74 | + * Returns the expected current version in the database for the key. | ||
75 | + * @return expected version. | ||
76 | + */ | ||
77 | + public long currentVersion() { | ||
78 | + return currentVersion; | ||
79 | + } | ||
80 | + | ||
81 | + @Override | ||
82 | + public String toString() { | ||
83 | + return MoreObjects.toStringHelper(this) | ||
84 | + .add("type", type) | ||
85 | + .add("tableName", tableName) | ||
86 | + .add("key", key) | ||
87 | + .add("value", value) | ||
88 | + .add("currentValue", currentValue) | ||
89 | + .add("currentVersion", currentVersion) | ||
90 | + .toString(); | ||
91 | + } | ||
92 | + | ||
93 | + /** | ||
94 | + * UpdateOperation builder. | ||
95 | + * | ||
96 | + * @param <K> key type. | ||
97 | + * @param <V> value type. | ||
98 | + */ | ||
99 | + public static final class Builder<K, V> { | ||
100 | + | ||
101 | + private UpdateOperation<K, V> operation = new UpdateOperation<>(); | ||
102 | + | ||
103 | + /** | ||
104 | + * Creates a new builder instance. | ||
105 | + * @param <K> key type. | ||
106 | + * @param <V> value type. | ||
107 | + * | ||
108 | + * @return builder. | ||
109 | + */ | ||
110 | + public static <K, V> Builder<K, V> builder() { | ||
111 | + return new Builder<>(); | ||
112 | + } | ||
113 | + | ||
114 | + private Builder() { | ||
115 | + } | ||
116 | + | ||
117 | + public UpdateOperation<K, V> build() { | ||
118 | + return operation; | ||
119 | + } | ||
120 | + | ||
121 | + public Builder<K, V> withType(Type type) { | ||
122 | + operation.type = type; | ||
123 | + return this; | ||
124 | + } | ||
125 | + | ||
126 | + public Builder<K, V> withTableName(String tableName) { | ||
127 | + operation.tableName = tableName; | ||
128 | + return this; | ||
129 | + } | ||
130 | + | ||
131 | + public Builder<K, V> withKey(K key) { | ||
132 | + operation.key = key; | ||
133 | + return this; | ||
134 | + } | ||
135 | + | ||
136 | + public Builder<K, V> withCurrentValue(V value) { | ||
137 | + operation.currentValue = value; | ||
138 | + return this; | ||
139 | + } | ||
140 | + | ||
141 | + public Builder<K, V> withValue(V value) { | ||
142 | + operation.value = value; | ||
143 | + return this; | ||
144 | + } | ||
145 | + | ||
146 | + public Builder<K, V> withCurrentVersion(long version) { | ||
147 | + operation.currentVersion = version; | ||
148 | + return this; | ||
149 | + } | ||
150 | + } | ||
151 | +} |
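A sketch of composing an atomic batch with the builder and handing it to atomicBatchUpdate. The table, keys, values and version are illustrative, imports are omitted, and with the current PartitionedDatabase all keys in one batch must hash to the same partition.

    List<UpdateOperation<String, byte[]>> batch = Arrays.asList(
            UpdateOperation.Builder.<String, byte[]>builder()
                    .withType(UpdateOperation.Type.PUT)
                    .withTableName("flows")
                    .withKey("flow-1")
                    .withValue(flow1Bytes)
                    .build(),
            UpdateOperation.Builder.<String, byte[]>builder()
                    .withType(UpdateOperation.Type.PUT_IF_VERSION_MATCH)
                    .withTableName("flows")
                    .withKey("flow-2")
                    .withCurrentVersion(7L)                 // applied only if flow-2 is still at version 7
                    .withValue(flow2Bytes)
                    .build());

    // Either every update in the batch is applied or none is; false means a precondition above failed.
    CompletableFuture<Boolean> committed = database.atomicBatchUpdate(batch);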
core/store/dist/src/main/java/org/onosproject/store/consistent/impl/Versioned.java
0 → 100644
1 | +package org.onosproject.store.consistent.impl; | ||
2 | + | ||
3 | +import com.google.common.base.MoreObjects; | ||
4 | + | ||
5 | +/** | ||
6 | + * Versioned value. | ||
7 | + * | ||
8 | + * @param <V> value type. | ||
9 | + */ | ||
10 | +public class Versioned<V> { | ||
11 | + | ||
12 | + private final V value; | ||
13 | + private final long version; | ||
14 | + | ||
15 | + /** | ||
16 | + * Constructs a new versioned value. | ||
17 | + * @param value value | ||
18 | + * @param version version | ||
19 | + */ | ||
20 | + public Versioned(V value, long version) { | ||
21 | + this.value = value; | ||
22 | + this.version = version; | ||
23 | + } | ||
24 | + | ||
25 | + /** | ||
26 | + * Returns the value. | ||
27 | + * | ||
28 | + * @return value. | ||
29 | + */ | ||
30 | + public V value() { | ||
31 | + return value; | ||
32 | + } | ||
33 | + | ||
34 | + /** | ||
35 | + * Returns the version. | ||
36 | + * | ||
37 | + * @return version | ||
38 | + */ | ||
39 | + public long version() { | ||
40 | + return version; | ||
41 | + } | ||
42 | + | ||
43 | + @Override | ||
44 | + public String toString() { | ||
45 | + return MoreObjects.toStringHelper(this) | ||
46 | + .add("value", value) | ||
47 | + .add("version", version) | ||
48 | + .toString(); | ||
49 | + } | ||
50 | +} |
core/store/dist/src/main/java/org/onosproject/store/service/impl/ClusterMessagingProtocol.java
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.service.impl; | ||
17 | - | ||
18 | -import static org.slf4j.LoggerFactory.getLogger; | ||
19 | - | ||
20 | -import java.util.Vector; | ||
21 | - | ||
22 | -import net.kuujo.copycat.cluster.TcpClusterConfig; | ||
23 | -import net.kuujo.copycat.cluster.TcpMember; | ||
24 | -import net.kuujo.copycat.event.LeaderElectEvent; | ||
25 | -import net.kuujo.copycat.internal.log.ConfigurationEntry; | ||
26 | -import net.kuujo.copycat.internal.log.CopycatEntry; | ||
27 | -import net.kuujo.copycat.internal.log.OperationEntry; | ||
28 | -import net.kuujo.copycat.internal.log.SnapshotEntry; | ||
29 | -import net.kuujo.copycat.protocol.PingRequest; | ||
30 | -import net.kuujo.copycat.protocol.PingResponse; | ||
31 | -import net.kuujo.copycat.protocol.PollRequest; | ||
32 | -import net.kuujo.copycat.protocol.PollResponse; | ||
33 | -import net.kuujo.copycat.protocol.Response.Status; | ||
34 | -import net.kuujo.copycat.protocol.SubmitRequest; | ||
35 | -import net.kuujo.copycat.protocol.SubmitResponse; | ||
36 | -import net.kuujo.copycat.protocol.SyncRequest; | ||
37 | -import net.kuujo.copycat.protocol.SyncResponse; | ||
38 | -import net.kuujo.copycat.spi.protocol.Protocol; | ||
39 | -import net.kuujo.copycat.spi.protocol.ProtocolClient; | ||
40 | -import net.kuujo.copycat.spi.protocol.ProtocolServer; | ||
41 | - | ||
42 | -import org.apache.felix.scr.annotations.Activate; | ||
43 | -import org.apache.felix.scr.annotations.Component; | ||
44 | -import org.apache.felix.scr.annotations.Deactivate; | ||
45 | -import org.apache.felix.scr.annotations.Reference; | ||
46 | -import org.apache.felix.scr.annotations.ReferenceCardinality; | ||
47 | -import org.apache.felix.scr.annotations.Service; | ||
48 | -import org.onosproject.cluster.ClusterService; | ||
49 | -import org.onosproject.store.cluster.messaging.ClusterCommunicationService; | ||
50 | -import org.onosproject.store.cluster.messaging.MessageSubject; | ||
51 | -import org.onosproject.store.serializers.KryoNamespaces; | ||
52 | -import org.onosproject.store.serializers.KryoSerializer; | ||
53 | -import org.onosproject.store.serializers.StoreSerializer; | ||
54 | -import org.onosproject.store.service.impl.DatabaseStateMachine.State; | ||
55 | -import org.onosproject.store.service.impl.DatabaseStateMachine.TableMetadata; | ||
56 | -import org.onlab.util.KryoNamespace; | ||
57 | -import org.slf4j.Logger; | ||
58 | - | ||
59 | -/** | ||
60 | - * ONOS Cluster messaging based Copycat protocol. | ||
61 | - */ | ||
62 | -@Component(immediate = false) | ||
63 | -@Service | ||
64 | -public class ClusterMessagingProtocol | ||
65 | - implements DatabaseProtocolService, Protocol<TcpMember> { | ||
66 | - | ||
67 | - private final Logger log = getLogger(getClass()); | ||
68 | - | ||
69 | - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
70 | - protected ClusterService clusterService; | ||
71 | - | ||
72 | - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
73 | - protected ClusterCommunicationService clusterCommunicator; | ||
74 | - | ||
75 | - public static final MessageSubject COPYCAT_PING = | ||
76 | - new MessageSubject("copycat-raft-consensus-ping"); | ||
77 | - public static final MessageSubject COPYCAT_SYNC = | ||
78 | - new MessageSubject("copycat-raft-consensus-sync"); | ||
79 | - public static final MessageSubject COPYCAT_POLL = | ||
80 | - new MessageSubject("copycat-raft-consensus-poll"); | ||
81 | - public static final MessageSubject COPYCAT_SUBMIT = | ||
82 | - new MessageSubject("copycat-raft-consensus-submit"); | ||
83 | - | ||
84 | - static final int AFTER_COPYCAT = KryoNamespaces.BEGIN_USER_CUSTOM_ID + 50; | ||
85 | - | ||
86 | - static final KryoNamespace COPYCAT = KryoNamespace.newBuilder() | ||
87 | - .register(KryoNamespaces.API) | ||
88 | - .nextId(KryoNamespaces.BEGIN_USER_CUSTOM_ID) | ||
89 | - .register(PingRequest.class) | ||
90 | - .register(PingResponse.class) | ||
91 | - .register(PollRequest.class) | ||
92 | - .register(PollResponse.class) | ||
93 | - .register(SyncRequest.class) | ||
94 | - .register(SyncResponse.class) | ||
95 | - .register(SubmitRequest.class) | ||
96 | - .register(SubmitResponse.class) | ||
97 | - .register(Status.class) | ||
98 | - .register(ConfigurationEntry.class) | ||
99 | - .register(SnapshotEntry.class) | ||
100 | - .register(CopycatEntry.class) | ||
101 | - .register(OperationEntry.class) | ||
102 | - .register(TcpClusterConfig.class) | ||
103 | - .register(TcpMember.class) | ||
104 | - .register(LeaderElectEvent.class) | ||
105 | - .register(Vector.class) | ||
106 | - .build(); | ||
107 | - | ||
108 | - // serializer used for CopyCat Protocol | ||
109 | - public static final StoreSerializer DB_SERIALIZER = new KryoSerializer() { | ||
110 | - @Override | ||
111 | - protected void setupKryoPool() { | ||
112 | - serializerPool = KryoNamespace.newBuilder() | ||
113 | - .register(COPYCAT) | ||
114 | - .nextId(AFTER_COPYCAT) | ||
115 | - // for snapshot | ||
116 | - .register(State.class) | ||
117 | - .register(TableMetadata.class) | ||
118 | - // TODO: Move this out to API? | ||
119 | - .register(TableModificationEvent.class) | ||
120 | - .register(TableModificationEvent.Type.class) | ||
121 | - .build(); | ||
122 | - } | ||
123 | - }; | ||
124 | - | ||
125 | - @Activate | ||
126 | - public void activate() { | ||
127 | - log.info("Started"); | ||
128 | - } | ||
129 | - | ||
130 | - @Deactivate | ||
131 | - public void deactivate() { | ||
132 | - log.info("Stopped"); | ||
133 | - } | ||
134 | - | ||
135 | - @Override | ||
136 | - public ProtocolServer createServer(TcpMember member) { | ||
137 | - return new ClusterMessagingProtocolServer(clusterCommunicator); | ||
138 | - } | ||
139 | - | ||
140 | - @Override | ||
141 | - public ProtocolClient createClient(TcpMember member) { | ||
142 | - return new ClusterMessagingProtocolClient(clusterService, | ||
143 | - clusterCommunicator, | ||
144 | - clusterService.getLocalNode(), | ||
145 | - member); | ||
146 | - } | ||
147 | -} |
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.service.impl; | ||
17 | - | ||
18 | -import static com.google.common.base.Verify.verifyNotNull; | ||
19 | -import static org.onosproject.store.service.impl.ClusterMessagingProtocol.DB_SERIALIZER; | ||
20 | -import static org.onlab.util.Tools.namedThreads; | ||
21 | -import static org.slf4j.LoggerFactory.getLogger; | ||
22 | -import static java.util.concurrent.Executors.newCachedThreadPool; | ||
23 | - | ||
24 | -import java.io.IOException; | ||
25 | -import java.time.Duration; | ||
26 | -import java.util.concurrent.CompletableFuture; | ||
27 | -import java.util.concurrent.ExecutionException; | ||
28 | -import java.util.concurrent.ExecutorService; | ||
29 | -import java.util.concurrent.TimeUnit; | ||
30 | -import java.util.concurrent.TimeoutException; | ||
31 | -import java.util.concurrent.atomic.AtomicBoolean; | ||
32 | - | ||
33 | -import net.kuujo.copycat.cluster.TcpMember; | ||
34 | -import net.kuujo.copycat.protocol.PingRequest; | ||
35 | -import net.kuujo.copycat.protocol.PingResponse; | ||
36 | -import net.kuujo.copycat.protocol.PollRequest; | ||
37 | -import net.kuujo.copycat.protocol.PollResponse; | ||
38 | -import net.kuujo.copycat.protocol.SubmitRequest; | ||
39 | -import net.kuujo.copycat.protocol.SubmitResponse; | ||
40 | -import net.kuujo.copycat.protocol.SyncRequest; | ||
41 | -import net.kuujo.copycat.protocol.SyncResponse; | ||
42 | -import net.kuujo.copycat.spi.protocol.ProtocolClient; | ||
43 | - | ||
44 | -import org.onosproject.cluster.ClusterService; | ||
45 | -import org.onosproject.cluster.ControllerNode; | ||
46 | -import org.onosproject.store.cluster.messaging.ClusterCommunicationService; | ||
47 | -import org.onosproject.store.cluster.messaging.ClusterMessage; | ||
48 | -import org.onosproject.store.cluster.messaging.MessageSubject; | ||
49 | -import org.slf4j.Logger; | ||
50 | - | ||
51 | -/** | ||
52 | - * ONOS Cluster messaging based Copycat protocol client. | ||
53 | - */ | ||
54 | -public class ClusterMessagingProtocolClient implements ProtocolClient { | ||
55 | - | ||
56 | - private final Logger log = getLogger(getClass()); | ||
57 | - | ||
58 | - public static final Duration RETRY_INTERVAL = Duration.ofMillis(2000); | ||
59 | - | ||
60 | - private final ClusterService clusterService; | ||
61 | - private final ClusterCommunicationService clusterCommunicator; | ||
62 | - private final ControllerNode localNode; | ||
63 | - private final TcpMember remoteMember; | ||
64 | - | ||
65 | - private ControllerNode remoteNode; | ||
66 | - private final AtomicBoolean connectionOK = new AtomicBoolean(true); | ||
67 | - | ||
68 | - private ExecutorService pool; | ||
69 | - | ||
70 | - public ClusterMessagingProtocolClient( | ||
71 | - ClusterService clusterService, | ||
72 | - ClusterCommunicationService clusterCommunicator, | ||
73 | - ControllerNode localNode, | ||
74 | - TcpMember remoteMember) { | ||
75 | - | ||
76 | - this.clusterService = clusterService; | ||
77 | - this.clusterCommunicator = clusterCommunicator; | ||
78 | - this.localNode = localNode; | ||
79 | - this.remoteMember = remoteMember; | ||
80 | - } | ||
81 | - | ||
82 | - @Override | ||
83 | - public CompletableFuture<PingResponse> ping(PingRequest request) { | ||
84 | - return requestReply(request); | ||
85 | - } | ||
86 | - | ||
87 | - @Override | ||
88 | - public CompletableFuture<SyncResponse> sync(SyncRequest request) { | ||
89 | - return requestReply(request); | ||
90 | - } | ||
91 | - | ||
92 | - @Override | ||
93 | - public CompletableFuture<PollResponse> poll(PollRequest request) { | ||
94 | - return requestReply(request); | ||
95 | - } | ||
96 | - | ||
97 | - @Override | ||
98 | - public CompletableFuture<SubmitResponse> submit(SubmitRequest request) { | ||
99 | - return requestReply(request); | ||
100 | - } | ||
101 | - | ||
102 | - @Override | ||
103 | - public synchronized CompletableFuture<Void> connect() { | ||
104 | - if (pool == null || pool.isShutdown()) { | ||
105 | - // TODO include remote name? | ||
106 | - pool = newCachedThreadPool(namedThreads("onos-copycat-netty-messaging-client-%d")); | ||
107 | - } | ||
108 | - return CompletableFuture.completedFuture(null); | ||
109 | - } | ||
110 | - | ||
111 | - @Override | ||
112 | - public synchronized CompletableFuture<Void> close() { | ||
113 | - if (pool != null) { | ||
114 | - pool.shutdownNow(); | ||
115 | - pool = null; | ||
116 | - } | ||
117 | - return CompletableFuture.completedFuture(null); | ||
118 | - } | ||
119 | - | ||
120 | - private <I> MessageSubject messageType(I input) { | ||
121 | - Class<?> clazz = input.getClass(); | ||
122 | - if (clazz.equals(PollRequest.class)) { | ||
123 | - return ClusterMessagingProtocol.COPYCAT_POLL; | ||
124 | - } else if (clazz.equals(SyncRequest.class)) { | ||
125 | - return ClusterMessagingProtocol.COPYCAT_SYNC; | ||
126 | - } else if (clazz.equals(SubmitRequest.class)) { | ||
127 | - return ClusterMessagingProtocol.COPYCAT_SUBMIT; | ||
128 | - } else if (clazz.equals(PingRequest.class)) { | ||
129 | - return ClusterMessagingProtocol.COPYCAT_PING; | ||
130 | - } else { | ||
131 | - throw new IllegalArgumentException("Unknown class " + clazz.getName()); | ||
132 | - } | ||
133 | - } | ||
134 | - | ||
135 | - private <I, O> CompletableFuture<O> requestReply(I request) { | ||
136 | - CompletableFuture<O> future = new CompletableFuture<>(); | ||
137 | - if (pool == null) { | ||
138 | - log.info("Attempted to use closed client, connecting now. {}", request); | ||
139 | - connect(); | ||
140 | - } | ||
141 | - pool.submit(new RPCTask<I, O>(request, future)); | ||
142 | - return future; | ||
143 | - } | ||
144 | - | ||
145 | - private ControllerNode getControllerNode(TcpMember remoteMember) { | ||
146 | - final String host = remoteMember.host(); | ||
147 | - final int port = remoteMember.port(); | ||
148 | - for (ControllerNode node : clusterService.getNodes()) { | ||
149 | - if (node.ip().toString().equals(host) && node.tcpPort() == port) { | ||
150 | - return node; | ||
151 | - } | ||
152 | - } | ||
153 | - return null; | ||
154 | - } | ||
155 | - | ||
156 | - private class RPCTask<I, O> implements Runnable { | ||
157 | - | ||
158 | - private final I request; | ||
159 | - private final ClusterMessage message; | ||
160 | - private final CompletableFuture<O> future; | ||
161 | - | ||
162 | - public RPCTask(I request, CompletableFuture<O> future) { | ||
163 | - this.request = request; | ||
164 | - this.message = | ||
165 | - new ClusterMessage( | ||
166 | - localNode.id(), | ||
167 | - messageType(request), | ||
168 | - verifyNotNull(DB_SERIALIZER.encode(request))); | ||
169 | - this.future = future; | ||
170 | - } | ||
171 | - | ||
172 | - @Override | ||
173 | - public void run() { | ||
174 | - try { | ||
175 | - if (remoteNode == null) { | ||
176 | - remoteNode = getControllerNode(remoteMember); | ||
177 | - if (remoteNode == null) { | ||
178 | - throw new IOException("Remote node is offline!"); | ||
179 | - } | ||
180 | - } | ||
181 | - byte[] response = clusterCommunicator | ||
182 | - .sendAndReceive(message, remoteNode.id()) | ||
183 | - .get(RETRY_INTERVAL.toMillis(), TimeUnit.MILLISECONDS); | ||
184 | - if (!connectionOK.getAndSet(true)) { | ||
185 | - log.info("Connectivity to {} restored", remoteNode); | ||
186 | - } | ||
187 | - future.complete(verifyNotNull(DB_SERIALIZER.decode(response))); | ||
188 | - | ||
189 | - } catch (IOException | TimeoutException e) { | ||
190 | - if (connectionOK.getAndSet(false)) { | ||
191 | - log.warn("Detected connectivity issues with {}. Reason: {}", remoteNode, e.getMessage()); | ||
192 | - } | ||
193 | - log.debug("RPCTask for {} failed.", request, e); | ||
194 | - future.completeExceptionally(e); | ||
195 | - } catch (ExecutionException e) { | ||
196 | - log.warn("RPCTask execution for {} failed: {}", request, e.getMessage()); | ||
197 | - log.debug("RPCTask execution for {} failed.", request, e); | ||
198 | - future.completeExceptionally(e); | ||
199 | - } catch (InterruptedException e) { | ||
200 | - log.warn("RPCTask for {} was interrupted: {}", request, e.getMessage()); | ||
201 | - log.debug("RPCTask for {} was interrupted.", request, e); | ||
202 | - future.completeExceptionally(e); | ||
203 | - Thread.currentThread().interrupt(); | ||
204 | - } catch (Exception e) { | ||
205 | - log.warn("RPCTask for {} terribly failed.", request, e); | ||
206 | - future.completeExceptionally(e); | ||
207 | - } | ||
208 | - } | ||
209 | - } | ||
210 | -} |
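The removed client above boils down to a request/reply-over-messaging shape: encode the request, send it to the remote node, and complete a CompletableFuture with the decoded reply or with the failure. A generic sketch of that shape follows; the Transport interface and the encode/decode functions are hypothetical stand-ins for the cluster communication service and the Kryo serializer.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Function;

// Generic sketch of the requestReply(...) shape used by the removed client.
// Transport, encode and decode are stand-ins, not ONOS APIs.
public final class RequestReplySketch {

    interface Transport {
        byte[] sendAndReceive(byte[] payload) throws Exception;
    }

    private final Transport transport;
    private final ExecutorService pool = Executors.newCachedThreadPool();

    RequestReplySketch(Transport transport) {
        this.transport = transport;
    }

    <I, O> CompletableFuture<O> requestReply(I request,
                                             Function<I, byte[]> encode,
                                             Function<byte[], O> decode) {
        CompletableFuture<O> future = new CompletableFuture<>();
        pool.submit(() -> {
            try {
                // Encode off the caller's thread, block on the wire, then decode.
                byte[] reply = transport.sendAndReceive(encode.apply(request));
                future.complete(decode.apply(reply));
            } catch (Exception e) {
                // Failures (timeouts, I/O errors) surface through the future.
                future.completeExceptionally(e);
            }
        });
        return future;
    }
}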
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.service.impl; | ||
17 | - | ||
18 | -import static java.util.concurrent.Executors.newCachedThreadPool; | ||
19 | -import static org.onlab.util.Tools.namedThreads; | ||
20 | -import static org.slf4j.LoggerFactory.getLogger; | ||
21 | -import static org.onosproject.store.service.impl.ClusterMessagingProtocol.*; | ||
22 | -import static org.onosproject.store.service.impl.ClusterMessagingProtocol.DB_SERIALIZER; | ||
23 | - | ||
24 | -import java.util.concurrent.CompletableFuture; | ||
25 | -import java.util.concurrent.ExecutorService; | ||
26 | -import java.util.function.BiConsumer; | ||
27 | - | ||
28 | -import net.kuujo.copycat.protocol.PingRequest; | ||
29 | -import net.kuujo.copycat.protocol.PollRequest; | ||
30 | -import net.kuujo.copycat.protocol.RequestHandler; | ||
31 | -import net.kuujo.copycat.protocol.SubmitRequest; | ||
32 | -import net.kuujo.copycat.protocol.SyncRequest; | ||
33 | -import net.kuujo.copycat.spi.protocol.ProtocolServer; | ||
34 | - | ||
35 | -import org.onosproject.store.cluster.messaging.ClusterCommunicationService; | ||
36 | -import org.onosproject.store.cluster.messaging.ClusterMessage; | ||
37 | -import org.onosproject.store.cluster.messaging.ClusterMessageHandler; | ||
38 | -import org.slf4j.Logger; | ||
39 | - | ||
40 | -/** | ||
41 | - * ONOS Cluster messaging based Copycat protocol server. | ||
42 | - */ | ||
43 | -public class ClusterMessagingProtocolServer implements ProtocolServer { | ||
44 | - | ||
45 | - private final Logger log = getLogger(getClass()); | ||
46 | - | ||
47 | - private final ClusterCommunicationService clusterCommunicator; | ||
48 | - | ||
49 | - private volatile RequestHandler handler; | ||
50 | - | ||
51 | - private ExecutorService pool; | ||
52 | - | ||
53 | - public ClusterMessagingProtocolServer(ClusterCommunicationService clusterCommunicator) { | ||
54 | - this.clusterCommunicator = clusterCommunicator; | ||
55 | - } | ||
56 | - | ||
57 | - @Override | ||
58 | - public void requestHandler(RequestHandler handler) { | ||
59 | - this.handler = handler; | ||
60 | - } | ||
61 | - | ||
62 | - @Override | ||
63 | - public CompletableFuture<Void> listen() { | ||
64 | - if (pool == null || pool.isShutdown()) { | ||
65 | - pool = newCachedThreadPool(namedThreads("onos-copycat-netty-messaging-server-%d")); | ||
66 | - } | ||
67 | - | ||
68 | - clusterCommunicator.addSubscriber(COPYCAT_PING, new PingHandler()); | ||
69 | - clusterCommunicator.addSubscriber(COPYCAT_SYNC, new SyncHandler()); | ||
70 | - clusterCommunicator.addSubscriber(COPYCAT_POLL, new PollHandler()); | ||
71 | - clusterCommunicator.addSubscriber(COPYCAT_SUBMIT, new SubmitHandler()); | ||
72 | - return CompletableFuture.completedFuture(null); | ||
73 | - } | ||
74 | - | ||
75 | - @Override | ||
76 | - public CompletableFuture<Void> close() { | ||
77 | - clusterCommunicator.removeSubscriber(COPYCAT_PING); | ||
78 | - clusterCommunicator.removeSubscriber(COPYCAT_SYNC); | ||
79 | - clusterCommunicator.removeSubscriber(COPYCAT_POLL); | ||
80 | - clusterCommunicator.removeSubscriber(COPYCAT_SUBMIT); | ||
81 | - if (pool != null) { | ||
82 | - pool.shutdownNow(); | ||
83 | - pool = null; | ||
84 | - } | ||
85 | - return CompletableFuture.completedFuture(null); | ||
86 | - } | ||
87 | - | ||
88 | - private final class PingHandler extends CopycatMessageHandler<PingRequest> { | ||
89 | - | ||
90 | - @Override | ||
91 | - public void raftHandle(PingRequest request, ClusterMessage message) { | ||
92 | - pool.submit(new Runnable() { | ||
93 | - | ||
94 | - @Override | ||
95 | - public void run() { | ||
96 | - currentHandler().ping(request) | ||
97 | - .whenComplete(new PostExecutionTask<>(message)); | ||
98 | - } | ||
99 | - }); | ||
100 | - } | ||
101 | - } | ||
102 | - | ||
103 | - private final class SyncHandler extends CopycatMessageHandler<SyncRequest> { | ||
104 | - | ||
105 | - @Override | ||
106 | - public void raftHandle(SyncRequest request, ClusterMessage message) { | ||
107 | - pool.submit(new Runnable() { | ||
108 | - | ||
109 | - @Override | ||
110 | - public void run() { | ||
111 | - currentHandler().sync(request) | ||
112 | - .whenComplete(new PostExecutionTask<>(message)); | ||
113 | - } | ||
114 | - }); | ||
115 | - } | ||
116 | - } | ||
117 | - | ||
118 | - private final class PollHandler extends CopycatMessageHandler<PollRequest> { | ||
119 | - | ||
120 | - @Override | ||
121 | - public void raftHandle(PollRequest request, ClusterMessage message) { | ||
122 | - pool.submit(new Runnable() { | ||
123 | - | ||
124 | - @Override | ||
125 | - public void run() { | ||
126 | - currentHandler().poll(request) | ||
127 | - .whenComplete(new PostExecutionTask<>(message)); | ||
128 | - } | ||
129 | - }); | ||
130 | - } | ||
131 | - } | ||
132 | - | ||
133 | - private final class SubmitHandler extends CopycatMessageHandler<SubmitRequest> { | ||
134 | - | ||
135 | - @Override | ||
136 | - public void raftHandle(SubmitRequest request, ClusterMessage message) { | ||
137 | - pool.submit(new Runnable() { | ||
138 | - | ||
139 | - @Override | ||
140 | - public void run() { | ||
141 | - currentHandler().submit(request) | ||
142 | - .whenComplete(new PostExecutionTask<>(message)); | ||
143 | - } | ||
144 | - }); | ||
145 | - } | ||
146 | - } | ||
147 | - | ||
148 | - private abstract class CopycatMessageHandler<T> implements ClusterMessageHandler { | ||
149 | - | ||
150 | - public abstract void raftHandle(T request, ClusterMessage message); | ||
151 | - | ||
152 | - @Override | ||
153 | - public void handle(ClusterMessage message) { | ||
154 | - T request = DB_SERIALIZER.decode(message.payload()); | ||
155 | - raftHandle(request, message); | ||
156 | - } | ||
157 | - | ||
158 | - RequestHandler currentHandler() { | ||
159 | - RequestHandler currentHandler = handler; | ||
160 | - if (currentHandler == null) { | ||
161 | - // there is a slight window of time during state transition, | ||
162 | - // where handler becomes null | ||
163 | - long sleepMs = 1; | ||
164 | - for (int i = 0; i < 10; ++i) { | ||
165 | - currentHandler = handler; | ||
166 | - if (currentHandler != null) { | ||
167 | - break; | ||
168 | - } | ||
169 | - try { | ||
170 | - sleepMs <<= 1; | ||
171 | - Thread.sleep(sleepMs); | ||
172 | - } catch (InterruptedException e) { | ||
173 | - log.error("Interrupted", e); | ||
174 | - return handler; | ||
175 | - } | ||
176 | - } | ||
177 | - if (currentHandler == null) { | ||
178 | - log.error("There was no handler registered!"); | ||
179 | - return handler; | ||
180 | - } | ||
181 | - } | ||
182 | - return currentHandler; | ||
183 | - } | ||
184 | - | ||
185 | - final class PostExecutionTask<R> implements BiConsumer<R, Throwable> { | ||
186 | - | ||
187 | - private final ClusterMessage message; | ||
188 | - | ||
189 | - public PostExecutionTask(ClusterMessage message) { | ||
190 | - this.message = message; | ||
191 | - } | ||
192 | - | ||
193 | - @Override | ||
194 | - public void accept(R response, Throwable error) { | ||
195 | - if (error != null) { | ||
196 | - log.error("Processing {} failed.", message.subject(), error); | ||
197 | - } else { | ||
198 | - try { | ||
199 | - log.trace("responding to {}", message.subject()); | ||
200 | - message.respond(DB_SERIALIZER.encode(response)); | ||
201 | - } catch (Exception e) { | ||
202 | - log.error("Failed responding with {}", response.getClass().getName(), e); | ||
203 | - } | ||
204 | - } | ||
205 | - } | ||
206 | - } | ||
207 | - } | ||
208 | -} |
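currentHandler() above waits out the brief window where the request handler is unset by retrying with a doubling sleep. A generic sketch of that bounded exponential backoff, with a stand-in Supplier in place of the handler field:

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

// Generic sketch of the bounded exponential backoff performed while a state
// transition briefly leaves the handler unset.
public final class BackoffLookup {

    static <T> T awaitNonNull(Supplier<T> source, int maxAttempts) throws InterruptedException {
        long sleepMs = 1;
        for (int i = 0; i < maxAttempts; i++) {
            T value = source.get();
            if (value != null) {
                return value;
            }
            sleepMs <<= 1;              // double the wait each round: 2, 4, 8, ... ms
            Thread.sleep(sleepMs);
        }
        return source.get();            // may still be null; caller must handle that
    }

    public static void main(String[] args) throws InterruptedException {
        AtomicReference<String> handler = new AtomicReference<>();
        new Thread(() -> {
            try {
                Thread.sleep(5);        // simulate the state-transition window
            } catch (InterruptedException ignored) {
                Thread.currentThread().interrupt();
            }
            handler.set("ready");
        }).start();
        System.out.println(awaitNonNull(handler::get, 10));
    }
}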
core/store/dist/src/main/java/org/onosproject/store/service/impl/DatabaseClient.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.service.impl; | ||
17 | - | ||
18 | -import static com.google.common.base.Preconditions.checkNotNull; | ||
19 | -import static org.slf4j.LoggerFactory.getLogger; | ||
20 | - | ||
21 | -import java.util.Arrays; | ||
22 | -import java.util.List; | ||
23 | -import java.util.Map; | ||
24 | -import java.util.Set; | ||
25 | -import java.util.UUID; | ||
26 | -import java.util.concurrent.CompletableFuture; | ||
27 | -import java.util.concurrent.ExecutionException; | ||
28 | -import java.util.concurrent.TimeUnit; | ||
29 | -import java.util.concurrent.TimeoutException; | ||
30 | - | ||
31 | -import net.kuujo.copycat.cluster.Member; | ||
32 | -import net.kuujo.copycat.cluster.TcpMember; | ||
33 | -import net.kuujo.copycat.event.LeaderElectEvent; | ||
34 | -import net.kuujo.copycat.protocol.Response.Status; | ||
35 | -import net.kuujo.copycat.protocol.SubmitRequest; | ||
36 | -import net.kuujo.copycat.protocol.SubmitResponse; | ||
37 | -import net.kuujo.copycat.spi.protocol.ProtocolClient; | ||
38 | - | ||
39 | -import org.onosproject.store.cluster.messaging.ClusterMessage; | ||
40 | -import org.onosproject.store.cluster.messaging.ClusterMessageHandler; | ||
41 | -import org.onosproject.store.service.BatchReadRequest; | ||
42 | -import org.onosproject.store.service.BatchWriteRequest; | ||
43 | -import org.onosproject.store.service.DatabaseException; | ||
44 | -import org.onosproject.store.service.ReadResult; | ||
45 | -import org.onosproject.store.service.VersionedValue; | ||
46 | -import org.onosproject.store.service.WriteResult; | ||
47 | -import org.slf4j.Logger; | ||
48 | - | ||
49 | -/** | ||
50 | - * Client for interacting with the Copycat Raft cluster. | ||
51 | - */ | ||
52 | -public class DatabaseClient implements ClusterMessageHandler { | ||
53 | - | ||
54 | - private static final int RETRIES = 5; | ||
55 | - | ||
56 | - private static final int TIMEOUT_MS = 2000; | ||
57 | - | ||
58 | - private final Logger log = getLogger(getClass()); | ||
59 | - | ||
60 | - private final DatabaseProtocolService protocol; | ||
61 | - private volatile ProtocolClient client = null; | ||
62 | - private volatile Member currentLeader = null; | ||
63 | - private volatile long currentLeaderTerm = 0; | ||
64 | - | ||
65 | - public DatabaseClient(DatabaseProtocolService protocol) { | ||
66 | - this.protocol = checkNotNull(protocol); | ||
67 | - } | ||
68 | - | ||
69 | - @Override | ||
70 | - public void handle(ClusterMessage message) { | ||
71 | - LeaderElectEvent event = | ||
72 | - ClusterMessagingProtocol.DB_SERIALIZER.decode(message.payload()); | ||
73 | - TcpMember newLeader = event.leader(); | ||
74 | - long newLeaderTerm = event.term(); | ||
75 | - if (newLeader != null && !newLeader.equals(currentLeader) && newLeaderTerm > currentLeaderTerm) { | ||
76 | - log.info("New leader detected. Leader: {}, term: {}", newLeader, newLeaderTerm); | ||
77 | - ProtocolClient prevClient = client; | ||
78 | - ProtocolClient newClient = protocol.createClient(newLeader); | ||
79 | - newClient.connect(); | ||
80 | - client = newClient; | ||
81 | - currentLeader = newLeader; | ||
82 | - currentLeaderTerm = newLeaderTerm; | ||
83 | - | ||
84 | - if (prevClient != null) { | ||
85 | - prevClient.close(); | ||
86 | - } | ||
87 | - } | ||
88 | - } | ||
89 | - | ||
90 | - private String nextRequestId() { | ||
91 | - return UUID.randomUUID().toString(); | ||
92 | - } | ||
93 | - | ||
94 | - public void waitForLeader() { | ||
95 | - if (currentLeader != null) { | ||
96 | - return; | ||
97 | - } | ||
98 | - | ||
99 | - log.info("No leader in cluster, waiting for election."); | ||
100 | - | ||
101 | - try { | ||
102 | - while (currentLeader == null) { | ||
103 | - Thread.sleep(200); | ||
104 | - } | ||
105 | - return; | ||
106 | - } catch (InterruptedException e) { | ||
107 | - log.error("Interrupted while waiting for Leader", e); | ||
108 | - Thread.currentThread().interrupt(); | ||
109 | - } | ||
110 | - } | ||
111 | - | ||
112 | - private <T> T submit(String operationName, Object... args) { | ||
113 | - waitForLeader(); | ||
114 | - if (currentLeader == null) { | ||
115 | - throw new DatabaseException("Raft cluster does not have a leader."); | ||
116 | - } | ||
117 | - | ||
118 | - SubmitRequest request = | ||
119 | - new SubmitRequest(nextRequestId(), operationName, Arrays.asList(args)); | ||
120 | - | ||
121 | - CompletableFuture<SubmitResponse> submitResponse = client.submit(request); | ||
122 | - | ||
123 | - log.debug("Sent {} to {}", request, currentLeader); | ||
124 | - | ||
125 | - try { | ||
126 | - final SubmitResponse response = submitResponse.get(TIMEOUT_MS, TimeUnit.MILLISECONDS); | ||
127 | - if (response.status() != Status.OK) { | ||
128 | - throw new DatabaseException(response.error()); | ||
129 | - } | ||
130 | - return (T) response.result(); | ||
131 | - } catch (ExecutionException | InterruptedException e) { | ||
132 | - throw new DatabaseException(e); | ||
133 | - } catch (TimeoutException e) { | ||
134 | - throw new DatabaseException.Timeout(e); | ||
135 | - } | ||
136 | - } | ||
137 | - | ||
138 | - public boolean createTable(String tableName) { | ||
139 | - return submit("createTable", tableName); | ||
140 | - } | ||
141 | - | ||
142 | - public boolean createTable(String tableName, int ttlMillis) { | ||
143 | - return submit("createTable", tableName, ttlMillis); | ||
144 | - } | ||
145 | - | ||
146 | - public void dropTable(String tableName) { | ||
147 | - submit("dropTable", tableName); | ||
148 | - } | ||
149 | - | ||
150 | - public void dropAllTables() { | ||
151 | - submit("dropAllTables"); | ||
152 | - } | ||
153 | - | ||
154 | - public Set<String> listTables() { | ||
155 | - return submit("listTables"); | ||
156 | - } | ||
157 | - | ||
158 | - public List<ReadResult> batchRead(BatchReadRequest batchRequest) { | ||
159 | - return submit("read", batchRequest); | ||
160 | - } | ||
161 | - | ||
162 | - public List<WriteResult> batchWrite(BatchWriteRequest batchRequest) { | ||
163 | - return submit("write", batchRequest); | ||
164 | - } | ||
165 | - | ||
166 | - public Map<String, VersionedValue> getAll(String tableName) { | ||
167 | - return submit("getAll", tableName); | ||
168 | - } | ||
169 | - | ||
170 | - Member getCurrentLeader() { | ||
171 | - return currentLeader; | ||
172 | - } | ||
173 | -} |
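submit(...) above blocks on the asynchronous response with a deadline and maps each failure mode to a distinct exception. A generic sketch of that shape; StoreException below is a hypothetical stand-in for DatabaseException and its Timeout subclass, not the actual ONOS types.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

// Generic sketch of the blocking submit(...) shape above: wait for the async
// result with a deadline and translate each failure mode into a domain exception.
public final class BlockingSubmitSketch {

    static class StoreException extends RuntimeException {
        StoreException(Throwable cause) { super(cause); }
        static class Timeout extends StoreException {
            Timeout(Throwable cause) { super(cause); }
        }
    }

    static <T> T awaitResult(CompletableFuture<T> response, long timeoutMs) {
        try {
            return response.get(timeoutMs, TimeUnit.MILLISECONDS);
        } catch (ExecutionException | InterruptedException e) {
            throw new StoreException(e);
        } catch (TimeoutException e) {
            throw new StoreException.Timeout(e);
        }
    }

    public static void main(String[] args) {
        System.out.println(awaitResult(CompletableFuture.completedFuture("ok"), 2000));
    }
}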
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | - | ||
17 | -package org.onosproject.store.service.impl; | ||
18 | - | ||
19 | -import com.google.common.base.MoreObjects; | ||
20 | -import net.jodah.expiringmap.ExpiringMap; | ||
21 | -import net.jodah.expiringmap.ExpiringMap.ExpirationListener; | ||
22 | -import net.jodah.expiringmap.ExpiringMap.ExpirationPolicy; | ||
23 | -import net.kuujo.copycat.cluster.Member; | ||
24 | -import net.kuujo.copycat.event.EventHandler; | ||
25 | -import net.kuujo.copycat.event.LeaderElectEvent; | ||
26 | -import org.onosproject.cluster.ControllerNode; | ||
27 | -import org.onosproject.store.cluster.messaging.ClusterCommunicationService; | ||
28 | -import org.onosproject.store.cluster.messaging.ClusterMessage; | ||
29 | -import org.onosproject.store.service.DatabaseService; | ||
30 | -import org.onosproject.store.service.VersionedValue; | ||
31 | -import org.onosproject.store.service.impl.DatabaseStateMachine.State; | ||
32 | -import org.onosproject.store.service.impl.DatabaseStateMachine.TableMetadata; | ||
33 | -import org.slf4j.Logger; | ||
34 | -import org.slf4j.LoggerFactory; | ||
35 | - | ||
36 | -import java.util.HashMap; | ||
37 | -import java.util.Map; | ||
38 | -import java.util.Objects; | ||
39 | -import java.util.concurrent.ExecutorService; | ||
40 | -import java.util.concurrent.Executors; | ||
41 | -import java.util.concurrent.TimeUnit; | ||
42 | -import java.util.concurrent.atomic.AtomicBoolean; | ||
43 | - | ||
44 | -import static org.onlab.util.Tools.namedThreads; | ||
45 | - | ||
46 | -/** | ||
47 | - * Plugs into the database update stream and track the TTL of entries added to | ||
48 | - * the database. For tables with pre-configured finite TTL, this class has | ||
49 | - * mechanisms for expiring (deleting) old, expired entries from the database. | ||
50 | - */ | ||
51 | -public class DatabaseEntryExpirationTracker implements | ||
52 | - DatabaseUpdateEventListener, EventHandler<LeaderElectEvent> { | ||
53 | - | ||
54 | - private static final ExecutorService THREAD_POOL = | ||
55 | - Executors.newCachedThreadPool(namedThreads("onos-db-stale-entry-expirer-%d")); | ||
56 | - | ||
57 | - private final Logger log = LoggerFactory.getLogger(getClass()); | ||
58 | - | ||
59 | - private final DatabaseService databaseService; | ||
60 | - private final ClusterCommunicationService clusterCommunicator; | ||
61 | - | ||
62 | - private final Member localMember; | ||
63 | - private final ControllerNode localNode; | ||
64 | - private final AtomicBoolean isLocalMemberLeader = new AtomicBoolean(false); | ||
65 | - | ||
66 | - private final Map<String, Map<DatabaseRow, Long>> tableEntryExpirationMap = new HashMap<>(); | ||
67 | - | ||
68 | - private final ExpirationListener<DatabaseRow, Long> expirationObserver = new ExpirationObserver(); | ||
69 | - | ||
70 | - DatabaseEntryExpirationTracker( | ||
71 | - Member localMember, | ||
72 | - ControllerNode localNode, | ||
73 | - ClusterCommunicationService clusterCommunicator, | ||
74 | - DatabaseService databaseService) { | ||
75 | - this.localMember = localMember; | ||
76 | - this.localNode = localNode; | ||
77 | - this.clusterCommunicator = clusterCommunicator; | ||
78 | - this.databaseService = databaseService; | ||
79 | - } | ||
80 | - | ||
81 | - @Override | ||
82 | - public void tableModified(TableModificationEvent event) { | ||
83 | - log.debug("{}: Received {}", localNode.id(), event); | ||
84 | - | ||
85 | - if (!tableEntryExpirationMap.containsKey(event.tableName())) { | ||
86 | - return; | ||
87 | - } | ||
88 | - | ||
89 | - Map<DatabaseRow, Long> map = tableEntryExpirationMap.get(event.tableName()); | ||
90 | - DatabaseRow row = new DatabaseRow(event.tableName(), event.key()); | ||
91 | - Long eventVersion = event.value().version(); | ||
92 | - | ||
93 | - switch (event.type()) { | ||
94 | - case ROW_DELETED: | ||
95 | - map.remove(row, eventVersion); | ||
96 | - if (isLocalMemberLeader.get()) { | ||
97 | - log.debug("Broadcasting {} to the entire cluster", event); | ||
98 | - clusterCommunicator.broadcastIncludeSelf(new ClusterMessage( | ||
99 | - localNode.id(), DatabaseStateMachine.DATABASE_UPDATE_EVENTS, | ||
100 | - ClusterMessagingProtocol.DB_SERIALIZER.encode(event))); | ||
101 | - } | ||
102 | - break; | ||
103 | - case ROW_ADDED: | ||
104 | - case ROW_UPDATED: | ||
105 | - // To account for potential reordering of notifications, | ||
106 | - // check to make sure we are replacing an old version with a new version | ||
107 | - Long currentVersion = map.get(row); | ||
108 | - if (currentVersion == null || currentVersion < eventVersion) { | ||
109 | - map.put(row, eventVersion); | ||
110 | - } | ||
111 | - break; | ||
112 | - default: | ||
113 | - break; | ||
114 | - } | ||
115 | - } | ||
116 | - | ||
117 | - @Override | ||
118 | - public void tableCreated(TableMetadata metadata) { | ||
119 | - log.debug("Received a table created event {}", metadata); | ||
120 | - if (metadata.expireOldEntries()) { | ||
121 | - tableEntryExpirationMap.put(metadata.tableName(), ExpiringMap.builder() | ||
122 | - .expiration(metadata.ttlMillis(), TimeUnit.MILLISECONDS) | ||
123 | - .expirationListener(expirationObserver) | ||
124 | - .expirationPolicy(ExpirationPolicy.CREATED).build()); | ||
125 | - } | ||
126 | - } | ||
127 | - | ||
128 | - @Override | ||
129 | - public void tableDeleted(String tableName) { | ||
130 | - log.debug("Received a table deleted event for table ({})", tableName); | ||
131 | - tableEntryExpirationMap.remove(tableName); | ||
132 | - } | ||
133 | - | ||
134 | - private class ExpirationObserver implements | ||
135 | - ExpirationListener<DatabaseRow, Long> { | ||
136 | - @Override | ||
137 | - public void expired(DatabaseRow row, Long version) { | ||
138 | - THREAD_POOL.submit(new ExpirationTask(row, version)); | ||
139 | - } | ||
140 | - } | ||
141 | - | ||
142 | - private class ExpirationTask implements Runnable { | ||
143 | - | ||
144 | - private final DatabaseRow row; | ||
145 | - private final Long version; | ||
146 | - | ||
147 | - public ExpirationTask(DatabaseRow row, Long version) { | ||
148 | - this.row = row; | ||
149 | - this.version = version; | ||
150 | - } | ||
151 | - | ||
152 | - @Override | ||
153 | - public void run() { | ||
154 | - log.trace("Received an expiration event for {}, version: {}", row, version); | ||
155 | - Map<DatabaseRow, Long> map = tableEntryExpirationMap.get(row.tableName); | ||
156 | - try { | ||
157 | - if (isLocalMemberLeader.get()) { | ||
158 | - if (!databaseService.removeIfVersionMatches(row.tableName, | ||
159 | - row.key, version)) { | ||
160 | - log.info("Entry in database was updated right before its expiration."); | ||
161 | - } else { | ||
162 | - log.debug("Successfully expired old entry with key ({}) from table ({})", | ||
163 | - row.key, row.tableName); | ||
164 | - } | ||
165 | - } else { | ||
166 | - // Only the current leader will expire keys from database. | ||
168 | - // Everyone else functions as a standby in case they need to take over | ||
168 | - if (map != null) { | ||
169 | - map.putIfAbsent(row, version); | ||
170 | - } | ||
171 | - } | ||
172 | - | ||
173 | - } catch (Exception e) { | ||
174 | - log.warn("Failed to delete entry from the database after ttl " | ||
175 | - + "expiration. Operation will be retried.", e); | ||
176 | - map.putIfAbsent(row, version); | ||
177 | - } | ||
178 | - } | ||
179 | - } | ||
180 | - | ||
181 | - @Override | ||
182 | - public void handle(LeaderElectEvent event) { | ||
183 | - isLocalMemberLeader.set(localMember.equals(event.leader())); | ||
184 | - if (isLocalMemberLeader.get()) { | ||
185 | - log.info("{} is now the leader of Raft cluster", localNode.id()); | ||
186 | - } | ||
187 | - } | ||
188 | - | ||
189 | - /** | ||
190 | - * Wrapper class for a database row identifier. | ||
191 | - */ | ||
192 | - private class DatabaseRow { | ||
193 | - | ||
194 | - String tableName; | ||
195 | - String key; | ||
196 | - | ||
197 | - public DatabaseRow(String tableName, String key) { | ||
198 | - this.tableName = tableName; | ||
199 | - this.key = key; | ||
200 | - } | ||
201 | - | ||
202 | - @Override | ||
203 | - public String toString() { | ||
204 | - return MoreObjects.toStringHelper(getClass()) | ||
205 | - .add("tableName", tableName) | ||
206 | - .add("key", key) | ||
207 | - .toString(); | ||
208 | - } | ||
209 | - | ||
210 | - @Override | ||
211 | - public boolean equals(Object obj) { | ||
212 | - if (this == obj) { | ||
213 | - return true; | ||
214 | - } | ||
215 | - if (!(obj instanceof DatabaseRow)) { | ||
216 | - return false; | ||
217 | - } | ||
218 | - DatabaseRow that = (DatabaseRow) obj; | ||
219 | - | ||
220 | - return Objects.equals(this.tableName, that.tableName) | ||
221 | - && Objects.equals(this.key, that.key); | ||
222 | - } | ||
223 | - | ||
224 | - @Override | ||
225 | - public int hashCode() { | ||
226 | - return Objects.hash(tableName, key); | ||
227 | - } | ||
228 | - } | ||
229 | - | ||
230 | - @Override | ||
231 | - public void snapshotInstalled(State state) { | ||
232 | - if (!tableEntryExpirationMap.isEmpty()) { | ||
233 | - return; | ||
234 | - } | ||
235 | - log.debug("Received a snapshot installed notification"); | ||
236 | - for (String tableName : state.getTableNames()) { | ||
237 | - | ||
238 | - TableMetadata metadata = state.getTableMetadata(tableName); | ||
239 | - if (!metadata.expireOldEntries()) { | ||
240 | - continue; | ||
241 | - } | ||
242 | - | ||
243 | - Map<DatabaseRow, Long> tableExpirationMap = ExpiringMap.builder() | ||
244 | - .expiration(metadata.ttlMillis(), TimeUnit.MILLISECONDS) | ||
245 | - .expirationListener(expirationObserver) | ||
246 | - .expirationPolicy(ExpirationPolicy.CREATED).build(); | ||
247 | - for (Map.Entry<String, VersionedValue> entry : state.getTable(tableName).entrySet()) { | ||
248 | - tableExpirationMap.put(new DatabaseRow(tableName, entry.getKey()), entry.getValue().version()); | ||
249 | - } | ||
250 | - | ||
251 | - tableEntryExpirationMap.put(tableName, tableExpirationMap); | ||
252 | - } | ||
253 | - } | ||
254 | -} |
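The tracker above keeps one ExpiringMap per TTL-enabled table so the library, rather than the state machine, drives expiration. A minimal sketch of the same builder chain, using the same (older) ExpiringMap API the removed class imports; exact generics and nesting may vary between library versions.

import java.util.Map;
import java.util.concurrent.TimeUnit;

import net.jodah.expiringmap.ExpiringMap;
import net.jodah.expiringmap.ExpiringMap.ExpirationListener;
import net.jodah.expiringmap.ExpiringMap.ExpirationPolicy;

// Sketch of a TTL table: entries expire a fixed time after creation and the
// listener fires once per expired row, mirroring the tracker's setup above.
public final class TtlTableSketch {

    public static void main(String[] args) throws InterruptedException {
        ExpirationListener<String, Long> onExpired = new ExpirationListener<String, Long>() {
            @Override
            public void expired(String key, Long version) {
                System.out.println("expired " + key + " @ " + version);
            }
        };

        Map<String, Long> table = ExpiringMap.builder()
                .expiration(100, TimeUnit.MILLISECONDS)     // table-level TTL
                .expirationListener(onExpired)
                .expirationPolicy(ExpirationPolicy.CREATED) // measured from insertion time
                .build();

        table.put("row-1", 7L);
        Thread.sleep(300);                                   // give the expirer time to run
    }
}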
core/store/dist/src/main/java/org/onosproject/store/service/impl/DatabaseManager.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.service.impl; | ||
17 | - | ||
18 | -import net.kuujo.copycat.cluster.TcpMember; | ||
19 | -import net.kuujo.copycat.spi.protocol.Protocol; | ||
20 | - | ||
21 | -// interface required for connecting DatabaseManager + ClusterMessagingProtocol | ||
22 | -// TODO: Consider changing ClusterMessagingProtocol to non-Service class | ||
23 | -public interface DatabaseProtocolService extends Protocol<TcpMember> { | ||
24 | - | ||
25 | -} |
core/store/dist/src/main/java/org/onosproject/store/service/impl/DatabaseStateMachine.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | - | ||
17 | -package org.onosproject.store.service.impl; | ||
18 | - | ||
19 | -import org.onosproject.store.service.impl.DatabaseStateMachine.TableMetadata; | ||
20 | - | ||
21 | -/** | ||
22 | - * Interface of database update event listeners. | ||
23 | - */ | ||
24 | -public interface DatabaseUpdateEventListener { | ||
25 | - | ||
26 | - /** | ||
27 | - * Notifies listeners of a table modified event. | ||
28 | - * @param event table modification event. | ||
29 | - */ | ||
30 | - public void tableModified(TableModificationEvent event); | ||
31 | - | ||
32 | - /** | ||
33 | - * Notifies listeners of a table created event. | ||
34 | - * @param metadata metadata for the created table. | ||
35 | - */ | ||
36 | - public void tableCreated(TableMetadata metadata); | ||
37 | - | ||
38 | - /** | ||
39 | - * Notifies listeners of a table deleted event. | ||
40 | - * @param tableName name of the table deleted | ||
41 | - */ | ||
42 | - public void tableDeleted(String tableName); | ||
43 | - | ||
44 | - /** | ||
45 | - * Notifies listeners of a snapshot installation event. | ||
46 | - * @param snapshotState installed snapshot state. | ||
47 | - */ | ||
48 | - public void snapshotInstalled(DatabaseStateMachine.State snapshotState); | ||
49 | -} |
core/store/dist/src/main/java/org/onosproject/store/service/impl/DistributedLock.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.service.impl; | ||
17 | - | ||
18 | -import static com.google.common.base.Verify.verify; | ||
19 | -import static org.slf4j.LoggerFactory.getLogger; | ||
20 | - | ||
21 | -import java.nio.charset.StandardCharsets; | ||
22 | -import java.util.Arrays; | ||
23 | -import java.util.UUID; | ||
24 | -import java.util.concurrent.CompletableFuture; | ||
25 | -import java.util.concurrent.ExecutionException; | ||
26 | -import java.util.concurrent.TimeUnit; | ||
27 | -import java.util.concurrent.TimeoutException; | ||
28 | -import java.util.concurrent.atomic.AtomicBoolean; | ||
29 | - | ||
30 | -import org.joda.time.DateTime; | ||
31 | -import org.onosproject.cluster.ClusterService; | ||
32 | -import org.onosproject.store.service.DatabaseException; | ||
33 | -import org.onosproject.store.service.DatabaseService; | ||
34 | -import org.onosproject.store.service.Lock; | ||
35 | -import org.onosproject.store.service.VersionedValue; | ||
36 | -import org.slf4j.Logger; | ||
37 | - | ||
38 | -/** | ||
39 | - * A distributed lock implementation. | ||
40 | - */ | ||
41 | -public class DistributedLock implements Lock { | ||
42 | - | ||
43 | - private final Logger log = getLogger(getClass()); | ||
44 | - | ||
45 | - private final DistributedLockManager lockManager; | ||
46 | - private final DatabaseService databaseService; | ||
47 | - private final String path; | ||
48 | - private DateTime lockExpirationTime; | ||
49 | - private AtomicBoolean isLocked = new AtomicBoolean(false); | ||
50 | - private volatile long epoch = 0; | ||
51 | - private byte[] lockId; | ||
52 | - | ||
53 | - public DistributedLock( | ||
54 | - String path, | ||
55 | - DatabaseService databaseService, | ||
56 | - ClusterService clusterService, | ||
57 | - DistributedLockManager lockManager) { | ||
58 | - | ||
59 | - this.path = path; | ||
60 | - this.databaseService = databaseService; | ||
61 | - this.lockManager = lockManager; | ||
62 | - this.lockId = | ||
63 | - (UUID.randomUUID().toString() + "::" + | ||
64 | - clusterService.getLocalNode().id().toString()). | ||
65 | - getBytes(StandardCharsets.UTF_8); | ||
66 | - } | ||
67 | - | ||
68 | - @Override | ||
69 | - public String path() { | ||
70 | - return path; | ||
71 | - } | ||
72 | - | ||
73 | - @Override | ||
74 | - public void lock(int leaseDurationMillis) throws InterruptedException { | ||
75 | - try { | ||
76 | - lockAsync(leaseDurationMillis).get(); | ||
77 | - } catch (ExecutionException e) { | ||
78 | - throw new DatabaseException(e); | ||
79 | - } | ||
80 | - } | ||
81 | - | ||
82 | - @Override | ||
83 | - public CompletableFuture<Void> lockAsync(int leaseDurationMillis) { | ||
84 | - try { | ||
85 | - if (isLocked() || tryLock(leaseDurationMillis)) { | ||
86 | - return CompletableFuture.<Void>completedFuture(null); | ||
87 | - } | ||
88 | - return lockManager.lockIfAvailable(this, leaseDurationMillis); | ||
89 | - } catch (DatabaseException e) { | ||
90 | - CompletableFuture<Void> lockFuture = new CompletableFuture<>(); | ||
91 | - lockFuture.completeExceptionally(e); | ||
92 | - return lockFuture; | ||
93 | - } | ||
94 | - } | ||
95 | - | ||
96 | - @Override | ||
97 | - public boolean tryLock(int leaseDurationMillis) { | ||
98 | - if (databaseService.putIfAbsent( | ||
99 | - DistributedLockManager.ONOS_LOCK_TABLE_NAME, | ||
100 | - path, | ||
101 | - lockId)) { | ||
102 | - VersionedValue vv = | ||
103 | - databaseService.get(DistributedLockManager.ONOS_LOCK_TABLE_NAME, path); | ||
104 | - verify(Arrays.equals(vv.value(), lockId)); | ||
105 | - epoch = vv.version(); | ||
106 | - isLocked.set(true); | ||
107 | - lockExpirationTime = DateTime.now().plusMillis(leaseDurationMillis); | ||
108 | - return true; | ||
109 | - } | ||
110 | - return false; | ||
111 | - } | ||
112 | - | ||
113 | - @Override | ||
114 | - public boolean tryLock( | ||
115 | - int waitTimeMillis, | ||
116 | - int leaseDurationMillis) throws InterruptedException { | ||
117 | - if (isLocked() || tryLock(leaseDurationMillis)) { | ||
118 | - return true; | ||
119 | - } | ||
120 | - | ||
121 | - CompletableFuture<Void> future = | ||
122 | - lockManager.lockIfAvailable(this, waitTimeMillis, leaseDurationMillis); | ||
123 | - try { | ||
124 | - future.get(waitTimeMillis, TimeUnit.MILLISECONDS); | ||
125 | - return true; | ||
126 | - } catch (ExecutionException e) { | ||
127 | - throw new DatabaseException(e); | ||
128 | - } catch (TimeoutException e) { | ||
129 | - log.debug("Timed out waiting to acquire lock for {}", path); | ||
130 | - return false; | ||
131 | - } | ||
132 | - } | ||
133 | - | ||
134 | - @Override | ||
135 | - public boolean isLocked() { | ||
136 | - if (isLocked.get()) { | ||
137 | - // We rely on local information to check | ||
138 | - // if the lock expired. | ||
139 | - // This should make this call | ||
140 | - // lightweight, while still retaining the | ||
141 | - // safety guarantees. | ||
142 | - if (DateTime.now().isAfter(lockExpirationTime)) { | ||
143 | - isLocked.set(false); | ||
144 | - return false; | ||
145 | - } else { | ||
146 | - return true; | ||
147 | - } | ||
148 | - } | ||
149 | - return false; | ||
150 | - } | ||
151 | - | ||
152 | - @Override | ||
153 | - public long epoch() { | ||
154 | - return epoch; | ||
155 | - } | ||
156 | - | ||
157 | - @Override | ||
158 | - public void unlock() { | ||
159 | - if (!isLocked()) { | ||
160 | - return; | ||
161 | - } else { | ||
162 | - if (databaseService.removeIfValueMatches(DistributedLockManager.ONOS_LOCK_TABLE_NAME, path, lockId)) { | ||
163 | - isLocked.set(false); | ||
164 | - } | ||
165 | - } | ||
166 | - } | ||
167 | - | ||
168 | - @Override | ||
169 | - public boolean extendExpiration(int leaseDurationMillis) { | ||
170 | - if (!isLocked()) { | ||
171 | - log.warn("Ignoring request to extend expiration for lock {}." | ||
172 | - + " ExtendExpiration must be called for locks that are already acquired.", path); | ||
173 | - return false; | ||
174 | - } | ||
175 | - | ||
176 | - if (databaseService.putIfValueMatches( | ||
177 | - DistributedLockManager.ONOS_LOCK_TABLE_NAME, | ||
178 | - path, | ||
179 | - lockId, | ||
180 | - lockId)) { | ||
181 | - lockExpirationTime = DateTime.now().plusMillis(leaseDurationMillis); | ||
182 | - log.debug("Succeeded in extending lock {} expiration time to {}", lockExpirationTime); | ||
183 | - return true; | ||
184 | - } else { | ||
185 | - log.info("Failed to extend expiration for {}", path); | ||
186 | - return false; | ||
187 | - } | ||
188 | - } | ||
189 | -} |
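A caller-side sketch of the lease-based Lock API implemented above: acquire with a bounded wait and a lease, do the leader-only work, and release in a finally block. The lockService reference, lock path, and timing constants are assumptions made for the example.

import org.onosproject.store.service.Lock;
import org.onosproject.store.service.LockService;

// Illustrative caller-side usage of the lease-based Lock API implemented above.
public final class LockUsageSketch {

    private static final int WAIT_MS = 1000;   // how long to wait for the lock
    private static final int LEASE_MS = 5000;  // how long to hold it before it expires

    static void withLock(LockService lockService) throws InterruptedException {
        Lock lock = lockService.create("/election/region-1");
        if (!lock.tryLock(WAIT_MS, LEASE_MS)) {
            return;                            // somebody else holds the lock; give up
        }
        try {
            // Critical section. Long-running work should call extendExpiration(...)
            // before the lease runs out, otherwise the lock silently expires.
            doLeaderOnlyWork(lock.epoch());
        } finally {
            lock.unlock();
        }
    }

    private static void doLeaderOnlyWork(long epoch) {
        System.out.println("acting as leader for epoch " + epoch);
    }
}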
core/store/dist/src/main/java/org/onosproject/store/service/impl/DistributedLockManager.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.service.impl; | ||
17 | - | ||
18 | -import static org.onlab.util.Tools.namedThreads; | ||
19 | -import static org.slf4j.LoggerFactory.getLogger; | ||
20 | - | ||
21 | -import java.util.Iterator; | ||
22 | -import java.util.List; | ||
23 | -import java.util.Set; | ||
24 | -import java.util.concurrent.CompletableFuture; | ||
25 | -import java.util.concurrent.ExecutorService; | ||
26 | -import java.util.concurrent.Executors; | ||
27 | - | ||
28 | -import org.apache.felix.scr.annotations.Activate; | ||
29 | -import org.apache.felix.scr.annotations.Component; | ||
30 | -import org.apache.felix.scr.annotations.Deactivate; | ||
31 | -import org.apache.felix.scr.annotations.Reference; | ||
32 | -import org.apache.felix.scr.annotations.ReferenceCardinality; | ||
33 | -import org.apache.felix.scr.annotations.Service; | ||
34 | -import org.joda.time.DateTime; | ||
35 | -import org.onosproject.cluster.ClusterService; | ||
36 | -import org.onosproject.store.cluster.messaging.ClusterCommunicationService; | ||
37 | -import org.onosproject.store.cluster.messaging.ClusterMessage; | ||
38 | -import org.onosproject.store.cluster.messaging.ClusterMessageHandler; | ||
39 | -import org.onosproject.store.service.DatabaseAdminService; | ||
40 | -import org.onosproject.store.service.DatabaseException; | ||
41 | -import org.onosproject.store.service.DatabaseService; | ||
42 | -import org.onosproject.store.service.Lock; | ||
43 | -import org.onosproject.store.service.LockEventListener; | ||
44 | -import org.onosproject.store.service.LockService; | ||
45 | -import org.slf4j.Logger; | ||
46 | - | ||
47 | -import com.google.common.collect.LinkedListMultimap; | ||
48 | -import com.google.common.collect.ListMultimap; | ||
49 | -import com.google.common.collect.Multimaps; | ||
50 | - | ||
51 | -@Component(immediate = false) | ||
52 | -@Service | ||
53 | -public class DistributedLockManager implements LockService { | ||
54 | - | ||
55 | - private static final ExecutorService THREAD_POOL = | ||
56 | - Executors.newCachedThreadPool(namedThreads("onos-lock-manager-%d")); | ||
57 | - | ||
58 | - private final Logger log = getLogger(getClass()); | ||
59 | - | ||
60 | - public static final String ONOS_LOCK_TABLE_NAME = "onos-locks"; | ||
61 | - | ||
62 | - public static final int DEAD_LOCK_TIMEOUT_MS = 5000; | ||
63 | - | ||
64 | - private final ListMultimap<String, LockRequest> locksToAcquire = | ||
65 | - Multimaps.synchronizedListMultimap(LinkedListMultimap.<String, LockRequest>create()); | ||
66 | - | ||
67 | - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
68 | - private ClusterCommunicationService clusterCommunicator; | ||
69 | - | ||
70 | - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
71 | - private DatabaseAdminService databaseAdminService; | ||
72 | - | ||
73 | - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
74 | - private DatabaseService databaseService; | ||
75 | - | ||
76 | - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
77 | - private ClusterService clusterService; | ||
78 | - | ||
79 | - @Activate | ||
80 | - public void activate() { | ||
81 | - try { | ||
82 | - Set<String> tables = databaseAdminService.listTables(); | ||
83 | - | ||
84 | - if (!tables.contains(ONOS_LOCK_TABLE_NAME)) { | ||
85 | - if (databaseAdminService.createTable(ONOS_LOCK_TABLE_NAME, DEAD_LOCK_TIMEOUT_MS)) { | ||
86 | - log.info("Created {} table.", ONOS_LOCK_TABLE_NAME); | ||
87 | - } | ||
88 | - } | ||
89 | - } catch (DatabaseException e) { | ||
90 | - log.error("DistributedLockManager#activate failed.", e); | ||
91 | - } | ||
92 | - | ||
93 | - clusterCommunicator.addSubscriber( | ||
94 | - DatabaseStateMachine.DATABASE_UPDATE_EVENTS, | ||
95 | - new LockEventMessageListener()); | ||
96 | - | ||
97 | - log.info("Started"); | ||
98 | - } | ||
99 | - | ||
100 | - @Deactivate | ||
101 | - public void deactivate() { | ||
102 | - clusterCommunicator.removeSubscriber(DatabaseStateMachine.DATABASE_UPDATE_EVENTS); | ||
103 | - locksToAcquire.clear(); | ||
104 | - log.info("Stopped."); | ||
105 | - } | ||
106 | - | ||
107 | - @Override | ||
108 | - public Lock create(String path) { | ||
109 | - return new DistributedLock(path, databaseService, clusterService, this); | ||
110 | - } | ||
111 | - | ||
112 | - @Override | ||
113 | - public void addListener(LockEventListener listener) { | ||
114 | - throw new UnsupportedOperationException(); | ||
115 | - } | ||
116 | - | ||
117 | - @Override | ||
118 | - public void removeListener(LockEventListener listener) { | ||
119 | - throw new UnsupportedOperationException(); | ||
120 | - } | ||
121 | - | ||
122 | - /** | ||
123 | - * Attempts to acquire the lock as soon as it becomes available. | ||
124 | - * @param lock lock to acquire. | ||
125 | - * @param waitTimeMillis maximum time to wait before giving up. | ||
126 | - * @param leaseDurationMillis the duration for which to acquire the lock initially. | ||
127 | - * @return Future that can be blocked on until lock becomes available. | ||
128 | - */ | ||
129 | - protected CompletableFuture<Void> lockIfAvailable( | ||
130 | - Lock lock, | ||
131 | - int waitTimeMillis, | ||
132 | - int leaseDurationMillis) { | ||
133 | - CompletableFuture<Void> future = new CompletableFuture<>(); | ||
134 | - LockRequest request = new LockRequest( | ||
135 | - lock, | ||
136 | - leaseDurationMillis, | ||
137 | - DateTime.now().plusMillis(waitTimeMillis), | ||
138 | - future); | ||
139 | - locksToAcquire.put(lock.path(), request); | ||
140 | - return future; | ||
141 | - } | ||
142 | - | ||
143 | - /** | ||
144 | - * Attempts to acquire the lock as soon as it becomes available, waiting indefinitely. | ||
145 | - * @param lock lock to acquire. | ||
146 | - * @param leaseDurationMillis the duration for which to acquire the lock initially. | ||
147 | - * @return Future that can be blocked on until the lock becomes available. | ||
148 | - */ | ||
149 | - protected CompletableFuture<Void> lockIfAvailable( | ||
150 | - Lock lock, | ||
151 | - int leaseDurationMillis) { | ||
152 | - CompletableFuture<Void> future = new CompletableFuture<>(); | ||
153 | - LockRequest request = new LockRequest( | ||
154 | - lock, | ||
155 | - leaseDurationMillis, | ||
156 | - DateTime.now().plusYears(100), | ||
157 | - future); | ||
158 | - locksToAcquire.put(lock.path(), request); | ||
159 | - return future; | ||
160 | - } | ||
161 | - | ||
162 | - private class LockEventMessageListener implements ClusterMessageHandler { | ||
163 | - @Override | ||
164 | - public void handle(ClusterMessage message) { | ||
165 | - TableModificationEvent event = ClusterMessagingProtocol.DB_SERIALIZER | ||
166 | - .decode(message.payload()); | ||
167 | - if (event.tableName().equals(ONOS_LOCK_TABLE_NAME) && | ||
168 | - event.type().equals(TableModificationEvent.Type.ROW_DELETED)) { | ||
169 | - THREAD_POOL.submit(new RetryLockTask(event.key())); | ||
170 | - } | ||
171 | - } | ||
172 | - } | ||
173 | - | ||
174 | - private class RetryLockTask implements Runnable { | ||
175 | - | ||
176 | - private final String path; | ||
177 | - | ||
178 | - public RetryLockTask(String path) { | ||
179 | - this.path = path; | ||
180 | - } | ||
181 | - | ||
182 | - @Override | ||
183 | - public void run() { | ||
184 | - if (!locksToAcquire.containsKey(path)) { | ||
185 | - return; | ||
186 | - } | ||
187 | - | ||
188 | - List<LockRequest> existingRequests = locksToAcquire.get(path); | ||
189 | - if (existingRequests == null || existingRequests.isEmpty()) { | ||
190 | - return; | ||
191 | - } | ||
192 | - log.info("Path {} is now available for locking. There are {} outstanding " | ||
193 | - + "requests for it.", | ||
194 | - path, existingRequests.size()); | ||
195 | - | ||
196 | - synchronized (existingRequests) { | ||
197 | - Iterator<LockRequest> existingRequestIterator = existingRequests.iterator(); | ||
198 | - while (existingRequestIterator.hasNext()) { | ||
199 | - LockRequest request = existingRequestIterator.next(); | ||
200 | - if (DateTime.now().isAfter(request.requestExpirationTime())) { | ||
201 | - // request expired. | ||
202 | - existingRequestIterator.remove(); | ||
203 | - } else { | ||
204 | - if (request.lock().tryLock(request.leaseDurationMillis())) { | ||
205 | - request.future().complete(null); | ||
206 | - existingRequestIterator.remove(); | ||
207 | - } | ||
208 | - } | ||
209 | - } | ||
210 | - } | ||
211 | - } | ||
212 | - } | ||
213 | - | ||
214 | - private class LockRequest { | ||
215 | - | ||
216 | - private final Lock lock; | ||
217 | - private final DateTime requestExpirationTime; | ||
218 | - private final int leaseDurationMillis; | ||
219 | - private final CompletableFuture<Void> future; | ||
220 | - | ||
221 | - public LockRequest( | ||
222 | - Lock lock, | ||
223 | - int leaseDurationMillis, | ||
224 | - DateTime requestExpirationTime, | ||
225 | - CompletableFuture<Void> future) { | ||
226 | - | ||
227 | - this.lock = lock; | ||
228 | - this.requestExpirationTime = requestExpirationTime; | ||
229 | - this.leaseDurationMillis = leaseDurationMillis; | ||
230 | - this.future = future; | ||
231 | - } | ||
232 | - | ||
233 | - public Lock lock() { | ||
234 | - return lock; | ||
235 | - } | ||
236 | - | ||
237 | - public DateTime requestExpirationTime() { | ||
238 | - return requestExpirationTime; | ||
239 | - } | ||
240 | - | ||
241 | - public int leaseDurationMillis() { | ||
242 | - return leaseDurationMillis; | ||
243 | - } | ||
244 | - | ||
245 | - public CompletableFuture<Void> future() { | ||
246 | - return future; | ||
247 | - } | ||
248 | - } | ||
249 | -} |
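For reference, a minimal sketch of how the lock API removed above could be driven is shown next. It assumes only the methods visible in this diff, LockService.create(String) and Lock.tryLock(int); the wrapper class, the Runnable parameter, and the omission of a release call are illustrative assumptions rather than actual ONOS usage.

    // Minimal usage sketch; assumes only LockService.create(String) and Lock.tryLock(int) as shown above.
    import org.onosproject.store.service.Lock;
    import org.onosproject.store.service.LockService;

    public final class LockUsageSketch {
        private LockUsageSketch() {}

        /** Runs the task while holding a cluster-wide lock on the path, if the lock can be acquired. */
        public static boolean runExclusively(LockService locks, String path,
                                             int leaseDurationMillis, Runnable task) {
            Lock lock = locks.create(path);
            // tryLock(int) is the same call RetryLockTask makes above.
            if (!lock.tryLock(leaseDurationMillis)) {
                return false; // lock is currently held by another node
            }
            task.run();
            // Releasing the lease is omitted here; the Lock release method is not shown in this diff.
            return true;
        }
    }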
core/store/dist/src/main/java/org/onosproject/store/service/impl/SnapshotException.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.service.impl; | ||
17 | - | ||
18 | -import org.onosproject.store.service.DatabaseException; | ||
19 | - | ||
20 | -/** | ||
21 | - * Exception that indicates a problem with the state machine snapshotting. | ||
22 | - */ | ||
23 | -@SuppressWarnings("serial") | ||
24 | -public class SnapshotException extends DatabaseException { | ||
25 | - public SnapshotException(Throwable t) { | ||
26 | - super(t); | ||
27 | - } | ||
28 | -} |
core/store/dist/src/main/java/org/onosproject/store/service/impl/TableModificationEvent.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.service.impl; | ||
17 | - | ||
18 | -import org.onosproject.store.service.VersionedValue; | ||
19 | - | ||
20 | -import com.google.common.base.MoreObjects; | ||
21 | - | ||
22 | -/** | ||
23 | - * A table modification event. | ||
24 | - */ | ||
25 | -public final class TableModificationEvent { | ||
26 | - | ||
27 | - /** | ||
28 | - * Type of table modification event. | ||
29 | - */ | ||
30 | - public enum Type { | ||
31 | - ROW_ADDED, | ||
32 | - ROW_DELETED, | ||
33 | - ROW_UPDATED | ||
34 | - } | ||
35 | - | ||
36 | - private final String tableName; | ||
37 | - private final String key; | ||
38 | - private final VersionedValue value; | ||
39 | - private final Type type; | ||
40 | - | ||
41 | - /** | ||
42 | - * Creates a new row deleted table modification event. | ||
43 | - * @param tableName table name. | ||
44 | - * @param key row key | ||
45 | - * @param value value associated with the key when it was deleted. | ||
46 | - * @return table modification event. | ||
47 | - */ | ||
48 | - public static TableModificationEvent rowDeleted(String tableName, String key, VersionedValue value) { | ||
49 | - return new TableModificationEvent(tableName, key, value, Type.ROW_DELETED); | ||
50 | - } | ||
51 | - | ||
52 | - /** | ||
53 | - * Creates a new row added table modification event. | ||
54 | - * @param tableName table name. | ||
55 | - * @param key row key | ||
56 | - * @param value value associated with the key | ||
57 | - * @return table modification event. | ||
58 | - */ | ||
59 | - public static TableModificationEvent rowAdded(String tableName, String key, VersionedValue value) { | ||
60 | - return new TableModificationEvent(tableName, key, value, Type.ROW_ADDED); | ||
61 | - } | ||
62 | - | ||
63 | - /** | ||
64 | - * Creates a new row updated table modification event. | ||
65 | - * @param tableName table name. | ||
66 | - * @param key row key | ||
67 | - * @param newValue value | ||
68 | - * @return table modification event. | ||
69 | - */ | ||
70 | - public static TableModificationEvent rowUpdated(String tableName, String key, VersionedValue newValue) { | ||
71 | - return new TableModificationEvent(tableName, key, newValue, Type.ROW_UPDATED); | ||
72 | - } | ||
73 | - | ||
74 | - private TableModificationEvent(String tableName, String key, VersionedValue value, Type type) { | ||
75 | - this.tableName = tableName; | ||
76 | - this.key = key; | ||
77 | - this.value = value; | ||
78 | - this.type = type; | ||
79 | - } | ||
80 | - | ||
81 | - /** | ||
82 | - * Returns name of table this event is for. | ||
83 | - * @return table name | ||
84 | - */ | ||
85 | - public String tableName() { | ||
86 | - return tableName; | ||
87 | - } | ||
88 | - | ||
89 | - /** | ||
90 | - * Returns the row key this event is for. | ||
91 | - * @return row key | ||
92 | - */ | ||
93 | - public String key() { | ||
94 | - return key; | ||
95 | - } | ||
96 | - | ||
97 | - /** | ||
98 | - * Returns the value associated with the key. If the event is for a deletion, this | ||
99 | - * method returns the value that was deleted. | ||
100 | - * @return row value | ||
101 | - */ | ||
102 | - public VersionedValue value() { | ||
103 | - return value; | ||
104 | - } | ||
105 | - | ||
106 | - /** | ||
107 | - * Returns the type of table modification event. | ||
108 | - * @return event type. | ||
109 | - */ | ||
110 | - public Type type() { | ||
111 | - return type; | ||
112 | - } | ||
113 | - | ||
114 | - @Override | ||
115 | - public String toString() { | ||
116 | - return MoreObjects.toStringHelper(getClass()) | ||
117 | - .add("type", type) | ||
118 | - .add("tableName", tableName) | ||
119 | - .add("key", key) | ||
120 | - .add("version", value.version()) | ||
121 | - .toString(); | ||
122 | - } | ||
123 | -} |
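The factory methods and accessors above describe the whole event model, so a short consumer sketch follows; it assumes only the TableModificationEvent and VersionedValue accessors visible in this commit, and the printed output is purely illustrative.

    // Consumer sketch; only the TableModificationEvent and VersionedValue accessors shown above are assumed.
    import org.onosproject.store.service.VersionedValue;
    import org.onosproject.store.service.impl.TableModificationEvent;

    public final class TableEventSketch {
        private TableEventSketch() {}

        /** Reacts to a decoded table modification event, e.g. as LockEventMessageListener does above. */
        public static void handle(TableModificationEvent event) {
            switch (event.type()) {
                case ROW_ADDED:
                case ROW_UPDATED:
                    VersionedValue value = event.value();
                    System.out.println("updated " + event.tableName() + "/" + event.key()
                            + " version=" + value.version());
                    break;
                case ROW_DELETED:
                    // value() carries the last value the row held before it was deleted.
                    System.out.println("deleted " + event.tableName() + "/" + event.key());
                    break;
                default:
                    break;
            }
        }
    }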
core/store/dist/src/main/java/org/onosproject/store/service/impl/TabletDefinitionStore.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.service.impl; | ||
17 | - | ||
18 | -import static com.google.common.base.Preconditions.checkArgument; | ||
19 | -import static com.google.common.base.Preconditions.checkNotNull; | ||
20 | -import static org.slf4j.LoggerFactory.getLogger; | ||
21 | - | ||
22 | -import java.io.File; | ||
23 | -import java.io.IOException; | ||
24 | -import java.util.HashMap; | ||
25 | -import java.util.HashSet; | ||
26 | -import java.util.Iterator; | ||
27 | -import java.util.Map; | ||
28 | -import java.util.Map.Entry; | ||
29 | -import java.util.Set; | ||
30 | - | ||
31 | -import org.onosproject.cluster.DefaultControllerNode; | ||
32 | -import org.onosproject.cluster.NodeId; | ||
33 | -import org.onlab.packet.IpAddress; | ||
34 | -import org.slf4j.Logger; | ||
35 | - | ||
36 | -import com.fasterxml.jackson.core.JsonEncoding; | ||
37 | -import com.fasterxml.jackson.core.JsonFactory; | ||
38 | -import com.fasterxml.jackson.databind.JsonNode; | ||
39 | -import com.fasterxml.jackson.databind.ObjectMapper; | ||
40 | -import com.fasterxml.jackson.databind.node.ArrayNode; | ||
41 | -import com.fasterxml.jackson.databind.node.ObjectNode; | ||
42 | - | ||
43 | -/** | ||
44 | - * Allows reading and writing the tablet definition as a JSON file. | ||
45 | - */ | ||
46 | -public class TabletDefinitionStore { | ||
47 | - | ||
48 | - private final Logger log = getLogger(getClass()); | ||
49 | - | ||
50 | - private final File file; | ||
51 | - | ||
52 | - /** | ||
53 | - * Creates a reader/writer of the tablet definition file. | ||
54 | - * | ||
55 | - * @param filePath location of the definition file | ||
56 | - */ | ||
57 | - public TabletDefinitionStore(String filePath) { | ||
58 | - file = new File(filePath); | ||
59 | - } | ||
60 | - | ||
61 | - /** | ||
62 | - * Creates a reader/writer of the tablet definition file. | ||
63 | - * | ||
64 | - * @param filePath location of the definition file | ||
65 | - */ | ||
66 | - public TabletDefinitionStore(File filePath) { | ||
67 | - file = checkNotNull(filePath); | ||
68 | - } | ||
69 | - | ||
70 | - /** | ||
71 | - * Returns the Map from tablet name to set of initial member nodes. | ||
72 | - * | ||
73 | - * @return Map from tablet name to set of initial member nodes | ||
74 | - * @throws IOException if an I/O error occurs while reading the file. | ||
75 | - */ | ||
76 | - public Map<String, Set<DefaultControllerNode>> read() throws IOException { | ||
77 | - | ||
78 | - final Map<String, Set<DefaultControllerNode>> tablets = new HashMap<>(); | ||
79 | - | ||
80 | - final ObjectMapper mapper = new ObjectMapper(); | ||
81 | - final ObjectNode tabletNodes = (ObjectNode) mapper.readTree(file); | ||
82 | - final Iterator<Entry<String, JsonNode>> fields = tabletNodes.fields(); | ||
83 | - while (fields.hasNext()) { | ||
84 | - final Entry<String, JsonNode> next = fields.next(); | ||
85 | - final Set<DefaultControllerNode> nodes = new HashSet<>(); | ||
86 | - final Iterator<JsonNode> elements = next.getValue().elements(); | ||
87 | - while (elements.hasNext()) { | ||
88 | - ObjectNode nodeDef = (ObjectNode) elements.next(); | ||
89 | - nodes.add(new DefaultControllerNode(new NodeId(nodeDef.get("id").asText()), | ||
90 | - IpAddress.valueOf(nodeDef.get("ip").asText()), | ||
91 | - nodeDef.get("tcpPort").asInt(9876))); | ||
92 | - } | ||
93 | - | ||
94 | - tablets.put(next.getKey(), nodes); | ||
95 | - } | ||
96 | - return tablets; | ||
97 | - } | ||
98 | - | ||
99 | - /** | ||
100 | - * Updates the Map from tablet name to set of member nodes. | ||
101 | - * | ||
102 | - * @param tabletName name of the tablet to update | ||
103 | - * @param nodes set of initial member nodes | ||
104 | - * @throws IOException if an I/O error occurs while writing the file. | ||
105 | - */ | ||
106 | - public void write(String tabletName, Set<DefaultControllerNode> nodes) throws IOException { | ||
107 | - checkNotNull(tabletName); | ||
108 | - checkArgument(!tabletName.isEmpty(), "Tablet name cannot be empty"); | ||
109 | - // TODO should validate if tabletName is allowed in JSON | ||
110 | - | ||
111 | - // load current | ||
112 | - Map<String, Set<DefaultControllerNode>> config; | ||
113 | - try { | ||
114 | - config = read(); | ||
115 | - } catch (IOException e) { | ||
116 | - log.info("Reading tablet config failed, assuming empty definition."); | ||
117 | - config = new HashMap<>(); | ||
118 | - } | ||
119 | - // update with specified | ||
120 | - config.put(tabletName, nodes); | ||
121 | - | ||
122 | - // write back to file | ||
123 | - final ObjectMapper mapper = new ObjectMapper(); | ||
124 | - final ObjectNode tabletNodes = mapper.createObjectNode(); | ||
125 | - for (Entry<String, Set<DefaultControllerNode>> tablet : config.entrySet()) { | ||
126 | - ArrayNode nodeDefs = mapper.createArrayNode(); | ||
127 | - tabletNodes.set(tablet.getKey(), nodeDefs); | ||
128 | - | ||
129 | - for (DefaultControllerNode node : tablet.getValue()) { | ||
130 | - ObjectNode nodeDef = mapper.createObjectNode(); | ||
131 | - nodeDef.put("id", node.id().toString()) | ||
132 | - .put("ip", node.ip().toString()) | ||
133 | - .put("tcpPort", node.tcpPort()); | ||
134 | - nodeDefs.add(nodeDef); | ||
135 | - } | ||
136 | - } | ||
137 | - mapper.writeTree(new JsonFactory().createGenerator(file, JsonEncoding.UTF8), | ||
138 | - tabletNodes); | ||
139 | - } | ||
140 | - | ||
141 | -} |
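The tablet definition file format is only implied by the read/write logic above, so a hedged sketch may help: the JSON shape (tablet name mapped to an array of id/ip/tcpPort objects) is inferred from the field names used in read() and write(), and the node values and file name below are invented for illustration.

    // Sketch of the implied file format and API; the JSON shape is inferred from read()/write() above.
    import java.io.File;
    import java.io.IOException;
    import java.util.Map;
    import java.util.Set;

    import org.onlab.packet.IpAddress;
    import org.onosproject.cluster.DefaultControllerNode;
    import org.onosproject.cluster.NodeId;
    import org.onosproject.store.service.impl.TabletDefinitionStore;

    import com.google.common.collect.ImmutableSet;

    public final class TabletDefinitionSketch {
        private TabletDefinitionSketch() {}

        public static void main(String[] args) throws IOException {
            TabletDefinitionStore store = new TabletDefinitionStore(new File("tablets.json"));

            // write("default", ...) produces a file shaped roughly like:
            //   { "default": [ { "id": "node1", "ip": "10.0.0.1", "tcpPort": 9876 } ] }
            Set<DefaultControllerNode> members = ImmutableSet.of(
                    new DefaultControllerNode(new NodeId("node1"), IpAddress.valueOf("10.0.0.1"), 9876));
            store.write("default", members);

            Map<String, Set<DefaultControllerNode>> tablets = store.read();
            System.out.println(tablets.keySet()); // [default]
        }
    }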
core/store/dist/src/main/java/org/onosproject/store/service/impl/TcpClusterConfigSerializer.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.service.impl; | ||
17 | - | ||
18 | -import java.util.Collection; | ||
19 | - | ||
20 | -import net.kuujo.copycat.cluster.TcpClusterConfig; | ||
21 | -import net.kuujo.copycat.cluster.TcpMember; | ||
22 | - | ||
23 | -import com.esotericsoftware.kryo.Kryo; | ||
24 | -import com.esotericsoftware.kryo.Serializer; | ||
25 | -import com.esotericsoftware.kryo.io.Input; | ||
26 | -import com.esotericsoftware.kryo.io.Output; | ||
27 | - | ||
28 | -public class TcpClusterConfigSerializer extends Serializer<TcpClusterConfig> { | ||
29 | - | ||
30 | - @Override | ||
31 | - public void write(Kryo kryo, Output output, TcpClusterConfig object) { | ||
32 | - kryo.writeClassAndObject(output, object.getLocalMember()); | ||
33 | - kryo.writeClassAndObject(output, object.getRemoteMembers()); | ||
34 | - } | ||
35 | - | ||
36 | - @Override | ||
37 | - public TcpClusterConfig read(Kryo kryo, Input input, | ||
38 | - Class<TcpClusterConfig> type) { | ||
39 | - TcpMember localMember = (TcpMember) kryo.readClassAndObject(input); | ||
40 | - @SuppressWarnings("unchecked") | ||
41 | - Collection<TcpMember> remoteMembers = (Collection<TcpMember>) kryo.readClassAndObject(input); | ||
42 | - return new TcpClusterConfig(localMember, remoteMembers); | ||
43 | - } | ||
44 | - | ||
45 | -} |
core/store/dist/src/main/java/org/onosproject/store/service/impl/TcpMemberSerializer.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.service.impl; | ||
17 | - | ||
18 | -import net.kuujo.copycat.cluster.TcpMember; | ||
19 | - | ||
20 | -import com.esotericsoftware.kryo.Kryo; | ||
21 | -import com.esotericsoftware.kryo.Serializer; | ||
22 | -import com.esotericsoftware.kryo.io.Input; | ||
23 | -import com.esotericsoftware.kryo.io.Output; | ||
24 | - | ||
25 | -public class TcpMemberSerializer extends Serializer<TcpMember> { | ||
26 | - | ||
27 | - @Override | ||
28 | - public void write(Kryo kryo, Output output, TcpMember object) { | ||
29 | - output.writeString(object.host()); | ||
30 | - output.writeInt(object.port()); | ||
31 | - } | ||
32 | - | ||
33 | - @Override | ||
34 | - public TcpMember read(Kryo kryo, Input input, Class<TcpMember> type) { | ||
35 | - String host = input.readString(); | ||
36 | - int port = input.readInt(); | ||
37 | - return new TcpMember(host, port); | ||
38 | - } | ||
39 | -} |
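Both serializers above plug into Kryo's standard registration mechanism. The sketch below shows the registration calls a serializer setup could make; it assumes only Kryo's register(Class, Serializer) API, and the wrapper class name is chosen for illustration rather than mirroring the removed ClusterMessagingProtocol wiring.

    // Registration sketch; assumes only Kryo's standard register(Class, Serializer) API.
    import com.esotericsoftware.kryo.Kryo;

    import net.kuujo.copycat.cluster.TcpClusterConfig;
    import net.kuujo.copycat.cluster.TcpMember;

    import org.onosproject.store.service.impl.TcpClusterConfigSerializer;
    import org.onosproject.store.service.impl.TcpMemberSerializer;

    public final class CopycatKryoSetupSketch {
        private CopycatKryoSetupSketch() {}

        public static Kryo newKryo() {
            Kryo kryo = new Kryo();
            // Register the copycat TCP types with the custom serializers removed in this commit.
            kryo.register(TcpMember.class, new TcpMemberSerializer());
            kryo.register(TcpClusterConfig.class, new TcpClusterConfigSerializer());
            return kryo;
        }
    }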
core/store/dist/src/main/java/org/onosproject/store/service/impl/package-info.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | - | ||
17 | -/** | ||
18 | - * Strongly consistent, fault-tolerant and durable state management | ||
19 | - * based on Raft consensus protocol. | ||
20 | - */ | ||
21 | -package org.onosproject.store.service.impl; |
core/store/dist/src/test/java/org/onosproject/store/service/impl/MapDBLogTest.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.service.impl; | ||
17 | - | ||
18 | -import java.io.File; | ||
19 | -import java.io.IOException; | ||
20 | -import java.nio.file.Files; | ||
21 | -import java.util.List; | ||
22 | - | ||
23 | -import net.kuujo.copycat.internal.log.OperationEntry; | ||
24 | -import net.kuujo.copycat.log.Entry; | ||
25 | -import net.kuujo.copycat.log.Log; | ||
26 | - | ||
27 | -import org.junit.After; | ||
28 | -import org.junit.Assert; | ||
29 | -import org.junit.Before; | ||
30 | -import org.junit.Test; | ||
31 | -import org.onosproject.store.serializers.StoreSerializer; | ||
32 | - | ||
33 | -import com.google.common.testing.EqualsTester; | ||
34 | - | ||
35 | -/** | ||
36 | - * Test the MapDBLog implementation. | ||
37 | - */ | ||
38 | -public class MapDBLogTest { | ||
39 | - | ||
40 | - private static final StoreSerializer SERIALIZER = ClusterMessagingProtocol.DB_SERIALIZER; | ||
41 | - private static final Entry TEST_ENTRY1 = new OperationEntry(1, "test1"); | ||
42 | - private static final Entry TEST_ENTRY2 = new OperationEntry(2, "test12"); | ||
43 | - private static final Entry TEST_ENTRY3 = new OperationEntry(3, "test123"); | ||
44 | - private static final Entry TEST_ENTRY4 = new OperationEntry(4, "test1234"); | ||
45 | - | ||
46 | - private static final Entry TEST_SNAPSHOT_ENTRY = new OperationEntry(5, "snapshot"); | ||
47 | - | ||
48 | - private static final long TEST_ENTRY1_SIZE = SERIALIZER.encode(TEST_ENTRY1).length; | ||
49 | - private static final long TEST_ENTRY2_SIZE = SERIALIZER.encode(TEST_ENTRY2).length; | ||
50 | - private static final long TEST_ENTRY3_SIZE = SERIALIZER.encode(TEST_ENTRY3).length; | ||
51 | - private static final long TEST_ENTRY4_SIZE = SERIALIZER.encode(TEST_ENTRY4).length; | ||
52 | - | ||
53 | - private static final long TEST_SNAPSHOT_ENTRY_SIZE = SERIALIZER.encode(TEST_SNAPSHOT_ENTRY).length; | ||
54 | - | ||
55 | - private String dbFileName; | ||
56 | - | ||
57 | - | ||
58 | - @Before | ||
59 | - public void setUp() throws Exception { | ||
60 | - File logFile = File.createTempFile("mapdbTest", null); | ||
61 | - dbFileName = logFile.getAbsolutePath(); | ||
62 | - } | ||
63 | - | ||
64 | - @After | ||
65 | - public void tearDown() throws Exception { | ||
66 | - Files.deleteIfExists(new File(dbFileName).toPath()); | ||
67 | - Files.deleteIfExists(new File(dbFileName + ".t").toPath()); | ||
68 | - Files.deleteIfExists(new File(dbFileName + ".p").toPath()); | ||
69 | - } | ||
70 | - | ||
71 | - @Test(expected = IllegalStateException.class) | ||
72 | - public void testAssertOpen() { | ||
73 | - Log log = new MapDBLog(dbFileName, SERIALIZER); | ||
74 | - log.size(); | ||
75 | - } | ||
76 | - | ||
77 | - @Test | ||
78 | - public void testAppendEntry() throws IOException { | ||
79 | - Log log = new MapDBLog(dbFileName, SERIALIZER); | ||
80 | - log.open(); | ||
81 | - log.appendEntry(TEST_ENTRY1); | ||
82 | - OperationEntry first = log.firstEntry(); | ||
83 | - OperationEntry last = log.lastEntry(); | ||
84 | - new EqualsTester() | ||
85 | - .addEqualityGroup(first, last, TEST_ENTRY1) | ||
86 | - .testEquals(); | ||
87 | - Assert.assertEquals(TEST_ENTRY1_SIZE, log.size()); | ||
88 | - Assert.assertEquals(1, log.firstIndex()); | ||
89 | - Assert.assertEquals(1, log.lastIndex()); | ||
90 | - } | ||
91 | - | ||
92 | - @Test | ||
93 | - public void testAppendEntries() throws IOException { | ||
94 | - Log log = new MapDBLog(dbFileName, SERIALIZER); | ||
95 | - log.open(); | ||
96 | - log.appendEntries(TEST_ENTRY1, TEST_ENTRY2, TEST_ENTRY3); | ||
97 | - OperationEntry first = log.firstEntry(); | ||
98 | - OperationEntry last = log.lastEntry(); | ||
99 | - new EqualsTester() | ||
100 | - .addEqualityGroup(first, TEST_ENTRY1) | ||
101 | - .addEqualityGroup(last, TEST_ENTRY3) | ||
102 | - .testEquals(); | ||
103 | - Assert.assertEquals(TEST_ENTRY1_SIZE + TEST_ENTRY2_SIZE + TEST_ENTRY3_SIZE, log.size()); | ||
104 | - Assert.assertEquals(1, log.firstIndex()); | ||
105 | - Assert.assertEquals(3, log.lastIndex()); | ||
106 | - Assert.assertTrue(log.containsEntry(1)); | ||
107 | - Assert.assertTrue(log.containsEntry(2)); | ||
108 | - } | ||
109 | - | ||
110 | - @Test | ||
111 | - public void testDelete() throws IOException { | ||
112 | - Log log = new MapDBLog(dbFileName, SERIALIZER); | ||
113 | - log.open(); | ||
114 | - log.appendEntries(TEST_ENTRY1, TEST_ENTRY2); | ||
115 | - log.delete(); | ||
116 | - Assert.assertEquals(0, log.size()); | ||
117 | - Assert.assertTrue(log.isEmpty()); | ||
118 | - Assert.assertEquals(0, log.firstIndex()); | ||
119 | - Assert.assertNull(log.firstEntry()); | ||
120 | - Assert.assertEquals(0, log.lastIndex()); | ||
121 | - Assert.assertNull(log.lastEntry()); | ||
122 | - } | ||
123 | - | ||
124 | - @Test | ||
125 | - public void testGetEntries() throws IOException { | ||
126 | - Log log = new MapDBLog(dbFileName, SERIALIZER); | ||
127 | - log.open(); | ||
128 | - log.appendEntries(TEST_ENTRY1, TEST_ENTRY2, TEST_ENTRY3, TEST_ENTRY4); | ||
129 | - Assert.assertEquals( | ||
130 | - TEST_ENTRY1_SIZE + | ||
131 | - TEST_ENTRY2_SIZE + | ||
132 | - TEST_ENTRY3_SIZE + | ||
133 | - TEST_ENTRY4_SIZE, log.size()); | ||
134 | - | ||
135 | - List<Entry> entries = log.getEntries(2, 3); | ||
136 | - new EqualsTester() | ||
137 | - .addEqualityGroup(log.getEntry(4), TEST_ENTRY4) | ||
138 | - .addEqualityGroup(entries.get(0), TEST_ENTRY2) | ||
139 | - .addEqualityGroup(entries.get(1), TEST_ENTRY3) | ||
140 | - .testEquals(); | ||
141 | - } | ||
142 | - | ||
143 | - @Test | ||
144 | - public void testRemoveAfter() throws IOException { | ||
145 | - Log log = new MapDBLog(dbFileName, SERIALIZER); | ||
146 | - log.open(); | ||
147 | - log.appendEntries(TEST_ENTRY1, TEST_ENTRY2, TEST_ENTRY3, TEST_ENTRY4); | ||
148 | - log.removeAfter(1); | ||
149 | - Assert.assertEquals(TEST_ENTRY1_SIZE, log.size()); | ||
150 | - new EqualsTester() | ||
151 | - .addEqualityGroup(log.firstEntry(), log.lastEntry(), TEST_ENTRY1) | ||
152 | - .testEquals(); | ||
153 | - } | ||
154 | - | ||
155 | - @Test | ||
156 | - public void testAddAfterRemove() throws IOException { | ||
157 | - Log log = new MapDBLog(dbFileName, SERIALIZER); | ||
158 | - log.open(); | ||
159 | - log.appendEntries(TEST_ENTRY1, TEST_ENTRY2, TEST_ENTRY3, TEST_ENTRY4); | ||
160 | - log.removeAfter(1); | ||
161 | - log.appendEntry(TEST_ENTRY4); | ||
162 | - Assert.assertEquals(TEST_ENTRY1_SIZE + TEST_ENTRY4_SIZE, log.size()); | ||
163 | - new EqualsTester() | ||
164 | - .addEqualityGroup(log.firstEntry(), TEST_ENTRY1) | ||
165 | - .addEqualityGroup(log.lastEntry(), TEST_ENTRY4) | ||
166 | - .addEqualityGroup(log.size(), TEST_ENTRY1_SIZE + TEST_ENTRY4_SIZE) | ||
167 | - .testEquals(); | ||
168 | - } | ||
169 | - | ||
170 | - @Test | ||
171 | - public void testClose() throws IOException { | ||
172 | - Log log = new MapDBLog(dbFileName, SERIALIZER); | ||
173 | - Assert.assertFalse(log.isOpen()); | ||
174 | - log.open(); | ||
175 | - Assert.assertTrue(log.isOpen()); | ||
176 | - log.close(); | ||
177 | - Assert.assertFalse(log.isOpen()); | ||
178 | - } | ||
179 | - | ||
180 | - @Test | ||
181 | - public void testReopen() throws IOException { | ||
182 | - Log log = new MapDBLog(dbFileName, SERIALIZER); | ||
183 | - log.open(); | ||
184 | - log.appendEntries(TEST_ENTRY1, TEST_ENTRY2, TEST_ENTRY3, TEST_ENTRY4); | ||
185 | - log.close(); | ||
186 | - log.open(); | ||
187 | - | ||
188 | - new EqualsTester() | ||
189 | - .addEqualityGroup(log.firstEntry(), TEST_ENTRY1) | ||
190 | - .addEqualityGroup(log.getEntry(2), TEST_ENTRY2) | ||
191 | - .addEqualityGroup(log.lastEntry(), TEST_ENTRY4) | ||
192 | - .addEqualityGroup(log.size(), | ||
193 | - TEST_ENTRY1_SIZE + | ||
194 | - TEST_ENTRY2_SIZE + | ||
195 | - TEST_ENTRY3_SIZE + | ||
196 | - TEST_ENTRY4_SIZE) | ||
197 | - .testEquals(); | ||
198 | - } | ||
199 | - | ||
200 | - @Test | ||
201 | - public void testCompact() throws IOException { | ||
202 | - Log log = new MapDBLog(dbFileName, SERIALIZER); | ||
203 | - log.open(); | ||
204 | - log.appendEntries(TEST_ENTRY1, TEST_ENTRY2, TEST_ENTRY3, TEST_ENTRY4); | ||
205 | - log.compact(3, TEST_SNAPSHOT_ENTRY); | ||
206 | - new EqualsTester() | ||
207 | - .addEqualityGroup(log.firstEntry(), TEST_SNAPSHOT_ENTRY) | ||
208 | - .addEqualityGroup(log.lastEntry(), TEST_ENTRY4) | ||
209 | - .addEqualityGroup(log.size(), | ||
210 | - TEST_SNAPSHOT_ENTRY_SIZE + | ||
211 | - TEST_ENTRY4_SIZE) | ||
212 | - .testEquals(); | ||
213 | - } | ||
214 | -} |
... | @@ -69,7 +69,7 @@ | ... | @@ -69,7 +69,7 @@ |
69 | <properties> | 69 | <properties> |
70 | <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> | 70 | <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> |
71 | <netty4.version>4.0.23.Final</netty4.version> | 71 | <netty4.version>4.0.23.Final</netty4.version> |
72 | - <copycat.version>0.3.0.onos</copycat.version> | 72 | + <copycat.version>0.5.0.onos-SNAPSHOT</copycat.version> |
73 | <openflowj.version>0.3.9.oe</openflowj.version> | 73 | <openflowj.version>0.3.9.oe</openflowj.version> |
74 | </properties> | 74 | </properties> |
75 | 75 | ... | ... |
... | @@ -31,6 +31,14 @@ | ... | @@ -31,6 +31,14 @@ |
31 | 31 | ||
32 | <description>ONLab third-party dependencies</description> | 32 | <description>ONLab third-party dependencies</description> |
33 | 33 | ||
34 | + <!-- TODO: Needed for copycat snapshot. Remove before official release --> | ||
35 | + <repositories> | ||
36 | + <repository> | ||
37 | + <id>snapshots</id> | ||
38 | + <url>https://oss.sonatype.org/content/repositories/snapshots</url> | ||
39 | + </repository> | ||
40 | + </repositories> | ||
41 | + | ||
34 | <dependencies> | 42 | <dependencies> |
35 | <dependency> | 43 | <dependency> |
36 | <groupId>com.googlecode.concurrent-trees</groupId> | 44 | <groupId>com.googlecode.concurrent-trees</groupId> |
... | @@ -47,14 +55,14 @@ | ... | @@ -47,14 +55,14 @@ |
47 | <dependency> | 55 | <dependency> |
48 | <!-- FIXME once fixes get merged to upstream --> | 56 | <!-- FIXME once fixes get merged to upstream --> |
49 | <groupId>org.onosproject</groupId> | 57 | <groupId>org.onosproject</groupId> |
50 | - <artifactId>copycat</artifactId> | 58 | + <artifactId>copycat-api</artifactId> |
51 | <version>${copycat.version}</version> | 59 | <version>${copycat.version}</version> |
52 | </dependency> | 60 | </dependency> |
53 | 61 | ||
54 | <dependency> | 62 | <dependency> |
55 | <!-- FIXME once fixes get merged to upstream --> | 63 | <!-- FIXME once fixes get merged to upstream --> |
56 | <groupId>org.onosproject</groupId> | 64 | <groupId>org.onosproject</groupId> |
57 | - <artifactId>copycat-tcp</artifactId> | 65 | + <artifactId>copycat-netty</artifactId> |
58 | <version>${copycat.version}</version> | 66 | <version>${copycat.version}</version> |
59 | </dependency> | 67 | </dependency> |
60 | 68 | ... | ... |