Support retry for WRITE_THROUGH and WRITE_BEHIND: add retry logic to MapWriter

Signed-off-by: zzhlhc <zhouzh_zzz@qq.com>
pull/5294/head
zzhlhc 1 year ago
parent 4e72fcbbbe
commit e5e976d2d4
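For context, a minimal usage sketch of the retry options added in this change, assuming an existing RedissonClient; the writer bodies and the map name below are illustrative only, not part of the commit:

import java.util.Collection;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.redisson.api.MapOptions;
import org.redisson.api.RMap;
import org.redisson.api.RedissonClient;
import org.redisson.api.map.MapWriter;

public class RetryOptionsExample {

    // Illustrative only: wires a write-behind map with the new retry settings.
    public static RMap<String, String> createMap(RedissonClient redisson) {
        MapOptions<String, String> options = MapOptions.<String, String>defaults()
                .writer(new MapWriter<String, String>() {
                    @Override
                    public void write(Map<String, String> map) {
                        // persist entries to the external store; a thrown exception now triggers a retry
                    }

                    @Override
                    public void delete(Collection<String> keys) {
                        // remove entries from the external store; a thrown exception now triggers a retry
                    }
                })
                .writeMode(MapOptions.WriteMode.WRITE_BEHIND)
                .writerRetryAttempts(3)                           // at most 3 attempts per batch
                .writerRetryInterval(100, TimeUnit.MILLISECONDS); // pause between attempts
        return redisson.getMap("retry-example", options);
    }
}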

@@ -100,7 +100,7 @@ public class MapWriteBehindTask {
if (options.getWriter() != null) {
options.getWriter().write(addedMap);
} else {
options.getWriterAsync().write(addedMap);
options.getWriterAsync().write(addedMap).toCompletableFuture().join();
}
addedMap.clear();
}

@@ -19,6 +19,7 @@ import org.redisson.api.map.MapLoader;
import org.redisson.api.map.MapLoaderAsync;
import org.redisson.api.map.MapWriter;
import org.redisson.api.map.MapWriterAsync;
import org.redisson.api.map.RetryableMapWriter;
import org.redisson.api.map.RetryableMapWriterAsync;
import java.util.concurrent.TimeUnit;
@@ -95,7 +96,7 @@ public class MapOptions<K, V> {
* @return MapOptions instance
*/
public MapOptions<K, V> writer(MapWriter<K, V> writer) {
this.writer = writer;
this.writer = new RetryableMapWriter<>(this, writer);
return this;
}
public MapWriter<K, V> getWriter() {
@@ -170,7 +171,16 @@ public class MapOptions<K, V> {
return writerRetryAttempts;
}
/**
* Sets max retry attempts for {@link RetryableMapWriter} or {@link RetryableMapWriterAsync}
*
* @param writerRetryAttempts retry attempts limit
* @return MapOptions instance
*/
public MapOptions<K, V> writerRetryAttempts(int writerRetryAttempts) {
if (writerRetryAttempts < 0) {
throw new IllegalArgumentException("writerRetryAttempts can't be negative");
}
this.writerRetryAttempts = writerRetryAttempts;
return this;
}
@@ -179,7 +189,17 @@ public class MapOptions<K, V> {
return writerRetryInterval;
}
/**
* Sets retry interval for {@link RetryableMapWriter} or {@link RetryableMapWriterAsync}
*
* @param writerRetryInterval retry interval value
* @param timeUnit time unit of the interval
* @return MapOptions instance
*/
public MapOptions<K, V> writerRetryInterval(long writerRetryInterval, TimeUnit timeUnit) {
if (writerRetryInterval < 0) {
throw new IllegalArgumentException("writerRetryInterval can't be negative");
}
this.writerRetryInterval = timeUnit.toMillis(writerRetryInterval);
return this;
}

@@ -0,0 +1,82 @@
/**
* Copyright (c) 2013-2022 Nikita Koksharov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.redisson.api.map;
import org.redisson.api.MapOptions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.Map;
public class RetryableMapWriter<K, V> implements MapWriter<K, V> {
private static final Logger log = LoggerFactory.getLogger(RetryableMapWriter.class);
private final MapOptions<K, V> options;
private final MapWriter<K, V> mapWriter;
public RetryableMapWriter(MapOptions<K, V> options, MapWriter<K, V> mapWriter) {
this.options = options;
this.mapWriter = mapWriter;
}
@Override
public void write(Map<K, V> addedMap) {
//execute at least once
int leftAddAttempts = Math.max(1, options.getWriterRetryAttempts());
while (leftAddAttempts > 0) {
try {
//do write
mapWriter.write(addedMap);
break;
} catch (Exception exception) {
if (--leftAddAttempts == 0) {
throw exception;
} else {
log.warn("Unable to add keys: {}, will retry after {}ms", addedMap, options.getWriterRetryInterval(), exception);
try {
Thread.sleep(options.getWriterRetryInterval());
} catch (InterruptedException ignore) {
}
}
}
}
}
@Override
public void delete(Collection<K> keys) {
//execute at least once
int leftDeleteAttempts = Math.max(1, options.getWriterRetryAttempts());
while (leftDeleteAttempts > 0) {
try {
//do delete
mapWriter.delete(keys);
break;
} catch (Exception exception) {
if (--leftDeleteAttempts == 0) {
throw exception;
} else {
log.warn("Unable to delete keys: {}, will retry after {}ms", keys, options.getWriterRetryInterval(), exception);
try {
Thread.sleep(options.getWriterRetryInterval());
} catch (InterruptedException ignore) {
}
}
}
}
}
}
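A standalone sketch of the wrapper's at-least-once behaviour (the flaky writer and class names below are illustrative): with writerRetryAttempts(3) a write that fails twice succeeds on the third attempt, while attempts of 0 still result in a single attempt because of the Math.max(1, ...) guard.

import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import org.redisson.api.MapOptions;
import org.redisson.api.map.MapWriter;
import org.redisson.api.map.RetryableMapWriter;

public class RetryableMapWriterSketch {

    public static void main(String[] args) {
        AtomicInteger calls = new AtomicInteger();

        // Illustrative writer that fails on the first two calls and succeeds afterwards.
        MapWriter<String, String> flaky = new MapWriter<String, String>() {
            @Override
            public void write(Map<String, String> map) {
                if (calls.incrementAndGet() < 3) {
                    throw new IllegalStateException("transient failure");
                }
            }

            @Override
            public void delete(Collection<String> keys) {
                // no-op for this sketch
            }
        };

        MapOptions<String, String> options = MapOptions.<String, String>defaults()
                .writerRetryAttempts(3)
                .writerRetryInterval(50, TimeUnit.MILLISECONDS);

        // Wrap directly to show the retry loop; MapOptions.writer(...) applies the same wrapping internally.
        RetryableMapWriter<String, String> retryable = new RetryableMapWriter<>(options, flaky);
        retryable.write(Collections.singletonMap("k", "v"));

        System.out.println("write attempts: " + calls.get()); // expected: 3
    }
}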

@@ -23,7 +23,6 @@ import java.util.Collection;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
public class RetryableMapWriterAsync<K, V> implements MapWriterAsync<K, V> {
@@ -32,9 +31,6 @@ public class RetryableMapWriterAsync<K, V> implements MapWriterAsync<K, V> {
private final MapOptions<K, V> options;
private final MapWriterAsync<K, V> mapWriterAsync;
//store entries no need to be retried
private final Map<K, V> noRetriesForWrite = new ConcurrentHashMap<>();
public RetryableMapWriterAsync(MapOptions<K, V> options, MapWriterAsync<K, V> mapWriterAsync) {
this.options = options;
this.mapWriterAsync = mapWriterAsync;
@@ -42,37 +38,51 @@ public class RetryableMapWriterAsync<K, V> implements MapWriterAsync<K, V> {
@Override
public CompletionStage<Void> write(Map<K, V> addedMap) {
//execute at least once
int leftAddAttempts = Math.max(1, options.getWriterRetryAttempts());
while (leftAddAttempts > 0) {
try {
//remove successful part
if (!noRetriesForWrite.isEmpty()) {
noRetriesForWrite.forEach(addedMap::remove);
noRetriesForWrite.clear();
}
//do write
mapWriterAsync.write(addedMap).toCompletableFuture().join();
break;
} catch (Exception exception) {
if (--leftAddAttempts == 0) {
throw exception;
} else {
log.warn("Unable to add keys: {}, will retry after {}ms", addedMap, options.getWriterRetryInterval(), exception);
try {
Thread.sleep(options.getWriterRetryInterval());
} catch (InterruptedException ignore) {
return CompletableFuture.runAsync(() -> {
//execute at least once
int leftAddAttempts = Math.max(1, options.getWriterRetryAttempts());
while (leftAddAttempts > 0) {
try {
//do write
mapWriterAsync.write(addedMap).toCompletableFuture().join();
break;
} catch (Exception exception) {
if (--leftAddAttempts == 0) {
throw exception;
} else {
log.warn("Unable to add keys: {}, will retry after {}ms", addedMap, options.getWriterRetryInterval(), exception);
try {
Thread.sleep(options.getWriterRetryInterval());
} catch (InterruptedException ignore) {
}
}
}
}
}
return CompletableFuture.completedFuture(null);
});
}
@Override
public CompletionStage<Void> delete(Collection<K> keys) {
return mapWriterAsync.delete(keys);
return CompletableFuture.runAsync(() -> {
//execute at least once
int leftDeleteAttempts = Math.max(1, options.getWriterRetryAttempts());
while (leftDeleteAttempts > 0) {
try {
//do delete
mapWriterAsync.delete(keys).toCompletableFuture().join();
break;
} catch (Exception exception) {
if (--leftDeleteAttempts == 0) {
throw exception;
} else {
log.warn("Unable to delete keys: {}, will retry after {}ms", keys, options.getWriterRetryInterval(), exception);
try {
Thread.sleep(options.getWriterRetryInterval());
} catch (InterruptedException ignore) {
}
}
}
}
});
}
}
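The async wrapper can be sketched the same way; note that delete(...) previously delegated straight to the underlying writer and now goes through the retry loop as well. The flaky async writer and class names below are illustrative:

import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import org.redisson.api.MapOptions;
import org.redisson.api.map.MapWriterAsync;
import org.redisson.api.map.RetryableMapWriterAsync;

public class RetryableMapWriterAsyncSketch {

    public static void main(String[] args) {
        AtomicInteger calls = new AtomicInteger();

        // Illustrative async writer that fails on the first two calls and succeeds afterwards.
        MapWriterAsync<String, String> flaky = new MapWriterAsync<String, String>() {
            @Override
            public CompletionStage<Void> write(Map<String, String> map) {
                if (calls.incrementAndGet() < 3) {
                    CompletableFuture<Void> failed = new CompletableFuture<>();
                    failed.completeExceptionally(new IllegalStateException("transient failure"));
                    return failed;
                }
                return CompletableFuture.completedFuture(null);
            }

            @Override
            public CompletionStage<Void> delete(Collection<String> keys) {
                return CompletableFuture.completedFuture(null);
            }
        };

        MapOptions<String, String> options = MapOptions.<String, String>defaults()
                .writerRetryAttempts(3)
                .writerRetryInterval(50, TimeUnit.MILLISECONDS);

        RetryableMapWriterAsync<String, String> retryable = new RetryableMapWriterAsync<>(options, flaky);
        retryable.write(Collections.singletonMap("k", "v")).toCompletableFuture().join();

        System.out.println("write attempts: " + calls.get()); // expected: 3
    }
}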

@@ -1442,7 +1442,7 @@ public abstract class BaseMapTest extends BaseTest {
}
@Test
public void testRetryableWriterSuccessAtLastRetry() throws InterruptedException {
public void testRetryableWriterAsyncSuccessAtLastRetry() throws InterruptedException {
//success at last retry
int expectedRetryAttempts = 3;
AtomicInteger actualRetryTimes = new AtomicInteger(0);
@@ -1457,13 +1457,17 @@ public abstract class BaseMapTest extends BaseTest {
throw new IllegalStateException("retry");
}
store.putAll(map);
//todo writeSuccess(map);
});
}
@Override
public CompletionStage<Void> delete(Collection<String> keys) {
return null;
return CompletableFuture.runAsync(() -> {
if (actualRetryTimes.incrementAndGet() < expectedRetryAttempts) {
throw new IllegalStateException("retry");
}
keys.forEach(store::remove);
});
}
})
.writeMode(MapOptions.WriteMode.WRITE_BEHIND)
@@ -1471,61 +1475,90 @@ public abstract class BaseMapTest extends BaseTest {
.writerRetryInterval(100, TimeUnit.MILLISECONDS);
final RMap<String, String> map = redisson.getMap("test", options);
//do add
map.put("1", "11");
Thread.sleep(1400);
//assert add
Map<String, String> expectedMap = new HashMap<>();
expectedMap.put("1", "11");
assertThat(store).isEqualTo(expectedMap);
//assert retry times
//assert add retry times
assertThat(actualRetryTimes.get()).isEqualTo(expectedRetryAttempts);
//do delete
actualRetryTimes.set(0);
map.remove("1");
Thread.sleep(1400);
//assert delete
expectedMap.clear();
assertThat(store).isEqualTo(expectedMap);
//assert delete retry times
assertThat(actualRetryTimes.get()).isEqualTo(expectedRetryAttempts);
destroy(map);
}
/*@Test
public void testRetryableWriterOnlyRetryFailedPart() throws InterruptedException {
//lastWritingMap only contains the part that needs to be retried
Map<String, String> lastWritingMap = new HashMap<>();
@Test
public void testRetryableWriterSuccessAtLastRetry() throws InterruptedException {
//success at last retry
int expectedRetryAttempts = 3;
AtomicInteger actualRetryTimes = new AtomicInteger(0);
Map<String, String> store = new HashMap<>();
MapOptions<String, String> options = MapOptions.<String, String>defaults()
.writerAsync(new MapWriterAsync<String, String>() {
.writer(new MapWriter<String, String>() {
@Override
public CompletionStage<Void> write(Map<String, String> writingMap) {
lastWritingMap.clear();
lastWritingMap.putAll(writingMap);
for (Entry<String, String> entry : writingMap.entrySet()) {
if (entry.getKey().equals("illegalData")) {
throw new IllegalStateException("illegalData");
}
//writeSuccess will exclude entry in next retry
//todo writeSuccess(entry);
public void write(Map<String, String> map) {
if (actualRetryTimes.incrementAndGet() < expectedRetryAttempts) {
throw new IllegalStateException("retry");
}
return CompletableFuture.completedFuture(null);
store.putAll(map);
}
@Override
public CompletionStage<Void> delete(Collection<String> keys) {
return null;
public void delete(Collection<String> keys) {
if (actualRetryTimes.incrementAndGet() < expectedRetryAttempts) {
throw new IllegalStateException("retry");
}
keys.forEach(store::remove);
}
})
.writeMode(MapOptions.WriteMode.WRITE_BEHIND)
.writerRetryAttempts(3);
.writerRetryAttempts(expectedRetryAttempts)
.writerRetryInterval(100, TimeUnit.MILLISECONDS);
final RMap<String, String> map = redisson.getMap("test", options);
map.put("22", "11");
map.put("333", "11");
map.put("illegalData", "11");
//do add
map.put("1", "11");
Thread.sleep(1400);
Map<String, String> expectedLastWritingMap = new HashMap<>();
expectedLastWritingMap.put("illegalData", "11");
//finally, only "illegalData" still needs to be retried but the maximum number of retries is reached
assertThat(lastWritingMap).isEqualTo(expectedLastWritingMap);
//assert add
Map<String, String> expectedMap = new HashMap<>();
expectedMap.put("1", "11");
assertThat(store).isEqualTo(expectedMap);
//assert add retry times
assertThat(actualRetryTimes.get()).isEqualTo(expectedRetryAttempts);
//do delete
actualRetryTimes.set(0);
map.remove("1");
Thread.sleep(1400);
//assert delete
expectedMap.clear();
assertThat(store).isEqualTo(expectedMap);
//assert delete retry times
assertThat(actualRetryTimes.get()).isEqualTo(expectedRetryAttempts);
destroy(map);
}*/
}
@Test
public void testLoadAllReplaceValues() {
Map<String, String> cache = new HashMap<>();
