Feature - Spring Data Redis 3.0 implementation #4689
parent
d0c0187dca
commit
4dfafc722a
@ -0,0 +1,76 @@
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<parent>
|
||||
<groupId>org.redisson</groupId>
|
||||
<artifactId>redisson-spring-data</artifactId>
|
||||
<version>3.18.1-SNAPSHOT</version>
|
||||
<relativePath>../</relativePath>
|
||||
</parent>
|
||||
|
||||
<artifactId>redisson-spring-data-30</artifactId>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<name>Redisson/Spring Data Redis v3.0.x integration</name>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.springframework.data</groupId>
|
||||
<artifactId>spring-data-redis</artifactId>
|
||||
<version>3.0.0</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<artifactId>maven-jar-plugin</artifactId>
|
||||
<version>3.1.2</version>
|
||||
<configuration>
|
||||
<archive>
|
||||
<manifestEntries>
|
||||
<Build-Time>${maven.build.timestamp}</Build-Time>
|
||||
<Automatic-Module-Name>redisson.spring.data27</Automatic-Module-Name>
|
||||
</manifestEntries>
|
||||
</archive>
|
||||
</configuration>
|
||||
</plugin>
|
||||
|
||||
<plugin>
|
||||
<groupId>com.mycila</groupId>
|
||||
<artifactId>license-maven-plugin</artifactId>
|
||||
<version>3.0</version>
|
||||
<configuration>
|
||||
<basedir>${basedir}</basedir>
|
||||
<header>${basedir}/../../header.txt</header>
|
||||
<quiet>false</quiet>
|
||||
<failIfMissing>true</failIfMissing>
|
||||
<aggregate>false</aggregate>
|
||||
<includes>
|
||||
<include>src/main/java/org/redisson/</include>
|
||||
</includes>
|
||||
<excludes>
|
||||
<exclude>target/**</exclude>
|
||||
</excludes>
|
||||
<useDefaultExcludes>true</useDefaultExcludes>
|
||||
<mapping>
|
||||
<java>JAVADOC_STYLE</java>
|
||||
</mapping>
|
||||
<strictCheck>true</strictCheck>
|
||||
<useDefaultMapping>true</useDefaultMapping>
|
||||
<encoding>UTF-8</encoding>
|
||||
</configuration>
|
||||
<executions>
|
||||
<execution>
|
||||
<goals>
|
||||
<goal>check</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
|
||||
</project>
|
@ -0,0 +1,37 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.redisson.client.protocol.convertor.Convertor;
|
||||
|
||||
import io.netty.util.CharsetUtil;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class BinaryConvertor implements Convertor<Object> {
|
||||
|
||||
@Override
|
||||
public Object convert(Object obj) {
|
||||
if (obj instanceof String) {
|
||||
return ((String) obj).getBytes(CharsetUtil.UTF_8);
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,67 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.redisson.client.handler.State;
|
||||
import org.redisson.client.protocol.decoder.MultiDecoder;
|
||||
import org.springframework.data.geo.*;
|
||||
import org.springframework.data.redis.connection.RedisGeoCommands.GeoLocation;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class ByteBufferGeoResultsDecoder implements MultiDecoder<GeoResults<GeoLocation<ByteBuffer>>> {
|
||||
|
||||
private final Metric metric;
|
||||
|
||||
public ByteBufferGeoResultsDecoder() {
|
||||
this(null);
|
||||
}
|
||||
|
||||
public ByteBufferGeoResultsDecoder(Metric metric) {
|
||||
super();
|
||||
this.metric = metric;
|
||||
}
|
||||
|
||||
@Override
|
||||
public GeoResults<GeoLocation<ByteBuffer>> decode(List<Object> parts, State state) {
|
||||
List<GeoResult<GeoLocation<ByteBuffer>>> result = new ArrayList<GeoResult<GeoLocation<ByteBuffer>>>();
|
||||
for (Object object : parts) {
|
||||
if (object instanceof List) {
|
||||
List<Object> vals = ((List<Object>) object);
|
||||
|
||||
if (metric != null) {
|
||||
GeoLocation<ByteBuffer> location = new GeoLocation<ByteBuffer>(ByteBuffer.wrap((byte[])vals.get(0)), null);
|
||||
result.add(new GeoResult<GeoLocation<ByteBuffer>>(location, new Distance((Double)vals.get(1), metric)));
|
||||
} else {
|
||||
GeoLocation<ByteBuffer> location = new GeoLocation<ByteBuffer>(ByteBuffer.wrap((byte[])vals.get(0)), (Point)vals.get(1));
|
||||
result.add(new GeoResult<GeoLocation<ByteBuffer>>(location, null));
|
||||
}
|
||||
} else {
|
||||
GeoLocation<ByteBuffer> location = new GeoLocation<ByteBuffer>(ByteBuffer.wrap((byte[])object), null);
|
||||
result.add(new GeoResult<GeoLocation<ByteBuffer>>(location, new Distance(0)));
|
||||
}
|
||||
}
|
||||
return new GeoResults<GeoLocation<ByteBuffer>>(result);
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,34 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.redisson.client.protocol.convertor.Convertor;
|
||||
import org.springframework.data.redis.connection.DataType;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class DataTypeConvertor implements Convertor<DataType> {
|
||||
|
||||
@Override
|
||||
public DataType convert(Object obj) {
|
||||
String val = obj.toString();
|
||||
return DataType.fromCode(val);
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,41 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.redisson.client.protocol.convertor.Convertor;
|
||||
import org.springframework.data.geo.Distance;
|
||||
import org.springframework.data.geo.Metric;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class DistanceConvertor implements Convertor<Distance> {
|
||||
|
||||
private final Metric metric;
|
||||
|
||||
public DistanceConvertor(Metric metric) {
|
||||
super();
|
||||
this.metric = metric;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Distance convert(Object obj) {
|
||||
return new Distance((Double)obj, metric);
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,66 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.redisson.client.handler.State;
|
||||
import org.redisson.client.protocol.decoder.MultiDecoder;
|
||||
import org.springframework.data.geo.*;
|
||||
import org.springframework.data.redis.connection.RedisGeoCommands.GeoLocation;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class GeoResultsDecoder implements MultiDecoder<GeoResults<GeoLocation<byte[]>>> {
|
||||
|
||||
private final Metric metric;
|
||||
|
||||
public GeoResultsDecoder() {
|
||||
this(null);
|
||||
}
|
||||
|
||||
public GeoResultsDecoder(Metric metric) {
|
||||
super();
|
||||
this.metric = metric;
|
||||
}
|
||||
|
||||
@Override
|
||||
public GeoResults<GeoLocation<byte[]>> decode(List<Object> parts, State state) {
|
||||
List<GeoResult<GeoLocation<byte[]>>> result = new ArrayList<GeoResult<GeoLocation<byte[]>>>();
|
||||
for (Object object : parts) {
|
||||
if (object instanceof List) {
|
||||
List<Object> vals = ((List<Object>) object);
|
||||
|
||||
if (metric != null) {
|
||||
GeoLocation<byte[]> location = new GeoLocation<byte[]>((byte[])vals.get(0), null);
|
||||
result.add(new GeoResult<GeoLocation<byte[]>>(location, new Distance((Double)vals.get(1), metric)));
|
||||
} else {
|
||||
GeoLocation<byte[]> location = new GeoLocation<byte[]>((byte[])vals.get(0), (Point)vals.get(1));
|
||||
result.add(new GeoResult<GeoLocation<byte[]>>(location, new Distance(0)));
|
||||
}
|
||||
} else {
|
||||
GeoLocation<byte[]> location = new GeoLocation<byte[]>((byte[])object, null);
|
||||
result.add(new GeoResult<GeoLocation<byte[]>>(location, new Distance(0)));
|
||||
}
|
||||
}
|
||||
return new GeoResults<GeoLocation<byte[]>>(result);
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,61 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.redisson.client.codec.Codec;
|
||||
import org.redisson.client.handler.State;
|
||||
import org.redisson.client.protocol.Decoder;
|
||||
import org.redisson.client.protocol.decoder.MultiDecoder;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
* @param <T> type
|
||||
*/
|
||||
public class ObjectListReplayDecoder2<T> implements MultiDecoder<List<T>> {
|
||||
|
||||
private final Decoder<Object> decoder;
|
||||
|
||||
public ObjectListReplayDecoder2() {
|
||||
this(null);
|
||||
}
|
||||
|
||||
public ObjectListReplayDecoder2(Decoder<Object> decoder) {
|
||||
super();
|
||||
this.decoder = decoder;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<T> decode(List<Object> parts, State state) {
|
||||
for (int i = 0; i < parts.size(); i++) {
|
||||
Object object = parts.get(i);
|
||||
if (object instanceof List) {
|
||||
if (((List) object).isEmpty()) {
|
||||
parts.set(i, null);
|
||||
}
|
||||
}
|
||||
}
|
||||
return (List<T>) parts;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Decoder<Object> getDecoder(Codec codec, int paramNum, State state) {
|
||||
return decoder;
|
||||
}
|
||||
}
|
@ -0,0 +1,50 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.redisson.client.codec.Codec;
|
||||
import org.redisson.client.codec.DoubleCodec;
|
||||
import org.redisson.client.handler.State;
|
||||
import org.redisson.client.protocol.Decoder;
|
||||
import org.redisson.client.protocol.decoder.MultiDecoder;
|
||||
import org.springframework.data.geo.Point;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class PointDecoder implements MultiDecoder<Point> {
|
||||
|
||||
@Override
|
||||
public Decoder<Object> getDecoder(Codec codec, int paramNum, State state) {
|
||||
return DoubleCodec.INSTANCE.getValueDecoder();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Point decode(List<Object> parts, State state) {
|
||||
if (parts.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
|
||||
Double longitude = (Double)parts.get(0);
|
||||
Double latitude = (Double)parts.get(1);
|
||||
return new Point(longitude, latitude);
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,46 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import java.util.Properties;
|
||||
|
||||
import org.redisson.client.handler.State;
|
||||
import org.redisson.client.protocol.Decoder;
|
||||
|
||||
import io.netty.buffer.ByteBuf;
|
||||
import io.netty.util.CharsetUtil;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class PropertiesDecoder implements Decoder<Properties> {
|
||||
|
||||
@Override
|
||||
public Properties decode(ByteBuf buf, State state) {
|
||||
String value = buf.toString(CharsetUtil.UTF_8);
|
||||
Properties result = new Properties();
|
||||
for (String entry : value.split("\r\n|\n")) {
|
||||
String[] parts = entry.split(":");
|
||||
if (parts.length == 2) {
|
||||
result.put(parts[0], parts[1]);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,44 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Properties;
|
||||
|
||||
import org.redisson.client.codec.Codec;
|
||||
import org.redisson.client.handler.State;
|
||||
import org.redisson.client.protocol.Decoder;
|
||||
import org.redisson.client.protocol.decoder.MultiDecoder;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class PropertiesListDecoder implements MultiDecoder<Properties> {
|
||||
|
||||
@Override
|
||||
public Properties decode(List<Object> parts, State state) {
|
||||
Properties result = new Properties();
|
||||
for (int j = 0; j < parts.size(); j+=2) {
|
||||
Object key = parts.get(j);
|
||||
Object value = parts.get(j+1);
|
||||
result.put(key, value);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,120 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.EnumSet;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import org.redisson.client.handler.State;
|
||||
import org.redisson.client.protocol.Decoder;
|
||||
import org.redisson.misc.RedisURI;
|
||||
import org.springframework.data.redis.connection.RedisClusterNode;
|
||||
import org.springframework.data.redis.connection.RedisClusterNode.Flag;
|
||||
import org.springframework.data.redis.connection.RedisClusterNode.LinkState;
|
||||
import org.springframework.data.redis.connection.RedisClusterNode.RedisClusterNodeBuilder;
|
||||
import org.springframework.data.redis.connection.RedisClusterNode.SlotRange;
|
||||
import org.springframework.data.redis.connection.RedisNode.NodeType;
|
||||
|
||||
import io.netty.buffer.ByteBuf;
|
||||
import io.netty.util.CharsetUtil;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedisClusterNodeDecoder implements Decoder<List<RedisClusterNode>> {
|
||||
|
||||
@Override
|
||||
public List<RedisClusterNode> decode(ByteBuf buf, State state) throws IOException {
|
||||
String response = buf.toString(CharsetUtil.UTF_8);
|
||||
|
||||
List<RedisClusterNode> nodes = new ArrayList<RedisClusterNode>();
|
||||
for (String nodeInfo : response.split("\n")) {
|
||||
String[] params = nodeInfo.split(" ");
|
||||
|
||||
String nodeId = params[0];
|
||||
|
||||
String flagsStr = params[2];
|
||||
Set<Flag> flags = EnumSet.noneOf(Flag.class);
|
||||
for (String flag : flagsStr.split(",")) {
|
||||
String flagValue = flag.replace("slave", "replica")
|
||||
.toUpperCase().replaceAll("\\?", "");
|
||||
flags.add(Flag.valueOf(flagValue));
|
||||
}
|
||||
|
||||
RedisURI address = null;
|
||||
if (!flags.contains(Flag.NOADDR)) {
|
||||
String addr = params[1].split("@")[0];
|
||||
address = new RedisURI("redis://" + addr);
|
||||
}
|
||||
|
||||
String masterId = params[3];
|
||||
if ("-".equals(masterId)) {
|
||||
masterId = null;
|
||||
}
|
||||
|
||||
Set<Integer> slotsCollection = new HashSet<Integer>();
|
||||
LinkState linkState = null;
|
||||
if (params.length >= 8 && params[7] != null) {
|
||||
linkState = LinkState.valueOf(params[7].toUpperCase());
|
||||
}
|
||||
if (params.length > 8) {
|
||||
for (int i = 0; i < params.length - 8; i++) {
|
||||
String slots = params[i + 8];
|
||||
if (slots.indexOf("-<-") != -1 || slots.indexOf("->-") != -1) {
|
||||
continue;
|
||||
}
|
||||
|
||||
String[] parts = slots.split("-");
|
||||
if(parts.length == 1) {
|
||||
slotsCollection.add(Integer.valueOf(parts[0]));
|
||||
} else if(parts.length == 2) {
|
||||
for (int j = Integer.valueOf(parts[0]); j < Integer.valueOf(parts[1]) + 1; j++) {
|
||||
slotsCollection.add(j);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
NodeType type = null;
|
||||
if (flags.contains(Flag.MASTER)) {
|
||||
type = NodeType.MASTER;
|
||||
} else if (flags.contains(Flag.REPLICA)) {
|
||||
type = NodeType.REPLICA;
|
||||
}
|
||||
|
||||
RedisClusterNodeBuilder builder = RedisClusterNode.newRedisClusterNode()
|
||||
.linkState(linkState)
|
||||
.replicaOf(masterId)
|
||||
.serving(new SlotRange(slotsCollection))
|
||||
.withId(nodeId)
|
||||
.promotedAs(type)
|
||||
.withFlags(flags);
|
||||
|
||||
if (address != null) {
|
||||
builder.listeningAt(address.getHost(), address.getPort());
|
||||
}
|
||||
nodes.add(builder.build());
|
||||
}
|
||||
return nodes;
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,40 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.redisson.api.RFuture;
|
||||
import org.redisson.client.codec.Codec;
|
||||
import org.redisson.client.protocol.RedisCommand;
|
||||
import org.redisson.command.CommandAsyncService;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonBaseCommands {
|
||||
|
||||
private RedissonConnection connection;
|
||||
|
||||
public RedissonBaseCommands(RedissonConnection connection) {
|
||||
this.connection = connection;
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,95 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import java.net.InetSocketAddress;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.concurrent.CompletionStage;
|
||||
import java.util.function.Function;
|
||||
|
||||
import org.reactivestreams.Publisher;
|
||||
import org.redisson.api.RFuture;
|
||||
import org.redisson.client.codec.Codec;
|
||||
import org.redisson.client.codec.StringCodec;
|
||||
import org.redisson.client.protocol.RedisCommand;
|
||||
import org.redisson.connection.MasterSlaveEntry;
|
||||
import org.redisson.misc.CompletableFutureWrapper;
|
||||
import org.redisson.reactive.CommandReactiveExecutor;
|
||||
import org.springframework.data.redis.RedisSystemException;
|
||||
import org.springframework.data.redis.connection.RedisClusterNode;
|
||||
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
abstract class RedissonBaseReactive {
|
||||
|
||||
final CommandReactiveExecutor executorService;
|
||||
|
||||
RedissonBaseReactive(CommandReactiveExecutor executorService) {
|
||||
this.executorService = executorService;
|
||||
}
|
||||
|
||||
public static byte[] toByteArray(ByteBuffer buffer) {
|
||||
byte[] dst = new byte[buffer.remaining()];
|
||||
int pos = buffer.position();
|
||||
buffer.get(dst);
|
||||
buffer.position(pos);
|
||||
return dst;
|
||||
}
|
||||
|
||||
RFuture<String> toStringFuture(RFuture<Void> f) {
|
||||
CompletionStage<String> ff = f.thenApply(r -> "OK");
|
||||
return new CompletableFutureWrapper<>(ff);
|
||||
}
|
||||
|
||||
<T> Mono<T> execute(RedisClusterNode node, RedisCommand<T> command, Object... params) {
|
||||
MasterSlaveEntry entry = getEntry(node);
|
||||
return executorService.reactive(() -> {
|
||||
return executorService.writeAsync(entry, StringCodec.INSTANCE, command, params);
|
||||
});
|
||||
}
|
||||
|
||||
MasterSlaveEntry getEntry(RedisClusterNode node) {
|
||||
MasterSlaveEntry entry = executorService.getConnectionManager().getEntry(new InetSocketAddress(node.getHost(), node.getPort()));
|
||||
return entry;
|
||||
}
|
||||
|
||||
<V, T> Flux<T> execute(Publisher<V> commands, Function<V, Publisher<T>> mapper) {
|
||||
Flux<V> s = Flux.from(commands);
|
||||
return s.concatMap(mapper);
|
||||
}
|
||||
|
||||
<T> Mono<T> write(byte[] key, Codec codec, RedisCommand<?> command, Object... params) {
|
||||
Mono<T> f = executorService.reactive(() -> {
|
||||
return executorService.writeAsync(key, codec, command, params);
|
||||
});
|
||||
return f.onErrorMap(e -> new RedisSystemException(e.getMessage(), e));
|
||||
}
|
||||
|
||||
<T> Mono<T> read(byte[] key, Codec codec, RedisCommand<?> command, Object... params) {
|
||||
Mono<T> f = executorService.reactive(() -> {
|
||||
return executorService.readAsync(key, codec, command, params);
|
||||
});
|
||||
return f.onErrorMap(e -> new RedisSystemException(e.getMessage(), e));
|
||||
}
|
||||
|
||||
|
||||
}
|
@ -0,0 +1,558 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import io.netty.util.CharsetUtil;
|
||||
import org.redisson.api.BatchResult;
|
||||
import org.redisson.api.RFuture;
|
||||
import org.redisson.api.RedissonClient;
|
||||
import org.redisson.client.RedisClient;
|
||||
import org.redisson.client.codec.ByteArrayCodec;
|
||||
import org.redisson.client.codec.LongCodec;
|
||||
import org.redisson.client.codec.StringCodec;
|
||||
import org.redisson.client.protocol.RedisCommand;
|
||||
import org.redisson.client.protocol.RedisCommands;
|
||||
import org.redisson.client.protocol.RedisStrictCommand;
|
||||
import org.redisson.client.protocol.decoder.ListScanResult;
|
||||
import org.redisson.client.protocol.decoder.ObjectDecoder;
|
||||
import org.redisson.client.protocol.decoder.ObjectListReplayDecoder;
|
||||
import org.redisson.client.protocol.decoder.StringMapDataDecoder;
|
||||
import org.redisson.command.CommandBatchService;
|
||||
import org.redisson.connection.MasterSlaveEntry;
|
||||
import org.springframework.dao.InvalidDataAccessResourceUsageException;
|
||||
import org.springframework.data.redis.connection.*;
|
||||
import org.springframework.data.redis.connection.RedisClusterNode.SlotRange;
|
||||
import org.springframework.data.redis.connection.convert.Converters;
|
||||
import org.springframework.data.redis.connection.convert.StringToRedisClientInfoConverter;
|
||||
import org.springframework.data.redis.core.Cursor;
|
||||
import org.springframework.data.redis.core.ScanCursor;
|
||||
import org.springframework.data.redis.core.ScanIteration;
|
||||
import org.springframework.data.redis.core.ScanOptions;
|
||||
import org.springframework.data.redis.core.types.RedisClientInfo;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.*;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonClusterConnection extends RedissonConnection implements RedisClusterConnection, DefaultedRedisClusterConnection {
|
||||
|
||||
private static final RedisStrictCommand<List<RedisClusterNode>> CLUSTER_NODES =
|
||||
new RedisStrictCommand<List<RedisClusterNode>>("CLUSTER", "NODES", new ObjectDecoder(new RedisClusterNodeDecoder()));
|
||||
|
||||
public RedissonClusterConnection(RedissonClient redisson) {
|
||||
super(redisson);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterable<RedisClusterNode> clusterGetNodes() {
|
||||
return read(null, StringCodec.INSTANCE, CLUSTER_NODES);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<RedisClusterNode> clusterGetReplicas(RedisClusterNode master) {
|
||||
Iterable<RedisClusterNode> res = clusterGetNodes();
|
||||
RedisClusterNode masterNode = null;
|
||||
for (Iterator<RedisClusterNode> iterator = res.iterator(); iterator.hasNext();) {
|
||||
RedisClusterNode redisClusterNode = iterator.next();
|
||||
if (master.getHost().equals(redisClusterNode.getHost())
|
||||
&& master.getPort().equals(redisClusterNode.getPort())) {
|
||||
masterNode = redisClusterNode;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (masterNode == null) {
|
||||
throw new IllegalStateException("Unable to find master node: " + master);
|
||||
}
|
||||
|
||||
for (Iterator<RedisClusterNode> iterator = res.iterator(); iterator.hasNext();) {
|
||||
RedisClusterNode redisClusterNode = iterator.next();
|
||||
if (redisClusterNode.getMasterId() == null
|
||||
|| !redisClusterNode.getMasterId().equals(masterNode.getId())) {
|
||||
iterator.remove();
|
||||
}
|
||||
}
|
||||
return (Collection<RedisClusterNode>) res;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<RedisClusterNode, Collection<RedisClusterNode>> clusterGetMasterReplicaMap() {
|
||||
Iterable<RedisClusterNode> res = clusterGetNodes();
|
||||
|
||||
Set<RedisClusterNode> masters = new HashSet<RedisClusterNode>();
|
||||
for (Iterator<RedisClusterNode> iterator = res.iterator(); iterator.hasNext();) {
|
||||
RedisClusterNode redisClusterNode = iterator.next();
|
||||
if (redisClusterNode.isMaster()) {
|
||||
masters.add(redisClusterNode);
|
||||
}
|
||||
}
|
||||
|
||||
Map<RedisClusterNode, Collection<RedisClusterNode>> result = new HashMap<RedisClusterNode, Collection<RedisClusterNode>>();
|
||||
for (Iterator<RedisClusterNode> iterator = res.iterator(); iterator.hasNext();) {
|
||||
RedisClusterNode redisClusterNode = iterator.next();
|
||||
|
||||
for (RedisClusterNode masterNode : masters) {
|
||||
if (redisClusterNode.getMasterId() != null
|
||||
&& redisClusterNode.getMasterId().equals(masterNode.getId())) {
|
||||
Collection<RedisClusterNode> list = result.get(masterNode);
|
||||
if (list == null) {
|
||||
list = new ArrayList<RedisClusterNode>();
|
||||
result.put(masterNode, list);
|
||||
}
|
||||
list.add(redisClusterNode);
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Integer clusterGetSlotForKey(byte[] key) {
|
||||
RFuture<Integer> f = executorService.readAsync((String)null, StringCodec.INSTANCE, RedisCommands.KEYSLOT, key);
|
||||
return syncFuture(f);
|
||||
}
|
||||
|
||||
@Override
|
||||
public RedisClusterNode clusterGetNodeForSlot(int slot) {
|
||||
Iterable<RedisClusterNode> res = clusterGetNodes();
|
||||
for (RedisClusterNode redisClusterNode : res) {
|
||||
if (redisClusterNode.isMaster() && redisClusterNode.getSlotRange().contains(slot)) {
|
||||
return redisClusterNode;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public RedisClusterNode clusterGetNodeForKey(byte[] key) {
|
||||
int slot = executorService.getConnectionManager().calcSlot(key);
|
||||
return clusterGetNodeForSlot(slot);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterInfo clusterGetClusterInfo() {
|
||||
RFuture<Map<String, String>> f = executorService.readAsync((String)null, StringCodec.INSTANCE, RedisCommands.CLUSTER_INFO);
|
||||
Map<String, String> entries = syncFuture(f);
|
||||
|
||||
Properties props = new Properties();
|
||||
for (Entry<String, String> entry : entries.entrySet()) {
|
||||
props.setProperty(entry.getKey(), entry.getValue());
|
||||
}
|
||||
return new ClusterInfo(props);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterAddSlots(RedisClusterNode node, int... slots) {
|
||||
MasterSlaveEntry entry = getEntry(node);
|
||||
List<Integer> params = convert(slots);
|
||||
RFuture<Map<String, String>> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CLUSTER_ADDSLOTS, params.toArray());
|
||||
syncFuture(f);
|
||||
}
|
||||
|
||||
protected List<Integer> convert(int... slots) {
|
||||
List<Integer> params = new ArrayList<Integer>();
|
||||
for (int slot : slots) {
|
||||
params.add(slot);
|
||||
}
|
||||
return params;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterAddSlots(RedisClusterNode node, SlotRange range) {
|
||||
clusterAddSlots(node, range.getSlotsArray());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long clusterCountKeysInSlot(int slot) {
|
||||
RedisClusterNode node = clusterGetNodeForSlot(slot);
|
||||
MasterSlaveEntry entry = executorService.getConnectionManager().getEntry(new InetSocketAddress(node.getHost(), node.getPort()));
|
||||
RFuture<Long> f = executorService.readAsync(entry, StringCodec.INSTANCE, RedisCommands.CLUSTER_COUNTKEYSINSLOT, slot);
|
||||
return syncFuture(f);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterDeleteSlots(RedisClusterNode node, int... slots) {
|
||||
MasterSlaveEntry entry = getEntry(node);
|
||||
List<Integer> params = convert(slots);
|
||||
RFuture<Long> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CLUSTER_DELSLOTS, params.toArray());
|
||||
syncFuture(f);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterDeleteSlotsInRange(RedisClusterNode node, SlotRange range) {
|
||||
clusterDeleteSlots(node, range.getSlotsArray());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterForget(RedisClusterNode node) {
|
||||
RFuture<Void> f = executorService.writeAsync((String)null, StringCodec.INSTANCE, RedisCommands.CLUSTER_FORGET, node.getId());
|
||||
syncFuture(f);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterMeet(RedisClusterNode node) {
|
||||
Assert.notNull(node, "Cluster node must not be null for CLUSTER MEET command!");
|
||||
Assert.hasText(node.getHost(), "Node to meet cluster must have a host!");
|
||||
Assert.isTrue(node.getPort() > 0, "Node to meet cluster must have a port greater 0!");
|
||||
|
||||
RFuture<Void> f = executorService.writeAsync((String)null, StringCodec.INSTANCE, RedisCommands.CLUSTER_MEET, node.getHost(), node.getPort());
|
||||
syncFuture(f);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterSetSlot(RedisClusterNode node, int slot, AddSlots mode) {
|
||||
MasterSlaveEntry entry = getEntry(node);
|
||||
RFuture<Map<String, String>> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CLUSTER_SETSLOT, slot, mode);
|
||||
syncFuture(f);
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<List<String>> CLUSTER_GETKEYSINSLOT = new RedisStrictCommand<List<String>>("CLUSTER", "GETKEYSINSLOT", new ObjectListReplayDecoder<String>());
|
||||
|
||||
@Override
|
||||
public List<byte[]> clusterGetKeysInSlot(int slot, Integer count) {
|
||||
RFuture<List<byte[]>> f = executorService.readAsync((String)null, ByteArrayCodec.INSTANCE, CLUSTER_GETKEYSINSLOT, slot, count);
|
||||
return syncFuture(f);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterReplicate(RedisClusterNode master, RedisClusterNode slave) {
|
||||
MasterSlaveEntry entry = getEntry(master);
|
||||
RFuture<Long> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CLUSTER_REPLICATE, slave.getId());
|
||||
syncFuture(f);
|
||||
}
|
||||
|
||||
// @Override
|
||||
// public String ping(RedisClusterNode node) {
|
||||
// return execute(node, RedisCommands.PING);
|
||||
// }
|
||||
|
||||
@Override
|
||||
public void bgReWriteAof(RedisClusterNode node) {
|
||||
execute(node, RedisCommands.BGREWRITEAOF);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void bgSave(RedisClusterNode node) {
|
||||
execute(node, RedisCommands.BGSAVE);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long lastSave(RedisClusterNode node) {
|
||||
return execute(node, RedisCommands.LASTSAVE);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void save(RedisClusterNode node) {
|
||||
execute(node, RedisCommands.SAVE);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long dbSize(RedisClusterNode node) {
|
||||
return execute(node, RedisCommands.DBSIZE);
|
||||
}
|
||||
|
||||
private <T> T execute(RedisClusterNode node, RedisCommand<T> command) {
|
||||
MasterSlaveEntry entry = getEntry(node);
|
||||
RFuture<T> f = executorService.writeAsync(entry, StringCodec.INSTANCE, command);
|
||||
return syncFuture(f);
|
||||
}
|
||||
|
||||
protected MasterSlaveEntry getEntry(RedisClusterNode node) {
|
||||
MasterSlaveEntry entry = executorService.getConnectionManager().getEntry(new InetSocketAddress(node.getHost(), node.getPort()));
|
||||
return entry;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void flushDb(RedisClusterNode node) {
|
||||
execute(node, RedisCommands.FLUSHDB);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void flushAll(RedisClusterNode node) {
|
||||
execute(node, RedisCommands.FLUSHALL);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Properties info(RedisClusterNode node) {
|
||||
Map<String, String> info = execute(node, RedisCommands.INFO_ALL);
|
||||
Properties result = new Properties();
|
||||
for (Entry<String, String> entry : info.entrySet()) {
|
||||
result.setProperty(entry.getKey(), entry.getValue());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Properties info(RedisClusterNode node, String section) {
|
||||
RedisStrictCommand<Map<String, String>> command = new RedisStrictCommand<Map<String, String>>("INFO", section, new StringMapDataDecoder());
|
||||
|
||||
Map<String, String> info = execute(node, command);
|
||||
Properties result = new Properties();
|
||||
for (Entry<String, String> entry : info.entrySet()) {
|
||||
result.setProperty(entry.getKey(), entry.getValue());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
private final RedisStrictCommand<List<byte[]>> KEYS = new RedisStrictCommand<>("KEYS");
|
||||
|
||||
@Override
|
||||
public Set<byte[]> keys(RedisClusterNode node, byte[] pattern) {
|
||||
MasterSlaveEntry entry = getEntry(node);
|
||||
RFuture<Collection<byte[]>> f = executorService.readAsync(entry, ByteArrayCodec.INSTANCE, KEYS, pattern);
|
||||
Collection<byte[]> keys = syncFuture(f);
|
||||
return new HashSet<>(keys);
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] randomKey(RedisClusterNode node) {
|
||||
MasterSlaveEntry entry = getEntry(node);
|
||||
RFuture<byte[]> f = executorService.readRandomAsync(entry, ByteArrayCodec.INSTANCE, RedisCommands.RANDOM_KEY);
|
||||
return syncFuture(f);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void shutdown(RedisClusterNode node) {
|
||||
MasterSlaveEntry entry = getEntry(node);
|
||||
RFuture<Void> f = executorService.readAsync(entry, ByteArrayCodec.INSTANCE, RedisCommands.SHUTDOWN);
|
||||
syncFuture(f);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Properties getConfig(RedisClusterNode node, String pattern) {
|
||||
MasterSlaveEntry entry = getEntry(node);
|
||||
RFuture<List<String>> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CONFIG_GET, pattern);
|
||||
List<String> r = syncFuture(f);
|
||||
if (r != null) {
|
||||
return Converters.toProperties(r);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setConfig(RedisClusterNode node, String param, String value) {
|
||||
MasterSlaveEntry entry = getEntry(node);
|
||||
RFuture<Void> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CONFIG_SET, param, value);
|
||||
syncFuture(f);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void resetConfigStats(RedisClusterNode node) {
|
||||
MasterSlaveEntry entry = getEntry(node);
|
||||
RFuture<Void> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CONFIG_RESETSTAT);
|
||||
syncFuture(f);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long time(RedisClusterNode node) {
|
||||
MasterSlaveEntry entry = getEntry(node);
|
||||
RFuture<Long> f = executorService.readAsync(entry, LongCodec.INSTANCE, RedisCommands.TIME_LONG);
|
||||
return syncFuture(f);
|
||||
}
|
||||
|
||||
private static final StringToRedisClientInfoConverter CONVERTER = new StringToRedisClientInfoConverter();
|
||||
|
||||
@Override
|
||||
public List<RedisClientInfo> getClientList(RedisClusterNode node) {
|
||||
MasterSlaveEntry entry = getEntry(node);
|
||||
RFuture<List<String>> f = executorService.readAsync(entry, StringCodec.INSTANCE, RedisCommands.CLIENT_LIST);
|
||||
List<String> list = syncFuture(f);
|
||||
return CONVERTER.convert(list.toArray(new String[list.size()]));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Cursor<byte[]> scan(RedisClusterNode node, ScanOptions options) {
|
||||
return new ScanCursor<byte[]>(0, options) {
|
||||
|
||||
private RedisClient client;
|
||||
private MasterSlaveEntry entry = getEntry(node);
|
||||
|
||||
@Override
|
||||
protected ScanIteration<byte[]> doScan(long cursorId, ScanOptions options) {
|
||||
if (isQueueing() || isPipelined()) {
|
||||
throw new UnsupportedOperationException("'SSCAN' cannot be called in pipeline / transaction mode.");
|
||||
}
|
||||
|
||||
if (entry == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
List<Object> args = new ArrayList<Object>();
|
||||
// to avoid negative value
|
||||
cursorId = Math.max(cursorId, 0);
|
||||
args.add(cursorId);
|
||||
if (options.getPattern() != null) {
|
||||
args.add("MATCH");
|
||||
args.add(options.getPattern());
|
||||
}
|
||||
if (options.getCount() != null) {
|
||||
args.add("COUNT");
|
||||
args.add(options.getCount());
|
||||
}
|
||||
|
||||
RFuture<ListScanResult<byte[]>> f = executorService.readAsync(client, entry, ByteArrayCodec.INSTANCE, RedisCommands.SCAN, args.toArray());
|
||||
ListScanResult<byte[]> res = syncFuture(f);
|
||||
long pos = res.getPos();
|
||||
client = res.getRedisClient();
|
||||
if (pos == 0) {
|
||||
entry = null;
|
||||
}
|
||||
|
||||
return new ScanIteration<byte[]>(pos, res.getValues());
|
||||
}
|
||||
}.open();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void rename(byte[] oldName, byte[] newName) {
|
||||
|
||||
if (isPipelined()) {
|
||||
throw new InvalidDataAccessResourceUsageException("Clustered rename is not supported in a pipeline");
|
||||
}
|
||||
|
||||
if (redisson.getConnectionManager().calcSlot(oldName) == redisson.getConnectionManager().calcSlot(newName)) {
|
||||
super.rename(oldName, newName);
|
||||
return;
|
||||
}
|
||||
|
||||
final byte[] value = dump(oldName);
|
||||
|
||||
if (null != value) {
|
||||
|
||||
final Long sourceTtlInSeconds = ttl(oldName);
|
||||
|
||||
final long ttlInMilliseconds;
|
||||
if (null != sourceTtlInSeconds && sourceTtlInSeconds > 0) {
|
||||
ttlInMilliseconds = sourceTtlInSeconds * 1000;
|
||||
} else {
|
||||
ttlInMilliseconds = 0;
|
||||
}
|
||||
|
||||
restore(newName, ttlInMilliseconds, value);
|
||||
del(oldName);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Boolean renameNX(byte[] oldName, byte[] newName) {
|
||||
if (isPipelined()) {
|
||||
throw new InvalidDataAccessResourceUsageException("Clustered rename is not supported in a pipeline");
|
||||
}
|
||||
|
||||
if (redisson.getConnectionManager().calcSlot(oldName) == redisson.getConnectionManager().calcSlot(newName)) {
|
||||
return super.renameNX(oldName, newName);
|
||||
}
|
||||
|
||||
final byte[] value = dump(oldName);
|
||||
|
||||
if (null != value && !exists(newName)) {
|
||||
|
||||
final Long sourceTtlInSeconds = ttl(oldName);
|
||||
|
||||
final long ttlInMilliseconds;
|
||||
if (null != sourceTtlInSeconds && sourceTtlInSeconds > 0) {
|
||||
ttlInMilliseconds = sourceTtlInSeconds * 1000;
|
||||
} else {
|
||||
ttlInMilliseconds = 0;
|
||||
}
|
||||
|
||||
restore(newName, ttlInMilliseconds, value);
|
||||
del(oldName);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long del(byte[]... keys) {
|
||||
if (isQueueing() || isPipelined()) {
|
||||
for (byte[] key: keys) {
|
||||
write(key, LongCodec.INSTANCE, RedisCommands.DEL, key);
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
CommandBatchService es = new CommandBatchService(executorService);
|
||||
for (byte[] key: keys) {
|
||||
es.writeAsync(key, StringCodec.INSTANCE, RedisCommands.DEL, key);
|
||||
}
|
||||
BatchResult<Long> b = (BatchResult<Long>) es.execute();
|
||||
return b.getResponses().stream().collect(Collectors.summarizingLong(v -> v)).getSum();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<byte[]> mGet(byte[]... keys) {
|
||||
if (isQueueing() || isPipelined()) {
|
||||
for (byte[] key : keys) {
|
||||
read(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
CommandBatchService es = new CommandBatchService(executorService);
|
||||
for (byte[] key: keys) {
|
||||
es.readAsync(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key);
|
||||
}
|
||||
BatchResult<byte[]> r = (BatchResult<byte[]>) es.execute();
|
||||
return r.getResponses();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Boolean mSet(Map<byte[], byte[]> tuple) {
|
||||
if (isQueueing() || isPipelined()) {
|
||||
for (Entry<byte[], byte[]> entry: tuple.entrySet()) {
|
||||
write(entry.getKey(), StringCodec.INSTANCE, RedisCommands.SET, entry.getKey(), entry.getValue());
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
CommandBatchService es = new CommandBatchService(executorService);
|
||||
for (Entry<byte[], byte[]> entry: tuple.entrySet()) {
|
||||
es.writeAsync(entry.getKey(), StringCodec.INSTANCE, RedisCommands.SET, entry.getKey(), entry.getValue());
|
||||
}
|
||||
es.execute();
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public RedisClusterCommands clusterCommands() {
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public RedisClusterServerCommands serverCommands() {
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String ping(RedisClusterNode node) {
|
||||
MasterSlaveEntry entry = getEntry(node);
|
||||
RFuture<String> f = executorService.readAsync(entry, LongCodec.INSTANCE, RedisCommands.PING);
|
||||
return syncFuture(f);
|
||||
}
|
||||
|
||||
|
||||
}
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,170 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.redisson.Redisson;
|
||||
import org.redisson.RedissonKeys;
|
||||
import org.redisson.RedissonReactive;
|
||||
import org.redisson.api.RedissonClient;
|
||||
import org.redisson.client.RedisClient;
|
||||
import org.redisson.client.protocol.RedisCommands;
|
||||
import org.redisson.config.Config;
|
||||
import org.redisson.connection.SentinelConnectionManager;
|
||||
import org.redisson.reactive.CommandReactiveService;
|
||||
import org.springframework.beans.factory.DisposableBean;
|
||||
import org.springframework.beans.factory.InitializingBean;
|
||||
import org.springframework.dao.DataAccessException;
|
||||
import org.springframework.dao.InvalidDataAccessResourceUsageException;
|
||||
import org.springframework.data.redis.ExceptionTranslationStrategy;
|
||||
import org.springframework.data.redis.PassThroughExceptionTranslationStrategy;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisClusterConnection;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnectionFactory;
|
||||
import org.springframework.data.redis.connection.RedisClusterConnection;
|
||||
import org.springframework.data.redis.connection.RedisConnection;
|
||||
import org.springframework.data.redis.connection.RedisConnectionFactory;
|
||||
import org.springframework.data.redis.connection.RedisSentinelConnection;
|
||||
|
||||
/**
|
||||
* Redisson based connection factory
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonConnectionFactory implements RedisConnectionFactory,
|
||||
ReactiveRedisConnectionFactory, InitializingBean, DisposableBean {
|
||||
|
||||
private final static Log log = LogFactory.getLog(RedissonConnectionFactory.class);
|
||||
|
||||
public static final ExceptionTranslationStrategy EXCEPTION_TRANSLATION =
|
||||
new PassThroughExceptionTranslationStrategy(new RedissonExceptionConverter());
|
||||
|
||||
private Config config;
|
||||
private RedissonClient redisson;
|
||||
private boolean hasOwnRedisson;
|
||||
|
||||
/**
|
||||
* Creates factory with default Redisson configuration
|
||||
*/
|
||||
public RedissonConnectionFactory() {
|
||||
this(Redisson.create());
|
||||
hasOwnRedisson = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates factory with defined Redisson instance
|
||||
*
|
||||
* @param redisson - Redisson instance
|
||||
*/
|
||||
public RedissonConnectionFactory(RedissonClient redisson) {
|
||||
this.redisson = redisson;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates factory with defined Redisson config
|
||||
*
|
||||
* @param config - Redisson config
|
||||
*/
|
||||
public RedissonConnectionFactory(Config config) {
|
||||
super();
|
||||
this.config = config;
|
||||
hasOwnRedisson = true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataAccessException translateExceptionIfPossible(RuntimeException ex) {
|
||||
return EXCEPTION_TRANSLATION.translate(ex);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void destroy() throws Exception {
|
||||
if (hasOwnRedisson) {
|
||||
redisson.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void afterPropertiesSet() throws Exception {
|
||||
if (config != null) {
|
||||
redisson = Redisson.create(config);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public RedisConnection getConnection() {
|
||||
if (redisson.getConfig().isClusterConfig()) {
|
||||
return new RedissonClusterConnection(redisson);
|
||||
}
|
||||
return new RedissonConnection(redisson);
|
||||
}
|
||||
|
||||
@Override
|
||||
public RedisClusterConnection getClusterConnection() {
|
||||
if (!redisson.getConfig().isClusterConfig()) {
|
||||
throw new InvalidDataAccessResourceUsageException("Redisson is not in Cluster mode");
|
||||
}
|
||||
return new RedissonClusterConnection(redisson);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean getConvertPipelineAndTxResults() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public RedisSentinelConnection getSentinelConnection() {
|
||||
if (!redisson.getConfig().isSentinelConfig()) {
|
||||
throw new InvalidDataAccessResourceUsageException("Redisson is not in Sentinel mode");
|
||||
}
|
||||
|
||||
SentinelConnectionManager manager = (SentinelConnectionManager)(((Redisson)redisson).getConnectionManager());
|
||||
for (RedisClient client : manager.getSentinels()) {
|
||||
org.redisson.client.RedisConnection connection = client.connect();
|
||||
try {
|
||||
String res = connection.sync(RedisCommands.PING);
|
||||
if ("pong".equalsIgnoreCase(res)) {
|
||||
return new RedissonSentinelConnection(connection);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.warn("Can't connect to " + client, e);
|
||||
connection.closeAsync();
|
||||
}
|
||||
}
|
||||
|
||||
throw new InvalidDataAccessResourceUsageException("Sentinels are not found");
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveRedisConnection getReactiveConnection() {
|
||||
if (redisson.getConfig().isClusterConfig()) {
|
||||
return new RedissonReactiveRedisClusterConnection(((RedissonReactive)redisson.reactive()).getCommandExecutor());
|
||||
}
|
||||
|
||||
return new RedissonReactiveRedisConnection(((RedissonReactive)redisson.reactive()).getCommandExecutor());
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveRedisClusterConnection getReactiveClusterConnection() {
|
||||
if (!redisson.getConfig().isClusterConfig()) {
|
||||
throw new InvalidDataAccessResourceUsageException("Redisson is not in Cluster mode");
|
||||
}
|
||||
|
||||
return new RedissonReactiveRedisClusterConnection(((RedissonReactive)redisson.reactive()).getCommandExecutor());
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,62 @@
/**
 * Copyright (c) 2013-2022 Nikita Koksharov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.redisson.spring.data.connection;

import org.redisson.client.RedisConnectionException;
import org.redisson.client.RedisException;
import org.redisson.client.RedisRedirectException;
import org.redisson.client.RedisTimeoutException;
import org.springframework.core.convert.converter.Converter;
import org.springframework.dao.DataAccessException;
import org.springframework.dao.InvalidDataAccessApiUsageException;
import org.springframework.dao.QueryTimeoutException;
import org.springframework.data.redis.ClusterRedirectException;
import org.springframework.data.redis.RedisConnectionFailureException;

/**
 * Converts Redisson exceptions into Spring-compatible DataAccessException instances
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonExceptionConverter implements Converter<Exception, DataAccessException> {

    @Override
    public DataAccessException convert(Exception source) {
        if (source instanceof RedisConnectionException) {
            return new RedisConnectionFailureException(source.getMessage(), source);
        }
        if (source instanceof RedisRedirectException) {
            RedisRedirectException ex = (RedisRedirectException) source;
            return new ClusterRedirectException(ex.getSlot(), ex.getUrl().getHost(), ex.getUrl().getPort(), source);
        }

        if (source instanceof RedisTimeoutException) {
            return new QueryTimeoutException(source.getMessage(), source);
        }

        if (source instanceof RedisException) {
            return new InvalidDataAccessApiUsageException(source.getMessage(), source);
        }

        if (source instanceof DataAccessException) {
            return (DataAccessException) source;
        }

        return null;
    }

}
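A quick illustration of the mapping above (hypothetical snippet; the message text is arbitrary): a Redisson RedisTimeoutException is surfaced to callers as Spring's QueryTimeoutException, so failures can be handled through the standard DataAccessException hierarchy.

import org.redisson.client.RedisTimeoutException;
import org.redisson.spring.data.connection.RedissonExceptionConverter;
import org.springframework.dao.DataAccessException;

public class ExceptionConverterSketch {
    public static void main(String[] args) {
        RedissonExceptionConverter converter = new RedissonExceptionConverter();
        // Arbitrary message; only the exception type drives the mapping above.
        DataAccessException converted = converter.convert(new RedisTimeoutException("Command execution timed out"));
        System.out.println(converted.getClass().getSimpleName()); // QueryTimeoutException
    }
}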
@ -0,0 +1,32 @@
/**
 * Copyright (c) 2013-2022 Nikita Koksharov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.redisson.spring.data.connection;

import org.redisson.reactive.CommandReactiveExecutor;
import org.springframework.data.redis.connection.ReactiveClusterGeoCommands;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonReactiveClusterGeoCommands extends RedissonReactiveGeoCommands implements ReactiveClusterGeoCommands {

    RedissonReactiveClusterGeoCommands(CommandReactiveExecutor executorService) {
        super(executorService);
    }

}
@ -0,0 +1,32 @@
/**
 * Copyright (c) 2013-2022 Nikita Koksharov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.redisson.spring.data.connection;

import org.redisson.reactive.CommandReactiveExecutor;
import org.springframework.data.redis.connection.ReactiveClusterHashCommands;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonReactiveClusterHashCommands extends RedissonReactiveHashCommands implements ReactiveClusterHashCommands {

    RedissonReactiveClusterHashCommands(CommandReactiveExecutor executorService) {
        super(executorService);
    }

}
@ -0,0 +1,32 @@
/**
 * Copyright (c) 2013-2022 Nikita Koksharov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.redisson.spring.data.connection;

import org.redisson.reactive.CommandReactiveExecutor;
import org.springframework.data.redis.connection.ReactiveClusterHyperLogLogCommands;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonReactiveClusterHyperLogLogCommands extends RedissonReactiveHyperLogLogCommands implements ReactiveClusterHyperLogLogCommands {

    RedissonReactiveClusterHyperLogLogCommands(CommandReactiveExecutor executorService) {
        super(executorService);
    }

}
@ -0,0 +1,143 @@
/**
 * Copyright (c) 2013-2022 Nikita Koksharov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.redisson.spring.data.connection;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

import org.reactivestreams.Publisher;
import org.redisson.client.codec.ByteArrayCodec;
import org.redisson.client.codec.StringCodec;
import org.redisson.client.protocol.RedisCommands;
import org.redisson.connection.MasterSlaveEntry;
import org.redisson.misc.CompletableFutureWrapper;
import org.redisson.reactive.CommandReactiveExecutor;
import org.springframework.data.redis.connection.ReactiveClusterKeyCommands;
import org.springframework.data.redis.connection.ReactiveRedisConnection;
import org.springframework.data.redis.connection.ReactiveRedisConnection.BooleanResponse;
import org.springframework.data.redis.connection.RedisClusterNode;
import org.springframework.util.Assert;

import io.netty.util.CharsetUtil;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuple2;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonReactiveClusterKeyCommands extends RedissonReactiveKeyCommands implements ReactiveClusterKeyCommands {

    public RedissonReactiveClusterKeyCommands(CommandReactiveExecutor executorService) {
        super(executorService);
    }

    @Override
    public Mono<List<ByteBuffer>> keys(RedisClusterNode node, ByteBuffer pattern) {
        Mono<List<String>> m = executorService.reactive(() -> {
            List<CompletableFuture<List<String>>> futures = executorService.readAllAsync(StringCodec.INSTANCE, RedisCommands.KEYS, toByteArray(pattern));
            CompletableFuture<Void> ff = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
            CompletableFuture<List<String>> future = ff.thenApply(r -> {
                return futures.stream().flatMap(f -> f.getNow(new ArrayList<>()).stream()).collect(Collectors.toList());
            }).toCompletableFuture();
            return new CompletableFutureWrapper<>(future);
        });
        return m.map(v -> v.stream().map(t -> ByteBuffer.wrap(t.getBytes(CharsetUtil.UTF_8))).collect(Collectors.toList()));
    }

    @Override
    public Mono<ByteBuffer> randomKey(RedisClusterNode node) {
        MasterSlaveEntry entry = getEntry(node);
        Mono<byte[]> m = executorService.reactive(() -> {
            return executorService.readRandomAsync(entry, ByteArrayCodec.INSTANCE, RedisCommands.RANDOM_KEY);
        });
        return m.map(v -> ByteBuffer.wrap(v));
    }

    @Override
    public Flux<BooleanResponse<RenameCommand>> rename(Publisher<RenameCommand> commands) {

        return execute(commands, command -> {
            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getNewKey(), "New name must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());
            byte[] newKeyBuf = toByteArray(command.getNewKey());

            if (executorService.getConnectionManager().calcSlot(keyBuf) == executorService.getConnectionManager().calcSlot(newKeyBuf)) {
                return super.rename(commands);
            }

            return read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, keyBuf)
                    .filter(Objects::nonNull)
                    .zipWith(
                            Mono.defer(() -> pTtl(command.getKey())
                                    .filter(Objects::nonNull)
                                    .map(ttl -> Math.max(0, ttl))
                                    .switchIfEmpty(Mono.just(0L))
                            )
                    )
                    .flatMap(valueAndTtl -> {
                        return write(newKeyBuf, StringCodec.INSTANCE, RedisCommands.RESTORE, newKeyBuf, valueAndTtl.getT2(), valueAndTtl.getT1());
                    })
                    .thenReturn(new BooleanResponse<>(command, true))
                    .doOnSuccess((ignored) -> del(command.getKey()));
        });
    }

    @Override
    public Flux<ReactiveRedisConnection.BooleanResponse<RenameCommand>> renameNX(Publisher<RenameCommand> commands) {
        return execute(commands, command -> {
            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getNewKey(), "New name must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());
            byte[] newKeyBuf = toByteArray(command.getNewKey());

            if (executorService.getConnectionManager().calcSlot(keyBuf) == executorService.getConnectionManager().calcSlot(newKeyBuf)) {
                return super.renameNX(commands);
            }

            return exists(command.getNewKey())
                    .zipWith(read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, keyBuf))
                    .filter(newKeyExistsAndDump -> !newKeyExistsAndDump.getT1() && Objects.nonNull(newKeyExistsAndDump.getT2()))
                    .map(Tuple2::getT2)
                    .zipWhen(value ->
                            pTtl(command.getKey())
                                    .filter(Objects::nonNull)
                                    .map(ttl -> Math.max(0, ttl))
                                    .switchIfEmpty(Mono.just(0L))
                    )
                    .flatMap(valueAndTtl -> write(newKeyBuf, StringCodec.INSTANCE, RedisCommands.RESTORE, newKeyBuf, valueAndTtl.getT2(), valueAndTtl.getT1())
                            .then(Mono.just(true)))
                    .switchIfEmpty(Mono.just(false))
                    .doOnSuccess(didRename -> {
                        if (didRename) {
                            del(command.getKey());
                        }
                    })
                    .map(didRename -> new BooleanResponse<>(command, didRename));
        });
    }

}
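For context, a hedged usage sketch of the cross-slot rename path above: when the old and new keys hash to different cluster slots, a plain RENAME is not possible, so the implementation emulates it with DUMP + PTTL + RESTORE + DEL. Key names below are placeholders.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.springframework.data.redis.connection.ReactiveRedisConnection;

public class ClusterRenameSketch {

    private static ByteBuffer buf(String value) {
        return ByteBuffer.wrap(value.getBytes(StandardCharsets.UTF_8));
    }

    public static void rename(ReactiveRedisConnection connection) {
        // If "orders:1" and "archive:orders:1" land in different slots, the command
        // is emulated via DUMP/RESTORE as implemented above; otherwise plain RENAME is used.
        connection.keyCommands()
                .rename(buf("orders:1"), buf("archive:orders:1"))
                .subscribe(ok -> System.out.println("renamed: " + ok));
    }
}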
@ -0,0 +1,32 @@
/**
 * Copyright (c) 2013-2022 Nikita Koksharov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.redisson.spring.data.connection;

import org.redisson.reactive.CommandReactiveExecutor;
import org.springframework.data.redis.connection.ReactiveClusterListCommands;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonReactiveClusterListCommands extends RedissonReactiveListCommands implements ReactiveClusterListCommands {

    RedissonReactiveClusterListCommands(CommandReactiveExecutor executorService) {
        super(executorService);
    }

}
@ -0,0 +1,32 @@
/**
 * Copyright (c) 2013-2022 Nikita Koksharov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.redisson.spring.data.connection;

import org.redisson.reactive.CommandReactiveExecutor;
import org.springframework.data.redis.connection.ReactiveClusterNumberCommands;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonReactiveClusterNumberCommands extends RedissonReactiveNumberCommands implements ReactiveClusterNumberCommands {

    public RedissonReactiveClusterNumberCommands(CommandReactiveExecutor executorService) {
        super(executorService);
    }

}
@ -0,0 +1,153 @@
/**
 * Copyright (c) 2013-2022 Nikita Koksharov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.redisson.spring.data.connection;

import java.util.List;
import java.util.Properties;

import org.redisson.client.codec.StringCodec;
import org.redisson.client.protocol.RedisCommands;
import org.redisson.client.protocol.RedisStrictCommand;
import org.redisson.connection.MasterSlaveEntry;
import org.redisson.reactive.CommandReactiveExecutor;
import org.springframework.data.redis.connection.ReactiveClusterServerCommands;
import org.springframework.data.redis.connection.RedisClusterNode;
import org.springframework.data.redis.connection.RedisServerCommands;
import org.springframework.data.redis.connection.convert.StringToRedisClientInfoConverter;
import org.springframework.data.redis.core.types.RedisClientInfo;

import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonReactiveClusterServerCommands extends RedissonReactiveServerCommands implements ReactiveClusterServerCommands {

    RedissonReactiveClusterServerCommands(CommandReactiveExecutor executorService) {
        super(executorService);
    }

    @Override
    public Mono<String> bgReWriteAof(RedisClusterNode node) {
        return execute(node, BGREWRITEAOF);
    }

    @Override
    public Mono<String> bgSave(RedisClusterNode node) {
        return execute(node, BGSAVE);
    }

    @Override
    public Mono<Long> lastSave(RedisClusterNode node) {
        return execute(node, RedisCommands.LASTSAVE);
    }

    @Override
    public Mono<String> save(RedisClusterNode node) {
        return execute(node, SAVE);
    }

    @Override
    public Mono<Long> dbSize(RedisClusterNode node) {
        return execute(node, RedisCommands.DBSIZE);
    }

    private static final RedisStrictCommand<String> FLUSHDB = new RedisStrictCommand<String>("FLUSHDB");

    @Override
    public Mono<String> flushDb(RedisClusterNode node) {
        return execute(node, FLUSHDB);
    }

    private static final RedisStrictCommand<String> FLUSHALL = new RedisStrictCommand<String>("FLUSHALL");

    @Override
    public Mono<String> flushAll(RedisClusterNode node) {
        return execute(node, FLUSHALL);
    }

    @Override
    public Mono<String> flushDb(RedisClusterNode node, RedisServerCommands.FlushOption option) {
        if (option == RedisServerCommands.FlushOption.ASYNC) {
            return execute(node, FLUSHDB, option.toString());
        }
        return execute(node, FLUSHDB);
    }

    @Override
    public Mono<String> flushAll(RedisClusterNode node, RedisServerCommands.FlushOption option) {
        if (option == RedisServerCommands.FlushOption.ASYNC) {
            return execute(node, FLUSHALL, option.toString());
        }
        return execute(node, FLUSHALL);
    }

    @Override
    public Mono<Properties> info() {
        return read(null, StringCodec.INSTANCE, INFO_DEFAULT);
    }

    @Override
    public Mono<Properties> info(String section) {
        return read(null, StringCodec.INSTANCE, INFO, section);
    }

    @Override
    public Mono<Properties> info(RedisClusterNode node) {
        return execute(node, INFO_DEFAULT);
    }

    @Override
    public Mono<Properties> info(RedisClusterNode node, String section) {
        return execute(node, INFO, section);
    }

    @Override
    public Mono<Properties> getConfig(RedisClusterNode node, String pattern) {
        return execute(node, CONFIG_GET, pattern);
    }

    @Override
    public Mono<String> setConfig(RedisClusterNode node, String param, String value) {
        return execute(node, CONFIG_SET, param, value);
    }

    @Override
    public Mono<String> resetConfigStats(RedisClusterNode node) {
        return execute(node, CONFIG_RESETSTAT);
    }

    @Override
    public Mono<Long> time(RedisClusterNode node) {
        return execute(node, TIME);
    }

    private static final StringToRedisClientInfoConverter CONVERTER = new StringToRedisClientInfoConverter();

    @Override
    public Flux<RedisClientInfo> getClientList(RedisClusterNode node) {
        MasterSlaveEntry entry = getEntry(node);
        Mono<List<String>> m = executorService.reactive(() -> {
            return executorService.readAsync(entry, StringCodec.INSTANCE, RedisCommands.CLIENT_LIST);
        });
        return m.flatMapMany(s -> Flux.fromIterable(CONVERTER.convert(s.toArray(new String[s.size()]))));
    }

}
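A brief, hedged example of addressing a single node through the per-node server commands above; the host and port are placeholders.

import org.springframework.data.redis.connection.ReactiveRedisClusterConnection;
import org.springframework.data.redis.connection.RedisClusterNode;

public class ClusterServerCommandsSketch {

    public static void printVersion(ReactiveRedisClusterConnection connection) {
        RedisClusterNode node = new RedisClusterNode("127.0.0.1", 7000); // placeholder node
        connection.serverCommands()
                .info(node) // routed to the matching MasterSlaveEntry via getEntry(node)
                .subscribe(props -> System.out.println(props.getProperty("redis_version")));
    }
}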
@ -0,0 +1,32 @@
/**
 * Copyright (c) 2013-2022 Nikita Koksharov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.redisson.spring.data.connection;

import org.redisson.reactive.CommandReactiveExecutor;
import org.springframework.data.redis.connection.ReactiveClusterSetCommands;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonReactiveClusterSetCommands extends RedissonReactiveSetCommands implements ReactiveClusterSetCommands {

    RedissonReactiveClusterSetCommands(CommandReactiveExecutor executorService) {
        super(executorService);
    }

}
@ -0,0 +1,31 @@
/**
 * Copyright (c) 2013-2022 Nikita Koksharov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.redisson.spring.data.connection;

import org.redisson.reactive.CommandReactiveExecutor;
import org.springframework.data.redis.connection.ReactiveClusterStreamCommands;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonReactiveClusterStreamCommands extends RedissonReactiveStreamCommands implements ReactiveClusterStreamCommands {

    RedissonReactiveClusterStreamCommands(CommandReactiveExecutor executorService) {
        super(executorService);
    }
}
@ -0,0 +1,32 @@
/**
 * Copyright (c) 2013-2022 Nikita Koksharov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.redisson.spring.data.connection;

import org.redisson.reactive.CommandReactiveExecutor;
import org.springframework.data.redis.connection.ReactiveClusterStringCommands;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonReactiveClusterStringCommands extends RedissonReactiveStringCommands implements ReactiveClusterStringCommands {

    RedissonReactiveClusterStringCommands(CommandReactiveExecutor executorService) {
        super(executorService);
    }

}
@ -0,0 +1,35 @@
/**
 * Copyright (c) 2013-2022 Nikita Koksharov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.redisson.spring.data.connection;

import org.reactivestreams.Publisher;
import org.redisson.reactive.CommandReactiveExecutor;
import org.springframework.data.redis.connection.ReactiveClusterZSetCommands;
import org.springframework.data.redis.connection.ReactiveRedisConnection;
import reactor.core.publisher.Flux;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonReactiveClusterZSetCommands extends RedissonReactiveZSetCommands implements ReactiveClusterZSetCommands {

    RedissonReactiveClusterZSetCommands(CommandReactiveExecutor executorService) {
        super(executorService);
    }

}
@ -0,0 +1,372 @@
/**
 * Copyright (c) 2013-2022 Nikita Koksharov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.redisson.spring.data.connection;

import io.netty.buffer.ByteBuf;
import org.reactivestreams.Publisher;
import org.redisson.client.codec.ByteArrayCodec;
import org.redisson.client.codec.DoubleCodec;
import org.redisson.client.codec.LongCodec;
import org.redisson.client.codec.StringCodec;
import org.redisson.client.protocol.RedisCommand;
import org.redisson.client.protocol.RedisCommands;
import org.redisson.client.protocol.decoder.*;
import org.redisson.reactive.CommandReactiveExecutor;
import org.springframework.data.geo.*;
import org.springframework.data.redis.connection.ReactiveGeoCommands;
import org.springframework.data.redis.connection.ReactiveRedisConnection.CommandResponse;
import org.springframework.data.redis.connection.ReactiveRedisConnection.MultiValueResponse;
import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse;
import org.springframework.data.redis.connection.RedisGeoCommands;
import org.springframework.data.redis.connection.RedisGeoCommands.GeoLocation;
import org.springframework.data.redis.connection.RedisGeoCommands.GeoRadiusCommandArgs;
import org.springframework.data.redis.domain.geo.BoxShape;
import org.springframework.data.redis.domain.geo.GeoReference;
import org.springframework.data.redis.domain.geo.RadiusShape;
import org.springframework.util.Assert;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonReactiveGeoCommands extends RedissonBaseReactive implements ReactiveGeoCommands {

    RedissonReactiveGeoCommands(CommandReactiveExecutor executorService) {
        super(executorService);
    }

    @Override
    public Flux<NumericResponse<GeoAddCommand, Long>> geoAdd(Publisher<GeoAddCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getGeoLocations(), "Locations must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());

            List<Object> args = new ArrayList<Object>();
            args.add(keyBuf);
            for (GeoLocation<ByteBuffer> location : command.getGeoLocations()) {
                args.add(location.getPoint().getX());
                args.add(location.getPoint().getY());
                args.add(toByteArray(location.getName()));
            }

            Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.GEOADD, args.toArray());
            return m.map(v -> new NumericResponse<>(command, v));
        });
    }

    @Override
    public Flux<CommandResponse<GeoDistCommand, Distance>> geoDist(Publisher<GeoDistCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getFrom(), "From member must not be null!");
            Assert.notNull(command.getTo(), "To member must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());
            byte[] fromBuf = toByteArray(command.getFrom());
            byte[] toBuf = toByteArray(command.getTo());

            Metric metric = RedisGeoCommands.DistanceUnit.METERS;
            if (command.getMetric().isPresent()) {
                metric = command.getMetric().get();
            }

            Mono<Distance> m = write(keyBuf, DoubleCodec.INSTANCE, new RedisCommand<Distance>("GEODIST", new DistanceConvertor(metric)),
                    keyBuf, fromBuf, toBuf, metric.getAbbreviation());
            return m.map(v -> new CommandResponse<>(command, v));
        });
    }

    private static final RedisCommand<List<Object>> GEOHASH = new RedisCommand<List<Object>>("GEOHASH", new ObjectListReplayDecoder<Object>());

    @Override
    public Flux<MultiValueResponse<GeoHashCommand, String>> geoHash(Publisher<GeoHashCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getMembers(), "Members must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());
            List<Object> args = new ArrayList<Object>(command.getMembers().size() + 1);
            args.add(keyBuf);
            args.addAll(command.getMembers().stream().map(buf -> toByteArray(buf)).collect(Collectors.toList()));

            Mono<List<String>> m = read(keyBuf, StringCodec.INSTANCE, GEOHASH, args.toArray());
            return m.map(v -> new MultiValueResponse<>(command, v));
        });
    }

    private final MultiDecoder<Map<Object, Object>> geoDecoder = new ListMultiDecoder2(new ObjectListReplayDecoder2(), new PointDecoder());

    @Override
    public Flux<MultiValueResponse<GeoPosCommand, Point>> geoPos(Publisher<GeoPosCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getMembers(), "Members must not be null!");

            RedisCommand<Map<Object, Object>> cmd = new RedisCommand<Map<Object, Object>>("GEOPOS", geoDecoder);

            byte[] keyBuf = toByteArray(command.getKey());
            List<Object> args = new ArrayList<Object>(command.getMembers().size() + 1);
            args.add(keyBuf);
            args.addAll(command.getMembers().stream().map(buf -> toByteArray(buf)).collect(Collectors.toList()));

            Mono<List<Point>> m = read(keyBuf, StringCodec.INSTANCE, cmd, args.toArray());
            return m.map(v -> new MultiValueResponse<>(command, v));
        });
    }

    private final MultiDecoder<GeoResults<GeoLocation<ByteBuffer>>> positionDecoder = new ListMultiDecoder2(new ByteBufferGeoResultsDecoder(), new CodecDecoder(), new PointDecoder(), new ObjectListReplayDecoder());

    @Override
    public Flux<CommandResponse<GeoRadiusCommand, Flux<GeoResult<GeoLocation<ByteBuffer>>>>> geoRadius(
            Publisher<GeoRadiusCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getPoint(), "Point must not be null!");
            Assert.notNull(command.getDistance(), "Distance must not be null!");

            GeoRadiusCommandArgs args = command.getArgs().orElse(GeoRadiusCommandArgs.newGeoRadiusArgs());
            byte[] keyBuf = toByteArray(command.getKey());

            List<Object> params = new ArrayList<Object>();
            params.add(keyBuf);
            params.add(BigDecimal.valueOf(command.getPoint().getX()).toPlainString());
            params.add(BigDecimal.valueOf(command.getPoint().getY()).toPlainString());
            params.add(command.getDistance().getValue());
            params.add(command.getDistance().getMetric().getAbbreviation());

            RedisCommand<GeoResults<GeoLocation<ByteBuffer>>> cmd;
            if (args.getFlags().contains(GeoRadiusCommandArgs.Flag.WITHCOORD)) {
                cmd = new RedisCommand<>("GEORADIUS_RO", positionDecoder);
                params.add("WITHCOORD");
            } else {
                MultiDecoder<GeoResults<GeoLocation<ByteBuffer>>> distanceDecoder = new ListMultiDecoder2(new ByteBufferGeoResultsDecoder(command.getDistance().getMetric()), new GeoDistanceDecoder());
                cmd = new RedisCommand<>("GEORADIUS_RO", distanceDecoder);
                params.add("WITHDIST");
            }

            if (args.getLimit() != null) {
                params.add("COUNT");
                params.add(args.getLimit());
            }
            if (args.getSortDirection() != null) {
                params.add(args.getSortDirection().name());
            }

            Mono<GeoResults<GeoLocation<ByteBuffer>>> m = read(keyBuf, ByteArrayCodec.INSTANCE, cmd, params.toArray());
            return m.map(v -> new CommandResponse<>(command, Flux.fromIterable(v.getContent())));
        });
    }

    @Override
    public Flux<CommandResponse<GeoRadiusByMemberCommand, Flux<GeoResult<GeoLocation<ByteBuffer>>>>> geoRadiusByMember(
            Publisher<GeoRadiusByMemberCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getMember(), "Member must not be null!");
            Assert.notNull(command.getDistance(), "Distance must not be null!");

            GeoRadiusCommandArgs args = command.getArgs().orElse(GeoRadiusCommandArgs.newGeoRadiusArgs());
            byte[] keyBuf = toByteArray(command.getKey());
            byte[] memberBuf = toByteArray(command.getMember());

            List<Object> params = new ArrayList<Object>();
            params.add(keyBuf);
            params.add(memberBuf);
            params.add(command.getDistance().getValue());
            params.add(command.getDistance().getMetric().getAbbreviation());

            RedisCommand<GeoResults<GeoLocation<ByteBuffer>>> cmd;
            if (args.getFlags().contains(GeoRadiusCommandArgs.Flag.WITHCOORD)) {
                cmd = new RedisCommand<>("GEORADIUSBYMEMBER_RO", positionDecoder);
                params.add("WITHCOORD");
            } else {
                MultiDecoder<GeoResults<GeoLocation<ByteBuffer>>> distanceDecoder = new ListMultiDecoder2(new ByteBufferGeoResultsDecoder(command.getDistance().getMetric()), new GeoDistanceDecoder());
                cmd = new RedisCommand<>("GEORADIUSBYMEMBER_RO", distanceDecoder);
                params.add("WITHDIST");
            }

            if (args.getLimit() != null) {
                params.add("COUNT");
                params.add(args.getLimit());
            }
            if (args.getSortDirection() != null) {
                params.add(args.getSortDirection().name());
            }

            Mono<GeoResults<GeoLocation<ByteBuffer>>> m = read(keyBuf, ByteArrayCodec.INSTANCE, cmd, params.toArray());
            return m.map(v -> new CommandResponse<>(command, Flux.fromIterable(v.getContent())));
        });
    }

    private String convert(double longitude) {
        return BigDecimal.valueOf(longitude).toPlainString();
    }

    private ByteBuf encode(Object value) {
        return executorService.encode(ByteArrayCodec.INSTANCE, value);
    }

    @Override
    public Flux<CommandResponse<GeoSearchCommand, Flux<GeoResult<GeoLocation<ByteBuffer>>>>> geoSearch(Publisher<GeoSearchCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getArgs(), "Args must not be null!");
            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getShape(), "Shape must not be null!");
            Assert.notNull(command.getReference(), "Reference must not be null!");

            List<Object> commandParams = new ArrayList<>();
            byte[] keyBuf = toByteArray(command.getKey());
            commandParams.add(keyBuf);

            if (command.getReference() instanceof GeoReference.GeoCoordinateReference) {
                GeoReference.GeoCoordinateReference ref = (GeoReference.GeoCoordinateReference) command.getReference();
                commandParams.add("FROMLONLAT");
                commandParams.add(convert(ref.getLongitude()));
                commandParams.add(convert(ref.getLatitude()));
            } else if (command.getReference() instanceof GeoReference.GeoMemberReference) {
                GeoReference.GeoMemberReference ref = (GeoReference.GeoMemberReference) command.getReference();
                commandParams.add("FROMMEMBER");
                commandParams.add(encode(ref.getMember()));
            }

            if (command.getShape() instanceof RadiusShape) {
                commandParams.add("BYRADIUS");
                RadiusShape shape = (RadiusShape) command.getShape();
                commandParams.add(shape.getRadius().getValue());
                commandParams.add(convert(shape.getMetric()).getAbbreviation());
            } else if (command.getShape() instanceof BoxShape) {
                BoxShape shape = (BoxShape) command.getShape();
                commandParams.add("BYBOX");
                commandParams.add(shape.getBoundingBox().getWidth().getValue());
                commandParams.add(shape.getBoundingBox().getHeight().getValue());
                commandParams.add(convert(shape.getMetric()).getAbbreviation());
            }

            RedisGeoCommands.GeoSearchCommandArgs args = command.getArgs()
                    .orElse(RedisGeoCommands.GeoSearchCommandArgs.newGeoSearchArgs());
            if (args.hasSortDirection()) {
                commandParams.add(args.getSortDirection());
            }
            if (args.getLimit() != null) {
                commandParams.add("COUNT");
                commandParams.add(args.getLimit());
                if (args.hasAnyLimit()) {
                    commandParams.add("ANY");
                }
            }
            RedisCommand<GeoResults<GeoLocation<ByteBuffer>>> cmd;
            if (args.getFlags().contains(GeoRadiusCommandArgs.Flag.WITHCOORD)) {
                cmd = new RedisCommand<>("GEOSEARCH", positionDecoder);
                commandParams.add("WITHCOORD");
            } else {
                MultiDecoder<GeoResults<GeoLocation<ByteBuffer>>> distanceDecoder = new ListMultiDecoder2(new ByteBufferGeoResultsDecoder(command.getShape().getMetric()), new GeoDistanceDecoder());
                cmd = new RedisCommand<>("GEOSEARCH", distanceDecoder);
                commandParams.add("WITHDIST");
            }

            Mono<GeoResults<GeoLocation<ByteBuffer>>> m = read(keyBuf, ByteArrayCodec.INSTANCE, cmd, commandParams.toArray());
            return m.map(v -> new CommandResponse<>(command, Flux.fromIterable(v.getContent())));
        });
    }

    @Override
    public Flux<NumericResponse<GeoSearchStoreCommand, Long>> geoSearchStore(Publisher<GeoSearchStoreCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getArgs(), "Args must not be null!");
            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getDestKey(), "DestKey must not be null!");
            Assert.notNull(command.getShape(), "Shape must not be null!");
            Assert.notNull(command.getReference(), "Reference must not be null!");

            List<Object> commandParams = new ArrayList<>();
            byte[] destKeyBuf = toByteArray(command.getDestKey());
            commandParams.add(destKeyBuf);
            byte[] keyBuf = toByteArray(command.getKey());
            commandParams.add(keyBuf);

            if (command.getReference() instanceof GeoReference.GeoCoordinateReference) {
                GeoReference.GeoCoordinateReference ref = (GeoReference.GeoCoordinateReference) command.getReference();
                commandParams.add("FROMLONLAT");
                commandParams.add(convert(ref.getLongitude()));
                commandParams.add(convert(ref.getLatitude()));
            } else if (command.getReference() instanceof GeoReference.GeoMemberReference) {
                GeoReference.GeoMemberReference ref = (GeoReference.GeoMemberReference) command.getReference();
                commandParams.add("FROMMEMBER");
                commandParams.add(encode(ref.getMember()));
            }

            if (command.getShape() instanceof RadiusShape) {
                RadiusShape shape = (RadiusShape) command.getShape();
                commandParams.add("BYRADIUS");
                commandParams.add(shape.getRadius().getValue());
                commandParams.add(convert(shape.getMetric()).getAbbreviation());
            } else if (command.getShape() instanceof BoxShape) {
                BoxShape shape = (BoxShape) command.getShape();
                commandParams.add("BYBOX");
                commandParams.add(shape.getBoundingBox().getWidth().getValue());
                commandParams.add(shape.getBoundingBox().getHeight().getValue());
                commandParams.add(convert(shape.getMetric()).getAbbreviation());
            }

            RedisGeoCommands.GeoSearchStoreCommandArgs args = command.getArgs()
                    .orElse(RedisGeoCommands.GeoSearchStoreCommandArgs.newGeoSearchStoreArgs());
            if (args.hasSortDirection()) {
                commandParams.add(args.getSortDirection());
            }
            if (args.getLimit() != null) {
                commandParams.add("COUNT");
                commandParams.add(args.getLimit());
                if (args.hasAnyLimit()) {
                    commandParams.add("ANY");
                }
            }
            if (args.isStoreDistance()) {
                commandParams.add("STOREDIST");
            }

            Mono<Long> m = write(keyBuf, LongCodec.INSTANCE, RedisCommands.GEOSEARCHSTORE_STORE, commandParams.toArray());
            return m.map(v -> new NumericResponse<>(command, v));
        });
    }

    private Metric convert(Metric metric) {
        if (metric == Metrics.NEUTRAL) {
            return RedisGeoCommands.DistanceUnit.METERS;
        }
        return metric;
    }

}
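A hedged usage sketch of the reactive GEO support above, going through the high-level ReactiveRedisConnection API; the key, member names and coordinates are examples only.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.springframework.data.geo.Point;
import org.springframework.data.redis.connection.ReactiveRedisConnection;

public class GeoCommandsSketch {

    private static ByteBuffer buf(String value) {
        return ByteBuffer.wrap(value.getBytes(StandardCharsets.UTF_8));
    }

    public static void demo(ReactiveRedisConnection connection) {
        connection.geoCommands()
                .geoAdd(buf("cities"), new Point(13.361389, 38.115556), buf("Palermo")) // GEOADD
                .then(connection.geoCommands().geoDist(buf("cities"), buf("Palermo"), buf("Catania"))) // GEODIST
                .subscribe(distance -> System.out.println(distance.getValue() + " " + distance.getUnit()));
    }
}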
@ -0,0 +1,302 @@
/**
 * Copyright (c) 2013-2022 Nikita Koksharov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.redisson.spring.data.connection;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

import org.reactivestreams.Publisher;
import org.redisson.ScanResult;
import org.redisson.api.RFuture;
import org.redisson.client.RedisClient;
import org.redisson.client.codec.ByteArrayCodec;
import org.redisson.client.codec.Codec;
import org.redisson.client.codec.StringCodec;
import org.redisson.client.handler.State;
import org.redisson.client.protocol.Decoder;
import org.redisson.client.protocol.RedisCommand;
import org.redisson.client.protocol.RedisCommands;
import org.redisson.client.protocol.decoder.MapScanResult;
import org.redisson.client.protocol.decoder.MultiDecoder;
import org.redisson.reactive.CommandReactiveExecutor;
import org.redisson.reactive.MapReactiveIterator;
import org.springframework.data.redis.connection.ReactiveHashCommands;
import org.springframework.data.redis.connection.ReactiveRedisConnection.BooleanResponse;
import org.springframework.data.redis.connection.ReactiveRedisConnection.CommandResponse;
import org.springframework.data.redis.connection.ReactiveRedisConnection.KeyCommand;
import org.springframework.data.redis.connection.ReactiveRedisConnection.KeyScanCommand;
import org.springframework.data.redis.connection.ReactiveRedisConnection.MultiValueResponse;
import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse;
import org.springframework.util.Assert;

import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonReactiveHashCommands extends RedissonBaseReactive implements ReactiveHashCommands {

    RedissonReactiveHashCommands(CommandReactiveExecutor executorService) {
        super(executorService);
    }

    private static final RedisCommand<String> HMSET = new RedisCommand<String>("HMSET");

    @Override
    public Flux<BooleanResponse<HSetCommand>> hSet(Publisher<HSetCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getFieldValueMap(), "FieldValueMap must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());
            if (command.getFieldValueMap().size() == 1) {
                Entry<ByteBuffer, ByteBuffer> entry = command.getFieldValueMap().entrySet().iterator().next();
                byte[] mapKeyBuf = toByteArray(entry.getKey());
                byte[] mapValueBuf = toByteArray(entry.getValue());
                RedisCommand<Boolean> cmd = RedisCommands.HSETNX;
                if (command.isUpsert()) {
                    cmd = RedisCommands.HSET;
                }
                Mono<Boolean> m = write(keyBuf, StringCodec.INSTANCE, cmd, keyBuf, mapKeyBuf, mapValueBuf);
                return m.map(v -> new BooleanResponse<>(command, v));
            } else {
                List<Object> params = new ArrayList<Object>(command.getFieldValueMap().size()*2 + 1);
                params.add(keyBuf);
                for (Entry<ByteBuffer, ByteBuffer> entry : command.getFieldValueMap().entrySet()) {
                    params.add(toByteArray(entry.getKey()));
                    params.add(toByteArray(entry.getValue()));
                }

                Mono<String> m = write(keyBuf, StringCodec.INSTANCE, HMSET, params.toArray());
                return m.map(v -> new BooleanResponse<>(command, true));
            }
        });
    }

    private static final RedisCommand<List<Object>> HMGET = new RedisCommand<List<Object>>("HMGET", new MultiDecoder<List<Object>>() {

        @Override
        public List<Object> decode(List<Object> parts, State state) {
            List<Object> list = parts.stream().filter(e -> e != null).collect(Collectors.toList());
            if (list.isEmpty()) {
                return null;
            }
            return parts;
        }

    });

    @Override
    public Flux<MultiValueResponse<HGetCommand, ByteBuffer>> hMGet(Publisher<HGetCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getFields(), "Fields must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());
            List<Object> args = new ArrayList<Object>(command.getFields().size() + 1);
            args.add(keyBuf);
            args.addAll(command.getFields().stream().map(buf -> toByteArray(buf)).collect(Collectors.toList()));
            Mono<List<byte[]>> m = read(keyBuf, ByteArrayCodec.INSTANCE, HMGET, args.toArray());
            return m.map(v -> {
                List<ByteBuffer> values = v.stream().map(array -> {
                    if (array != null) {
                        return ByteBuffer.wrap(array);
                    }
                    return null;
                }).collect(Collectors.toList());
                return new MultiValueResponse<>(command, values);
            });
        });
    }

    @Override
    public Flux<BooleanResponse<HExistsCommand>> hExists(Publisher<HExistsCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getField(), "Field must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());
            byte[] fieldBuf = toByteArray(command.getField());

            Mono<Boolean> m = read(keyBuf, StringCodec.INSTANCE, RedisCommands.HEXISTS, keyBuf, fieldBuf);
            return m.map(v -> new BooleanResponse<>(command, v));
        });
    }

    @Override
    public Flux<NumericResponse<HDelCommand, Long>> hDel(Publisher<HDelCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getFields(), "Fields must not be null!");

            List<Object> args = new ArrayList<Object>(command.getFields().size() + 1);
            args.add(toByteArray(command.getKey()));
            args.addAll(command.getFields().stream().map(v -> toByteArray(v)).collect(Collectors.toList()));

            Mono<Long> m = write((byte[])args.get(0), StringCodec.INSTANCE, RedisCommands.HDEL, args.toArray());
            return m.map(v -> new NumericResponse<>(command, v));
        });
    }

    @Override
    public Flux<NumericResponse<KeyCommand, Long>> hLen(Publisher<KeyCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());
            Mono<Long> m = read(keyBuf, StringCodec.INSTANCE, RedisCommands.HLEN_LONG, keyBuf);
            return m.map(v -> new NumericResponse<>(command, v));
        });
    }

    @Override
    public Flux<CommandResponse<KeyCommand, Flux<ByteBuffer>>> hKeys(Publisher<KeyCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());

            Mono<Set<byte[]>> m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.HKEYS, keyBuf);
            return m.map(v -> new CommandResponse<>(command, Flux.fromIterable(v).map(e -> ByteBuffer.wrap(e))));
        });
    }

    @Override
    public Flux<CommandResponse<KeyCommand, Flux<ByteBuffer>>> hVals(Publisher<KeyCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());

            Mono<List<byte[]>> m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.HVALS, keyBuf);
            return m.map(v -> new CommandResponse<>(command, Flux.fromIterable(v).map(e -> ByteBuffer.wrap(e))));
        });
    }

    @Override
    public Flux<CommandResponse<KeyCommand, Flux<Entry<ByteBuffer, ByteBuffer>>>> hGetAll(
            Publisher<KeyCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());

            Mono<Map<byte[], byte[]>> m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.HGETALL, keyBuf);
            Mono<Map<ByteBuffer, ByteBuffer>> f = m.map(v -> v.entrySet().stream().collect(Collectors.toMap(e -> ByteBuffer.wrap(e.getKey()), e -> ByteBuffer.wrap(e.getValue()))));
            return f.map(v -> new CommandResponse<>(command, Flux.fromIterable(v.entrySet())));
        });
    }

    @Override
    public Flux<CommandResponse<KeyCommand, Flux<Entry<ByteBuffer, ByteBuffer>>>> hScan(
            Publisher<KeyScanCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getOptions(), "ScanOptions must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());
            Flux<Entry<Object, Object>> flux = Flux.create(new MapReactiveIterator<Object, Object, Entry<Object, Object>>(null, null, 0) {
                @Override
                public RFuture<ScanResult<Object>> scanIterator(RedisClient client, long nextIterPos) {
                    if (command.getOptions().getPattern() == null) {
                        return executorService.readAsync(client, keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.HSCAN,
                                keyBuf, nextIterPos, "COUNT", Optional.ofNullable(command.getOptions().getCount()).orElse(10L));
                    }

                    return executorService.readAsync(client, keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.HSCAN,
                            keyBuf, nextIterPos, "MATCH", command.getOptions().getPattern(),
                            "COUNT", Optional.ofNullable(command.getOptions().getCount()).orElse(10L));
                }
            });
            Flux<Entry<ByteBuffer, ByteBuffer>> f = flux.map(v -> Collections.singletonMap(ByteBuffer.wrap((byte[])v.getKey()), ByteBuffer.wrap((byte[])v.getValue())).entrySet().iterator().next());
            return Mono.just(new CommandResponse<>(command, f));
        });
    }

    private static final RedisCommand<Long> HSTRLEN = new RedisCommand<Long>("HSTRLEN");

    @Override
    public Flux<NumericResponse<HStrLenCommand, Long>> hStrLen(Publisher<HStrLenCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getField(), "Field must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());
            byte[] fieldBuf = toByteArray(command.getField());

            Mono<Long> m = read(keyBuf, StringCodec.INSTANCE, HSTRLEN, keyBuf, fieldBuf);
            return m.map(v -> new NumericResponse<>(command, v));
        });
    }

    @Override
    public Flux<CommandResponse<HRandFieldCommand, Flux<ByteBuffer>>> hRandField(Publisher<HRandFieldCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());

            Mono<Set<byte[]>> m;
            if (command.getCount() > 0) {
                m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.HRANDFIELD_KEYS, keyBuf, command.getCount());
            } else {
                m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.HRANDFIELD_KEYS, keyBuf);
            }
            return m.map(v -> new CommandResponse<>(command, Flux.fromIterable(v).map(e -> ByteBuffer.wrap(e))));
        });
    }

    @Override
    public Flux<CommandResponse<HRandFieldCommand, Flux<Entry<ByteBuffer, ByteBuffer>>>> hRandFieldWithValues(Publisher<HRandFieldCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());

            Mono<Map<byte[], byte[]>> m;
            if (command.getCount() > 0) {
                m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.HRANDFIELD, keyBuf, command.getCount());
            } else {
                m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.HRANDFIELD, keyBuf);
            }

            Mono<Map<ByteBuffer, ByteBuffer>> f = m.map(v -> v.entrySet().stream().collect(Collectors.toMap(e -> ByteBuffer.wrap(e.getKey()), e -> ByteBuffer.wrap(e.getValue()))));
            return f.map(v -> new CommandResponse<>(command, Flux.fromIterable(v.entrySet())));
        });
    }
}
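A hedged usage sketch for the reactive hash commands above; key and field names are placeholders. Note that a single-field write goes through HSET/HSETNX while a multi-field map goes through HMSET, as implemented above.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.springframework.data.redis.connection.ReactiveRedisConnection;

public class HashCommandsSketch {

    private static ByteBuffer buf(String value) {
        return ByteBuffer.wrap(value.getBytes(StandardCharsets.UTF_8));
    }

    public static void demo(ReactiveRedisConnection connection) {
        connection.hashCommands()
                .hSet(buf("user:1"), buf("name"), buf("alice")) // single field -> HSET path above
                .then(connection.hashCommands().hGet(buf("user:1"), buf("name")))
                .subscribe(value -> System.out.println(StandardCharsets.UTF_8.decode(value)));
    }
}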
@ -0,0 +1,98 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.reactivestreams.Publisher;
|
||||
import org.redisson.client.codec.StringCodec;
|
||||
import org.redisson.client.protocol.RedisCommand;
|
||||
import org.redisson.client.protocol.RedisCommands;
|
||||
import org.redisson.client.protocol.RedisStrictCommand;
|
||||
import org.redisson.reactive.CommandReactiveExecutor;
|
||||
import org.springframework.data.redis.connection.ReactiveHyperLogLogCommands;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.BooleanResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonReactiveHyperLogLogCommands extends RedissonBaseReactive implements ReactiveHyperLogLogCommands {
|
||||
|
||||
RedissonReactiveHyperLogLogCommands(CommandReactiveExecutor executorService) {
|
||||
super(executorService);
|
||||
}
|
||||
|
||||
private static final RedisCommand<Long> PFADD = new RedisCommand<Long>("PFADD");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<PfAddCommand, Long>> pfAdd(Publisher<PfAddCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notEmpty(command.getValues(), "Values must not be empty!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
|
||||
List<Object> params = new ArrayList<Object>(command.getValues().size() + 1);
|
||||
params.add(keyBuf);
|
||||
params.addAll(command.getValues().stream().map(v -> toByteArray(v)).collect(Collectors.toList()));
|
||||
|
||||
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, PFADD, params.toArray());
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<PfCountCommand, Long>> pfCount(Publisher<PfCountCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notEmpty(command.getKeys(), "Keys must not be empty!");
|
||||
|
||||
Object[] args = command.getKeys().stream().map(v -> toByteArray(v)).toArray();
|
||||
|
||||
Mono<Long> m = write((byte[])args[0], StringCodec.INSTANCE, RedisCommands.PFCOUNT, args);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<String> PFMERGE = new RedisStrictCommand<String>("PFMERGE");
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<PfMergeCommand>> pfMerge(Publisher<PfMergeCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Destination key must not be null!");
|
||||
            Assert.notEmpty(command.getSourceKeys(), "Source keys must not be empty!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
List<Object> args = new ArrayList<Object>(command.getSourceKeys().size() + 1);
|
||||
args.add(keyBuf);
|
||||
args.addAll(command.getSourceKeys().stream().map(v -> toByteArray(v)).collect(Collectors.toList()));
|
||||
Mono<String> m = write(keyBuf, StringCodec.INSTANCE, PFMERGE, args.toArray());
|
||||
return m.map(v -> new BooleanResponse<>(command, true));
|
||||
});
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,371 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.time.Duration;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.reactivestreams.Publisher;
|
||||
import org.redisson.client.codec.ByteArrayCodec;
|
||||
import org.redisson.client.codec.StringCodec;
|
||||
import org.redisson.client.protocol.RedisCommands;
|
||||
import org.redisson.client.protocol.RedisStrictCommand;
|
||||
import org.redisson.client.protocol.convertor.BooleanReplayConvertor;
|
||||
import org.redisson.client.protocol.convertor.Convertor;
|
||||
import org.redisson.reactive.CommandReactiveExecutor;
|
||||
import org.redisson.reactive.RedissonKeysReactive;
|
||||
import org.springframework.data.redis.connection.DataType;
|
||||
import org.springframework.data.redis.connection.ReactiveKeyCommands;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.BooleanResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.CommandResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.KeyCommand;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.MultiValueResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse;
|
||||
import org.springframework.data.redis.connection.ValueEncoding;
|
||||
import org.springframework.data.redis.core.ScanOptions;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonReactiveKeyCommands extends RedissonBaseReactive implements ReactiveKeyCommands {
|
||||
|
||||
public RedissonReactiveKeyCommands(CommandReactiveExecutor executorService) {
|
||||
super(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<KeyCommand>> exists(Publisher<KeyCommand> keys) {
|
||||
return execute(keys, key -> {
|
||||
|
||||
Assert.notNull(key.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(key.getKey());
|
||||
Mono<Boolean> m = read(keyBuf, StringCodec.INSTANCE, RedisCommands.EXISTS, keyBuf);
|
||||
return m.map(v -> new BooleanResponse<>(key, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<DataType> TYPE = new RedisStrictCommand<DataType>("TYPE", new Convertor<DataType>() {
|
||||
@Override
|
||||
public DataType convert(Object obj) {
|
||||
return DataType.fromCode(obj.toString());
|
||||
}
|
||||
});
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<KeyCommand, DataType>> type(Publisher<KeyCommand> keys) {
|
||||
return execute(keys, key -> {
|
||||
|
||||
Assert.notNull(key.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(key.getKey());
|
||||
Mono<DataType> m = read(keyBuf, StringCodec.INSTANCE, TYPE, keyBuf);
|
||||
return m.map(v -> new CommandResponse<>(key, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<Collection<ByteBuffer>, Long>> touch(Publisher<Collection<ByteBuffer>> keys) {
|
||||
return execute(keys, coll -> {
|
||||
|
||||
Assert.notNull(coll, "Collection must not be null!");
|
||||
|
||||
Object[] params = coll.stream().map(buf -> toByteArray(buf)).toArray(Object[]::new);
|
||||
|
||||
Mono<Long> m = write(null, StringCodec.INSTANCE, RedisCommands.TOUCH_LONG, params);
|
||||
return m.map(v -> new NumericResponse<>(coll, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<MultiValueResponse<ByteBuffer, ByteBuffer>> keys(Publisher<ByteBuffer> patterns) {
|
||||
return execute(patterns, pattern -> {
|
||||
|
||||
Assert.notNull(pattern, "Pattern must not be null!");
|
||||
|
||||
Mono<List<String>> m = read(null, StringCodec.INSTANCE, RedisCommands.KEYS, toByteArray(pattern));
|
||||
return m.map(v -> {
|
||||
List<ByteBuffer> values = v.stream().map(t -> ByteBuffer.wrap(t.getBytes())).collect(Collectors.toList());
|
||||
return new MultiValueResponse<>(pattern, values);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ByteBuffer> scan(ScanOptions options) {
|
||||
RedissonKeysReactive reactive = new RedissonKeysReactive(executorService);
|
||||
return reactive.getKeysByPattern(options.getPattern(), options.getCount().intValue()).map(t -> ByteBuffer.wrap(t.getBytes()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<ByteBuffer> randomKey() {
|
||||
return executorService.reactive(() -> {
|
||||
return executorService.readRandomAsync(ByteArrayCodec.INSTANCE, RedisCommands.RANDOM_KEY);
|
||||
});
|
||||
}
|
||||
|
||||
static final RedisStrictCommand<String> RENAME = new RedisStrictCommand<String>("RENAME");
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<RenameCommand>> rename(Publisher<RenameCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getNewKey(), "New name must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] newKeyBuf = toByteArray(command.getNewKey());
|
||||
Mono<String> m = write(keyBuf, StringCodec.INSTANCE, RENAME, keyBuf, newKeyBuf);
|
||||
return m.map(v -> new BooleanResponse<>(command, true));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<RenameCommand>> renameNX(Publisher<RenameCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getNewKey(), "New name must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] newKeyBuf = toByteArray(command.getNewKey());
|
||||
Mono<Boolean> m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.RENAMENX, keyBuf, newKeyBuf);
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<KeyCommand, Long>> del(Publisher<KeyCommand> keys) {
|
||||
Flux<KeyCommand> s = Flux.from(keys);
|
||||
return s.concatMap(command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.DEL, keyBuf);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<List<ByteBuffer>, Long>> mDel(Publisher<List<ByteBuffer>> keys) {
|
||||
return execute(keys, coll -> {
|
||||
|
||||
Assert.notNull(coll, "List must not be null!");
|
||||
|
||||
Object[] params = coll.stream().map(buf -> toByteArray(buf)).toArray(Object[]::new);
|
||||
|
||||
Mono<Long> m = write(null, StringCodec.INSTANCE, RedisCommands.DEL, params);
|
||||
return m.map(v -> new NumericResponse<>(coll, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<KeyCommand, Long>> unlink(Publisher<KeyCommand> keys) {
|
||||
return execute(keys, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.UNLINK, keyBuf);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<List<ByteBuffer>, Long>> mUnlink(Publisher<List<ByteBuffer>> keys) {
|
||||
return execute(keys, coll -> {
|
||||
|
||||
Assert.notNull(coll, "List must not be null!");
|
||||
|
||||
Object[] params = coll.stream().map(buf -> toByteArray(buf)).toArray(Object[]::new);
|
||||
|
||||
Mono<Long> m = write(null, StringCodec.INSTANCE, RedisCommands.UNLINK, params);
|
||||
return m.map(v -> new NumericResponse<>(coll, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Boolean> EXPIRE = new RedisStrictCommand<Boolean>("EXPIRE", new BooleanReplayConvertor());
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<ExpireCommand>> expire(Publisher<ExpireCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Boolean> m = write(keyBuf, StringCodec.INSTANCE, EXPIRE, keyBuf, command.getTimeout().getSeconds());
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<ExpireCommand>> pExpire(Publisher<ExpireCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
            Mono<Boolean> m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.PEXPIRE, keyBuf, command.getTimeout().toMillis());
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Boolean> EXPIREAT = new RedisStrictCommand<Boolean>("EXPIREAT", new BooleanReplayConvertor());
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<ExpireAtCommand>> expireAt(Publisher<ExpireAtCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Boolean> m = write(keyBuf, StringCodec.INSTANCE, EXPIREAT, keyBuf, command.getExpireAt().getEpochSecond());
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<ExpireAtCommand>> pExpireAt(Publisher<ExpireAtCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Boolean> m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.PEXPIREAT, keyBuf, command.getExpireAt().toEpochMilli());
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<KeyCommand>> persist(Publisher<KeyCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Boolean> m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.PERSIST, keyBuf);
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> TTL = new RedisStrictCommand<Long>("TTL");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<KeyCommand, Long>> ttl(Publisher<KeyCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Long> m = read(keyBuf, StringCodec.INSTANCE, TTL, keyBuf);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<KeyCommand, Long>> pTtl(Publisher<KeyCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Long> m = read(keyBuf, StringCodec.INSTANCE, RedisCommands.PTTL, keyBuf);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<MoveCommand>> move(Publisher<MoveCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getDatabase(), "Database must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Boolean> m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.MOVE, keyBuf, command.getDatabase());
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<ValueEncoding> OBJECT_ENCODING = new RedisStrictCommand<ValueEncoding>("OBJECT", "ENCODING", new Convertor<ValueEncoding>() {
|
||||
@Override
|
||||
public ValueEncoding convert(Object obj) {
|
||||
return ValueEncoding.of((String) obj);
|
||||
}
|
||||
});
|
||||
|
||||
@Override
|
||||
public Mono<ValueEncoding> encodingOf(ByteBuffer key) {
|
||||
Assert.notNull(key, "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(key);
|
||||
return read(keyBuf, StringCodec.INSTANCE, OBJECT_ENCODING, keyBuf);
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> OBJECT_IDLETIME = new RedisStrictCommand<Long>("OBJECT", "IDLETIME");
|
||||
|
||||
@Override
|
||||
public Mono<Duration> idletime(ByteBuffer key) {
|
||||
Assert.notNull(key, "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(key);
|
||||
Mono<Long> m = read(keyBuf, StringCodec.INSTANCE, OBJECT_IDLETIME, keyBuf);
|
||||
return m.map(Duration::ofSeconds);
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> OBJECT_REFCOUNT = new RedisStrictCommand<Long>("OBJECT", "REFCOUNT");
|
||||
|
||||
@Override
|
||||
public Mono<Long> refcount(ByteBuffer key) {
|
||||
Assert.notNull(key, "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(key);
|
||||
return read(keyBuf, StringCodec.INSTANCE, OBJECT_REFCOUNT, keyBuf);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<CopyCommand>> copy(Publisher<CopyCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getTarget(), "Target must not be null!");
|
||||
|
||||
List<Object> params = new ArrayList<>();
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
params.add(keyBuf);
|
||||
byte[] targetBuf = toByteArray(command.getTarget());
|
||||
params.add(targetBuf);
|
||||
if (command.getDatabase() != null) {
|
||||
params.add("DB");
|
||||
params.add(command.getDatabase());
|
||||
}
|
||||
|
||||
Mono<Boolean> m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.COPY, params.toArray());
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
}
|
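Usage note (illustrative, not part of the diff): the key commands above are reached through Spring Data's ReactiveRedisConnection. A minimal sketch, assuming this module's RedissonConnectionFactory (which also implements ReactiveRedisConnectionFactory) and a Redis server on localhost:6379:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.redisson.Redisson;
import org.redisson.api.RedissonClient;
import org.redisson.spring.data.connection.RedissonConnectionFactory;
import org.springframework.data.redis.connection.ReactiveRedisConnection;
import org.springframework.data.redis.core.ScanOptions;

public class ReactiveKeyCommandsExample {
    public static void main(String[] args) {
        RedissonClient redisson = Redisson.create(); // default config, localhost:6379
        RedissonConnectionFactory factory = new RedissonConnectionFactory(redisson);
        ReactiveRedisConnection connection = factory.getReactiveConnection();

        // EXISTS is served by RedissonReactiveKeyCommands.exists(...)
        Boolean found = connection.keyCommands()
                .exists(ByteBuffer.wrap("myKey".getBytes(StandardCharsets.UTF_8)))
                .block();
        System.out.println("exists: " + found);

        // SCAN is served by RedissonReactiveKeyCommands.scan(...); the implementation reads the count option
        ScanOptions options = ScanOptions.scanOptions().match("user:*").count(100).build();
        connection.keyCommands().scan(options)
                .map(buf -> StandardCharsets.UTF_8.decode(buf).toString())
                .collectList()
                .block()
                .forEach(System.out::println);

        redisson.shutdown();
    }
}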
@ -0,0 +1,370 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.reactivestreams.Publisher;
|
||||
import org.redisson.client.codec.ByteArrayCodec;
|
||||
import org.redisson.client.codec.StringCodec;
|
||||
import org.redisson.client.protocol.RedisCommand;
|
||||
import org.redisson.client.protocol.RedisCommands;
|
||||
import org.redisson.client.protocol.RedisStrictCommand;
|
||||
import org.redisson.reactive.CommandReactiveExecutor;
|
||||
import org.springframework.dao.InvalidDataAccessApiUsageException;
|
||||
import org.springframework.data.redis.connection.ReactiveListCommands;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.BooleanResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.ByteBufferResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.CommandResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.KeyCommand;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.RangeCommand;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonReactiveListCommands extends RedissonBaseReactive implements ReactiveListCommands {
|
||||
|
||||
private static final RedisStrictCommand<Long> RPUSH = new RedisStrictCommand<Long>("RPUSH");
|
||||
private static final RedisStrictCommand<Long> LPUSH = new RedisStrictCommand<Long>("LPUSH");
|
||||
private static final RedisStrictCommand<Long> RPUSHX = new RedisStrictCommand<Long>("RPUSHX");
|
||||
private static final RedisStrictCommand<Long> LPUSHX = new RedisStrictCommand<Long>("LPUSHX");
|
||||
|
||||
RedissonReactiveListCommands(CommandReactiveExecutor executorService) {
|
||||
super(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<PushCommand, Long>> push(Publisher<PushCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notEmpty(command.getValues(), "Values must not be null or empty!");
|
||||
|
||||
if (!command.getUpsert() && command.getValues().size() > 1) {
|
||||
throw new InvalidDataAccessApiUsageException(
|
||||
String.format("%s PUSHX only allows one value!", command.getDirection()));
|
||||
}
|
||||
|
||||
RedisStrictCommand<Long> redisCommand;
|
||||
|
||||
List<Object> params = new ArrayList<Object>();
|
||||
params.add(toByteArray(command.getKey()));
|
||||
params.addAll(command.getValues().stream().map(v -> toByteArray(v)).collect(Collectors.toList()));
|
||||
|
||||
if (ObjectUtils.nullSafeEquals(Direction.RIGHT, command.getDirection())) {
|
||||
if (command.getUpsert()) {
|
||||
redisCommand = RPUSH;
|
||||
} else {
|
||||
redisCommand = RPUSHX;
|
||||
}
|
||||
} else {
|
||||
if (command.getUpsert()) {
|
||||
redisCommand = LPUSH;
|
||||
} else {
|
||||
redisCommand = LPUSHX;
|
||||
}
|
||||
}
|
||||
|
||||
Mono<Long> m = write((byte[])params.get(0), StringCodec.INSTANCE, redisCommand, params.toArray());
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> LLEN = new RedisStrictCommand<Long>("LLEN");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<KeyCommand, Long>> lLen(Publisher<KeyCommand> commands) {
|
||||
return execute(commands, key -> {
|
||||
|
||||
Assert.notNull(key.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(key.getKey());
|
||||
Mono<Long> m = read(keyBuf, StringCodec.INSTANCE, LLEN, keyBuf);
|
||||
return m.map(v -> new NumericResponse<>(key, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<RangeCommand, Flux<ByteBuffer>>> lRange(Publisher<RangeCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getRange(), "Range must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<List<byte[]>> m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.LRANGE,
|
||||
keyBuf, command.getRange().getLowerBound().getValue().orElse(0L),
|
||||
command.getRange().getUpperBound().getValue().orElse(-1L));
|
||||
return m.map(v -> new CommandResponse<>(command, Flux.fromIterable(v).map(e -> ByteBuffer.wrap(e))));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<String> LTRIM = new RedisStrictCommand<String>("LTRIM");
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<RangeCommand>> lTrim(Publisher<RangeCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getRange(), "Range must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<String> m = write(keyBuf, StringCodec.INSTANCE, LTRIM,
|
||||
keyBuf, command.getRange().getLowerBound().getValue().orElse(0L),
|
||||
command.getRange().getUpperBound().getValue().orElse(-1L));
|
||||
return m.map(v -> new BooleanResponse<>(command, true));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ByteBufferResponse<LIndexCommand>> lIndex(Publisher<LIndexCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getIndex(), "Index value must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<byte[]> m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.LINDEX,
|
||||
keyBuf, command.getIndex());
|
||||
return m.map(v -> new ByteBufferResponse<>(command, ByteBuffer.wrap(v)));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> LINSERT = new RedisStrictCommand<Long>("LINSERT");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<LInsertCommand, Long>> lInsert(Publisher<LInsertCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValue(), "Value must not be null!");
|
||||
Assert.notNull(command.getPivot(), "Pivot must not be null!");
|
||||
Assert.notNull(command.getPosition(), "Position must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] valueBuf = toByteArray(command.getValue());
|
||||
byte[] pivotBuf = toByteArray(command.getPivot());
|
||||
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, LINSERT, keyBuf, command.getPosition(), pivotBuf, valueBuf);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<String> LSET = new RedisStrictCommand<String>("LSET");
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<LSetCommand>> lSet(Publisher<LSetCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValue(), "value must not be null!");
|
||||
Assert.notNull(command.getIndex(), "Index must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] valueBuf = toByteArray(command.getValue());
|
||||
Mono<String> m = write(keyBuf, StringCodec.INSTANCE, LSET,
|
||||
keyBuf, command.getIndex(), valueBuf);
|
||||
return m.map(v -> new BooleanResponse<>(command, true));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> LREM = new RedisStrictCommand<Long>("LREM");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<LRemCommand, Long>> lRem(Publisher<LRemCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValue(), "Value must not be null!");
|
||||
Assert.notNull(command.getCount(), "Count must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] valueBuf = toByteArray(command.getValue());
|
||||
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, LREM, keyBuf, command.getCount(), valueBuf);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ByteBufferResponse<PopCommand>> pop(Publisher<PopCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getDirection(), "Direction must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
RedisCommand<Object> redisCommand = RedisCommands.LPOP;
|
||||
if (command.getDirection() == Direction.RIGHT) {
|
||||
redisCommand = RedisCommands.RPOP;
|
||||
}
|
||||
|
||||
Mono<byte[]> m = write(keyBuf, ByteArrayCodec.INSTANCE, redisCommand, keyBuf);
|
||||
return m.map(v -> new ByteBufferResponse<>(command, ByteBuffer.wrap(v)));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<PopResponse> bPop(Publisher<BPopCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKeys(), "Keys must not be null!");
|
||||
Assert.notNull(command.getDirection(), "Direction must not be null!");
|
||||
Assert.notNull(command.getTimeout(), "Timeout must not be null!");
|
||||
|
||||
RedisCommand<List<Object>> redisCommand = RedisCommands.BLPOP;
|
||||
if (command.getDirection() == Direction.RIGHT) {
|
||||
redisCommand = RedisCommands.BRPOP;
|
||||
}
|
||||
|
||||
List<Object> params = new ArrayList<Object>(command.getKeys().size() + 1);
|
||||
params.addAll(command.getKeys().stream().map(v -> toByteArray(v)).collect(Collectors.toList()));
|
||||
params.add(command.getTimeout().getSeconds());
|
||||
|
||||
Mono<List<byte[]>> m = write((byte[])params.get(0), ByteArrayCodec.INSTANCE, redisCommand, params.toArray());
|
||||
return m.map(v -> new PopResponse(command,
|
||||
new PopResult(v.stream().map(e -> ByteBuffer.wrap(e)).collect(Collectors.toList()))));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ByteBufferResponse<RPopLPushCommand>> rPopLPush(Publisher<RPopLPushCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getDestination(), "Destination key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] destinationBuf = toByteArray(command.getDestination());
|
||||
|
||||
Mono<byte[]> m = write(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.RPOPLPUSH, keyBuf, destinationBuf);
|
||||
return m.map(v -> new ByteBufferResponse<>(command, ByteBuffer.wrap(v)));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ByteBufferResponse<BRPopLPushCommand>> bRPopLPush(Publisher<BRPopLPushCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getDestination(), "Destination key must not be null!");
|
||||
Assert.notNull(command.getTimeout(), "Timeout must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] destinationBuf = toByteArray(command.getDestination());
|
||||
|
||||
Mono<byte[]> m = write(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.BRPOPLPUSH,
|
||||
keyBuf, destinationBuf, command.getTimeout().getSeconds());
|
||||
return m.map(v -> new ByteBufferResponse<>(command, ByteBuffer.wrap(v)));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<LPosCommand, Long>> lPos(Publisher<LPosCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getElement(), "Element must not be null!");
|
||||
|
||||
List<Object> params = new ArrayList<Object>();
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
params.add(keyBuf);
|
||||
params.add(toByteArray(command.getElement()));
|
||||
if (command.getRank() != null) {
|
||||
params.add("RANK");
|
||||
params.add(command.getRank());
|
||||
}
|
||||
if (command.getCount() != null) {
|
||||
params.add("COUNT");
|
||||
params.add(command.getCount());
|
||||
}
|
||||
|
||||
Mono<Long> m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.LPOS, params.toArray());
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ByteBufferResponse<LMoveCommand>> lMove(Publisher<? extends LMoveCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getDestinationKey(), "Destination key must not be null!");
|
||||
Assert.notNull(command.getFrom(), "From must not be null!");
|
||||
Assert.notNull(command.getTo(), "To must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] destinationBuf = toByteArray(command.getDestinationKey());
|
||||
|
||||
Mono<byte[]> m = write(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.LMOVE,
|
||||
keyBuf, destinationBuf, command.getFrom(), command.getTo());
|
||||
return m.map(v -> new ByteBufferResponse<>(command, ByteBuffer.wrap(v)));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ByteBufferResponse<BLMoveCommand>> bLMove(Publisher<BLMoveCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getDestinationKey(), "Destination key must not be null!");
|
||||
Assert.notNull(command.getFrom(), "From must not be null!");
|
||||
Assert.notNull(command.getTo(), "To must not be null!");
|
||||
Assert.notNull(command.getTimeout(), "Timeout must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] destinationBuf = toByteArray(command.getDestinationKey());
|
||||
|
||||
Mono<byte[]> m = write(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.BLMOVE,
|
||||
keyBuf, destinationBuf, command.getFrom(), command.getTo(), command.getTimeout().getSeconds());
|
||||
return m.map(v -> new ByteBufferResponse<>(command, ByteBuffer.wrap(v)));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<PopCommand, Flux<ByteBuffer>>> popList(Publisher<PopCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
RedisCommand cmd;
|
||||
if (command.getDirection() == Direction.RIGHT) {
|
||||
cmd = RedisCommands.RPOP_LIST;
|
||||
} else {
|
||||
cmd = RedisCommands.LPOP_LIST;
|
||||
}
|
||||
|
||||
List<Object> params = new ArrayList<>(2);
|
||||
params.add(keyBuf);
|
||||
if (command.getCount() > 0) {
|
||||
params.add(command.getCount());
|
||||
}
|
||||
|
||||
Mono<List<byte[]>> m = write(keyBuf, ByteArrayCodec.INSTANCE, cmd, params.toArray());
|
||||
return m.map(v -> new CommandResponse<>(command, Flux.fromIterable(v).map(e -> ByteBuffer.wrap(e))));
|
||||
});
|
||||
}
|
||||
}
|
@ -0,0 +1,119 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import java.math.BigDecimal;
|
||||
|
||||
import org.reactivestreams.Publisher;
|
||||
import org.redisson.client.codec.StringCodec;
|
||||
import org.redisson.client.protocol.RedisCommand;
|
||||
import org.redisson.client.protocol.RedisCommands;
|
||||
import org.redisson.client.protocol.convertor.NumberConvertor;
|
||||
import org.redisson.reactive.CommandReactiveExecutor;
|
||||
import org.springframework.data.redis.connection.ReactiveNumberCommands;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.KeyCommand;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonReactiveNumberCommands extends RedissonBaseReactive implements ReactiveNumberCommands {
|
||||
|
||||
public RedissonReactiveNumberCommands(CommandReactiveExecutor executorService) {
|
||||
super(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<KeyCommand, Long>> incr(Publisher<KeyCommand> keys) {
|
||||
return execute(keys, key -> {
|
||||
|
||||
Assert.notNull(key.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(key.getKey());
|
||||
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.INCR, keyBuf);
|
||||
return m.map(v -> new NumericResponse<>(key, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T extends Number> Flux<NumericResponse<IncrByCommand<T>, T>> incrBy(Publisher<IncrByCommand<T>> commands) {
|
||||
return execute(commands, key -> {
|
||||
|
||||
Assert.notNull(key.getKey(), "Key must not be null!");
|
||||
Assert.notNull(key.getValue(), "Value must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(key.getKey());
|
||||
|
||||
Mono<T> m = write(keyBuf, StringCodec.INSTANCE,
|
||||
new RedisCommand<Object>("INCRBYFLOAT", new NumberConvertor(key.getValue().getClass())),
|
||||
keyBuf, new BigDecimal(key.getValue().toString()).toPlainString());
|
||||
return m.map(v -> new NumericResponse<>(key, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<KeyCommand, Long>> decr(Publisher<KeyCommand> keys) {
|
||||
return execute(keys, key -> {
|
||||
|
||||
Assert.notNull(key.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(key.getKey());
|
||||
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.DECR, keyBuf);
|
||||
return m.map(v -> new NumericResponse<>(key, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T extends Number> Flux<NumericResponse<DecrByCommand<T>, T>> decrBy(Publisher<DecrByCommand<T>> commands) {
|
||||
return execute(commands, key -> {
|
||||
|
||||
Assert.notNull(key.getKey(), "Key must not be null!");
|
||||
Assert.notNull(key.getValue(), "Value must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(key.getKey());
|
||||
|
||||
Mono<T> m = write(keyBuf, StringCodec.INSTANCE,
|
||||
new RedisCommand<Object>("INCRBYFLOAT", new NumberConvertor(key.getValue().getClass())),
|
||||
keyBuf, "-" + new BigDecimal(key.getValue().toString()).toPlainString());
|
||||
return m.map(v -> new NumericResponse<>(key, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T extends Number> Flux<NumericResponse<HIncrByCommand<T>, T>> hIncrBy(
|
||||
Publisher<HIncrByCommand<T>> commands) {
|
||||
return execute(commands, key -> {
|
||||
|
||||
Assert.notNull(key.getKey(), "Key must not be null!");
|
||||
Assert.notNull(key.getValue(), "Value must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(key.getKey());
|
||||
byte[] fieldBuf = toByteArray(key.getField());
|
||||
|
||||
Mono<T> m = write(keyBuf, StringCodec.INSTANCE,
|
||||
new RedisCommand<Object>("HINCRBYFLOAT", new NumberConvertor(key.getValue().getClass())),
|
||||
keyBuf, fieldBuf, new BigDecimal(key.getValue().toString()).toPlainString());
|
||||
return m.map(v -> new NumericResponse<>(key, v));
|
||||
});
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,64 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
import org.reactivestreams.Publisher;
|
||||
import org.redisson.client.codec.StringCodec;
|
||||
import org.redisson.client.protocol.RedisCommands;
|
||||
import org.redisson.reactive.CommandReactiveExecutor;
|
||||
import org.springframework.data.redis.connection.ReactivePubSubCommands;
|
||||
import org.springframework.data.redis.connection.ReactiveSubscription;
|
||||
import org.springframework.data.redis.connection.ReactiveSubscription.ChannelMessage;
|
||||
|
||||
import org.springframework.data.redis.connection.SubscriptionListener;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonReactivePubSubCommands extends RedissonBaseReactive implements ReactivePubSubCommands {
|
||||
|
||||
RedissonReactivePubSubCommands(CommandReactiveExecutor executorService) {
|
||||
super(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<Long> publish(Publisher<ChannelMessage<ByteBuffer, ByteBuffer>> messageStream) {
|
||||
return execute(messageStream, msg -> {
|
||||
return write(toByteArray(msg.getChannel()), StringCodec.INSTANCE, RedisCommands.PUBLISH, toByteArray(msg.getChannel()), toByteArray(msg.getMessage()));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> subscribe(ByteBuffer... channels) {
|
||||
throw new UnsupportedOperationException("Subscribe through ReactiveSubscription object created by createSubscription method");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> pSubscribe(ByteBuffer... patterns) {
|
||||
throw new UnsupportedOperationException("Subscribe through ReactiveSubscription object created by createSubscription method");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<ReactiveSubscription> createSubscription(SubscriptionListener subscriptionListener) {
|
||||
return Mono.just(new RedissonReactiveSubscription(executorService.getConnectionManager(), subscriptionListener));
|
||||
}
|
||||
}
|
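Usage note (illustrative, not part of the diff): subscribe(...) and pSubscribe(...) intentionally throw here, so consumers go through createSubscription(). A hedged sketch, reusing a connection obtained as in the key-commands example above:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.springframework.data.redis.connection.ReactivePubSubCommands;
import org.springframework.data.redis.connection.ReactiveRedisConnection;
import org.springframework.data.redis.connection.ReactiveSubscription.ChannelMessage;

import reactor.core.publisher.Mono;

public class ReactivePubSubExample {

    static void listenAndPublish(ReactiveRedisConnection connection) {
        ReactivePubSubCommands pubSub = connection.pubSubCommands();
        ByteBuffer channel = ByteBuffer.wrap("news".getBytes(StandardCharsets.UTF_8));

        // Supported path: create a ReactiveSubscription, subscribe, then consume receive()
        pubSub.createSubscription()
                .flatMapMany(sub -> sub.subscribe(channel).thenMany(sub.receive()))
                .map(msg -> StandardCharsets.UTF_8.decode(msg.getMessage()).toString())
                .subscribe(System.out::println);

        // PUBLISH is served by RedissonReactivePubSubCommands.publish(...)
        pubSub.publish(Mono.just(new ChannelMessage<>(
                        ByteBuffer.wrap("news".getBytes(StandardCharsets.UTF_8)),
                        ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8)))))
                .subscribe(receivers -> System.out.println("delivered to " + receivers + " subscribers"));
    }
}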
@ -0,0 +1,226 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.redisson.client.codec.ByteArrayCodec;
|
||||
import org.redisson.client.codec.StringCodec;
|
||||
import org.redisson.client.protocol.RedisCommands;
|
||||
import org.redisson.client.protocol.RedisStrictCommand;
|
||||
import org.redisson.client.protocol.decoder.ObjectDecoder;
|
||||
import org.redisson.client.protocol.decoder.ObjectListReplayDecoder;
|
||||
import org.redisson.reactive.CommandReactiveExecutor;
|
||||
import org.springframework.data.redis.connection.*;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
import reactor.util.function.Tuple2;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonReactiveRedisClusterConnection extends RedissonReactiveRedisConnection implements ReactiveRedisClusterConnection {
|
||||
|
||||
public RedissonReactiveRedisClusterConnection(CommandReactiveExecutor executorService) {
|
||||
super(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveClusterKeyCommands keyCommands() {
|
||||
return new RedissonReactiveClusterKeyCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveClusterStringCommands stringCommands() {
|
||||
return new RedissonReactiveClusterStringCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveClusterNumberCommands numberCommands() {
|
||||
return new RedissonReactiveClusterNumberCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveClusterListCommands listCommands() {
|
||||
return new RedissonReactiveClusterListCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveClusterSetCommands setCommands() {
|
||||
return new RedissonReactiveClusterSetCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveClusterZSetCommands zSetCommands() {
|
||||
return new RedissonReactiveClusterZSetCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveClusterHashCommands hashCommands() {
|
||||
return new RedissonReactiveClusterHashCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveClusterGeoCommands geoCommands() {
|
||||
return new RedissonReactiveClusterGeoCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveClusterHyperLogLogCommands hyperLogLogCommands() {
|
||||
return new RedissonReactiveClusterHyperLogLogCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveClusterServerCommands serverCommands() {
|
||||
return new RedissonReactiveClusterServerCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveClusterStreamCommands streamCommands() {
|
||||
return new RedissonReactiveClusterStreamCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<String> ping(RedisClusterNode node) {
|
||||
return execute(node, RedisCommands.PING);
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<List<RedisClusterNode>> CLUSTER_NODES =
|
||||
new RedisStrictCommand<>("CLUSTER", "NODES", new ObjectDecoder(new RedisClusterNodeDecoder()));
|
||||
|
||||
@Override
|
||||
public Flux<RedisClusterNode> clusterGetNodes() {
|
||||
Mono<List<RedisClusterNode>> result = read(null, StringCodec.INSTANCE, CLUSTER_NODES);
|
||||
return result.flatMapMany(e -> Flux.fromIterable(e));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<RedisClusterNode> clusterGetReplicas(RedisClusterNode redisClusterNode) {
|
||||
Flux<RedisClusterNode> nodes = clusterGetNodes();
|
||||
Flux<RedisClusterNode> master = nodes.filter(e -> e.getHost().equals(redisClusterNode.getHost()) && e.getPort().equals(redisClusterNode.getPort()));
|
||||
return master.flatMap(node -> clusterGetNodes().filter(e -> Objects.equals(e.getMasterId(), node.getMasterId())));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Map<RedisClusterNode, Collection<RedisClusterNode>>> clusterGetMasterReplicaMap() {
|
||||
Flux<RedisClusterNode> nodes = clusterGetNodes();
|
||||
Flux<RedisClusterNode> masters = nodes.filter(e -> e.isMaster());
|
||||
return masters.flatMap(master -> Mono.just(master).zipWith(clusterGetNodes()
|
||||
.filter(e -> Objects.equals(e.getMasterId(), master.getMasterId()))
|
||||
.collect(Collectors.toSet())))
|
||||
.collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Integer> clusterGetSlotForKey(ByteBuffer byteBuffer) {
|
||||
return read(null, StringCodec.INSTANCE, RedisCommands.KEYSLOT, toByteArray(byteBuffer));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<RedisClusterNode> clusterGetNodeForSlot(int slot) {
|
||||
return clusterGetNodes().filter(n -> n.isMaster() && n.getSlotRange().contains(slot)).next();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<RedisClusterNode> clusterGetNodeForKey(ByteBuffer byteBuffer) {
|
||||
int slot = executorService.getConnectionManager().calcSlot(toByteArray(byteBuffer));
|
||||
return clusterGetNodeForSlot(slot);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<ClusterInfo> clusterGetClusterInfo() {
|
||||
Mono<Map<String, String>> mono = read(null, StringCodec.INSTANCE, RedisCommands.CLUSTER_INFO);
|
||||
return mono.map(e -> {
|
||||
Properties props = new Properties();
|
||||
for (Map.Entry<String, String> entry : e.entrySet()) {
|
||||
props.setProperty(entry.getKey(), entry.getValue());
|
||||
}
|
||||
return new ClusterInfo(props);
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> clusterAddSlots(RedisClusterNode redisClusterNode, int... ints) {
|
||||
List<Integer> params = convert(ints);
|
||||
return execute(redisClusterNode, RedisCommands.CLUSTER_ADDSLOTS, params.toArray());
|
||||
}
|
||||
|
||||
private List<Integer> convert(int... slots) {
|
||||
List<Integer> params = new ArrayList<Integer>();
|
||||
for (int slot : slots) {
|
||||
params.add(slot);
|
||||
}
|
||||
return params;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> clusterAddSlots(RedisClusterNode redisClusterNode, RedisClusterNode.SlotRange slotRange) {
|
||||
return clusterAddSlots(redisClusterNode, slotRange.getSlotsArray());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Long> clusterCountKeysInSlot(int slot) {
|
||||
Mono<RedisClusterNode> node = clusterGetNodeForSlot(slot);
|
||||
return node.flatMap(e -> {
|
||||
return execute(e, RedisCommands.CLUSTER_COUNTKEYSINSLOT, slot);
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> clusterDeleteSlots(RedisClusterNode redisClusterNode, int... ints) {
|
||||
List<Integer> params = convert(ints);
|
||||
return execute(redisClusterNode, RedisCommands.CLUSTER_DELSLOTS, params.toArray());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> clusterDeleteSlotsInRange(RedisClusterNode redisClusterNode, RedisClusterNode.SlotRange slotRange) {
|
||||
return clusterDeleteSlots(redisClusterNode, slotRange.getSlotsArray());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> clusterForget(RedisClusterNode redisClusterNode) {
|
||||
return execute(redisClusterNode, RedisCommands.CLUSTER_FORGET, redisClusterNode.getId());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> clusterMeet(RedisClusterNode redisClusterNode) {
|
||||
return execute(redisClusterNode, RedisCommands.CLUSTER_MEET, redisClusterNode.getHost(), redisClusterNode.getPort());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> clusterSetSlot(RedisClusterNode redisClusterNode, int slot, AddSlots addSlots) {
|
||||
return execute(redisClusterNode, RedisCommands.CLUSTER_SETSLOT, slot, addSlots);
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<List<String>> CLUSTER_GETKEYSINSLOT = new RedisStrictCommand<List<String>>("CLUSTER", "GETKEYSINSLOT", new ObjectListReplayDecoder<String>());
|
||||
|
||||
@Override
|
||||
public Flux<ByteBuffer> clusterGetKeysInSlot(int slot, int count) {
|
||||
Mono<List<byte[]>> f = executorService.reactive(() -> {
|
||||
return executorService.readAsync((String) null, ByteArrayCodec.INSTANCE, CLUSTER_GETKEYSINSLOT, slot, count);
|
||||
});
|
||||
return f.flatMapMany(e -> Flux.fromIterable(e)).map(e -> ByteBuffer.wrap(e));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> clusterReplicate(RedisClusterNode redisClusterNode, RedisClusterNode slave) {
|
||||
return execute(redisClusterNode, RedisCommands.CLUSTER_REPLICATE, slave.getId());
|
||||
}
|
||||
}
|
@ -0,0 +1,111 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.redisson.client.codec.StringCodec;
|
||||
import org.redisson.client.protocol.RedisCommands;
|
||||
import org.redisson.reactive.CommandReactiveExecutor;
|
||||
import org.springframework.data.redis.connection.*;
|
||||
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonReactiveRedisConnection extends RedissonBaseReactive implements ReactiveRedisConnection {
|
||||
|
||||
public RedissonReactiveRedisConnection(CommandReactiveExecutor executorService) {
|
||||
super(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> closeLater() {
|
||||
return Mono.empty();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveKeyCommands keyCommands() {
|
||||
return new RedissonReactiveKeyCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveStringCommands stringCommands() {
|
||||
return new RedissonReactiveStringCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveNumberCommands numberCommands() {
|
||||
return new RedissonReactiveNumberCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveListCommands listCommands() {
|
||||
return new RedissonReactiveListCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveSetCommands setCommands() {
|
||||
return new RedissonReactiveSetCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveZSetCommands zSetCommands() {
|
||||
return new RedissonReactiveZSetCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveHashCommands hashCommands() {
|
||||
return new RedissonReactiveHashCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveGeoCommands geoCommands() {
|
||||
return new RedissonReactiveGeoCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveHyperLogLogCommands hyperLogLogCommands() {
|
||||
return new RedissonReactiveHyperLogLogCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactivePubSubCommands pubSubCommands() {
|
||||
return new RedissonReactivePubSubCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveScriptingCommands scriptingCommands() {
|
||||
return new RedissonReactiveScriptingCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveServerCommands serverCommands() {
|
||||
return new RedissonReactiveServerCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReactiveStreamCommands streamCommands() {
|
||||
return new RedissonReactiveStreamCommands(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<String> ping() {
|
||||
return read(null, StringCodec.INSTANCE, RedisCommands.PING);
|
||||
}
|
||||
|
||||
}
|
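Usage note (illustrative, not part of the diff): this connection is what the higher-level Spring Data templates delegate to. A minimal sketch, assuming RedissonConnectionFactory is used as the ReactiveRedisConnectionFactory:

import org.redisson.Redisson;
import org.redisson.api.RedissonClient;
import org.redisson.spring.data.connection.RedissonConnectionFactory;
import org.springframework.data.redis.core.ReactiveStringRedisTemplate;

public class ReactiveTemplateExample {
    public static void main(String[] args) {
        RedissonClient redisson = Redisson.create(); // default config, localhost:6379
        RedissonConnectionFactory factory = new RedissonConnectionFactory(redisson);

        // The template obtains RedissonReactiveRedisConnection from the factory
        // and routes each operation through the command classes in this change.
        ReactiveStringRedisTemplate template = new ReactiveStringRedisTemplate(factory);
        template.opsForValue().set("greeting", "hello").block();
        System.out.println(template.opsForValue().get("greeting").block());

        redisson.shutdown();
    }
}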
@ -0,0 +1,150 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.redisson.api.RFuture;
|
||||
import org.redisson.client.codec.ByteArrayCodec;
|
||||
import org.redisson.client.protocol.RedisCommand;
|
||||
import org.redisson.client.protocol.RedisCommands;
|
||||
import org.redisson.misc.CompletableFutureWrapper;
|
||||
import org.redisson.reactive.CommandReactiveExecutor;
|
||||
import org.springframework.data.redis.connection.ReactiveScriptingCommands;
|
||||
import org.springframework.data.redis.connection.ReturnType;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonReactiveScriptingCommands extends RedissonBaseReactive implements ReactiveScriptingCommands {
|
||||
|
||||
RedissonReactiveScriptingCommands(CommandReactiveExecutor executorService) {
|
||||
super(executorService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<String> scriptFlush() {
|
||||
return executorService.reactive(() -> {
|
||||
RFuture<Void> f = executorService.writeAllVoidAsync(RedisCommands.SCRIPT_FLUSH);
|
||||
return toStringFuture(f);
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<String> scriptKill() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<String> scriptLoad(ByteBuffer script) {
|
||||
return executorService.reactive(() -> {
|
||||
List<CompletableFuture<String>> futures = executorService.executeAllAsync(RedisCommands.SCRIPT_LOAD, (Object)toByteArray(script));
|
||||
CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
|
||||
CompletableFuture<String> s = f.thenApply(r -> futures.get(0).getNow(null));
|
||||
return new CompletableFutureWrapper<>(s);
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<Boolean> scriptExists(List<String> scriptShas) {
|
||||
Mono<List<Boolean>> m = executorService.reactive(() -> {
|
||||
List<CompletableFuture<List<Boolean>>> futures = executorService.writeAllAsync(RedisCommands.SCRIPT_EXISTS, scriptShas.toArray());
|
||||
CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
|
||||
CompletableFuture<List<Boolean>> s = f.thenApply(r -> {
|
||||
List<Boolean> result = futures.get(0).getNow(new ArrayList<>());
|
||||
for (CompletableFuture<List<Boolean>> future : futures.subList(1, futures.size())) {
|
||||
List<Boolean> l = future.getNow(new ArrayList<>());
|
||||
for (int i = 0; i < l.size(); i++) {
|
||||
result.set(i, result.get(i) | l.get(i));
|
||||
}
|
||||
}
|
||||
return result;
|
||||
});
|
||||
return new CompletableFutureWrapper<>(s);
|
||||
});
|
||||
return m.flatMapMany(v -> Flux.fromIterable(v));
|
||||
}
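// Note: scriptExists() above fans SCRIPT EXISTS out to every node and ORs the per-position
// results, so a script is reported as cached if at least one node knows it. scriptLoad()
// mirrors this by loading the script on all nodes before returning the SHA1 digest.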
|
||||
|
||||
protected RedisCommand<?> toCommand(ReturnType returnType, String name) {
|
||||
RedisCommand<?> c = null;
|
||||
if (returnType == ReturnType.BOOLEAN) {
|
||||
c = org.redisson.api.RScript.ReturnType.BOOLEAN.getCommand();
|
||||
} else if (returnType == ReturnType.INTEGER) {
|
||||
c = org.redisson.api.RScript.ReturnType.INTEGER.getCommand();
|
||||
} else if (returnType == ReturnType.MULTI) {
|
||||
c = org.redisson.api.RScript.ReturnType.MULTI.getCommand();
|
||||
return new RedisCommand(c, name, new BinaryConvertor());
|
||||
} else if (returnType == ReturnType.STATUS) {
|
||||
c = org.redisson.api.RScript.ReturnType.STATUS.getCommand();
|
||||
} else if (returnType == ReturnType.VALUE) {
|
||||
c = org.redisson.api.RScript.ReturnType.VALUE.getCommand();
|
||||
return new RedisCommand(c, name, new BinaryConvertor());
|
||||
}
|
||||
return new RedisCommand(c, name);
|
||||
}
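// toCommand() above maps Spring's ReturnType onto Redisson's RScript.ReturnType. For MULTI
// and VALUE the command is rebuilt with a BinaryConvertor, which normalizes String replies
// to byte[] so that convert() can hand ByteBuffer values back to the caller.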
|
||||
|
||||
@Override
|
||||
public <T> Flux<T> eval(ByteBuffer script, ReturnType returnType, int numKeys, ByteBuffer... keysAndArgs) {
|
||||
RedisCommand<?> c = toCommand(returnType, "EVAL");
|
||||
List<Object> params = new ArrayList<Object>();
|
||||
params.add(toByteArray(script));
|
||||
params.add(numKeys);
|
||||
params.addAll(Arrays.stream(keysAndArgs).map(m -> toByteArray(m)).collect(Collectors.toList()));
|
||||
Mono<T> m = write(null, ByteArrayCodec.INSTANCE, c, params.toArray());
|
||||
return convert(m);
|
||||
}
|
||||
|
||||
protected <T> Flux<T> convert(Mono<T> m) {
|
||||
return (Flux<T>) m.map(e -> {
|
||||
if (e.getClass().isArray()) {
|
||||
return ByteBuffer.wrap((byte[])e);
|
||||
}
|
||||
if (e instanceof List) {
|
||||
List l = (List) e;
|
||||
if (!l.isEmpty()) {
|
||||
for (int i = 0; i < l.size(); i++) {
|
||||
if (l.get(i).getClass().isArray()) {
|
||||
l.set(i, ByteBuffer.wrap((byte[])l.get(i)));
|
||||
}
|
||||
}
|
||||
return l;
|
||||
}
|
||||
}
|
||||
return e;
|
||||
}).flux();
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T> Flux<T> evalSha(String scriptSha, ReturnType returnType, int numKeys, ByteBuffer... keysAndArgs) {
|
||||
RedisCommand<?> c = toCommand(returnType, "EVALSHA");
|
||||
List<Object> params = new ArrayList<Object>();
|
||||
params.add(scriptSha);
|
||||
params.add(numKeys);
|
||||
params.addAll(Arrays.stream(keysAndArgs).map(m -> toByteArray(m)).collect(Collectors.toList()));
|
||||
Mono<T> m = write(null, ByteArrayCodec.INSTANCE, c, params.toArray());
|
||||
return convert(m);
|
||||
}
|
||||
|
||||
}
|
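For reference, a minimal sketch of evaluating a Lua script through the scripting commands above; the script, key and value are illustrative, and the connection is assumed to come from RedissonConnectionFactory#getReactiveConnection():

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.springframework.data.redis.connection.ReactiveRedisConnection;
import org.springframework.data.redis.connection.ReturnType;

public class ReactiveScriptingExample {

    // Evaluates a tiny Lua script through the eval() implementation above.
    public static void runScript(ReactiveRedisConnection connection) {
        ByteBuffer script = ByteBuffer.wrap(
                "return redis.call('SET', KEYS[1], ARGV[1])".getBytes(StandardCharsets.UTF_8));
        ByteBuffer key = ByteBuffer.wrap("script:key".getBytes(StandardCharsets.UTF_8));
        ByteBuffer value = ByteBuffer.wrap("script:value".getBytes(StandardCharsets.UTF_8));

        // ReturnType.STATUS is translated by toCommand() into RScript.ReturnType.STATUS.
        connection.scriptingCommands()
                .eval(script, ReturnType.STATUS, 1, key, value)
                .doOnNext(reply -> System.out.println("EVAL -> " + reply))
                .blockLast();
    }
}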
@ -0,0 +1,201 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.redisson.api.RFuture;
|
||||
import org.redisson.client.codec.LongCodec;
|
||||
import org.redisson.client.codec.StringCodec;
|
||||
import org.redisson.client.handler.State;
|
||||
import org.redisson.client.protocol.RedisCommands;
|
||||
import org.redisson.client.protocol.RedisStrictCommand;
|
||||
import org.redisson.client.protocol.decoder.ObjectDecoder;
|
||||
import org.redisson.client.protocol.decoder.TimeLongObjectDecoder;
|
||||
import org.redisson.misc.CompletableFutureWrapper;
|
||||
import org.redisson.reactive.CommandReactiveExecutor;
|
||||
import org.springframework.data.redis.connection.ReactiveServerCommands;
|
||||
import org.springframework.data.redis.connection.RedisServerCommands;
|
||||
import org.springframework.data.redis.core.types.RedisClientInfo;
|
||||
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonReactiveServerCommands extends RedissonBaseReactive implements ReactiveServerCommands {
|
||||
|
||||
RedissonReactiveServerCommands(CommandReactiveExecutor executorService) {
|
||||
super(executorService);
|
||||
}
|
||||
|
||||
static final RedisStrictCommand<String> BGREWRITEAOF = new RedisStrictCommand<String>("BGREWRITEAOF");
|
||||
|
||||
@Override
|
||||
public Mono<String> bgReWriteAof() {
|
||||
return write(null, StringCodec.INSTANCE, BGREWRITEAOF);
|
||||
}
|
||||
|
||||
static final RedisStrictCommand<String> BGSAVE = new RedisStrictCommand<String>("BGSAVE");
|
||||
|
||||
@Override
|
||||
public Mono<String> bgSave() {
|
||||
return write(null, StringCodec.INSTANCE, BGSAVE);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Long> lastSave() {
|
||||
return write(null, StringCodec.INSTANCE, RedisCommands.LASTSAVE);
|
||||
}
|
||||
|
||||
static final RedisStrictCommand<String> SAVE = new RedisStrictCommand<String>("SAVE");
|
||||
|
||||
@Override
|
||||
public Mono<String> save() {
|
||||
return write(null, StringCodec.INSTANCE, SAVE);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Long> dbSize() {
|
||||
return executorService.reactive(() -> {
|
||||
List<CompletableFuture<Long>> futures = executorService.readAllAsync(RedisCommands.DBSIZE);
|
||||
CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
|
||||
CompletableFuture<Long> s = f.thenApply(r -> futures.stream().mapToLong(v -> v.getNow(0L)).sum());
|
||||
return new CompletableFutureWrapper<>(s);
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<String> FLUSHDB = new RedisStrictCommand<String>("FLUSHDB");
|
||||
|
||||
@Override
|
||||
public Mono<String> flushDb() {
|
||||
return executorService.reactive(() -> {
|
||||
RFuture<Void> f = executorService.writeAllVoidAsync(FLUSHDB);
|
||||
return toStringFuture(f);
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<String> FLUSHALL = new RedisStrictCommand<String>("FLUSHALL");
|
||||
|
||||
@Override
|
||||
public Mono<String> flushAll() {
|
||||
return executorService.reactive(() -> {
|
||||
RFuture<Void> f = executorService.writeAllVoidAsync(FLUSHALL);
|
||||
return toStringFuture(f);
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<String> flushDb(RedisServerCommands.FlushOption option) {
|
||||
if (option == RedisServerCommands.FlushOption.ASYNC) {
|
||||
return executorService.reactive(() -> {
|
||||
RFuture<Void> f = executorService.writeAllVoidAsync(FLUSHDB, option.toString());
|
||||
return toStringFuture(f);
|
||||
});
|
||||
}
|
||||
return flushDb();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<String> flushAll(RedisServerCommands.FlushOption option) {
|
||||
if (option == RedisServerCommands.FlushOption.ASYNC) {
|
||||
return executorService.reactive(() -> {
|
||||
RFuture<Void> f = executorService.writeAllVoidAsync(FLUSHALL, option.toString());
|
||||
return toStringFuture(f);
|
||||
});
|
||||
}
|
||||
return flushAll();
|
||||
}
|
||||
|
||||
static final RedisStrictCommand<Properties> INFO_DEFAULT = new RedisStrictCommand<Properties>("INFO", "DEFAULT", new ObjectDecoder(new PropertiesDecoder()));
|
||||
static final RedisStrictCommand<Properties> INFO = new RedisStrictCommand<Properties>("INFO", new ObjectDecoder(new PropertiesDecoder()));
|
||||
|
||||
@Override
|
||||
public Mono<Properties> info() {
|
||||
return read(null, StringCodec.INSTANCE, INFO_DEFAULT);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Properties> info(String section) {
|
||||
return read(null, StringCodec.INSTANCE, INFO, section);
|
||||
}
|
||||
|
||||
static final RedisStrictCommand<Properties> CONFIG_GET = new RedisStrictCommand<Properties>("CONFIG", "GET", new PropertiesListDecoder());
|
||||
|
||||
@Override
|
||||
public Mono<Properties> getConfig(String pattern) {
|
||||
return read(null, StringCodec.INSTANCE, CONFIG_GET, pattern);
|
||||
}
|
||||
|
||||
static final RedisStrictCommand<String> CONFIG_SET = new RedisStrictCommand<String>("CONFIG", "SET");
|
||||
|
||||
@Override
|
||||
public Mono<String> setConfig(String param, String value) {
|
||||
return write(null, StringCodec.INSTANCE, CONFIG_SET, param, value);
|
||||
}
|
||||
|
||||
static final RedisStrictCommand<String> CONFIG_RESETSTAT = new RedisStrictCommand<String>("CONFIG", "RESETSTAT");
|
||||
|
||||
@Override
|
||||
public Mono<String> resetConfigStats() {
|
||||
return write(null, StringCodec.INSTANCE, CONFIG_RESETSTAT);
|
||||
}
|
||||
|
||||
static final RedisStrictCommand<Long> TIME = new RedisStrictCommand<Long>("TIME", new TimeLongObjectDecoder());
|
||||
|
||||
@Override
|
||||
public Mono<Long> time() {
|
||||
return read(null, LongCodec.INSTANCE, TIME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Long> time(TimeUnit timeUnit) {
|
||||
return read(null, LongCodec.INSTANCE, new RedisStrictCommand<>("TIME", new TimeLongObjectDecoder() {
|
||||
@Override
|
||||
public Long decode(List<Object> parts, State state) {
|
||||
Long time = super.decode(parts, state);
|
||||
return timeUnit.convert(time, TimeUnit.MILLISECONDS);
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<String> killClient(String host, int port) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<String> setClientName(String name) {
|
||||
throw new UnsupportedOperationException("Should be defined through Redisson Config object");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<String> getClientName() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<RedisClientInfo> getClientList() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
}
|
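For reference, a short sketch of the server commands above; the "redis_version" field name assumes the standard Redis INFO reply:

import org.springframework.data.redis.connection.ReactiveRedisConnection;
import org.springframework.data.redis.connection.ReactiveServerCommands;

public class ReactiveServerCommandsExample {

    // Prints the aggregated key count and the server version reported by INFO.
    public static void printServerStats(ReactiveRedisConnection connection) {
        ReactiveServerCommands server = connection.serverCommands();

        // dbSize() above sums DBSIZE over all known nodes, so in a cluster this is
        // the cluster-wide total rather than a single node's value.
        server.dbSize()
                .doOnNext(size -> System.out.println("keys: " + size))
                .block();

        server.info("server")
                .doOnNext(props -> System.out.println("redis_version: " + props.getProperty("redis_version")))
                .block();
    }
}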
@ -0,0 +1,320 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.reactivestreams.Publisher;
|
||||
import org.redisson.ScanResult;
|
||||
import org.redisson.api.RFuture;
|
||||
import org.redisson.client.RedisClient;
|
||||
import org.redisson.client.codec.ByteArrayCodec;
|
||||
import org.redisson.client.codec.StringCodec;
|
||||
import org.redisson.client.protocol.RedisCommand;
|
||||
import org.redisson.client.protocol.RedisCommands;
|
||||
import org.redisson.client.protocol.RedisStrictCommand;
|
||||
import org.redisson.client.protocol.convertor.BooleanReplayConvertor;
|
||||
import org.redisson.client.protocol.decoder.ListScanResult;
|
||||
import org.redisson.client.protocol.decoder.ObjectListReplayDecoder;
|
||||
import org.redisson.reactive.CommandReactiveExecutor;
|
||||
import org.redisson.reactive.SetReactiveIterator;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.BooleanResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.ByteBufferResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.CommandResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.KeyCommand;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.KeyScanCommand;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveSetCommands;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonReactiveSetCommands extends RedissonBaseReactive implements ReactiveSetCommands {
|
||||
|
||||
RedissonReactiveSetCommands(CommandReactiveExecutor executorService) {
|
||||
super(executorService);
|
||||
}
|
||||
|
||||
private static final RedisCommand<Long> SADD = new RedisCommand<Long>("SADD");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<SAddCommand, Long>> sAdd(Publisher<SAddCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValues(), "Values must not be null!");
|
||||
|
||||
List<Object> args = new ArrayList<Object>(command.getValues().size() + 1);
|
||||
args.add(toByteArray(command.getKey()));
|
||||
args.addAll(command.getValues().stream().map(v -> toByteArray(v)).collect(Collectors.toList()));
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, SADD, args.toArray());
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<Long> SREM = new RedisCommand<Long>("SREM");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<SRemCommand, Long>> sRem(Publisher<SRemCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValues(), "Values must not be null!");
|
||||
|
||||
List<Object> args = new ArrayList<Object>(command.getValues().size() + 1);
|
||||
args.add(toByteArray(command.getKey()));
|
||||
args.addAll(command.getValues().stream().map(v -> toByteArray(v)).collect(Collectors.toList()));
|
||||
|
||||
Mono<Long> m = write((byte[])args.get(0), StringCodec.INSTANCE, SREM, args.toArray());
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ByteBuffer> sPop(SPopCommand command) {
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Set<byte[]>> m = write(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.SPOP, keyBuf, command.getCount());
|
||||
return m.flatMapMany(v -> Flux.fromIterable(v).map(e -> ByteBuffer.wrap(e)));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ByteBufferResponse<KeyCommand>> sPop(Publisher<KeyCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<byte[]> m = write(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.SPOP_SINGLE, keyBuf);
|
||||
return m.map(v -> new ByteBufferResponse<>(command, ByteBuffer.wrap(v)));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<SMoveCommand>> sMove(Publisher<SMoveCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getDestination(), "Destination key must not be null!");
|
||||
Assert.notNull(command.getValue(), "Value must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] destinationBuf = toByteArray(command.getDestination());
|
||||
byte[] valueBuf = toByteArray(command.getValue());
|
||||
Mono<Boolean> m = write(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.SMOVE, keyBuf, destinationBuf, valueBuf);
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> SCARD = new RedisStrictCommand<Long>("SCARD");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<KeyCommand, Long>> sCard(Publisher<KeyCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, SCARD, keyBuf);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<SIsMemberCommand>> sIsMember(Publisher<SIsMemberCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValue(), "Value must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] valueBuf = toByteArray(command.getValue());
|
||||
Mono<Boolean> m = read(keyBuf, StringCodec.INSTANCE, RedisCommands.SISMEMBER, keyBuf, valueBuf);
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<SInterCommand, Flux<ByteBuffer>>> sInter(Publisher<SInterCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKeys(), "Key must not be null!");
|
||||
|
||||
List<byte[]> list = command.getKeys().stream().map(v -> toByteArray(v)).collect(Collectors.toList());
|
||||
Mono<Set<byte[]>> m = write((byte[])list.get(0), ByteArrayCodec.INSTANCE, RedisCommands.SINTER, list.toArray());
|
||||
return m.map(v -> new CommandResponse<>(command,
|
||||
Flux.fromIterable(v).map(e -> ByteBuffer.wrap(e))));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<SInterStoreCommand, Long>> sInterStore(Publisher<SInterStoreCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKeys(), "Keys must not be null!");
|
||||
Assert.notNull(command.getKey(), "Destination key must not be null!");
|
||||
|
||||
List<Object> args = new ArrayList<Object>(command.getKeys().size() + 1);
|
||||
args.add(toByteArray(command.getKey()));
|
||||
args.addAll(command.getKeys().stream().map(v -> toByteArray(v)).collect(Collectors.toList()));
|
||||
|
||||
Mono<Long> m = write((byte[])args.get(0), StringCodec.INSTANCE, RedisCommands.SINTERSTORE, args.toArray());
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<SUnionCommand, Flux<ByteBuffer>>> sUnion(Publisher<SUnionCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKeys(), "Key must not be null!");
|
||||
|
||||
List<byte[]> list = command.getKeys().stream().map(v -> toByteArray(v)).collect(Collectors.toList());
|
||||
Mono<Set<byte[]>> m = write((byte[])list.get(0), ByteArrayCodec.INSTANCE, RedisCommands.SUNION, list.toArray());
|
||||
return m.map(v -> new CommandResponse<>(command,
|
||||
Flux.fromIterable(v).map(e -> ByteBuffer.wrap(e))));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<SUnionStoreCommand, Long>> sUnionStore(Publisher<SUnionStoreCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKeys(), "Keys must not be null!");
|
||||
Assert.notNull(command.getKey(), "Destination key must not be null!");
|
||||
|
||||
List<Object> args = new ArrayList<Object>(command.getKeys().size() + 1);
|
||||
args.add(toByteArray(command.getKey()));
|
||||
args.addAll(command.getKeys().stream().map(v -> toByteArray(v)).collect(Collectors.toList()));
|
||||
|
||||
Mono<Long> m = write((byte[])args.get(0), StringCodec.INSTANCE, RedisCommands.SUNIONSTORE, args.toArray());
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<SDiffCommand, Flux<ByteBuffer>>> sDiff(Publisher<SDiffCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKeys(), "Key must not be null!");
|
||||
|
||||
List<byte[]> list = command.getKeys().stream().map(v -> toByteArray(v)).collect(Collectors.toList());
|
||||
Mono<Set<byte[]>> m = write((byte[])list.get(0), ByteArrayCodec.INSTANCE, RedisCommands.SDIFF, list.toArray());
|
||||
return m.map(v -> new CommandResponse<>(command,
|
||||
Flux.fromIterable(v).map(e -> ByteBuffer.wrap(e))));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<SDiffStoreCommand, Long>> sDiffStore(Publisher<SDiffStoreCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKeys(), "Keys must not be null!");
|
||||
Assert.notNull(command.getKey(), "Destination key must not be null!");
|
||||
|
||||
List<Object> args = new ArrayList<Object>(command.getKeys().size() + 1);
|
||||
args.add(toByteArray(command.getKey()));
|
||||
args.addAll(command.getKeys().stream().map(v -> toByteArray(v)).collect(Collectors.toList()));
|
||||
|
||||
Mono<Long> m = write((byte[])args.get(0), StringCodec.INSTANCE, RedisCommands.SDIFFSTORE, args.toArray());
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<KeyCommand, Flux<ByteBuffer>>> sMembers(Publisher<KeyCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Set<byte[]>> m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.SMEMBERS, keyBuf);
|
||||
return m.map(v -> new CommandResponse<>(command, Flux.fromIterable(v).map(e -> ByteBuffer.wrap(e))));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<KeyCommand, Flux<ByteBuffer>>> sScan(Publisher<KeyScanCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getOptions(), "ScanOptions must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Flux<byte[]> flux = Flux.create(new SetReactiveIterator<byte[]>() {
|
||||
@Override
|
||||
protected RFuture<ScanResult<Object>> scanIterator(RedisClient client, long nextIterPos) {
|
||||
if (command.getOptions().getPattern() == null) {
|
||||
return executorService.readAsync(client, keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.SSCAN,
|
||||
keyBuf, nextIterPos, "COUNT", Optional.ofNullable(command.getOptions().getCount()).orElse(10L));
|
||||
}
|
||||
|
||||
return executorService.readAsync(client, keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.SSCAN,
|
||||
keyBuf, nextIterPos, "MATCH", command.getOptions().getPattern(),
|
||||
"COUNT", Optional.ofNullable(command.getOptions().getCount()).orElse(10L));
|
||||
}
|
||||
});
|
||||
return Mono.just(new CommandResponse<>(command, flux.map(v -> ByteBuffer.wrap(v))));
|
||||
});
|
||||
}
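// sScan() above streams the set incrementally: SetReactiveIterator keeps re-issuing SSCAN
// with the cursor returned by the previous round trip, adds MATCH only when a pattern is
// configured, and falls back to COUNT 10 when no count hint is given.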
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<SRandMembersCommand, Flux<ByteBuffer>>> sRandMember(
|
||||
Publisher<SRandMembersCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Set<byte[]>> m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.SRANDMEMBER, keyBuf, command.getCount().orElse(1L));
|
||||
return m.map(v -> new CommandResponse<>(command, Flux.fromIterable(v).map(e -> ByteBuffer.wrap(e))));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<List<Boolean>> SMISMEMBER = new RedisCommand<>("SMISMEMBER", new ObjectListReplayDecoder<>());
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.MultiValueResponse<SMIsMemberCommand, Boolean>> sMIsMember(Publisher<SMIsMemberCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValues(), "Values must not be null!");
|
||||
|
||||
List<Object> args = new ArrayList<>();
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
args.add(keyBuf);
|
||||
args.addAll(command.getValues());
|
||||
|
||||
Mono<List<Boolean>> m = read(keyBuf, StringCodec.INSTANCE, SMISMEMBER, args.toArray());
|
||||
return m.map(v -> new ReactiveRedisConnection.MultiValueResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
}
|
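For reference, a minimal sketch driving the set commands above through Spring Data's ReactiveStringRedisTemplate, assuming a local Redis at redis://127.0.0.1:6379 (the key and member values are illustrative):

import org.redisson.Redisson;
import org.redisson.api.RedissonClient;
import org.redisson.config.Config;
import org.redisson.spring.data.connection.RedissonConnectionFactory;
import org.springframework.data.redis.core.ReactiveStringRedisTemplate;

public class ReactiveSetExample {

    public static void main(String[] args) {
        Config config = new Config();
        config.useSingleServer().setAddress("redis://127.0.0.1:6379"); // assumed address
        RedissonClient redisson = Redisson.create(config);

        // The template drives RedissonReactiveSetCommands through the reactive connection.
        ReactiveStringRedisTemplate template =
                new ReactiveStringRedisTemplate(new RedissonConnectionFactory(redisson));

        template.opsForSet().add("colors", "red", "green", "blue")      // SADD
                .then(template.opsForSet().size("colors"))               // SCARD
                .doOnNext(size -> System.out.println("set size: " + size))
                .thenMany(template.opsForSet().members("colors"))        // SMEMBERS
                .doOnNext(System.out::println)
                .blockLast();

        redisson.shutdown();
    }
}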
@ -0,0 +1,496 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.reactivestreams.Publisher;
|
||||
import org.redisson.api.*;
|
||||
import org.redisson.client.codec.ByteArrayCodec;
|
||||
import org.redisson.client.codec.StringCodec;
|
||||
import org.redisson.client.protocol.RedisCommand;
|
||||
import org.redisson.client.protocol.RedisCommands;
|
||||
import org.redisson.client.protocol.RedisStrictCommand;
|
||||
import org.redisson.client.protocol.decoder.*;
|
||||
import org.redisson.reactive.CommandReactiveExecutor;
|
||||
import org.springframework.data.domain.Range;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection;
|
||||
import org.springframework.data.redis.connection.ReactiveStreamCommands;
|
||||
import org.springframework.data.redis.connection.RedisStreamCommands;
|
||||
import org.springframework.data.redis.connection.stream.StreamInfo;
|
||||
import org.springframework.data.redis.connection.stream.*;
|
||||
import org.springframework.util.Assert;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.time.Duration;
|
||||
import java.time.temporal.ChronoUnit;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonReactiveStreamCommands extends RedissonBaseReactive implements ReactiveStreamCommands {
|
||||
|
||||
RedissonReactiveStreamCommands(CommandReactiveExecutor executorService) {
|
||||
super(executorService);
|
||||
}
|
||||
|
||||
private static List<String> toStringList(List<RecordId> recordIds) {
|
||||
return recordIds.stream().map(RecordId::getValue).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.CommandResponse<XClaimCommand, Flux<RecordId>>> xClaimJustId(Publisher<XClaimCommand> publisher) {
|
||||
return execute(publisher, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getGroupName(), "Group name must not be null!");
|
||||
Assert.notNull(command.getNewOwner(), "NewOwner must not be null!");
|
||||
Assert.notEmpty(command.getOptions().getIds(), "Ids collection must not be empty!");
|
||||
|
||||
List<Object> params = new ArrayList<>();
|
||||
byte[] k = toByteArray(command.getKey());
|
||||
params.add(k);
|
||||
params.add(command.getGroupName());
|
||||
params.add(command.getNewOwner());
|
||||
params.add(Objects.requireNonNull(command.getOptions().getIdleTime()).toMillis());
|
||||
params.addAll(Arrays.asList(command.getOptions().getIdsAsStringArray()));
|
||||
params.add("JUSTID");
|
||||
|
||||
Mono<Map<StreamMessageId, Map<byte[], byte[]>>> m = write(k, ByteArrayCodec.INSTANCE, RedisCommands.XCLAIM, params.toArray());
|
||||
return m.map(v -> new ReactiveRedisConnection.CommandResponse<>(command, Flux.fromStream(v.entrySet().stream()).map(e -> {
|
||||
return RecordId.of(e.getKey().toString());
|
||||
})));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.CommandResponse<XClaimCommand, Flux<ByteBufferRecord>>> xClaim(Publisher<XClaimCommand> publisher) {
|
||||
return execute(publisher, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getGroupName(), "Group name must not be null!");
|
||||
Assert.notNull(command.getNewOwner(), "NewOwner must not be null!");
|
||||
Assert.notEmpty(command.getOptions().getIds(), "Ids collection must not be empty!");
|
||||
|
||||
List<Object> params = new ArrayList<>();
|
||||
byte[] k = toByteArray(command.getKey());
|
||||
params.add(k);
|
||||
params.add(command.getGroupName());
|
||||
params.add(command.getNewOwner());
|
||||
params.add(Objects.requireNonNull(command.getOptions().getIdleTime()).toMillis());
|
||||
params.addAll(Arrays.asList(command.getOptions().getIdsAsStringArray()));
|
||||
|
||||
Mono<Map<StreamMessageId, Map<byte[], byte[]>>> m = write(k, ByteArrayCodec.INSTANCE, RedisCommands.XCLAIM, params.toArray());
|
||||
return m.map(v -> new ReactiveRedisConnection.CommandResponse<>(command, Flux.fromStream(v.entrySet().stream()).map(e -> {
|
||||
Map<ByteBuffer, ByteBuffer> map = e.getValue().entrySet().stream()
|
||||
.collect(Collectors.toMap(entry -> ByteBuffer.wrap(entry.getKey()),
|
||||
entry -> ByteBuffer.wrap(entry.getValue())));
|
||||
return StreamRecords.newRecord()
|
||||
.in(command.getKey())
|
||||
.withId(RecordId.of(e.getKey().toString()))
|
||||
.ofBuffer(map);
|
||||
})));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.CommandResponse<PendingRecordsCommand, PendingMessagesSummary>> xPendingSummary(Publisher<PendingRecordsCommand> publisher) {
|
||||
return execute(publisher, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getGroupName(), "Group name must not be null!");
|
||||
|
||||
byte[] k = toByteArray(command.getKey());
|
||||
|
||||
Mono<PendingResult> m = write(k, StringCodec.INSTANCE, RedisCommands.XPENDING, k, command.getGroupName());
|
||||
return m.map(v -> {
|
||||
Range<String> range = Range.open(v.getLowestId().toString(), v.getHighestId().toString());
|
||||
PendingMessagesSummary s = new PendingMessagesSummary(command.getGroupName(), v.getTotal(), range, v.getConsumerNames());
|
||||
return new ReactiveRedisConnection.CommandResponse<>(command, s);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.CommandResponse<PendingRecordsCommand, PendingMessages>> xPending(Publisher<PendingRecordsCommand> publisher) {
|
||||
return execute(publisher, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getGroupName(), "Group name must not be null!");
|
||||
|
||||
byte[] k = toByteArray(command.getKey());
|
||||
|
||||
List<Object> params = new ArrayList<>();
|
||||
params.add(k);
|
||||
|
||||
params.add(((Range.Bound<String>)command.getRange().getLowerBound()).getValue().orElse("-"));
|
||||
params.add(((Range.Bound<String>)command.getRange().getUpperBound()).getValue().orElse("+"));
|
||||
|
||||
if (command.getCount() != null) {
|
||||
params.add(command.getCount());
|
||||
}
|
||||
if (command.getConsumerName() != null) {
|
||||
params.add(command.getConsumerName());
|
||||
}
|
||||
|
||||
Mono<List<PendingEntry>> m = write(k, StringCodec.INSTANCE, RedisCommands.XPENDING_ENTRIES, params.toArray());
|
||||
return m.map(list -> {
|
||||
List<PendingMessage> msgs = list.stream().map(v -> new PendingMessage(RecordId.of(v.getId().toString()),
|
||||
Consumer.from(command.getGroupName(), v.getConsumerName()),
|
||||
Duration.of(v.getIdleTime(), ChronoUnit.MILLIS),
|
||||
v.getLastTimeDelivered())).collect(Collectors.toList());
|
||||
PendingMessages s = new PendingMessages(command.getGroupName(), command.getRange(), msgs);
|
||||
return new ReactiveRedisConnection.CommandResponse<>(command, s);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<org.redisson.api.StreamInfo<Object, Object>> XINFO_STREAM = new RedisCommand<>("XINFO", "STREAM",
|
||||
new ListMultiDecoder2(
|
||||
new StreamInfoDecoder(),
|
||||
new ObjectDecoder(StringCodec.INSTANCE.getValueDecoder()),
|
||||
new ObjectMapDecoder(false)));
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.CommandResponse<XInfoCommand, StreamInfo.XInfoStream>> xInfo(Publisher<XInfoCommand> publisher) {
|
||||
return execute(publisher, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] k = toByteArray(command.getKey());
|
||||
|
||||
Mono<org.redisson.api.StreamInfo<byte[], byte[]>> m = write(k, ByteArrayCodec.INSTANCE, XINFO_STREAM, k);
|
||||
return m.map(i -> {
|
||||
|
||||
Map<String, Object> res = new HashMap<>();
|
||||
res.put("length", (long) i.getLength());
|
||||
res.put("first-entry", i.getFirstEntry().getData());
|
||||
res.put("last-entry", i.getLastEntry().getData());
|
||||
res.put("radix-tree-keys", i.getRadixTreeKeys());
|
||||
res.put("radix-tree-nodes", i.getRadixTreeNodes());
|
||||
res.put("groups", (long) i.getGroups());
|
||||
res.put("last-generated-id", i.getLastGeneratedId().toString());
|
||||
|
||||
List<Object> list = res.entrySet().stream()
|
||||
.flatMap(e -> Stream.of(e.getKey(), e.getValue()))
|
||||
.collect(Collectors.toList());
|
||||
return new ReactiveRedisConnection.CommandResponse<>(command, StreamInfo.XInfoStream.fromList(list));
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.CommandResponse<XInfoCommand, Flux<StreamInfo.XInfoGroup>>> xInfoGroups(Publisher<XInfoCommand> publisher) {
|
||||
return execute(publisher, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] k = toByteArray(command.getKey());
|
||||
|
||||
Mono<List<StreamGroup>> m = write(k, StringCodec.INSTANCE, RedisCommands.XINFO_GROUPS, k);
|
||||
return m.map(v -> new ReactiveRedisConnection.CommandResponse<>(command, Flux.fromStream(v.stream()).map(r -> {
|
||||
Map<String, Object> res = new HashMap<>();
|
||||
res.put("name", r.getName());
|
||||
res.put("consumers", (long) r.getConsumers());
|
||||
res.put("pending", (long) r.getPending());
|
||||
res.put("last-delivered-id", r.getLastDeliveredId().toString());
|
||||
|
||||
List<Object> list = res.entrySet().stream()
|
||||
.flatMap(e -> Stream.of(e.getKey(), e.getValue()))
|
||||
.collect(Collectors.toList());
|
||||
return StreamInfo.XInfoGroup.fromList(list);
|
||||
})));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.CommandResponse<XInfoCommand, Flux<StreamInfo.XInfoConsumer>>> xInfoConsumers(Publisher<XInfoCommand> publisher) {
|
||||
return execute(publisher, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getGroupName(), "Group name must not be null!");
|
||||
|
||||
byte[] k = toByteArray(command.getKey());
|
||||
|
||||
Mono<List<StreamConsumer>> m = write(k, StringCodec.INSTANCE, RedisCommands.XINFO_CONSUMERS, k, command.getGroupName());
|
||||
return m.map(v -> new ReactiveRedisConnection.CommandResponse<>(command, Flux.fromStream(v.stream()).map(r -> {
|
||||
Map<String, Object> res = new HashMap<>();
|
||||
res.put("name", r.getName());
|
||||
res.put("idle", r.getIdleTime());
|
||||
res.put("pending", (long) r.getPending());
|
||||
|
||||
List<Object> list = res.entrySet().stream()
|
||||
.flatMap(e -> Stream.of(e.getKey(), e.getValue()))
|
||||
.collect(Collectors.toList());
|
||||
return new StreamInfo.XInfoConsumer(command.getGroupName(), list);
|
||||
})));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.NumericResponse<AcknowledgeCommand, Long>> xAck(Publisher<AcknowledgeCommand> publisher) {
|
||||
return execute(publisher, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getGroup(), "Group must not be null!");
|
||||
Assert.notNull(command.getRecordIds(), "recordIds must not be null!");
|
||||
|
||||
List<Object> params = new ArrayList<>();
|
||||
byte[] k = toByteArray(command.getKey());
|
||||
params.add(k);
|
||||
params.add(command.getGroup());
|
||||
params.addAll(toStringList(command.getRecordIds()));
|
||||
|
||||
Mono<Long> m = write(k, StringCodec.INSTANCE, RedisCommands.XACK, params.toArray());
|
||||
return m.map(v -> new ReactiveRedisConnection.NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.CommandResponse<AddStreamRecord, RecordId>> xAdd(Publisher<AddStreamRecord> publisher) {
|
||||
return execute(publisher, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getBody(), "Body must not be null!");
|
||||
|
||||
byte[] k = toByteArray(command.getKey());
|
||||
List<Object> params = new LinkedList<>();
|
||||
params.add(k);
|
||||
|
||||
if (command.getMaxlen() != null) {
|
||||
params.add("MAXLEN");
|
||||
params.add(command.getMaxlen());
|
||||
}
|
||||
|
||||
if (!command.getRecord().getId().shouldBeAutoGenerated()) {
|
||||
params.add(command.getRecord().getId().getValue());
|
||||
} else {
|
||||
params.add("*");
|
||||
}
|
||||
|
||||
for (Map.Entry<ByteBuffer, ByteBuffer> entry : command.getBody().entrySet()) {
|
||||
params.add(toByteArray(entry.getKey()));
|
||||
params.add(toByteArray(entry.getValue()));
|
||||
}
|
||||
|
||||
Mono<StreamMessageId> m = write(k, StringCodec.INSTANCE, RedisCommands.XADD, params.toArray());
|
||||
return m.map(v -> new ReactiveRedisConnection.CommandResponse<>(command, RecordId.of(v.toString())));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.CommandResponse<DeleteCommand, Long>> xDel(Publisher<DeleteCommand> publisher) {
|
||||
return execute(publisher, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getRecordIds(), "recordIds must not be null!");
|
||||
|
||||
byte[] k = toByteArray(command.getKey());
|
||||
List<Object> params = new ArrayList<>();
|
||||
params.add(k);
|
||||
params.addAll(toStringList(command.getRecordIds()));
|
||||
|
||||
Mono<Long> m = write(k, StringCodec.INSTANCE, RedisCommands.XDEL, params.toArray());
|
||||
return m.map(v -> new ReactiveRedisConnection.CommandResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.NumericResponse<ReactiveRedisConnection.KeyCommand, Long>> xLen(Publisher<ReactiveRedisConnection.KeyCommand> publisher) {
|
||||
return execute(publisher, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] k = toByteArray(command.getKey());
|
||||
|
||||
Mono<Long> m = write(k, StringCodec.INSTANCE, RedisCommands.XLEN, k);
|
||||
return m.map(v -> new ReactiveRedisConnection.NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.CommandResponse<RangeCommand, Flux<ByteBufferRecord>>> xRange(Publisher<RangeCommand> publisher) {
|
||||
return range(RedisCommands.XRANGE, publisher);
|
||||
}
|
||||
|
||||
private Flux<ReactiveRedisConnection.CommandResponse<RangeCommand, Flux<ByteBufferRecord>>> range(RedisCommand<?> rangeCommand, Publisher<RangeCommand> publisher) {
|
||||
return execute(publisher, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getRange(), "Range must not be null!");
|
||||
Assert.notNull(command.getLimit(), "Limit must not be null!");
|
||||
|
||||
byte[] k = toByteArray(command.getKey());
|
||||
|
||||
List<Object> params = new LinkedList<>();
|
||||
params.add(k);
|
||||
|
||||
if (rangeCommand == RedisCommands.XRANGE) {
|
||||
params.add(command.getRange().getLowerBound().getValue().orElse("-"));
|
||||
params.add(command.getRange().getUpperBound().getValue().orElse("+"));
|
||||
} else {
|
||||
params.add(command.getRange().getUpperBound().getValue().orElse("+"));
|
||||
params.add(command.getRange().getLowerBound().getValue().orElse("-"));
|
||||
}
|
||||
|
||||
|
||||
if (command.getLimit().getCount() > 0) {
|
||||
params.add("COUNT");
|
||||
params.add(command.getLimit().getCount());
|
||||
}
|
||||
|
||||
Mono<Map<StreamMessageId, Map<byte[], byte[]>>> m = write(k, ByteArrayCodec.INSTANCE, rangeCommand, params.toArray());
|
||||
return m.map(v -> new ReactiveRedisConnection.CommandResponse<>(command, Flux.fromStream(v.entrySet().stream()).map(e -> {
|
||||
Map<ByteBuffer, ByteBuffer> map = e.getValue().entrySet().stream()
|
||||
.collect(Collectors.toMap(entry -> ByteBuffer.wrap(entry.getKey()),
|
||||
entry -> ByteBuffer.wrap(entry.getValue())));
|
||||
return StreamRecords.newRecord()
|
||||
.in(command.getKey())
|
||||
.withId(RecordId.of(e.getKey().toString()))
|
||||
.ofBuffer(map);
|
||||
})));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.CommandResponse<ReadCommand, Flux<ByteBufferRecord>>> read(Publisher<ReadCommand> publisher) {
|
||||
return execute(publisher, command -> {
|
||||
|
||||
Assert.notNull(command.getStreamOffsets(), "StreamOffsets must not be null!");
|
||||
Assert.notNull(command.getReadOptions(), "ReadOptions must not be null!");
|
||||
|
||||
List<Object> params = new ArrayList<>();
|
||||
|
||||
if (command.getConsumer() != null) {
|
||||
params.add("GROUP");
|
||||
params.add(command.getConsumer().getGroup());
|
||||
params.add(command.getConsumer().getName());
|
||||
}
|
||||
|
||||
if (command.getReadOptions().getCount() != null && command.getReadOptions().getCount() > 0) {
|
||||
params.add("COUNT");
|
||||
params.add(command.getReadOptions().getCount());
|
||||
}
|
||||
|
||||
if (command.getReadOptions().getBlock() != null && command.getReadOptions().getBlock() > 0) {
|
||||
params.add("BLOCK");
|
||||
params.add(command.getReadOptions().getBlock());
|
||||
}
|
||||
|
||||
if (command.getConsumer() != null && command.getReadOptions().isNoack()) {
|
||||
params.add("NOACK");
|
||||
}
|
||||
|
||||
params.add("STREAMS");
|
||||
for (StreamOffset<ByteBuffer> streamOffset : command.getStreamOffsets()) {
|
||||
params.add(toByteArray(streamOffset.getKey()));
|
||||
}
|
||||
|
||||
for (StreamOffset<ByteBuffer> streamOffset : command.getStreamOffsets()) {
|
||||
params.add(streamOffset.getOffset().getOffset());
|
||||
}
|
||||
|
||||
Mono<Map<String, Map<StreamMessageId, Map<byte[], byte[]>>>> m;
|
||||
|
||||
if (command.getConsumer() == null) {
|
||||
if (command.getReadOptions().getBlock() != null && command.getReadOptions().getBlock() > 0) {
|
||||
m = read(toByteArray(command.getStreamOffsets().get(0).getKey()), ByteArrayCodec.INSTANCE, RedisCommands.XREAD_BLOCKING, params.toArray());
|
||||
} else {
|
||||
m = read(toByteArray(command.getStreamOffsets().get(0).getKey()), ByteArrayCodec.INSTANCE, RedisCommands.XREAD, params.toArray());
|
||||
}
|
||||
} else {
|
||||
if (command.getReadOptions().getBlock() != null && command.getReadOptions().getBlock() > 0) {
|
||||
m = read(toByteArray(command.getStreamOffsets().get(0).getKey()), ByteArrayCodec.INSTANCE, RedisCommands.XREADGROUP_BLOCKING, params.toArray());
|
||||
} else {
|
||||
m = read(toByteArray(command.getStreamOffsets().get(0).getKey()), ByteArrayCodec.INSTANCE, RedisCommands.XREADGROUP, params.toArray());
|
||||
}
|
||||
}
|
||||
|
||||
return m.map(v -> new ReactiveRedisConnection.CommandResponse<>(command, Flux.fromStream(v.entrySet().stream())
|
||||
.map(ee -> {
|
||||
return ee.getValue().entrySet().stream().map(e -> {
|
||||
Map<ByteBuffer, ByteBuffer> map = e.getValue().entrySet().stream()
|
||||
.collect(Collectors.toMap(entry -> ByteBuffer.wrap(entry.getKey()),
|
||||
entry -> ByteBuffer.wrap(entry.getValue())));
|
||||
return StreamRecords.newRecord()
|
||||
.in(ee.getKey())
|
||||
.withId(RecordId.of(e.getKey().toString()))
|
||||
.ofBuffer(map);
|
||||
});
|
||||
}).flatMap(Flux::fromStream)
|
||||
));
|
||||
|
||||
});
|
||||
}
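// The XREAD/XREADGROUP arguments above are assembled in the order Redis expects:
// [GROUP group consumer] [COUNT n] [BLOCK ms] [NOACK] STREAMS key... id...
// The blocking command variants are only selected when ReadOptions.getBlock() > 0, so a
// plain poll never parks a connection waiting on the server.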
|
||||
|
||||
private static final RedisStrictCommand<String> XGROUP_STRING = new RedisStrictCommand<>("XGROUP");
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.CommandResponse<GroupCommand, String>> xGroup(Publisher<GroupCommand> publisher) {
|
||||
return execute(publisher, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getGroupName(), "GroupName must not be null!");
|
||||
|
||||
byte[] k = toByteArray(command.getKey());
|
||||
|
||||
if (command.getAction().equals(GroupCommand.GroupCommandAction.CREATE)) {
|
||||
Assert.notNull(command.getReadOffset(), "ReadOffset must not be null!");
|
||||
|
||||
Mono<String> m = write(k, StringCodec.INSTANCE, XGROUP_STRING, "CREATE", k, command.getGroupName(), command.getReadOffset().getOffset(), "MKSTREAM");
|
||||
return m.map(v -> new ReactiveRedisConnection.CommandResponse<>(command, v));
|
||||
}
|
||||
|
||||
if (command.getAction().equals(GroupCommand.GroupCommandAction.DELETE_CONSUMER)) {
|
||||
Assert.notNull(command.getConsumerName(), "ConsumerName must not be null!");
|
||||
|
||||
Mono<Long> m = write(k, StringCodec.INSTANCE, RedisCommands.XGROUP_LONG, "DELCONSUMER", k, command.getGroupName(), command.getConsumerName());
|
||||
return m.map(v -> new ReactiveRedisConnection.CommandResponse<>(command, v > 0 ? "OK" : "Error"));
|
||||
}
|
||||
|
||||
if (command.getAction().equals(GroupCommand.GroupCommandAction.DESTROY)) {
|
||||
Mono<Long> m = write(k, StringCodec.INSTANCE, RedisCommands.XGROUP_LONG, "DESTROY", k, command.getGroupName());
|
||||
return m.map(v -> new ReactiveRedisConnection.CommandResponse<>(command, v > 0 ? "OK" : "Error"));
|
||||
}
|
||||
|
||||
throw new IllegalArgumentException("unknown command " + command.getAction());
|
||||
});
|
||||
}
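// Branch summary for xGroup() above: CREATE always appends MKSTREAM, so the stream is
// created on demand; DELCONSUMER and DESTROY return counters, which are translated into
// the "OK"/"Error" status string that Spring Data expects from XGROUP.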
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.CommandResponse<RangeCommand, Flux<ByteBufferRecord>>> xRevRange(Publisher<RangeCommand> publisher) {
|
||||
return range(RedisCommands.XREVRANGE, publisher);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.NumericResponse<ReactiveRedisConnection.KeyCommand, Long>> xTrim(Publisher<TrimCommand> publisher) {
|
||||
return execute(publisher, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getCount(), "Count must not be null!");
|
||||
|
||||
byte[] k = toByteArray(command.getKey());
|
||||
|
||||
Mono<Long> m = write(k, StringCodec.INSTANCE, RedisCommands.XTRIM, k, "MAXLEN", command.getCount());
|
||||
return m.map(v -> new ReactiveRedisConnection.NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
}
|
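For reference, a short sketch of the stream commands above via ReactiveStreamOperations; the stream key and field are illustrative, and the template is assumed to be backed by RedissonConnectionFactory as in the other examples:

import java.util.Collections;

import org.springframework.data.domain.Range;
import org.springframework.data.redis.core.ReactiveStringRedisTemplate;

public class ReactiveStreamExample {

    // Appends one entry (XADD) and then reads the whole stream back (XRANGE - +).
    public static void appendAndRead(ReactiveStringRedisTemplate template) {
        template.opsForStream()
                .add("events", Collections.singletonMap("type", "created"))
                .doOnNext(id -> System.out.println("XADD returned id " + id))
                .thenMany(template.opsForStream().range("events", Range.unbounded()))
                .doOnNext(record -> System.out.println(record.getId() + " -> " + record.getValue()))
                .blockLast();
    }
}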
@ -0,0 +1,459 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.reactivestreams.Publisher;
|
||||
import org.redisson.client.codec.ByteArrayCodec;
|
||||
import org.redisson.client.codec.StringCodec;
|
||||
import org.redisson.client.handler.State;
|
||||
import org.redisson.client.protocol.Decoder;
|
||||
import org.redisson.client.protocol.RedisCommand;
|
||||
import org.redisson.client.protocol.RedisCommands;
|
||||
import org.redisson.client.protocol.RedisStrictCommand;
|
||||
import org.redisson.client.protocol.convertor.BooleanReplayConvertor;
|
||||
import org.redisson.reactive.CommandReactiveExecutor;
|
||||
import org.springframework.data.domain.Range;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.AbsentByteBufferResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.BooleanResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.ByteBufferResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.KeyCommand;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.MultiValueResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.RangeCommand;
|
||||
import org.springframework.data.redis.connection.ReactiveStringCommands;
|
||||
import org.springframework.data.redis.connection.RedisStringCommands.BitOperation;
|
||||
import org.springframework.data.redis.connection.RedisStringCommands.SetOption;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
import io.netty.buffer.ByteBuf;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonReactiveStringCommands extends RedissonBaseReactive implements ReactiveStringCommands {
|
||||
|
||||
RedissonReactiveStringCommands(CommandReactiveExecutor executorService) {
|
||||
super(executorService);
|
||||
}
|
||||
|
||||
private static final RedisCommand<Boolean> SET = new RedisCommand<Boolean>("SET", new BooleanReplayConvertor());
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<SetCommand>> set(Publisher<SetCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValue(), "Value must not be null!");
|
||||
|
||||
byte[] key = toByteArray(command.getKey());
|
||||
byte[] value = toByteArray(command.getValue());
|
||||
|
||||
Mono<Boolean> m = Mono.empty();
|
||||
|
||||
if (!command.getExpiration().isPresent()) {
|
||||
m = write(key, StringCodec.INSTANCE, SET, key, value);
|
||||
} else if (command.getExpiration().get().isPersistent()) {
|
||||
if (!command.getOption().isPresent() || command.getOption().get() == SetOption.UPSERT) {
|
||||
m = write(key, StringCodec.INSTANCE, SET, key, value);
|
||||
}
|
||||
if (command.getOption().isPresent() && command.getOption().get() == SetOption.SET_IF_ABSENT) {
|
||||
m = write(key, StringCodec.INSTANCE, SET, key, value, "NX");
|
||||
}
|
||||
if (command.getOption().isPresent() && command.getOption().get() == SetOption.SET_IF_PRESENT) {
|
||||
m = write(key, StringCodec.INSTANCE, SET, key, value, "XX");
|
||||
}
|
||||
} else {
|
||||
if (!command.getOption().isPresent() || command.getOption().get() == SetOption.UPSERT) {
|
||||
m = write(key, StringCodec.INSTANCE, SET, key, value, "PX", command.getExpiration().get().getExpirationTimeInMilliseconds());
|
||||
}
|
||||
if (command.getOption().isPresent() && command.getOption().get() == SetOption.SET_IF_ABSENT) {
|
||||
m = write(key, StringCodec.INSTANCE, SET, key, value, "PX", command.getExpiration().get().getExpirationTimeInMilliseconds(), "NX");
|
||||
}
|
||||
if (command.getOption().isPresent() && command.getOption().get() == SetOption.SET_IF_PRESENT) {
|
||||
m = write(key, StringCodec.INSTANCE, SET, key, value, "PX", command.getExpiration().get().getExpirationTimeInMilliseconds(), "XX");
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
}
|
||||
}
|
||||
return m.map(v -> new BooleanResponse<>(command, v))
|
||||
.switchIfEmpty(Mono.just(new BooleanResponse<>(command, Boolean.FALSE)));
|
||||
});
|
||||
}
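// Argument mapping used by set() above:
//   expiration absent       -> SET key value
//   persistent expiration   -> SET key value [NX|XX]
//   timed expiration        -> SET key value PX <millis> [NX|XX]
// e.g. a setIfAbsent with a 10-second TTL is issued as: SET key value PX 10000 NX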
|
||||
|
||||
@Override
|
||||
public Flux<ByteBufferResponse<KeyCommand>> get(Publisher<KeyCommand> keys) {
|
||||
return execute(keys, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<byte[]> m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.GET, keyBuf);
|
||||
return m.map(v -> new ByteBufferResponse<>(command, ByteBuffer.wrap(v)))
|
||||
.defaultIfEmpty(new AbsentByteBufferResponse<>(command));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ByteBufferResponse<SetCommand>> getSet(Publisher<SetCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValue(), "Value must not be null!");
|
||||
|
||||
if (command.getExpiration().isPresent() || command.getOption().isPresent()) {
|
||||
throw new IllegalArgumentException("Command must not define expiration nor option for GETSET.");
|
||||
}
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] valueBuf = toByteArray(command.getValue());
|
||||
|
||||
Mono<byte[]> m = write(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.GETSET, keyBuf, valueBuf);
|
||||
return m.map(v -> new ByteBufferResponse<>(command, ByteBuffer.wrap(v)));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<MultiValueResponse<List<ByteBuffer>, ByteBuffer>> mGet(Publisher<List<ByteBuffer>> keysets) {
|
||||
return execute(keysets, coll -> {
|
||||
|
||||
Assert.notNull(coll, "List must not be null!");
|
||||
|
||||
Object[] params = coll.stream().map(buf -> toByteArray(buf)).toArray(Object[]::new);
|
||||
|
||||
Mono<List<byte[]>> m = read(null, ByteArrayCodec.INSTANCE, RedisCommands.MGET, params);
|
||||
return m.map(v -> {
|
||||
List<ByteBuffer> values = v.stream().map(array -> {
|
||||
if (array == null) {
|
||||
return ByteBuffer.allocate(0);
|
||||
}
|
||||
return ByteBuffer.wrap(array);
|
||||
}).collect(Collectors.toList());
|
||||
return new MultiValueResponse<>(coll, values);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<SetCommand>> setNX(Publisher<SetCommand> values) {
|
||||
return execute(values, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValue(), "Value must not be null!");
|
||||
|
||||
if (command.getExpiration().isPresent() || command.getOption().isPresent()) {
|
||||
throw new IllegalArgumentException("Command must not define expiration nor option for SETNX.");
|
||||
}
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] valueBuf = toByteArray(command.getValue());
|
||||
|
||||
Mono<Boolean> m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.SETNX, keyBuf, valueBuf);
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<Boolean> SETEX = new RedisCommand<Boolean>("SETEX", new BooleanReplayConvertor());
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<SetCommand>> setEX(Publisher<SetCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValue(), "Value must not be null!");
|
||||
|
||||
if (!command.getExpiration().isPresent()) {
|
||||
throw new IllegalArgumentException("Expiration must not be null!");
|
||||
}
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] valueBuf = toByteArray(command.getValue());
|
||||
|
||||
Mono<Boolean> m = write(keyBuf, StringCodec.INSTANCE, SETEX,
|
||||
keyBuf, command.getExpiration().get().getExpirationTimeInSeconds(), valueBuf);
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<String> PSETEX = new RedisCommand<String>("PSETEX");
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<SetCommand>> pSetEX(Publisher<SetCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValue(), "Value must not be null!");
|
||||
|
||||
if (!command.getExpiration().isPresent()) {
|
||||
throw new IllegalArgumentException("Expiration must not be null!");
|
||||
}
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] valueBuf = toByteArray(command.getValue());
|
||||
|
||||
Mono<String> m = write(keyBuf, StringCodec.INSTANCE, PSETEX,
|
||||
keyBuf, command.getExpiration().get().getExpirationTimeInMilliseconds(), valueBuf);
|
||||
return m.map(v -> new BooleanResponse<>(command, true));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<Boolean> MSET = new RedisCommand<Boolean>("MSET", new BooleanReplayConvertor());
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<MSetCommand>> mSet(Publisher<MSetCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKeyValuePairs(), "KeyValuePairs must not be null!");
|
||||
|
||||
List<byte[]> params = convert(command);
|
||||
|
||||
Mono<Boolean> m = write(params.get(0), StringCodec.INSTANCE, MSET, params.toArray());
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
protected List<byte[]> convert(MSetCommand command) {
|
||||
List<byte[]> params = new ArrayList<byte[]>(command.getKeyValuePairs().size());
|
||||
command.getKeyValuePairs().entrySet().forEach(e -> {
|
||||
byte[] keyBuf = toByteArray(e.getKey());
|
||||
byte[] valueBuf = toByteArray(e.getValue());
|
||||
params.add(keyBuf);
|
||||
params.add(valueBuf);
|
||||
});
|
||||
return params;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<MSetCommand>> mSetNX(Publisher<MSetCommand> source) {
|
||||
return execute(source, command -> {
|
||||
|
||||
Assert.notNull(command.getKeyValuePairs(), "KeyValuePairs must not be null!");
|
||||
|
||||
List<byte[]> params = convert(command);
|
||||
|
||||
Mono<Boolean> m = write(params.get(0), StringCodec.INSTANCE, RedisCommands.MSETNX, params.toArray());
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> APPEND = new RedisStrictCommand<Long>("APPEND");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<AppendCommand, Long>> append(Publisher<AppendCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValue(), "Value must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] valueBuf = toByteArray(command.getValue());
|
||||
|
||||
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, APPEND, keyBuf, valueBuf);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<Object> GETRANGE = new RedisCommand<Object>("GETRANGE");
|
||||
|
||||
@Override
|
||||
public Flux<ByteBufferResponse<RangeCommand>> getRange(Publisher<RangeCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getRange(), "Range must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<byte[]> m = read(keyBuf, ByteArrayCodec.INSTANCE, GETRANGE,
|
||||
keyBuf, command.getRange().getLowerBound().getValue().orElse(0L),
|
||||
command.getRange().getUpperBound().getValue().orElse(-1L));
|
||||
return m.map(v -> new ByteBufferResponse<>(command, ByteBuffer.wrap(v)));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<Long> SETRANGE = new RedisCommand<Long>("SETRANGE");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<SetRangeCommand, Long>> setRange(Publisher<SetRangeCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValue(), "Value must not be null!");
|
||||
Assert.notNull(command.getOffset(), "Offset must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] valueBuf = toByteArray(command.getValue());
|
||||
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, SETRANGE, keyBuf, command.getOffset(), valueBuf);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<GetBitCommand>> getBit(Publisher<GetBitCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getOffset(), "Offset must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Boolean> m = read(keyBuf, StringCodec.INSTANCE, RedisCommands.GETBIT, keyBuf, command.getOffset());
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<BooleanResponse<SetBitCommand>> setBit(Publisher<SetBitCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getOffset(), "Offset must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Boolean> m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.SETBIT, keyBuf, command.getOffset(), command.getValue() ? 1 : 0);
|
||||
return m.map(v -> new BooleanResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<BitCountCommand, Long>> bitCount(Publisher<BitCountCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
Range<Long> range = command.getRange();
|
||||
if (range == null) {
|
||||
range = Range.unbounded();
|
||||
}
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Long> m;
|
||||
if (range == Range.<Long>unbounded()) {
|
||||
m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.BITCOUNT, keyBuf);
|
||||
} else {
|
||||
m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.BITCOUNT,
|
||||
keyBuf, range.getLowerBound().getValue().orElse(0L),
|
||||
range.getUpperBound().getValue().get());
|
||||
}
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<MultiValueResponse<BitFieldCommand, Long>> bitField(Publisher<BitFieldCommand> commands) {
|
||||
// BITFIELD is not implemented by this connection; fail fast instead of returning null
return Flux.error(new UnsupportedOperationException("BITFIELD command is not supported"));
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> BITOP = new RedisStrictCommand<Long>("BITOP");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<BitOpCommand, Long>> bitOp(Publisher<BitOpCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getDestinationKey(), "DestinationKey must not be null!");
|
||||
Assert.notEmpty(command.getKeys(), "Keys must not be null or empty");
|
||||
|
||||
if (command.getBitOp() == BitOperation.NOT && command.getKeys().size() > 1) {
|
||||
throw new UnsupportedOperationException("NOT operation doesn't support more than single source key");
|
||||
}
|
||||
|
||||
List<Object> params = new ArrayList<Object>(command.getKeys().size() + 2);
|
||||
params.add(command.getBitOp());
|
||||
params.add(toByteArray(command.getDestinationKey()));
|
||||
params.addAll(command.getKeys().stream().map(v -> toByteArray(v)).collect(Collectors.toList()));
|
||||
|
||||
Mono<Long> m = write(toByteArray(command.getDestinationKey()), StringCodec.INSTANCE, BITOP, params.toArray());
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> BITPOS = new RedisStrictCommand<Long>("BITPOS");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<BitPosCommand, Long>> bitPos(Publisher<BitPosCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
List<Object> params = new ArrayList<>();
|
||||
params.add(toByteArray(command.getKey()));
|
||||
params.add(command.getBit() ? 1 : 0);
|
||||
|
||||
if (command.getRange() != null) {
|
||||
if (command.getRange().getLowerBound().getValue().isPresent()) {
|
||||
params.add(command.getRange().getLowerBound().getValue().get());
|
||||
}
|
||||
if (command.getRange().getUpperBound().getValue().isPresent()) {
|
||||
if (!command.getRange().getLowerBound().getValue().isPresent()) {
|
||||
throw new IllegalArgumentException("LowerBound must not be null");
|
||||
}
|
||||
params.add(command.getRange().getUpperBound().getValue().get());
|
||||
}
|
||||
}
|
||||
|
||||
Mono<Long> m = read((byte[])params.get(0), StringCodec.INSTANCE, BITPOS, params.toArray());
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<KeyCommand, Long>> strLen(Publisher<KeyCommand> keys) {
|
||||
return execute(keys, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Long> m = read(keyBuf, StringCodec.INSTANCE, RedisCommands.STRLEN, keyBuf);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<Object> GETDEL = new RedisCommand<>("GETDEL");
|
||||
|
||||
@Override
|
||||
public Flux<ByteBufferResponse<KeyCommand>> getDel(Publisher<KeyCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<byte[]> m = write(keyBuf, ByteArrayCodec.INSTANCE, GETDEL, keyBuf);
|
||||
return m.map(v -> new ByteBufferResponse<>(command, ByteBuffer.wrap(v)))
|
||||
.defaultIfEmpty(new AbsentByteBufferResponse<>(command));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<Object> GETEX = new RedisCommand<>("GETEX");
|
||||
|
||||
@Override
|
||||
public Flux<ByteBufferResponse<GetExCommand>> getEx(Publisher<GetExCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<byte[]> m;
if (command.getExpiration().isPersistent()) {
    // a persistent Expiration means "clear the TTL", so send PERSIST instead of a negative PX value
    m = write(keyBuf, ByteArrayCodec.INSTANCE, GETEX, keyBuf, "PERSIST");
} else {
    m = write(keyBuf, ByteArrayCodec.INSTANCE, GETEX, keyBuf,
            "PX", command.getExpiration().getExpirationTimeInMilliseconds());
}
|
||||
return m.map(v -> new ByteBufferResponse<>(command, ByteBuffer.wrap(v)))
|
||||
.defaultIfEmpty(new AbsentByteBufferResponse<>(command));
|
||||
});
|
||||
}
|
||||
}
|
@@ -0,0 +1,313 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.redisson.client.BaseRedisPubSubListener;
|
||||
import org.redisson.client.ChannelName;
|
||||
import org.redisson.client.RedisPubSubListener;
|
||||
import org.redisson.client.codec.ByteArrayCodec;
|
||||
import org.redisson.client.codec.Codec;
|
||||
import org.redisson.client.protocol.pubsub.PubSubType;
|
||||
import org.redisson.connection.ConnectionManager;
|
||||
import org.redisson.pubsub.PubSubConnectionEntry;
|
||||
import org.redisson.pubsub.PublishSubscribeService;
|
||||
import org.springframework.data.redis.connection.ReactiveSubscription;
|
||||
import org.springframework.data.redis.connection.SubscriptionListener;
|
||||
import reactor.core.Disposable;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.*;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonReactiveSubscription implements ReactiveSubscription {
|
||||
|
||||
/**
 * Tracks in-flight subscribe/unsubscribe operations and runs a registered
 * callback once all of them have completed.
 */
public static class ListenableCounter {
|
||||
|
||||
private int state;
|
||||
private Runnable r;
|
||||
|
||||
public synchronized void acquire() {
|
||||
state++;
|
||||
}
|
||||
|
||||
public void release() {
|
||||
synchronized (this) {
|
||||
state--;
|
||||
if (state != 0) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (r != null) {
|
||||
r.run();
|
||||
r = null;
|
||||
}
|
||||
}
|
||||
|
||||
public void addListener(Runnable r) {
|
||||
synchronized (this) {
|
||||
if (state != 0) {
|
||||
this.r = r;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
r.run();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private final Map<ChannelName, PubSubConnectionEntry> channels = new ConcurrentHashMap<>();
|
||||
private final Map<ChannelName, Collection<PubSubConnectionEntry>> patterns = new ConcurrentHashMap<>();
|
||||
|
||||
private final ListenableCounter monosListener = new ListenableCounter();
|
||||
|
||||
private final RedisPubSubListener subscriptionListener;
|
||||
private final PublishSubscribeService subscribeService;
|
||||
|
||||
public RedissonReactiveSubscription(ConnectionManager connectionManager, SubscriptionListener subscriptionListener) {
|
||||
this.subscribeService = connectionManager.getSubscribeService();
|
||||
this.subscriptionListener = new RedisPubSubListener() {
|
||||
|
||||
@Override
|
||||
public boolean onStatus(PubSubType type, CharSequence channel) {
|
||||
if (type == PubSubType.SUBSCRIBE) {
|
||||
subscriptionListener.onChannelSubscribed(channel.toString().getBytes(StandardCharsets.UTF_8), 1L);
|
||||
} else if (type == PubSubType.PSUBSCRIBE) {
|
||||
subscriptionListener.onPatternSubscribed(channel.toString().getBytes(StandardCharsets.UTF_8), 1L);
|
||||
} else if (type == PubSubType.UNSUBSCRIBE) {
|
||||
subscriptionListener.onChannelUnsubscribed(channel.toString().getBytes(StandardCharsets.UTF_8), 1L);
|
||||
} else if (type == PubSubType.PUNSUBSCRIBE) {
|
||||
subscriptionListener.onPatternUnsubscribed(channel.toString().getBytes(StandardCharsets.UTF_8), 1L);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onPatternMessage(CharSequence pattern, CharSequence channel, Object message) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onMessage(CharSequence channel, Object msg) {
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> subscribe(ByteBuffer... channels) {
|
||||
monosListener.acquire();
|
||||
return Mono.defer(() -> {
|
||||
List<CompletableFuture<?>> futures = new ArrayList<>();
|
||||
for (ByteBuffer channel : channels) {
|
||||
ChannelName cn = toChannelName(channel);
|
||||
CompletableFuture<PubSubConnectionEntry> f = subscribeService.subscribe(ByteArrayCodec.INSTANCE, cn, subscriptionListener);
|
||||
f = f.whenComplete((res, e) -> RedissonReactiveSubscription.this.channels.put(cn, res));
|
||||
futures.add(f);
|
||||
}
|
||||
|
||||
CompletableFuture<Void> future = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
|
||||
future = future.whenComplete((r, e) -> {
|
||||
monosListener.release();
|
||||
});
|
||||
return Mono.fromFuture(future);
|
||||
});
|
||||
}
|
||||
|
||||
protected ChannelName toChannelName(ByteBuffer channel) {
|
||||
return new ChannelName(RedissonBaseReactive.toByteArray(channel));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> pSubscribe(ByteBuffer... patterns) {
|
||||
monosListener.acquire();
|
||||
return Mono.defer(() -> {
|
||||
List<CompletableFuture<?>> futures = new ArrayList<>();
|
||||
for (ByteBuffer channel : patterns) {
|
||||
ChannelName cn = toChannelName(channel);
|
||||
CompletableFuture<Collection<PubSubConnectionEntry>> f = subscribeService.psubscribe(cn, ByteArrayCodec.INSTANCE, subscriptionListener);
|
||||
f = f.whenComplete((res, e) -> RedissonReactiveSubscription.this.patterns.put(cn, res));
|
||||
futures.add(f);
|
||||
}
|
||||
|
||||
CompletableFuture<Void> future = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
|
||||
future = future.whenComplete((r, e) -> {
|
||||
monosListener.release();
|
||||
});
|
||||
return Mono.fromFuture(future);
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> unsubscribe() {
|
||||
return unsubscribe(channels.keySet().stream().map(b -> ByteBuffer.wrap(b.getName())).distinct().toArray(ByteBuffer[]::new));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> unsubscribe(ByteBuffer... channels) {
|
||||
monosListener.acquire();
|
||||
return Mono.defer(() -> {
|
||||
List<CompletableFuture<?>> futures = new ArrayList<>(channels.length);
|
||||
for (ByteBuffer channel : channels) {
|
||||
ChannelName cn = toChannelName(channel);
|
||||
CompletableFuture<Codec> f = subscribeService.unsubscribe(cn, PubSubType.UNSUBSCRIBE);
|
||||
f = f.whenComplete((res, e) -> {
|
||||
synchronized (RedissonReactiveSubscription.this.channels) {
|
||||
PubSubConnectionEntry entry = RedissonReactiveSubscription.this.channels.get(cn);
|
||||
if (!entry.hasListeners(cn)) {
|
||||
RedissonReactiveSubscription.this.channels.remove(cn);
|
||||
}
|
||||
}
|
||||
});
|
||||
futures.add(f);
|
||||
}
|
||||
|
||||
CompletableFuture<Void> future = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
|
||||
future = future.whenComplete((r, e) -> {
|
||||
monosListener.release();
|
||||
});
|
||||
return Mono.fromFuture(future);
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> pUnsubscribe() {
|
||||
return pUnsubscribe(patterns.keySet().stream().map(b -> ByteBuffer.wrap(b.getName())).distinct().toArray(ByteBuffer[]::new));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> pUnsubscribe(ByteBuffer... patterns) {
|
||||
monosListener.acquire();
|
||||
return Mono.defer(() -> {
|
||||
List<CompletableFuture<?>> futures = new ArrayList<>(patterns.length);
|
||||
for (ByteBuffer channel : patterns) {
|
||||
ChannelName cn = toChannelName(channel);
|
||||
CompletableFuture<Codec> f = subscribeService.unsubscribe(cn, PubSubType.PUNSUBSCRIBE);
|
||||
f = f.whenComplete((res, e) -> {
|
||||
synchronized (RedissonReactiveSubscription.this.patterns) {
|
||||
Collection<PubSubConnectionEntry> entries = RedissonReactiveSubscription.this.patterns.get(cn);
|
||||
entries.stream()
|
||||
.filter(en -> !en.hasListeners(cn))
|
||||
.forEach(ee -> RedissonReactiveSubscription.this.patterns.remove(cn));
|
||||
}
|
||||
});
|
||||
futures.add(f);
|
||||
}
|
||||
|
||||
CompletableFuture<Void> future = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
|
||||
future = future.whenComplete((r, e) -> {
|
||||
monosListener.release();
|
||||
});
|
||||
return Mono.fromFuture(future);
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Set<ByteBuffer> getChannels() {
|
||||
return channels.keySet().stream().map(b -> ByteBuffer.wrap(b.getName())).collect(Collectors.toSet());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Set<ByteBuffer> getPatterns() {
|
||||
return patterns.keySet().stream().map(b -> ByteBuffer.wrap(b.getName())).collect(Collectors.toSet());
|
||||
}
|
||||
|
||||
private final AtomicReference<Flux<Message<ByteBuffer, ByteBuffer>>> flux = new AtomicReference<>();
|
||||
private volatile Disposable disposable;
|
||||
|
||||
@Override
|
||||
public Flux<Message<ByteBuffer, ByteBuffer>> receive() {
|
||||
if (flux.get() != null) {
|
||||
return flux.get();
|
||||
}
|
||||
|
||||
Flux<Message<ByteBuffer, ByteBuffer>> f = Flux.create(emitter -> {
|
||||
emitter.onRequest(n -> {
|
||||
|
||||
monosListener.addListener(() -> {
|
||||
BaseRedisPubSubListener listener = new BaseRedisPubSubListener() {
|
||||
@Override
|
||||
public void onPatternMessage(CharSequence pattern, CharSequence channel, Object message) {
|
||||
if (!patterns.containsKey(new ChannelName(pattern.toString()))) {
|
||||
return;
|
||||
}
|
||||
|
||||
emitter.next(new PatternMessage<>(ByteBuffer.wrap(pattern.toString().getBytes()),
|
||||
ByteBuffer.wrap(channel.toString().getBytes()),
|
||||
ByteBuffer.wrap((byte[])message)));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onMessage(CharSequence channel, Object msg) {
|
||||
if (!channels.containsKey(new ChannelName(channel.toString()))) {
|
||||
return;
|
||||
}
|
||||
|
||||
emitter.next(new ChannelMessage<>(ByteBuffer.wrap(channel.toString().getBytes()), ByteBuffer.wrap((byte[])msg)));
|
||||
}
|
||||
};
|
||||
|
||||
disposable = () -> {
|
||||
for (Entry<ChannelName, PubSubConnectionEntry> entry : channels.entrySet()) {
|
||||
entry.getValue().removeListener(entry.getKey(), listener);
|
||||
}
|
||||
for (Entry<ChannelName, Collection<PubSubConnectionEntry>> entry : patterns.entrySet()) {
|
||||
for (PubSubConnectionEntry pubSubConnectionEntry : entry.getValue()) {
|
||||
pubSubConnectionEntry.removeListener(entry.getKey(), listener);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
for (Entry<ChannelName, PubSubConnectionEntry> entry : channels.entrySet()) {
|
||||
entry.getValue().addListener(entry.getKey(), listener);
|
||||
}
|
||||
for (Entry<ChannelName, Collection<PubSubConnectionEntry>> entry : patterns.entrySet()) {
|
||||
for (PubSubConnectionEntry pubSubConnectionEntry : entry.getValue()) {
|
||||
pubSubConnectionEntry.addListener(entry.getKey(), listener);
|
||||
}
|
||||
}
|
||||
|
||||
emitter.onDispose(disposable);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
if (flux.compareAndSet(null, f)) {
|
||||
return f;
|
||||
}
|
||||
return flux.get();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> cancel() {
|
||||
return unsubscribe().then(pUnsubscribe()).then(Mono.fromRunnable(() -> {
|
||||
if (disposable != null) {
|
||||
disposable.dispose();
|
||||
}
|
||||
}));
|
||||
}
|
||||
}
|
@@ -0,0 +1,888 @@
|
||||
/**
|
||||
* Copyright (c) 2013-2022 Nikita Koksharov
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.reactivestreams.Publisher;
|
||||
import org.redisson.ScanResult;
|
||||
import org.redisson.api.RFuture;
|
||||
import org.redisson.client.RedisClient;
|
||||
import org.redisson.client.codec.ByteArrayCodec;
|
||||
import org.redisson.client.codec.DoubleCodec;
|
||||
import org.redisson.client.codec.LongCodec;
|
||||
import org.redisson.client.codec.StringCodec;
|
||||
import org.redisson.client.protocol.RedisCommand;
|
||||
import org.redisson.client.protocol.RedisCommands;
|
||||
import org.redisson.client.protocol.RedisStrictCommand;
|
||||
import org.redisson.client.protocol.convertor.DoubleNullSafeReplayConvertor;
|
||||
import org.redisson.client.protocol.decoder.*;
|
||||
import org.redisson.reactive.CommandReactiveExecutor;
|
||||
import org.redisson.reactive.SetReactiveIterator;
|
||||
import org.springframework.data.domain.Range;
|
||||
import org.springframework.data.domain.Sort.Direction;
|
||||
import org.springframework.data.redis.connection.ReactiveListCommands;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.CommandResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.KeyCommand;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.KeyScanCommand;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse;
|
||||
import org.springframework.data.redis.connection.ReactiveZSetCommands;
|
||||
import org.springframework.data.redis.connection.zset.DefaultTuple;
|
||||
import org.springframework.data.redis.connection.zset.Tuple;
|
||||
import org.springframework.util.Assert;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
import java.math.BigDecimal;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.redisson.client.protocol.RedisCommands.ZRANDMEMBER;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Nikita Koksharov
|
||||
*
|
||||
*/
|
||||
public class RedissonReactiveZSetCommands extends RedissonBaseReactive implements ReactiveZSetCommands {
|
||||
|
||||
RedissonReactiveZSetCommands(CommandReactiveExecutor executorService) {
|
||||
super(executorService);
|
||||
}
|
||||
|
||||
private static final RedisCommand<Double> ZADD_FLOAT = new RedisCommand<>("ZADD", new DoubleNullSafeReplayConvertor());
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<ZAddCommand, Number>> zAdd(Publisher<ZAddCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notEmpty(command.getTuples(), "Tuples must not be empty or null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
|
||||
List<Object> params = new ArrayList<Object>(command.getTuples().size()*2+1);
|
||||
params.add(keyBuf);
|
||||
if (command.isIncr() || command.isUpsert() || command.isReturnTotalChanged()) {
|
||||
if (command.isUpsert()) {
|
||||
params.add("NX");
|
||||
} else {
|
||||
params.add("XX");
|
||||
}
|
||||
if (command.isReturnTotalChanged()) {
|
||||
params.add("CH");
|
||||
}
|
||||
if (command.isIncr()) {
|
||||
params.add("INCR");
|
||||
}
|
||||
}
|
||||
|
||||
for (Tuple entry : command.getTuples()) {
|
||||
params.add(BigDecimal.valueOf(entry.getScore()).toPlainString());
|
||||
params.add(entry.getValue());
|
||||
}
|
||||
|
||||
Mono<Number> m;
|
||||
if (command.isIncr()) {
|
||||
m = write(keyBuf, DoubleCodec.INSTANCE, ZADD_FLOAT, params.toArray());
|
||||
} else {
|
||||
m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.ZADD, params.toArray());
|
||||
}
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<ZRemCommand, Long>> zRem(Publisher<ZRemCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValues(), "Values must not be null!");
|
||||
|
||||
List<Object> args = new ArrayList<Object>(command.getValues().size() + 1);
|
||||
args.add(toByteArray(command.getKey()));
|
||||
args.addAll(command.getValues().stream().map(v -> toByteArray(v)).collect(Collectors.toList()));
|
||||
|
||||
Mono<Long> m = write((byte[])args.get(0), StringCodec.INSTANCE, RedisCommands.ZREM_LONG, args.toArray());
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<ZIncrByCommand, Double>> zIncrBy(Publisher<ZIncrByCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValue(), "Member must not be null!");
|
||||
Assert.notNull(command.getIncrement(), "Increment value must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] valueBuf = toByteArray(command.getValue());
|
||||
Mono<Double> m = write(keyBuf, DoubleCodec.INSTANCE, RedisCommands.ZINCRBY, keyBuf, BigDecimal.valueOf(command.getIncrement().doubleValue()).toPlainString(), valueBuf);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<ZRankCommand, Long>> zRank(Publisher<ZRankCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValue(), "Member must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] valueBuf = toByteArray(command.getValue());
|
||||
RedisCommand<Long> cmd = RedisCommands.ZRANK;
|
||||
if (command.getDirection() == Direction.DESC) {
|
||||
cmd = RedisCommands.ZREVRANK;
|
||||
}
|
||||
Mono<Long> m = read(keyBuf, DoubleCodec.INSTANCE, cmd, keyBuf, valueBuf);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<Set<Tuple>> ZRANGE_ENTRY = new RedisCommand<Set<Tuple>>("ZRANGE", new ScoredSortedSetReplayDecoder());
|
||||
private static final RedisCommand<Set<Object>> ZRANGE = new RedisCommand<Set<Object>>("ZRANGE", new ObjectSetReplayDecoder<Object>());
|
||||
private static final RedisCommand<Set<Tuple>> ZREVRANGE_ENTRY = new RedisCommand<Set<Tuple>>("ZREVRANGE", new ScoredSortedSetReplayDecoder());
|
||||
private static final RedisCommand<Set<Object>> ZREVRANGE = new RedisCommand<Set<Object>>("ZREVRANGE", new ObjectSetReplayDecoder<Object>());
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<ZRangeCommand, Flux<Tuple>>> zRange(Publisher<ZRangeCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getRange(), "Range must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
|
||||
long start = command.getRange().getLowerBound().getValue().orElse(0L);
|
||||
long end = command.getRange().getUpperBound().getValue().get();
|
||||
|
||||
Flux<Tuple> flux;
|
||||
if (command.getDirection() == Direction.ASC) {
|
||||
if (command.isWithScores()) {
|
||||
Mono<Set<Tuple>> m = read(keyBuf, ByteArrayCodec.INSTANCE, ZRANGE_ENTRY,
|
||||
keyBuf, start, end, "WITHSCORES");
|
||||
flux = m.flatMapMany(e -> Flux.fromIterable(e));
|
||||
} else {
|
||||
Mono<Set<byte[]>> m = read(keyBuf, ByteArrayCodec.INSTANCE, ZRANGE, keyBuf, start, end);
|
||||
flux = m.flatMapMany(e -> Flux.fromIterable(e).map(b -> new DefaultTuple(b, Double.NaN)));
|
||||
}
|
||||
} else {
|
||||
if (command.isWithScores()) {
|
||||
Mono<Set<Tuple>> m = read(keyBuf, ByteArrayCodec.INSTANCE, ZREVRANGE_ENTRY,
|
||||
keyBuf, start, end, "WITHSCORES");
|
||||
flux = m.flatMapMany(e -> Flux.fromIterable(e));
|
||||
} else {
|
||||
Mono<Set<byte[]>> m = read(keyBuf, ByteArrayCodec.INSTANCE, ZREVRANGE, keyBuf, start, end);
|
||||
flux = m.flatMapMany(e -> Flux.fromIterable(e).map(b -> new DefaultTuple(b, Double.NaN)));
|
||||
}
|
||||
}
|
||||
return Mono.just(new CommandResponse<>(command, flux));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<Set<Tuple>> ZRANGEBYSCORE = new RedisCommand<Set<Tuple>>("ZRANGEBYSCORE", new ScoredSortedSetReplayDecoder());
|
||||
private static final RedisCommand<Set<Tuple>> ZREVRANGEBYSCORE = new RedisCommand<Set<Tuple>>("ZREVRANGEBYSCORE", new ScoredSortedSetReplayDecoder());
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<ZRangeByScoreCommand, Flux<Tuple>>> zRangeByScore(
|
||||
Publisher<ZRangeByScoreCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getRange(), "Range must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
|
||||
String start = toLowerBound(command.getRange());
|
||||
String end = toUpperBound(command.getRange());
|
||||
|
||||
List<Object> args = new ArrayList<Object>();
|
||||
args.add(keyBuf);
|
||||
if (command.getDirection() == Direction.ASC) {
|
||||
args.add(start);
|
||||
} else {
|
||||
args.add(end);
|
||||
}
|
||||
if (command.getDirection() == Direction.ASC) {
|
||||
args.add(end);
|
||||
} else {
|
||||
args.add(start);
|
||||
}
|
||||
if (command.isWithScores()) {
|
||||
args.add("WITHSCORES");
|
||||
}
|
||||
if (command.getLimit().isPresent() && !command.getLimit().get().isUnlimited()) {
|
||||
args.add("LIMIT");
|
||||
args.add(command.getLimit().get().getOffset());
|
||||
args.add(command.getLimit().get().getCount());
|
||||
}
|
||||
|
||||
Flux<Tuple> flux;
|
||||
if (command.getDirection() == Direction.ASC) {
|
||||
if (command.isWithScores()) {
|
||||
Mono<Set<Tuple>> m = read(keyBuf, ByteArrayCodec.INSTANCE, ZRANGEBYSCORE, args.toArray());
|
||||
flux = m.flatMapMany(e -> Flux.fromIterable(e));
|
||||
} else {
|
||||
Mono<Set<byte[]>> m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.ZRANGEBYSCORE, args.toArray());
|
||||
flux = m.flatMapMany(e -> Flux.fromIterable(e).map(b -> new DefaultTuple(b, Double.NaN)));
|
||||
}
|
||||
} else {
|
||||
if (command.isWithScores()) {
|
||||
Mono<Set<Tuple>> m = read(keyBuf, ByteArrayCodec.INSTANCE, ZREVRANGEBYSCORE, args.toArray());
|
||||
flux = m.flatMapMany(e -> Flux.fromIterable(e));
|
||||
} else {
|
||||
Mono<Set<byte[]>> m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.ZREVRANGEBYSCORE, args.toArray());
|
||||
flux = m.flatMapMany(e -> Flux.fromIterable(e).map(b -> new DefaultTuple(b, Double.NaN)));
|
||||
}
|
||||
}
|
||||
|
||||
return Mono.just(new CommandResponse<>(command, flux));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<ListScanResult<Tuple>> ZSCAN = new RedisCommand<>("ZSCAN", new ListMultiDecoder2(new ScoredSortedSetScanDecoder<Object>(), new ScoredSortedSetScanReplayDecoder()));
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<KeyCommand, Flux<Tuple>>> zScan(Publisher<KeyScanCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getOptions(), "ScanOptions must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Flux<Tuple> flux = Flux.create(new SetReactiveIterator<Tuple>() {
|
||||
@Override
|
||||
protected RFuture<ScanResult<Object>> scanIterator(RedisClient client, long nextIterPos) {
|
||||
if (command.getOptions().getPattern() == null) {
|
||||
return executorService.readAsync(client, keyBuf, ByteArrayCodec.INSTANCE, ZSCAN,
|
||||
keyBuf, nextIterPos, "COUNT", Optional.ofNullable(command.getOptions().getCount()).orElse(10L));
|
||||
}
|
||||
|
||||
return executorService.readAsync(client, keyBuf, ByteArrayCodec.INSTANCE, ZSCAN,
|
||||
keyBuf, nextIterPos, "MATCH", command.getOptions().getPattern(),
|
||||
"COUNT", Optional.ofNullable(command.getOptions().getCount()).orElse(10L));
|
||||
}
|
||||
});
|
||||
return Mono.just(new CommandResponse<>(command, flux));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> ZCOUNT = new RedisStrictCommand<Long>("ZCOUNT");
|
||||
|
||||
String toLowerBound(Range range) {
|
||||
StringBuilder s = new StringBuilder();
|
||||
if (!range.getLowerBound().isInclusive()) {
|
||||
s.append("(");
|
||||
}
|
||||
if (!range.getLowerBound().getValue().isPresent() || range.getLowerBound().getValue().get().toString().isEmpty()) {
|
||||
s.append("-inf");
|
||||
} else {
|
||||
s.append(range.getLowerBound().getValue().get());
|
||||
}
|
||||
return s.toString();
|
||||
}
|
||||
|
||||
String toUpperBound(Range range) {
|
||||
StringBuilder s = new StringBuilder();
|
||||
if (!range.getUpperBound().isInclusive()) {
|
||||
s.append("(");
|
||||
}
|
||||
if (!range.getUpperBound().getValue().isPresent() || range.getUpperBound().getValue().get().toString().isEmpty()) {
|
||||
s.append("+inf");
|
||||
} else {
|
||||
s.append(range.getUpperBound().getValue().get());
|
||||
}
|
||||
return s.toString();
|
||||
}
|
||||
|
||||
String toLexLowerBound(Range range, Object defaultValue) {
|
||||
StringBuilder s = new StringBuilder();
|
||||
if (range.getLowerBound().isInclusive()) {
|
||||
s.append("[");
|
||||
} else {
|
||||
s.append("(");
|
||||
}
|
||||
if (!range.getLowerBound().getValue().isPresent() || range.getLowerBound().getValue().get().toString().isEmpty()) {
|
||||
s.append(defaultValue);
|
||||
} else {
|
||||
s.append(range.getLowerBound().getValue().get());
|
||||
}
|
||||
return s.toString();
|
||||
}
|
||||
|
||||
String toLexUpperBound(Range range, Object defaultValue) {
|
||||
StringBuilder s = new StringBuilder();
|
||||
if (range.getUpperBound().isInclusive()) {
|
||||
s.append("[");
|
||||
} else {
|
||||
s.append("(");
|
||||
}
|
||||
if (!range.getUpperBound().getValue().isPresent() || range.getUpperBound().getValue().get().toString().isEmpty()) {
|
||||
s.append(defaultValue);
|
||||
} else {
|
||||
s.append(range.getUpperBound().getValue().get());
|
||||
}
|
||||
return s.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<ZCountCommand, Long>> zCount(Publisher<ZCountCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getRange(), "Range must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Long> m = read(keyBuf, StringCodec.INSTANCE, ZCOUNT,
|
||||
keyBuf, toLowerBound(command.getRange()),
|
||||
toUpperBound(command.getRange()));
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<KeyCommand, Long>> zCard(Publisher<KeyCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Long> m = read(keyBuf, StringCodec.INSTANCE, RedisCommands.ZCARD, keyBuf);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<ZScoreCommand, Double>> zScore(Publisher<ZScoreCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValue(), "Value must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
byte[] valueBuf = toByteArray(command.getValue());
|
||||
Mono<Double> m = read(keyBuf, StringCodec.INSTANCE, RedisCommands.ZSCORE, keyBuf, valueBuf);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> ZREMRANGEBYRANK = new RedisStrictCommand<Long>("ZREMRANGEBYRANK");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<ZRemRangeByRankCommand, Long>> zRemRangeByRank(
|
||||
Publisher<ZRemRangeByRankCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getRange(), "Range must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, ZREMRANGEBYRANK,
|
||||
keyBuf, command.getRange().getLowerBound().getValue().orElse(0L),
|
||||
command.getRange().getUpperBound().getValue().get());
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> ZREMRANGEBYSCORE = new RedisStrictCommand<Long>("ZREMRANGEBYSCORE");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<ZRemRangeByScoreCommand, Long>> zRemRangeByScore(
|
||||
Publisher<ZRemRangeByScoreCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getRange(), "Range must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, ZREMRANGEBYSCORE,
|
||||
keyBuf, toLowerBound(command.getRange()),
|
||||
toUpperBound(command.getRange()));
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> ZUNIONSTORE = new RedisStrictCommand<Long>("ZUNIONSTORE");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<ZAggregateStoreCommand, Long>> zUnionStore(Publisher<? extends ZAggregateStoreCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Destination key must not be null!");
|
||||
Assert.notEmpty(command.getSourceKeys(), "Source keys must not be null or empty!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
List<Object> args = new ArrayList<Object>(command.getSourceKeys().size() * 2 + 5);
|
||||
args.add(keyBuf);
|
||||
args.add(command.getSourceKeys().size());
|
||||
args.addAll(command.getSourceKeys().stream().map(e -> toByteArray(e)).collect(Collectors.toList()));
|
||||
if (!command.getWeights().isEmpty()) {
|
||||
args.add("WEIGHTS");
|
||||
for (Double weight : command.getWeights()) {
|
||||
args.add(BigDecimal.valueOf(weight).toPlainString());
|
||||
}
|
||||
}
|
||||
if (command.getAggregateFunction().isPresent()) {
|
||||
args.add("AGGREGATE");
|
||||
args.add(command.getAggregateFunction().get().name());
|
||||
}
|
||||
Mono<Long> m = write(keyBuf, LongCodec.INSTANCE, ZUNIONSTORE, args.toArray());
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> ZINTERSTORE = new RedisStrictCommand<Long>("ZINTERSTORE");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<ZAggregateStoreCommand, Long>> zInterStore(Publisher<? extends ZAggregateStoreCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Destination key must not be null!");
|
||||
Assert.notEmpty(command.getSourceKeys(), "Source keys must not be null or empty!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
List<Object> args = new ArrayList<Object>(command.getSourceKeys().size() * 2 + 5);
|
||||
args.add(keyBuf);
|
||||
args.add(command.getSourceKeys().size());
|
||||
args.addAll(command.getSourceKeys().stream().map(e -> toByteArray(e)).collect(Collectors.toList()));
|
||||
if (!command.getWeights().isEmpty()) {
|
||||
args.add("WEIGHTS");
|
||||
for (Double weight : command.getWeights()) {
|
||||
args.add(BigDecimal.valueOf(weight).toPlainString());
|
||||
}
|
||||
}
|
||||
if (command.getAggregateFunction().isPresent()) {
|
||||
args.add("AGGREGATE");
|
||||
args.add(command.getAggregateFunction().get().name());
|
||||
}
|
||||
Mono<Long> m = write(keyBuf, LongCodec.INSTANCE, ZINTERSTORE, args.toArray());
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<Set<Object>> ZRANGEBYLEX = new RedisCommand<Set<Object>>("ZRANGEBYLEX", new ObjectSetReplayDecoder<Object>());
|
||||
private static final RedisCommand<Set<Object>> ZREVRANGEBYLEX = new RedisCommand<Set<Object>>("ZREVRANGEBYLEX", new ObjectSetReplayDecoder<Object>());
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<ZRangeByLexCommand, Flux<ByteBuffer>>> zRangeByLex(
|
||||
Publisher<ZRangeByLexCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getRange(), "Range must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
|
||||
String start = null;
|
||||
String end = null;
|
||||
if (command.getDirection() == Direction.ASC) {
|
||||
start = toLexLowerBound(command.getRange(), "-");
|
||||
end = toLexUpperBound(command.getRange(), "+");
|
||||
} else {
|
||||
start = toLexUpperBound(command.getRange(), "-");
|
||||
end = toLexLowerBound(command.getRange(), "+");
|
||||
}
|
||||
|
||||
Mono<Set<byte[]>> m;
|
||||
if (!command.getLimit().isUnlimited()) {
|
||||
if (command.getDirection() == Direction.ASC) {
|
||||
m = read(keyBuf, ByteArrayCodec.INSTANCE, ZRANGEBYLEX,
|
||||
keyBuf, start, end, "LIMIT", command.getLimit().getOffset(), command.getLimit().getCount());
|
||||
} else {
|
||||
m = read(keyBuf, ByteArrayCodec.INSTANCE, ZREVRANGEBYLEX,
|
||||
keyBuf, start, end, "LIMIT", command.getLimit().getOffset(), command.getLimit().getCount());
|
||||
}
|
||||
} else {
|
||||
if (command.getDirection() == Direction.ASC) {
|
||||
m = read(keyBuf, ByteArrayCodec.INSTANCE, ZRANGEBYLEX,
|
||||
keyBuf, start, end);
|
||||
} else {
|
||||
m = read(keyBuf, ByteArrayCodec.INSTANCE, ZREVRANGEBYLEX,
|
||||
keyBuf, start, end);
|
||||
}
|
||||
}
|
||||
Flux<ByteBuffer> flux = m.flatMapMany(e -> Flux.fromIterable(e).map(v -> ByteBuffer.wrap(v)));
|
||||
return Mono.just(new CommandResponse<>(command, flux));
|
||||
});
|
||||
}
|
||||
|
||||
public Flux<NumericResponse<ReactiveListCommands.LPosCommand, Long>> lPos(Publisher<ReactiveListCommands.LPosCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getElement(), "Element must not be null!");
|
||||
|
||||
List<Object> params = new ArrayList<Object>();
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
params.add(keyBuf);
|
||||
params.add(toByteArray(command.getElement()));
|
||||
if (command.getRank() != null) {
|
||||
params.add("RANK");
|
||||
params.add(command.getRank());
|
||||
}
|
||||
if (command.getCount() != null) {
|
||||
params.add("COUNT");
|
||||
params.add(command.getCount());
|
||||
}
|
||||
|
||||
Mono<Long> m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.LPOS, params.toArray());
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.NumericResponse<ZLexCountCommand, Long>> zLexCount(Publisher<ZLexCountCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getRange(), "Range must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
String start = toLexLowerBound(command.getRange(), "-");
|
||||
String end = toLexUpperBound(command.getRange(), "+");
|
||||
|
||||
Mono<Long> m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.ZLEXCOUNT, keyBuf, start, end);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> ZREMRANGEBYLEX = new RedisStrictCommand<>("ZREMRANGEBYLEX");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<ZRemRangeByLexCommand, Long>> zRemRangeByLex(Publisher<ZRemRangeByLexCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getRange(), "Range must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
|
||||
String start = toLexLowerBound(command.getRange(), "-");
|
||||
String end = toLexUpperBound(command.getRange(), "+");
|
||||
|
||||
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, ZREMRANGEBYLEX, keyBuf, start, end);
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<Set<Tuple>> ZPOPMIN = new RedisCommand<>("ZPOPMIN", new ScoredSortedSetReplayDecoder());
|
||||
private static final RedisCommand<Set<Tuple>> ZPOPMAX = new RedisCommand<>("ZPOPMAX", new ScoredSortedSetReplayDecoder());
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<ZPopCommand, Flux<Tuple>>> zPop(Publisher<ZPopCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
|
||||
RedisCommand<Set<Tuple>> cmd = ZPOPMAX;
|
||||
if (command.getDirection() == PopDirection.MIN) {
|
||||
cmd = ZPOPMIN;
|
||||
}
|
||||
|
||||
Mono<Set<Tuple>> m = write(keyBuf, ByteArrayCodec.INSTANCE, cmd, keyBuf, command.getCount());
|
||||
Flux<Tuple> flux = m.flatMapMany(e -> Flux.fromIterable(e));
|
||||
return Mono.just(new CommandResponse<>(command, flux));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<Set<Tuple>> BZPOPMIN = new RedisCommand<>("BZPOPMIN", new ScoredSortedSetReplayDecoder());
|
||||
private static final RedisCommand<Set<Tuple>> BZPOPMAX = new RedisCommand<>("BZPOPMAX", new ScoredSortedSetReplayDecoder());
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<BZPopCommand, Flux<Tuple>>> bZPop(Publisher<BZPopCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getTimeout(), "Timeout must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
|
||||
RedisCommand<Set<Tuple>> cmd = BZPOPMAX;
|
||||
if (command.getDirection() == PopDirection.MIN) {
|
||||
cmd = BZPOPMIN;
|
||||
}
|
||||
|
||||
long timeout = command.getTimeUnit().toSeconds(command.getTimeout());
|
||||
|
||||
Mono<Set<Tuple>> m = write(keyBuf, ByteArrayCodec.INSTANCE, cmd, keyBuf, command.getCount(), timeout);
|
||||
Flux<Tuple> flux = m.flatMapMany(e -> Flux.fromIterable(e));
|
||||
return Mono.just(new CommandResponse<>(command, flux));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<ZRandMemberCommand, Flux<ByteBuffer>>> zRandMember(Publisher<ZRandMemberCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
|
||||
Mono<Set<byte[]>> m = write(keyBuf, ByteArrayCodec.INSTANCE, ZRANDMEMBER, keyBuf, command.getCount());
|
||||
Flux<ByteBuffer> flux = m.flatMapMany(e -> Flux.fromIterable(e).map(v -> ByteBuffer.wrap(v)));
|
||||
return Mono.just(new CommandResponse<>(command, flux));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<Set<Tuple>> ZRANDMEMBER_SCORE = new RedisCommand<>("ZRANDMEMBER", new ScoredSortedSetReplayDecoder());
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<ZRandMemberCommand, Flux<Tuple>>> zRandMemberWithScore(Publisher<ZRandMemberCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
|
||||
Mono<Set<Tuple>> m = write(keyBuf, ByteArrayCodec.INSTANCE, ZRANDMEMBER_SCORE, keyBuf, command.getCount(), "WITHSCORES");
|
||||
Flux<Tuple> flux = m.flatMapMany(e -> Flux.fromIterable(e));
|
||||
return Mono.just(new CommandResponse<>(command, flux));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<ZDiffCommand, Flux<ByteBuffer>>> zDiff(Publisher<? extends ZDiffCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKeys(), "Keys must not be null!");
|
||||
|
||||
List<Object> args = new ArrayList<>(command.getKeys().size() + 1);
|
||||
args.add(command.getKeys().size());
|
||||
for (ByteBuffer key : command.getKeys()) {
|
||||
args.add(toByteArray(key));
|
||||
}
|
||||
|
||||
Mono<List<byte[]>> m = write(toByteArray(command.getKeys().get(0)), ByteArrayCodec.INSTANCE, RedisCommands.ZDIFF, args.toArray());
|
||||
Flux<ByteBuffer> flux = m.flatMapMany(e -> Flux.fromIterable(e).map(v -> ByteBuffer.wrap(v)));
|
||||
return Mono.just(new CommandResponse<>(command, flux));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<Set<Tuple>> ZDIFF_SCORE = new RedisCommand<>("ZDIFF", new ScoredSortedSetReplayDecoder());
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<ZDiffCommand, Flux<Tuple>>> zDiffWithScores(Publisher<? extends ZDiffCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKeys(), "Keys must not be null!");
|
||||
|
||||
List<Object> args = new ArrayList<>(command.getKeys().size() + 2);
|
||||
args.add(command.getKeys().size());
|
||||
for (ByteBuffer key : command.getKeys()) {
|
||||
args.add(toByteArray(key));
|
||||
}
|
||||
args.add("WITHSCORES");
|
||||
|
||||
Mono<Set<Tuple>> m = write(toByteArray(command.getKeys().get(0)), ByteArrayCodec.INSTANCE, ZDIFF_SCORE, args.toArray());
|
||||
Flux<Tuple> flux = m.flatMapMany(e -> Flux.fromIterable(e));
|
||||
return Mono.just(new CommandResponse<>(command, flux));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisStrictCommand<Long> ZDIFFSTORE = new RedisStrictCommand<>("ZDIFFSTORE");
|
||||
|
||||
@Override
|
||||
public Flux<NumericResponse<ZDiffStoreCommand, Long>> zDiffStore(Publisher<ZDiffStoreCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getSourceKeys(), "Source keys must not be null!");
|
||||
|
||||
List<Object> args = new ArrayList<>(command.getSourceKeys().size() + 2);
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
args.add(keyBuf);
|
||||
args.add(command.getSourceKeys().size());
|
||||
for (ByteBuffer key : command.getSourceKeys()) {
|
||||
args.add(toByteArray(key));
|
||||
}
|
||||
|
||||
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, ZDIFFSTORE, args.toArray());
|
||||
return m.map(v -> new NumericResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<ZAggregateCommand, Flux<ByteBuffer>>> zUnion(Publisher<? extends ZAggregateCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notEmpty(command.getSourceKeys(), "Source keys must not be null or empty!");
|
||||
|
||||
List<Object> args = new ArrayList<>(command.getSourceKeys().size() * 2 + 5);
|
||||
args.add(command.getSourceKeys().size());
|
||||
args.addAll(command.getSourceKeys().stream().map(e -> toByteArray(e)).collect(Collectors.toList()));
|
||||
if (!command.getWeights().isEmpty()) {
|
||||
args.add("WEIGHTS");
|
||||
for (Double weight : command.getWeights()) {
|
||||
args.add(BigDecimal.valueOf(weight).toPlainString());
|
||||
}
|
||||
}
|
||||
if (command.getAggregateFunction().isPresent()) {
|
||||
args.add("AGGREGATE");
|
||||
args.add(command.getAggregateFunction().get().name());
|
||||
}
|
||||
|
||||
Mono<List<byte[]>> m = write(toByteArray(command.getSourceKeys().get(0)), ByteArrayCodec.INSTANCE, RedisCommands.ZUNION, args.toArray());
|
||||
Flux<ByteBuffer> flux = m.flatMapMany(e -> Flux.fromIterable(e).map(v -> ByteBuffer.wrap(v)));
|
||||
return Mono.just(new CommandResponse<>(command, flux));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<Set<Tuple>> ZUNION_SCORE = new RedisCommand<>("ZUNION", new ScoredSortedSetReplayDecoder());
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<ZAggregateCommand, Flux<Tuple>>> zUnionWithScores(Publisher<? extends ZAggregateCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notEmpty(command.getSourceKeys(), "Source keys must not be null or empty!");
|
||||
|
||||
List<Object> args = new ArrayList<>(command.getSourceKeys().size() * 2 + 5);
|
||||
args.add(command.getSourceKeys().size());
|
||||
args.addAll(command.getSourceKeys().stream().map(e -> toByteArray(e)).collect(Collectors.toList()));
|
||||
if (!command.getWeights().isEmpty()) {
|
||||
args.add("WEIGHTS");
|
||||
for (Double weight : command.getWeights()) {
|
||||
args.add(BigDecimal.valueOf(weight).toPlainString());
|
||||
}
|
||||
}
|
||||
if (command.getAggregateFunction().isPresent()) {
|
||||
args.add("AGGREGATE");
|
||||
args.add(command.getAggregateFunction().get().name());
|
||||
}
|
||||
args.add("WITHSCORES");
|
||||
|
||||
Mono<Set<Tuple>> m = write(toByteArray(command.getSourceKeys().get(0)), ByteArrayCodec.INSTANCE, ZUNION_SCORE, args.toArray());
|
||||
Flux<Tuple> flux = m.flatMapMany(e -> Flux.fromIterable(e));
|
||||
return Mono.just(new CommandResponse<>(command, flux));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<ZAggregateCommand, Flux<ByteBuffer>>> zInter(Publisher<? extends ZAggregateCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notEmpty(command.getSourceKeys(), "Source keys must not be null or empty!");
|
||||
|
||||
List<Object> args = new ArrayList<>(command.getSourceKeys().size() * 2 + 5);
|
||||
args.add(command.getSourceKeys().size());
|
||||
args.addAll(command.getSourceKeys().stream().map(e -> toByteArray(e)).collect(Collectors.toList()));
|
||||
if (!command.getWeights().isEmpty()) {
|
||||
args.add("WEIGHTS");
|
||||
for (Double weight : command.getWeights()) {
|
||||
args.add(BigDecimal.valueOf(weight).toPlainString());
|
||||
}
|
||||
}
|
||||
if (command.getAggregateFunction().isPresent()) {
|
||||
args.add("AGGREGATE");
|
||||
args.add(command.getAggregateFunction().get().name());
|
||||
}
|
||||
|
||||
Mono<List<byte[]>> m = write(toByteArray(command.getSourceKeys().get(0)), ByteArrayCodec.INSTANCE, RedisCommands.ZINTER, args.toArray());
|
||||
Flux<ByteBuffer> flux = m.flatMapMany(e -> Flux.fromIterable(e).map(v -> ByteBuffer.wrap(v)));
|
||||
return Mono.just(new CommandResponse<>(command, flux));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<Set<Tuple>> ZINTER_SCORE = new RedisCommand<>("ZINTER", new ScoredSortedSetReplayDecoder());
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<ZAggregateCommand, Flux<Tuple>>> zInterWithScores(Publisher<? extends ZAggregateCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notEmpty(command.getSourceKeys(), "Source keys must not be null or empty!");
|
||||
|
||||
List<Object> args = new ArrayList<>(command.getSourceKeys().size() * 2 + 5);
|
||||
args.add(command.getSourceKeys().size());
|
||||
args.addAll(command.getSourceKeys().stream().map(e -> toByteArray(e)).collect(Collectors.toList()));
|
||||
if (!command.getWeights().isEmpty()) {
|
||||
args.add("WEIGHTS");
|
||||
for (Double weight : command.getWeights()) {
|
||||
args.add(BigDecimal.valueOf(weight).toPlainString());
|
||||
}
|
||||
}
|
||||
if (command.getAggregateFunction().isPresent()) {
|
||||
args.add("AGGREGATE");
|
||||
args.add(command.getAggregateFunction().get().name());
|
||||
}
|
||||
args.add("WITHSCORES");
|
||||
|
||||
Mono<Set<Tuple>> m = write(toByteArray(command.getSourceKeys().get(0)), ByteArrayCodec.INSTANCE, ZINTER_SCORE, args.toArray());
|
||||
Flux<Tuple> flux = m.flatMapMany(e -> Flux.fromIterable(e));
|
||||
return Mono.just(new CommandResponse<>(command, flux));
|
||||
});
|
||||
}
|
||||
|
||||
private static final RedisCommand<List<Object>> ZMSCORE = new RedisCommand<>("ZMSCORE", new ObjectListReplayDecoder<>());
|
||||
|
||||
@Override
|
||||
public Flux<ReactiveRedisConnection.MultiValueResponse<ZMScoreCommand, Double>> zMScore(Publisher<ZMScoreCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Key must not be null!");
|
||||
Assert.notNull(command.getValues(), "Values must not be null!");
|
||||
|
||||
byte[] keyBuf = toByteArray(command.getKey());
|
||||
List<Object> args = new ArrayList<>(command.getValues().size() + 1);
|
||||
args.add(keyBuf);
|
||||
args.addAll(command.getValues().stream().map(buf -> toByteArray(buf)).collect(Collectors.toList()));
|
||||
|
||||
Mono<List<Double>> m = read(keyBuf, DoubleCodec.INSTANCE, ZMSCORE, args.toArray());
|
||||
return m.map(v -> new ReactiveRedisConnection.MultiValueResponse<>(command, v));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<CommandResponse<ZRangeStoreCommand, Mono<Long>>> zRangeStore(Publisher<ZRangeStoreCommand> commands) {
|
||||
return execute(commands, command -> {
|
||||
|
||||
Assert.notNull(command.getKey(), "Source key must not be null");
|
||||
Assert.notNull(command.getDestKey(), "Destination key must not be null");
|
||||
Assert.notNull(command.getRange(), "Range must not be null");
|
||||
Assert.notNull(command.getLimit(), "Limit must not be null");
|
||||
|
||||
// Limit limit = LettuceConverters.toLimit(command.getLimit());
|
||||
// Mono<Long> result;
|
||||
//
|
||||
// if (command.getDirection() == Direction.ASC) {
|
||||
//
|
||||
// switch (command.getRangeMode()) {
|
||||
// case ByScore -> result = cmd.zrangestorebyscore(command.getDestKey(), command.getKey(),
|
||||
// (Range<? extends Number>) LettuceConverters.toRange(command.getRange()), limit);
|
||||
// case ByLex -> result = cmd.zrangestorebylex(command.getDestKey(), command.getKey(),
|
||||
// RangeConverter.toRange(command.getRange()), limit);
|
||||
// default -> throw new IllegalStateException("Unsupported value: " + command.getRangeMode());
|
||||
// }
|
||||
// } else {
|
||||
// switch (command.getRangeMode()) {
|
||||
// case ByScore -> result = cmd.zrevrangestorebyscore(command.getDestKey(), command.getKey(),
|
||||
// (Range<? extends Number>) LettuceConverters.toRange(command.getRange()), limit);
|
||||
// case ByLex -> result = cmd.zrevrangestorebylex(command.getDestKey(), command.getKey(),
|
||||
// RangeConverter.toRange(command.getRange()), limit);
|
||||
// default -> throw new IllegalStateException("Unsupported value: " + command.getRangeMode());
|
||||
// }
|
||||
// }
|
||||
|
||||
return Mono.empty();
|
||||
// return Mono.just(new CommandResponse<>(command, result));
|
||||
});
|
||||
}
|
||||
}
|
@@ -0,0 +1,91 @@
/*
 * Copyright (c) 2013-2022 Nikita Koksharov. Licensed under the Apache License, Version 2.0
 * (http://www.apache.org/licenses/LICENSE-2.0).
 */
package org.redisson.spring.data.connection;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;

import org.redisson.client.RedisConnection;
import org.redisson.client.codec.StringCodec;
import org.redisson.client.protocol.RedisCommands;
import org.springframework.data.redis.connection.NamedNode;
import org.springframework.data.redis.connection.RedisSentinelConnection;
import org.springframework.data.redis.connection.RedisServer;
import org.springframework.data.redis.connection.convert.Converters;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonSentinelConnection implements RedisSentinelConnection {

    private final RedisConnection connection;

    public RedissonSentinelConnection(RedisConnection connection) {
        this.connection = connection;
    }

    @Override
    public void failover(NamedNode master) {
        connection.sync(RedisCommands.SENTINEL_FAILOVER, master.getName());
    }

    private static List<RedisServer> toRedisServersList(List<Map<String, String>> source) {
        List<RedisServer> servers = new ArrayList<RedisServer>(source.size());
        for (Map<String, String> info : source) {
            servers.add(RedisServer.newServerFrom(Converters.toProperties(info)));
        }
        return servers;
    }

    @Override
    public Collection<RedisServer> masters() {
        List<Map<String, String>> masters = connection.sync(StringCodec.INSTANCE, RedisCommands.SENTINEL_MASTERS);
        return toRedisServersList(masters);
    }

    @Override
    public Collection<RedisServer> replicas(NamedNode master) {
        List<Map<String, String>> slaves = connection.sync(StringCodec.INSTANCE, RedisCommands.SENTINEL_SLAVES, master.getName());
        return toRedisServersList(slaves);
    }

    @Override
    public void remove(NamedNode master) {
        connection.sync(RedisCommands.SENTINEL_REMOVE, master.getName());
    }

    @Override
    public void monitor(RedisServer master) {
        connection.sync(RedisCommands.SENTINEL_MONITOR, master.getName(), master.getHost(),
                master.getPort().intValue(), master.getQuorum().intValue());
    }

    @Override
    public void close() throws IOException {
        connection.closeAsync();
    }

    @Override
    public boolean isOpen() {
        return !connection.isClosed();
    }

}
@@ -0,0 +1,635 @@
/*
 * Copyright (c) 2013-2022 Nikita Koksharov. Licensed under the Apache License, Version 2.0
 * (http://www.apache.org/licenses/LICENSE-2.0).
 */
package org.redisson.spring.data.connection;

import org.redisson.api.*;
import org.redisson.client.codec.ByteArrayCodec;
import org.redisson.client.codec.Codec;
import org.redisson.client.codec.StringCodec;
import org.redisson.client.handler.State;
import org.redisson.client.protocol.Decoder;
import org.redisson.client.protocol.RedisCommand;
import org.redisson.client.protocol.RedisCommands;
import org.redisson.client.protocol.RedisStrictCommand;
import org.redisson.client.protocol.convertor.StreamIdConvertor;
import org.redisson.client.protocol.decoder.*;
import org.springframework.data.domain.Range;
import org.springframework.data.redis.connection.Limit;
import org.springframework.data.redis.connection.RedisStreamCommands;
import org.springframework.data.redis.connection.RedisZSetCommands;
import org.springframework.data.redis.connection.stream.*;
import org.springframework.data.redis.connection.stream.StreamInfo;
import org.springframework.util.Assert;

import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonStreamCommands implements RedisStreamCommands {

    private final RedissonConnection connection;

    public RedissonStreamCommands(RedissonConnection connection) {
        this.connection = connection;
    }

    private static List<String> toStringList(RecordId... recordIds) {
        if (recordIds.length == 1) {
            return Arrays.asList(recordIds[0].getValue());
        }

        return Arrays.stream(recordIds).map(RecordId::getValue).collect(Collectors.toList());
    }

    @Override
    public RecordId xAdd(MapRecord<byte[], byte[], byte[]> record) {
        return xAdd(record, XAddOptions.none());
    }

    private static final RedisStrictCommand<RecordId> XCLAIM_JUSTID = new RedisStrictCommand<RecordId>("XCLAIM", obj -> RecordId.of(obj.toString()));

    @Override
    public List<RecordId> xClaimJustId(byte[] key, String group, String newOwner, XClaimOptions options) {
        Assert.notNull(key, "Key must not be null!");
        Assert.notNull(group, "Group name must not be null!");
        Assert.notNull(newOwner, "NewOwner must not be null!");
        Assert.notEmpty(options.getIds(), "Ids collection must not be empty!");

        List<Object> params = new ArrayList<>();
        params.add(key);
        params.add(group);
        params.add(newOwner);
        params.add(Objects.requireNonNull(options.getIdleTime()).toMillis());
        params.addAll(Arrays.asList(options.getIdsAsStringArray()));
        params.add("JUSTID");

        return connection.write(key, StringCodec.INSTANCE, XCLAIM_JUSTID, params.toArray());
    }

    @Override
    public List<ByteRecord> xClaim(byte[] key, String group, String newOwner, XClaimOptions options) {
        Assert.notNull(key, "Key must not be null!");
        Assert.notNull(group, "Group name must not be null!");
        Assert.notNull(newOwner, "NewOwner must not be null!");
        Assert.notEmpty(options.getIds(), "Ids collection must not be empty!");

        List<Object> params = new ArrayList<>();
        params.add(key);
        params.add(group);
        params.add(newOwner);
        params.add(Objects.requireNonNull(options.getIdleTime()).toMillis());
        params.addAll(Arrays.asList(options.getIdsAsStringArray()));

        return connection.write(key, ByteArrayCodec.INSTANCE, new RedisCommand<List<ByteRecord>>("XCLAIM",
                new ListMultiDecoder2(
                        new ByteRecordReplayDecoder(key),
                        new ObjectDecoder(new StreamIdDecoder()),
                        new MapEntriesDecoder(new StreamObjectMapReplayDecoder()))), params.toArray());
    }

    @Override
    public String xGroupCreate(byte[] key, String groupName, ReadOffset readOffset, boolean mkStream) {
        Assert.notNull(key, "Key must not be null!");
        Assert.notNull(groupName, "GroupName must not be null!");
        Assert.notNull(readOffset, "ReadOffset must not be null!");

        List<Object> params = new ArrayList<>();
        params.add("CREATE");
        params.add(key);
        params.add(groupName);
        params.add(readOffset.getOffset());
        if (mkStream) {
            params.add("MKSTREAM");
        }

        return connection.write(key, StringCodec.INSTANCE, XGROUP_STRING, params.toArray());
    }

    private static class XInfoStreamReplayDecoder implements MultiDecoder<StreamInfo.XInfoStream> {

        @Override
        public StreamInfo.XInfoStream decode(List<Object> parts, State state) {
            Map<String, Object> res = new HashMap<>();
            res.put("length", parts.get(1));
            res.put("radix-tree-keys", parts.get(3));
            res.put("radix-tree-nodes", parts.get(5));
            res.put("groups", parts.get(7));
            res.put("last-generated-id", parts.get(9).toString());

            List<?> firstEntry = (List<?>) parts.get(11);
            if (firstEntry != null) {
                StreamMessageId firstId = StreamIdConvertor.INSTANCE.convert(firstEntry.get(0));
                Map<Object, Object> firstData = (Map<Object, Object>) firstEntry.get(1);
                res.put("first-entry", firstData);
            }

            List<?> lastEntry = (List<?>) parts.get(13);
            if (lastEntry != null) {
                StreamMessageId lastId = StreamIdConvertor.INSTANCE.convert(lastEntry.get(0));
                Map<Object, Object> lastData = (Map<Object, Object>) lastEntry.get(1);
                res.put("last-entry", lastData);
            }

            List<Object> list = res.entrySet().stream()
                    .flatMap(e -> Stream.of(e.getKey(), e.getValue()))
                    .collect(Collectors.toList());
            return StreamInfo.XInfoStream.fromList(list);
        }
    }

    private static final RedisCommand<org.redisson.api.StreamInfo<Object, Object>> XINFO_STREAM = new RedisCommand<>("XINFO", "STREAM",
            new ListMultiDecoder2(
                    new XInfoStreamReplayDecoder(),
                    new ObjectDecoder(StringCodec.INSTANCE.getValueDecoder()),
                    new ObjectMapDecoder(false)));

    @Override
    public StreamInfo.XInfoStream xInfo(byte[] key) {
        Assert.notNull(key, "Key must not be null!");

        return connection.write(key, ByteArrayCodec.INSTANCE, XINFO_STREAM, key);
    }

    private static class XInfoGroupsReplayDecoder implements MultiDecoder<StreamInfo.XInfoGroups> {

        @Override
        public StreamInfo.XInfoGroups decode(List<Object> parts, State state) {
            List<Object> result = new ArrayList<>();
            for (List<Object> part : (List<List<Object>>) (Object) parts) {
                Map<String, Object> res = new HashMap<>();
                res.put("name", part.get(1));
                res.put("consumers", part.get(3));
                res.put("pending", part.get(5));
                res.put("last-delivered-id", part.get(7));
                List<Object> list = res.entrySet().stream()
                        .flatMap(e -> Stream.of(e.getKey(), e.getValue()))
                        .collect(Collectors.toList());
                result.add(list);
            }

            return StreamInfo.XInfoGroups.fromList(result);
        }
    }

    RedisCommand<StreamInfo.XInfoGroups> XINFO_GROUPS = new RedisCommand<>("XINFO", "GROUPS",
            new ListMultiDecoder2(new XInfoGroupsReplayDecoder(),
                    new ObjectListReplayDecoder(), new ObjectListReplayDecoder())
    );

    @Override
    public StreamInfo.XInfoGroups xInfoGroups(byte[] key) {
        return connection.write(key, StringCodec.INSTANCE, XINFO_GROUPS, key);
    }

    private static class XInfoConsumersReplayDecoder implements MultiDecoder<StreamInfo.XInfoConsumers> {

        private final String groupName;

        public XInfoConsumersReplayDecoder(String groupName) {
            this.groupName = groupName;
        }

        @Override
        public StreamInfo.XInfoConsumers decode(List<Object> parts, State state) {
            List<Object> result = new ArrayList<>();
            for (List<Object> part : (List<List<Object>>) (Object) parts) {
                Map<String, Object> res = new HashMap<>();
                res.put("name", part.get(1));
                res.put("pending", part.get(3));
                res.put("idle", part.get(5));
                List<Object> list = res.entrySet().stream()
                        .flatMap(e -> Stream.of(e.getKey(), e.getValue()))
                        .collect(Collectors.toList());
                result.add(list);
            }

            return StreamInfo.XInfoConsumers.fromList(groupName, result);
        }
    }

    @Override
    public StreamInfo.XInfoConsumers xInfoConsumers(byte[] key, String groupName) {
        return connection.write(key, StringCodec.INSTANCE, new RedisCommand<StreamInfo.XInfoConsumers>("XINFO", "CONSUMERS",
                new ListMultiDecoder2(new XInfoConsumersReplayDecoder(groupName),
                        new ObjectListReplayDecoder(), new ObjectListReplayDecoder())), key, groupName);
    }

    private static class PendingMessagesSummaryReplayDecoder implements MultiDecoder<PendingMessagesSummary> {

        private final String groupName;

        public PendingMessagesSummaryReplayDecoder(String groupName) {
            this.groupName = groupName;
        }

        @Override
        public PendingMessagesSummary decode(List<Object> parts, State state) {
            if (parts.isEmpty()) {
                return null;
            }

            List<List<String>> customerParts = (List<List<String>>) parts.get(3);
            if (customerParts.isEmpty()) {
                return new PendingMessagesSummary(groupName, 0, Range.unbounded(), Collections.emptyMap());
            }

            Map<String, Long> map = customerParts.stream().collect(Collectors.toMap(e -> e.get(0), e -> Long.valueOf(e.get(1)),
                    (u, v) -> { throw new IllegalStateException("Duplicate key: " + u); },
                    LinkedHashMap::new));
            Range<String> range = Range.open(parts.get(1).toString(), parts.get(2).toString());
            return new PendingMessagesSummary(groupName, (Long) parts.get(0), range, map);
        }
    }

    @Override
    public PendingMessagesSummary xPending(byte[] key, String groupName) {
        Assert.notNull(key, "Key must not be null!");
        Assert.notNull(groupName, "Group name must not be null!");

        return connection.write(key, StringCodec.INSTANCE, new RedisCommand<PendingMessagesSummary>("XPENDING",
                new ListMultiDecoder2(new PendingMessagesSummaryReplayDecoder(groupName),
                        new ObjectListReplayDecoder(), new ObjectListReplayDecoder())), key, groupName);
    }

    private static class PendingMessageReplayDecoder implements MultiDecoder<PendingMessage> {

        private String groupName;

        public PendingMessageReplayDecoder(String groupName) {
            this.groupName = groupName;
        }

        @Override
        public PendingMessage decode(List<Object> parts, State state) {
            PendingMessage pm = new PendingMessage(RecordId.of(parts.get(0).toString()),
                    Consumer.from(groupName, parts.get(1).toString()),
                    Duration.of(Long.valueOf(parts.get(2).toString()), ChronoUnit.MILLIS),
                    Long.valueOf(parts.get(3).toString()));
            return pm;
        }
    }

    private static class PendingMessagesReplayDecoder implements MultiDecoder<PendingMessages> {

        private final String groupName;
        private final Range<?> range;

        public PendingMessagesReplayDecoder(String groupName, Range<?> range) {
            this.groupName = groupName;
            this.range = range;
        }

        @Override
        public PendingMessages decode(List<Object> parts, State state) {
            List<PendingMessage> pendingMessages = (List<PendingMessage>) (Object) parts;
            return new PendingMessages(groupName, range, pendingMessages);
        }
    }

    @Override
    public PendingMessages xPending(byte[] key, String groupName, XPendingOptions options) {
        Assert.notNull(key, "Key must not be null!");
        Assert.notNull(groupName, "Group name must not be null!");

        List<Object> params = new ArrayList<>();
        params.add(key);
        params.add(groupName);

        params.add(((Range.Bound<String>) options.getRange().getLowerBound()).getValue().orElse("-"));
        params.add(((Range.Bound<String>) options.getRange().getUpperBound()).getValue().orElse("+"));

        if (options.getCount() != null) {
            params.add(options.getCount());
        } else {
            params.add(10);
        }
        if (options.getConsumerName() != null) {
            params.add(options.getConsumerName());
        }

        return connection.write(key, StringCodec.INSTANCE, new RedisCommand<>("XPENDING",
                new ListMultiDecoder2<PendingMessages>(
                        new PendingMessagesReplayDecoder(groupName, options.getRange()),
                        new PendingMessageReplayDecoder(groupName))),
                params.toArray());
    }

    @Override
    public Long xAck(byte[] key, String group, RecordId... recordIds) {
        Assert.notNull(key, "Key must not be null!");
        Assert.notNull(group, "Group must not be null!");
        Assert.notNull(recordIds, "recordIds must not be null!");

        List<Object> params = new ArrayList<>();
        params.add(key);
        params.add(group);
        params.addAll(toStringList(recordIds));

        return connection.write(key, StringCodec.INSTANCE, RedisCommands.XACK, params.toArray());
    }

    private static final RedisStrictCommand<RecordId> XADD = new RedisStrictCommand<RecordId>("XADD", obj -> RecordId.of(obj.toString()));

    @Override
    public RecordId xAdd(MapRecord<byte[], byte[], byte[]> record, XAddOptions options) {
        Assert.notNull(record, "record must not be null!");

        List<Object> params = new LinkedList<>();
        params.add(record.getStream());

        if (options.getMaxlen() != null) {
            params.add("MAXLEN");
            params.add(options.getMaxlen());
        }

        if (!record.getId().shouldBeAutoGenerated()) {
            params.add(record.getId().getValue());
        } else {
            params.add("*");
        }

        record.getValue().forEach((key, value) -> {
            params.add(key);
            params.add(value);
        });

        return connection.write(record.getStream(), StringCodec.INSTANCE, XADD, params.toArray());
    }

    @Override
    public Long xDel(byte[] key, RecordId... recordIds) {
        Assert.notNull(key, "Key must not be null!");
        Assert.notNull(recordIds, "recordIds must not be null!");

        List<Object> params = new ArrayList<>();
        params.add(key);
        params.addAll(toStringList(recordIds));

        return connection.write(key, StringCodec.INSTANCE, RedisCommands.XDEL, params.toArray());
    }

    private static final RedisStrictCommand<String> XGROUP_STRING = new RedisStrictCommand<>("XGROUP");

    @Override
    public String xGroupCreate(byte[] key, String groupName, ReadOffset readOffset) {
        return xGroupCreate(key, groupName, readOffset, false);
    }

    private static final RedisStrictCommand<Boolean> XGROUP_BOOLEAN = new RedisStrictCommand<Boolean>("XGROUP", obj -> ((Long) obj) > 0);

    @Override
    public Boolean xGroupDelConsumer(byte[] key, Consumer consumer) {
        Assert.notNull(key, "Key must not be null!");
        Assert.notNull(consumer, "Consumer must not be null!");
        Assert.notNull(consumer.getName(), "Consumer name must not be null!");
        Assert.notNull(consumer.getGroup(), "Consumer group must not be null!");

        return connection.write(key, StringCodec.INSTANCE, XGROUP_BOOLEAN, "DELCONSUMER", key, consumer.getGroup(), consumer.getName());
    }

    @Override
    public Boolean xGroupDestroy(byte[] key, String groupName) {
        Assert.notNull(key, "Key must not be null!");
        Assert.notNull(groupName, "GroupName must not be null!");

        return connection.write(key, StringCodec.INSTANCE, XGROUP_BOOLEAN, "DESTROY", key, groupName);
    }

    @Override
    public Long xLen(byte[] key) {
        Assert.notNull(key, "Key must not be null!");

        return connection.write(key, StringCodec.INSTANCE, RedisCommands.XLEN, key);
    }

    private List<ByteRecord> range(RedisCommand<?> rangeCommand, byte[] key, Range<String> range, Limit limit) {
        Assert.notNull(key, "Key must not be null!");
        Assert.notNull(range, "Range must not be null!");
        Assert.notNull(limit, "Limit must not be null!");

        List<Object> params = new LinkedList<>();
        params.add(key);

        if (rangeCommand.getName().equals(RedisCommands.XRANGE.getName())) {
            params.add(range.getLowerBound().getValue().orElse("-"));
            params.add(range.getUpperBound().getValue().orElse("+"));
        } else {
            // XREVRANGE expects the end id before the start id, so the bounds are passed in reverse order
            params.add(range.getUpperBound().getValue().orElse("+"));
            params.add(range.getLowerBound().getValue().orElse("-"));
        }

        if (limit.getCount() > 0) {
            params.add("COUNT");
            params.add(limit.getCount());
        }

        return connection.write(key, ByteArrayCodec.INSTANCE, rangeCommand, params.toArray());
    }

    private static class ByteRecordReplayDecoder implements MultiDecoder<List<ByteRecord>> {

        private final byte[] key;

        ByteRecordReplayDecoder(byte[] key) {
            this.key = key;
        }

        @Override
        public List<ByteRecord> decode(List<Object> parts, State state) {
            List<List<Object>> list = (List<List<Object>>) (Object) parts;
            List<ByteRecord> result = new ArrayList<>(parts.size() / 2);
            for (List<Object> entry : list) {
                ByteRecord record = StreamRecords.newRecord()
                        .in(key)
                        .withId(RecordId.of(entry.get(0).toString()))
                        .ofBytes((Map<byte[], byte[]>) entry.get(1));
                result.add(record);
            }
            return result;
        }
    }

    @Override
    public List<ByteRecord> xRange(byte[] key, Range<String> range, Limit limit) {
        return range(new RedisCommand<>("XRANGE",
                new ListMultiDecoder2(
                        new ByteRecordReplayDecoder(key),
                        new ObjectDecoder(new StreamIdDecoder()),
                        new MapEntriesDecoder(new StreamObjectMapReplayDecoder()))),
                key, range, limit);
    }

    private static class ByteRecordReplayDecoder2 implements MultiDecoder<List<ByteRecord>> {

        @Override
        public List<ByteRecord> decode(List<Object> parts, State state) {
            List<List<Object>> list = (List<List<Object>>) (Object) parts;
            List<ByteRecord> result = new ArrayList<>(parts.size() / 2);

            for (List<Object> entries : list) {
                List<List<Object>> streamEntries = (List<List<Object>>) entries.get(1);
                if (streamEntries.isEmpty()) {
                    continue;
                }

                String name = (String) entries.get(0);
                for (List<Object> se : streamEntries) {
                    ByteRecord record = StreamRecords.newRecord()
                            .in(name.getBytes())
                            .withId(RecordId.of(se.get(0).toString()))
                            .ofBytes((Map<byte[], byte[]>) se.get(1));
                    result.add(record);
                }
            }
            return result;
        }
    }

    private static final RedisCommand<List<ByteRecord>> XREAD = new RedisCommand<>("XREAD",
            new ListMultiDecoder2(
                    new ByteRecordReplayDecoder2(),
                    new ObjectDecoder(StringCodec.INSTANCE.getValueDecoder()),
                    new ObjectDecoder(new StreamIdDecoder()),
                    new ObjectDecoder(new StreamIdDecoder()),
                    new MapEntriesDecoder(new StreamObjectMapReplayDecoder())));

    private static final RedisCommand<List<ByteRecord>> XREAD_BLOCKING =
            new RedisCommand<>("XREAD", XREAD.getReplayMultiDecoder());

    private static final RedisCommand<List<ByteRecord>> XREADGROUP =
            new RedisCommand<>("XREADGROUP", XREAD.getReplayMultiDecoder());

    private static final RedisCommand<List<ByteRecord>> XREADGROUP_BLOCKING =
            new RedisCommand<>("XREADGROUP", XREADGROUP.getReplayMultiDecoder());

    static {
        RedisCommands.BLOCKING_COMMANDS.add(XREAD_BLOCKING);
        RedisCommands.BLOCKING_COMMANDS.add(XREADGROUP_BLOCKING);
    }

    @Override
    public List<ByteRecord> xRead(StreamReadOptions readOptions, StreamOffset<byte[]>... streams) {
        Assert.notNull(readOptions, "ReadOptions must not be null!");
        Assert.notNull(streams, "StreamOffsets must not be null!");

        List<Object> params = new ArrayList<>();

        if (readOptions.getCount() != null && readOptions.getCount() > 0) {
            params.add("COUNT");
            params.add(readOptions.getCount());
        }

        if (readOptions.getBlock() != null && readOptions.getBlock() > 0) {
            params.add("BLOCK");
            params.add(readOptions.getBlock());
        }

        params.add("STREAMS");
        for (StreamOffset<byte[]> streamOffset : streams) {
            params.add(streamOffset.getKey());
        }

        for (StreamOffset<byte[]> streamOffset : streams) {
            params.add(streamOffset.getOffset().getOffset());
        }

        if (readOptions.getBlock() != null && readOptions.getBlock() > 0) {
            return connection.read(streams[0].getKey(), ByteArrayCodec.INSTANCE, XREAD_BLOCKING, params.toArray());
        }
        return connection.read(streams[0].getKey(), ByteArrayCodec.INSTANCE, XREAD, params.toArray());
    }

    @Override
    public List<ByteRecord> xReadGroup(Consumer consumer, StreamReadOptions readOptions, StreamOffset<byte[]>... streams) {
        Assert.notNull(consumer, "Consumer must not be null!");
        Assert.notNull(readOptions, "ReadOptions must not be null!");
        Assert.notNull(streams, "StreamOffsets must not be null!");

        List<Object> params = new ArrayList<>();

        params.add("GROUP");
        params.add(consumer.getGroup());
        params.add(consumer.getName());

        if (readOptions.getCount() != null && readOptions.getCount() > 0) {
            params.add("COUNT");
            params.add(readOptions.getCount());
        }

        if (readOptions.getBlock() != null && readOptions.getBlock() > 0) {
            params.add("BLOCK");
            params.add(readOptions.getBlock());
        }

        if (readOptions.isNoack()) {
            params.add("NOACK");
        }

        params.add("STREAMS");
        for (StreamOffset<byte[]> streamOffset : streams) {
            params.add(streamOffset.getKey());
        }

        for (StreamOffset<byte[]> streamOffset : streams) {
            params.add(streamOffset.getOffset().getOffset());
        }

        if (readOptions.getBlock() != null && readOptions.getBlock() > 0) {
            return connection.write(streams[0].getKey(), ByteArrayCodec.INSTANCE, XREADGROUP_BLOCKING, params.toArray());
        }
        return connection.write(streams[0].getKey(), ByteArrayCodec.INSTANCE, XREADGROUP, params.toArray());
    }

    @Override
    public List<ByteRecord> xRevRange(byte[] key, Range<String> range, Limit limit) {
        return range(new RedisCommand<>("XREVRANGE",
                new ListMultiDecoder2(
                        new ByteRecordReplayDecoder(key),
                        new ObjectDecoder(new StreamIdDecoder()),
                        new MapEntriesDecoder(new StreamObjectMapReplayDecoder()))),
                key, range, limit);
    }

    @Override
    public Long xTrim(byte[] key, long count) {
        return xTrim(key, count, false);
    }

    @Override
    public Long xTrim(byte[] key, long count, boolean approximateTrimming) {
        Assert.notNull(key, "Key must not be null!");
        Assert.notNull(count, "Count must not be null!");

        List<Object> params = new ArrayList<>(4);
        params.add(key);
        params.add("MAXLEN");
        if (approximateTrimming) {
            params.add("~");
        }
        params.add(count);

        return connection.write(key, StringCodec.INSTANCE, RedisCommands.XTRIM, params.toArray());
    }
}
@@ -0,0 +1,179 @@
/*
 * Copyright (c) 2013-2022 Nikita Koksharov. Licensed under the Apache License, Version 2.0
 * (http://www.apache.org/licenses/LICENSE-2.0).
 */
package org.redisson.spring.data.connection;

import org.redisson.client.BaseRedisPubSubListener;
import org.redisson.client.ChannelName;
import org.redisson.client.codec.ByteArrayCodec;
import org.redisson.client.codec.Codec;
import org.redisson.client.protocol.pubsub.PubSubType;
import org.redisson.command.CommandAsyncExecutor;
import org.redisson.pubsub.PubSubConnectionEntry;
import org.redisson.pubsub.PublishSubscribeService;
import org.springframework.data.redis.connection.DefaultMessage;
import org.springframework.data.redis.connection.MessageListener;
import org.springframework.data.redis.connection.SubscriptionListener;
import org.springframework.data.redis.connection.util.AbstractSubscription;

import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedQueue;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonSubscription extends AbstractSubscription {

    private final CommandAsyncExecutor commandExecutor;
    private final PublishSubscribeService subscribeService;

    public RedissonSubscription(CommandAsyncExecutor commandExecutor, PublishSubscribeService subscribeService, MessageListener listener) {
        super(listener, null, null);
        this.commandExecutor = commandExecutor;
        this.subscribeService = subscribeService;
    }

    @Override
    protected void doSubscribe(byte[]... channels) {
        List<CompletableFuture<?>> list = new ArrayList<>();
        Queue<byte[]> subscribed = new ConcurrentLinkedQueue<>();
        for (byte[] channel : channels) {
            if (subscribeService.getPubSubEntry(new ChannelName(channel)) != null) {
                continue;
            }

            CompletableFuture<PubSubConnectionEntry> f = subscribeService.subscribe(ByteArrayCodec.INSTANCE, new ChannelName(channel), new BaseRedisPubSubListener() {
                @Override
                public void onMessage(CharSequence ch, Object message) {
                    if (!Arrays.equals(((ChannelName) ch).getName(), channel)) {
                        return;
                    }

                    byte[] m = toBytes(message);
                    DefaultMessage msg = new DefaultMessage(((ChannelName) ch).getName(), m);
                    getListener().onMessage(msg, null);
                }

                @Override
                public boolean onStatus(PubSubType type, CharSequence ch) {
                    if (!Arrays.equals(((ChannelName) ch).getName(), channel)) {
                        return false;
                    }

                    if (getListener() instanceof SubscriptionListener) {
                        subscribed.add(channel);
                    }
                    return super.onStatus(type, ch);
                }

            });
            list.add(f);
        }
        for (CompletableFuture<?> future : list) {
            commandExecutor.get(future);
        }
        for (byte[] channel : subscribed) {
            ((SubscriptionListener) getListener()).onChannelSubscribed(channel, 1);
        }
    }

    @Override
    protected void doUnsubscribe(boolean all, byte[]... channels) {
        for (byte[] channel : channels) {
            CompletableFuture<Codec> f = subscribeService.unsubscribe(new ChannelName(channel), PubSubType.UNSUBSCRIBE);
            if (getListener() instanceof SubscriptionListener) {
                f.whenComplete((r, e) -> {
                    if (r != null) {
                        ((SubscriptionListener) getListener()).onChannelUnsubscribed(channel, 1);
                    }
                });
            }
        }
    }

    @Override
    protected void doPsubscribe(byte[]... patterns) {
        List<CompletableFuture<?>> list = new ArrayList<>();
        Queue<byte[]> subscribed = new ConcurrentLinkedQueue<>();
        for (byte[] channel : patterns) {
            if (subscribeService.getPubSubEntry(new ChannelName(channel)) != null) {
                continue;
            }

            CompletableFuture<Collection<PubSubConnectionEntry>> f = subscribeService.psubscribe(new ChannelName(channel), ByteArrayCodec.INSTANCE, new BaseRedisPubSubListener() {
                @Override
                public void onPatternMessage(CharSequence pattern, CharSequence ch, Object message) {
                    if (!Arrays.equals(((ChannelName) pattern).getName(), channel)) {
                        return;
                    }

                    byte[] m = toBytes(message);
                    DefaultMessage msg = new DefaultMessage(((ChannelName) ch).getName(), m);
                    getListener().onMessage(msg, ((ChannelName) pattern).getName());
                }

                @Override
                public boolean onStatus(PubSubType type, CharSequence pattern) {
                    if (!Arrays.equals(((ChannelName) pattern).getName(), channel)) {
                        return false;
                    }

                    if (getListener() instanceof SubscriptionListener) {
                        subscribed.add(channel);
                    }
                    return super.onStatus(type, pattern);
                }
            });
            list.add(f);
        }
        for (CompletableFuture<?> future : list) {
            commandExecutor.get(future);
        }
        for (byte[] channel : subscribed) {
            ((SubscriptionListener) getListener()).onPatternSubscribed(channel, 1);
        }
    }

    private byte[] toBytes(Object message) {
        if (message instanceof String) {
            return ((String) message).getBytes();
        }
        return (byte[]) message;
    }

    @Override
    protected void doPUnsubscribe(boolean all, byte[]... patterns) {
        for (byte[] pattern : patterns) {
            CompletableFuture<Codec> f = subscribeService.unsubscribe(new ChannelName(pattern), PubSubType.PUNSUBSCRIBE);
            if (getListener() instanceof SubscriptionListener) {
                f.whenComplete((r, e) -> {
                    if (r != null) {
                        ((SubscriptionListener) getListener()).onPatternUnsubscribed(pattern, 1);
                    }
                });
            }
        }
    }

    @Override
    protected void doClose() {
        doUnsubscribe(false, getChannels().toArray(new byte[getChannels().size()][]));
        doPUnsubscribe(false, getPatterns().toArray(new byte[getPatterns().size()][]));
    }

}
@@ -0,0 +1,53 @@
/*
 * Copyright (c) 2013-2022 Nikita Koksharov. Licensed under the Apache License, Version 2.0
 * (http://www.apache.org/licenses/LICENSE-2.0).
 */
package org.redisson.spring.data.connection;

import java.util.ArrayList;
import java.util.List;

import org.redisson.client.codec.Codec;
import org.redisson.client.codec.DoubleCodec;
import org.redisson.client.handler.State;
import org.redisson.client.protocol.Decoder;
import org.redisson.client.protocol.decoder.MultiDecoder;
import org.springframework.data.redis.connection.zset.DefaultTuple;
import org.springframework.data.redis.connection.zset.Tuple;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class ScoredSortedListReplayDecoder implements MultiDecoder<List<Tuple>> {

    @Override
    public Decoder<Object> getDecoder(Codec codec, int paramNum, State state) {
        if (paramNum % 2 != 0) {
            return DoubleCodec.INSTANCE.getValueDecoder();
        }
        return MultiDecoder.super.getDecoder(codec, paramNum, state);
    }

    @Override
    public List<Tuple> decode(List<Object> parts, State state) {
        List<Tuple> result = new ArrayList<Tuple>();
        for (int i = 0; i < parts.size(); i += 2) {
            result.add(new DefaultTuple((byte[]) parts.get(i), ((Number) parts.get(i + 1)).doubleValue()));
        }
        return result;
    }

}
@@ -0,0 +1,54 @@
/*
 * Copyright (c) 2013-2022 Nikita Koksharov. Licensed under the Apache License, Version 2.0
 * (http://www.apache.org/licenses/LICENSE-2.0).
 */
package org.redisson.spring.data.connection;

import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

import org.redisson.client.codec.Codec;
import org.redisson.client.codec.DoubleCodec;
import org.redisson.client.handler.State;
import org.redisson.client.protocol.Decoder;
import org.redisson.client.protocol.decoder.MultiDecoder;
import org.springframework.data.redis.connection.zset.DefaultTuple;
import org.springframework.data.redis.connection.zset.Tuple;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class ScoredSortedSetReplayDecoder implements MultiDecoder<Set<Tuple>> {

    @Override
    public Decoder<Object> getDecoder(Codec codec, int paramNum, State state) {
        if (paramNum % 2 != 0) {
            return DoubleCodec.INSTANCE.getValueDecoder();
        }
        return MultiDecoder.super.getDecoder(codec, paramNum, state);
    }

    @Override
    public Set<Tuple> decode(List<Object> parts, State state) {
        Set<Tuple> result = new LinkedHashSet<Tuple>();
        for (int i = 0; i < parts.size(); i += 2) {
            result.add(new DefaultTuple((byte[]) parts.get(i), ((Number) parts.get(i + 1)).doubleValue()));
        }
        return result;
    }

}
@@ -0,0 +1,43 @@
/*
 * Copyright (c) 2013-2022 Nikita Koksharov. Licensed under the Apache License, Version 2.0
 * (http://www.apache.org/licenses/LICENSE-2.0).
 */
package org.redisson.spring.data.connection;

import java.util.concurrent.TimeUnit;

import org.redisson.client.protocol.convertor.Convertor;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class SecondsConvertor implements Convertor<Long> {

    private final TimeUnit unit;
    private final TimeUnit source;

    public SecondsConvertor(TimeUnit unit, TimeUnit source) {
        super();
        this.unit = unit;
        this.source = source;
    }

    @Override
    public Long convert(Object obj) {
        return unit.convert((Long) obj, source);
    }

}
@@ -0,0 +1,51 @@
/*
 * Copyright (c) 2013-2022 Nikita Koksharov. Licensed under the Apache License, Version 2.0
 * (http://www.apache.org/licenses/LICENSE-2.0).
 */
package org.redisson.spring.data.connection;

import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

import org.redisson.client.codec.Codec;
import org.redisson.client.handler.State;
import org.redisson.client.protocol.Decoder;
import org.redisson.client.protocol.decoder.MultiDecoder;

/**
 *
 * @author Nikita Koksharov
 *
 */
public class SetReplayDecoder<T> implements MultiDecoder<Set<T>> {

    private final Decoder<Object> decoder;

    public SetReplayDecoder(Decoder<Object> decoder) {
        super();
        this.decoder = decoder;
    }

    @Override
    public Decoder<Object> getDecoder(Codec codec, int paramNum, State state) {
        return decoder;
    }

    @Override
    public Set<T> decode(List<Object> parts, State state) {
        return new LinkedHashSet<>(parts);
    }

}
@@ -0,0 +1,86 @@
package org.redisson;

import java.io.IOException;

import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.redisson.api.RedissonClient;
import org.redisson.config.Config;

public abstract class BaseTest {

    protected RedissonClient redisson;
    protected static RedissonClient defaultRedisson;

    @BeforeClass
    public static void beforeClass() throws IOException, InterruptedException {
        if (!RedissonRuntimeEnvironment.isTravis) {
            RedisRunner.startDefaultRedisServerInstance();
            defaultRedisson = createInstance();
            Runtime.getRuntime().addShutdownHook(new Thread() {
                @Override
                public void run() {
                    defaultRedisson.shutdown();
                    try {
                        RedisRunner.shutDownDefaultRedisServerInstance();
                    } catch (InterruptedException e) {
                        // TODO Auto-generated catch block
                        e.printStackTrace();
                    }
                }
            });
        }
    }

    @Before
    public void before() throws IOException, InterruptedException {
        if (RedissonRuntimeEnvironment.isTravis) {
            RedisRunner.startDefaultRedisServerInstance();
            redisson = createInstance();
        } else {
            if (redisson == null) {
                redisson = defaultRedisson;
            }
            if (flushBetweenTests()) {
                redisson.getKeys().flushall();
            }
        }
    }

    @After
    public void after() throws InterruptedException {
        if (RedissonRuntimeEnvironment.isTravis) {
            redisson.shutdown();
            RedisRunner.shutDownDefaultRedisServerInstance();
        }
    }

    public static Config createConfig() {
//        String redisAddress = System.getProperty("redisAddress");
//        if (redisAddress == null) {
//            redisAddress = "127.0.0.1:6379";
//        }
        Config config = new Config();
//        config.setCodec(new MsgPackJacksonCodec());
//        config.useSentinelServers().setMasterName("mymaster").addSentinelAddress("127.0.0.1:26379", "127.0.0.1:26389");
//        config.useClusterServers().addNodeAddress("127.0.0.1:7004", "127.0.0.1:7001", "127.0.0.1:7000");
        config.useSingleServer()
                .setAddress(RedisRunner.getDefaultRedisServerBindAddressAndPort());
//                .setPassword("mypass1");
//        config.useMasterSlaveConnection()
//                .setMasterAddress("127.0.0.1:6379")
//                .addSlaveAddress("127.0.0.1:6399")
//                .addSlaveAddress("127.0.0.1:6389");
        return config;
    }

    public static RedissonClient createInstance() {
        Config config = createConfig();
        return Redisson.create(config);
    }

    protected boolean flushBetweenTests() {
        return true;
    }
}
@@ -0,0 +1,156 @@
package org.redisson;

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.math.BigInteger;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.redisson.misc.BiHashMap;

/**
 *
 * @author Rui Gu (https://github.com/jackygurui)
 */
public class ClusterRunner {

    private final LinkedHashMap<RedisRunner, String> nodes = new LinkedHashMap<>();
    private final LinkedHashMap<String, String> slaveMasters = new LinkedHashMap<>();

    public ClusterRunner addNode(RedisRunner runner) {
        nodes.putIfAbsent(runner, getRandomId());
        if (!runner.hasOption(RedisRunner.REDIS_OPTIONS.CLUSTER_ENABLED)) {
            runner.clusterEnabled(true);
        }
        if (!runner.hasOption(RedisRunner.REDIS_OPTIONS.CLUSTER_NODE_TIMEOUT)) {
            runner.clusterNodeTimeout(5000);
        }
        if (!runner.hasOption(RedisRunner.REDIS_OPTIONS.PORT)) {
            runner.randomPort(1);
            runner.port(RedisRunner.findFreePort());
        }
        if (!runner.hasOption(RedisRunner.REDIS_OPTIONS.BIND)) {
            runner.bind("127.0.0.1");
        }
        return this;
    }

    public ClusterRunner addNode(RedisRunner master, RedisRunner... slaves) {
        addNode(master);
        for (RedisRunner slave : slaves) {
            addNode(slave);
            slaveMasters.put(nodes.get(slave), nodes.get(master));
        }
        return this;
    }

    public synchronized ClusterProcesses run() throws IOException, InterruptedException, RedisRunner.FailedToStartRedisException {
        BiHashMap<String, RedisRunner.RedisProcess> processes = new BiHashMap<>();
        for (RedisRunner runner : nodes.keySet()) {
            List<String> options = getClusterConfig(runner);
            String confFile = runner.dir() + File.separator + nodes.get(runner) + ".conf";
            System.out.println("WRITING CONFIG: for " + nodes.get(runner));
            try (PrintWriter printer = new PrintWriter(new FileWriter(confFile))) {
                options.stream().forEach((line) -> {
                    printer.println(line);
                    System.out.println(line);
                });
            }
            processes.put(nodes.get(runner), runner.clusterConfigFile(confFile).run());
        }
        Thread.sleep(1000);
        for (RedisRunner.RedisProcess process : processes.valueSet()) {
            if (!process.isAlive()) {
                throw new RedisRunner.FailedToStartRedisException();
            }
        }
        return new ClusterProcesses(processes);
    }

    private List<String> getClusterConfig(RedisRunner runner) {
        String me = runner.getInitialBindAddr() + ":" + runner.getPort();
        List<String> nodeConfig = new ArrayList<>();
        int c = 0;
        for (RedisRunner node : nodes.keySet()) {
            String nodeId = nodes.get(node);
            StringBuilder sb = new StringBuilder();
            String nodeAddr = node.getInitialBindAddr() + ":" + node.getPort();
            sb.append(nodeId).append(" ");
            sb.append(nodeAddr).append(" ");
            sb.append(me.equals(nodeAddr)
                    ? "myself,"
                    : "");
            boolean isMaster = !slaveMasters.containsKey(nodeId);
            if (isMaster) {
                sb.append("master -");
            } else {
                sb.append("slave ").append(slaveMasters.get(nodeId));
            }
            sb.append(" ");
            sb.append("0").append(" ");
            sb.append(me.equals(nodeAddr)
                    ? "0"
                    : "1").append(" ");
            sb.append(c + 1).append(" ");
            sb.append("connected ");
            if (isMaster) {
                sb.append(getSlots(c, nodes.size() - slaveMasters.size()));
                c++;
            }
            nodeConfig.add(sb.toString());
        }
        nodeConfig.add("vars currentEpoch 0 lastVoteEpoch 0");
        return nodeConfig;
    }

    private static String getSlots(int index, int groupNum) {
        final double t = 16383;
        int start = index == 0 ? 0 : (int) (t / groupNum * index);
        int end = index == groupNum - 1 ? (int) t : (int) (t / groupNum * (index + 1)) - 1;
        return start + "-" + end;
    }

    private static String getRandomId() {
        final SecureRandom r = new SecureRandom();
        return new BigInteger(160, r).toString(16);
    }

    public static class ClusterProcesses {
        private final BiHashMap<String, RedisRunner.RedisProcess> processes;

        private ClusterProcesses(BiHashMap<String, RedisRunner.RedisProcess> processes) {
            this.processes = processes;
        }

        public RedisRunner.RedisProcess getProcess(String nodeId) {
            return processes.get(nodeId);
        }

        public String getNodeId(RedisRunner.RedisProcess process) {
            return processes.reverseGet(process);
        }

        public Set<RedisRunner.RedisProcess> getNodes() {
            return processes.valueSet();
        }

        public Set<String> getNodeIds() {
            return processes.keySet();
        }

        public synchronized Map<String, Integer> shutdown() {
            return processes
                    .entrySet()
                    .stream()
                    .collect(Collectors.toMap(
                            e -> e.getKey(),
                            e -> e.getValue().stop()));
        }
    }
}
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,58 @@
|
||||
package org.redisson;
|
||||
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Rui Gu (https://github.com/jackygurui)
|
||||
*/
|
||||
public class RedisVersion implements Comparable<RedisVersion>{
|
||||
|
||||
private final String fullVersion;
|
||||
private final Integer majorVersion;
|
||||
private final Integer minorVersion;
|
||||
private final Integer patchVersion;
|
||||
|
||||
public RedisVersion(String fullVersion) {
|
||||
this.fullVersion = fullVersion;
|
||||
Matcher matcher = Pattern.compile("^([\\d]+)\\.([\\d]+)\\.([\\d]+)$").matcher(fullVersion);
|
||||
matcher.find();
|
||||
majorVersion = Integer.parseInt(matcher.group(1));
|
||||
minorVersion = Integer.parseInt(matcher.group(2));
|
||||
patchVersion = Integer.parseInt(matcher.group(3));
|
||||
}
|
||||
|
||||
public String getFullVersion() {
|
||||
return fullVersion;
|
||||
}
|
||||
|
||||
public int getMajorVersion() {
|
||||
return majorVersion;
|
||||
}
|
||||
|
||||
public int getMinorVersion() {
|
||||
return minorVersion;
|
||||
}
|
||||
|
||||
public int getPatchVersion() {
|
||||
return patchVersion;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(RedisVersion o) {
|
||||
int ma = this.majorVersion.compareTo(o.majorVersion);
|
||||
int mi = this.minorVersion.compareTo(o.minorVersion);
|
||||
int pa = this.patchVersion.compareTo(o.patchVersion);
|
||||
return ma != 0 ? ma : mi != 0 ? mi : pa;
|
||||
}
|
||||
|
||||
public int compareTo(String redisVersion) {
|
||||
return this.compareTo(new RedisVersion(redisVersion));
|
||||
}
|
||||
|
||||
public static int compareTo(String redisVersion1, String redisVersion2) {
|
||||
return new RedisVersion(redisVersion1).compareTo(redisVersion2);
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,21 @@
|
||||
package org.redisson;
|
||||
|
||||
import java.util.Locale;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Rui Gu (https://github.com/jackygurui)
|
||||
*/
|
||||
public class RedissonRuntimeEnvironment {
|
||||
|
||||
public static final boolean isTravis = "true".equalsIgnoreCase(System.getProperty("travisEnv"));
|
||||
public static final String redisBinaryPath = System.getProperty("redisBinary", "C:\\redis\\redis-server.exe");
|
||||
public static final String tempDir = System.getProperty("java.io.tmpdir");
|
||||
public static final String OS;
|
||||
public static final boolean isWindows;
|
||||
|
||||
static {
|
||||
OS = System.getProperty("os.name", "generic");
|
||||
isWindows = OS.toLowerCase(Locale.ENGLISH).contains("win");
|
||||
}
|
||||
}
|
@ -0,0 +1,16 @@
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.junit.Before;
|
||||
import org.redisson.BaseTest;
|
||||
import org.springframework.data.redis.connection.RedisConnection;
|
||||
|
||||
public abstract class BaseConnectionTest extends BaseTest {
|
||||
|
||||
RedisConnection connection;
|
||||
|
||||
@Before
|
||||
public void init() {
|
||||
connection = new RedissonConnection(redisson);
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,166 @@
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.junit.After;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
import org.redisson.ClusterRunner;
|
||||
import org.redisson.ClusterRunner.ClusterProcesses;
|
||||
import org.redisson.RedisRunner;
|
||||
import org.redisson.RedisRunner.FailedToStartRedisException;
|
||||
import org.redisson.Redisson;
|
||||
import org.redisson.api.RedissonClient;
|
||||
import org.redisson.config.Config;
|
||||
import org.redisson.config.SubscriptionMode;
|
||||
import org.redisson.connection.balancer.RandomLoadBalancer;
|
||||
import org.springframework.dao.InvalidDataAccessResourceUsageException;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
||||
import static org.assertj.core.api.Assertions.*;
|
||||
import static org.assertj.core.api.Assertions.assertThatThrownBy;
|
||||
import static org.redisson.connection.MasterSlaveConnectionManager.MAX_SLOT;
|
||||
|
||||
@RunWith(Parameterized.class)
|
||||
public class RedissonClusterConnectionRenameTest {
|
||||
|
||||
@Parameterized.Parameters(name= "{index} - same slot = {0}")
|
||||
public static Iterable<Object[]> data() {
|
||||
return Arrays.asList(new Object[][] {
|
||||
{false},
|
||||
{true}
|
||||
});
|
||||
}
|
||||
|
||||
@Parameterized.Parameter(0)
|
||||
public boolean sameSlot;
|
||||
|
||||
static RedissonClient redisson;
|
||||
static RedissonClusterConnection connection;
|
||||
static ClusterProcesses process;
|
||||
|
||||
byte[] originalKey = "key".getBytes();
|
||||
byte[] newKey = "unset".getBytes();
|
||||
byte[] value = "value".getBytes();
|
||||
|
||||
@BeforeClass
|
||||
public static void before() throws FailedToStartRedisException, IOException, InterruptedException {
|
||||
RedisRunner master1 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
RedisRunner master2 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
RedisRunner master3 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
RedisRunner slave1 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
RedisRunner slave2 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
RedisRunner slave3 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
|
||||
|
||||
ClusterRunner clusterRunner = new ClusterRunner()
|
||||
.addNode(master1, slave1)
|
||||
.addNode(master2, slave2)
|
||||
.addNode(master3, slave3);
|
||||
process = clusterRunner.run();
|
||||
|
||||
Config config = new Config();
|
||||
config.useClusterServers()
|
||||
.setSubscriptionMode(SubscriptionMode.SLAVE)
|
||||
.setLoadBalancer(new RandomLoadBalancer())
|
||||
.addNodeAddress(process.getNodes().stream().findAny().get().getRedisServerAddressAndPort());
|
||||
|
||||
redisson = Redisson.create(config);
|
||||
connection = new RedissonClusterConnection(redisson);
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void after() {
|
||||
process.shutdown();
|
||||
redisson.shutdown();
|
||||
}
|
||||
|
||||
@After
|
||||
public void cleanup() {
|
||||
connection.del(originalKey);
|
||||
connection.del(newKey);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRename() {
|
||||
connection.set(originalKey, value);
|
||||
connection.expire(originalKey, 1000);
|
||||
|
||||
Integer originalSlot = connection.clusterGetSlotForKey(originalKey);
|
||||
newKey = getNewKeyForSlot(originalKey, getTargetSlot(originalSlot));
|
||||
|
||||
connection.rename(originalKey, newKey);
|
||||
|
||||
assertThat(connection.get(newKey)).isEqualTo(value);
|
||||
assertThat(connection.ttl(newKey)).isGreaterThan(0);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRename_pipeline() {
|
||||
connection.set(originalKey, value);
|
||||
|
||||
Integer originalSlot = connection.clusterGetSlotForKey(originalKey);
|
||||
newKey = getNewKeyForSlot(originalKey, getTargetSlot(originalSlot));
|
||||
|
||||
connection.openPipeline();
|
||||
assertThatThrownBy(() -> connection.rename(originalKey, newKey)).isInstanceOf(InvalidDataAccessResourceUsageException.class);
|
||||
connection.closePipeline();
|
||||
}
|
||||
|
||||
protected byte[] getNewKeyForSlot(byte[] originalKey, Integer targetSlot) {
|
||||
int counter = 0;
|
||||
|
||||
byte[] newKey = (new String(originalKey) + counter).getBytes();
|
||||
|
||||
Integer newKeySlot = connection.clusterGetSlotForKey(newKey);
|
||||
|
||||
while(!newKeySlot.equals(targetSlot)) {
|
||||
counter++;
|
||||
newKey = (new String(originalKey) + counter).getBytes();
|
||||
newKeySlot = connection.clusterGetSlotForKey(newKey);
|
||||
}
|
||||
|
||||
return newKey;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRenameNX() {
|
||||
connection.set(originalKey, value);
|
||||
connection.expire(originalKey, 1000);
|
||||
|
||||
Integer originalSlot = connection.clusterGetSlotForKey(originalKey);
|
||||
newKey = getNewKeyForSlot(originalKey, getTargetSlot(originalSlot));
|
||||
|
||||
Boolean result = connection.renameNX(originalKey, newKey);
|
||||
|
||||
assertThat(connection.get(newKey)).isEqualTo(value);
|
||||
assertThat(connection.ttl(newKey)).isGreaterThan(0);
|
||||
assertThat(result).isTrue();
|
||||
|
||||
connection.set(originalKey, value);
|
||||
|
||||
result = connection.renameNX(originalKey, newKey);
|
||||
|
||||
assertThat(result).isFalse();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRenameNX_pipeline() {
|
||||
connection.set(originalKey, value);
|
||||
|
||||
Integer originalSlot = connection.clusterGetSlotForKey(originalKey);
|
||||
newKey = getNewKeyForSlot(originalKey, getTargetSlot(originalSlot));
|
||||
|
||||
connection.openPipeline();
|
||||
assertThatThrownBy(() -> connection.renameNX(originalKey, newKey)).isInstanceOf(InvalidDataAccessResourceUsageException.class);
|
||||
connection.closePipeline();
|
||||
}
|
||||
|
||||
private Integer getTargetSlot(Integer originalSlot) {
|
||||
return sameSlot ? originalSlot : MAX_SLOT - originalSlot - 1;
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,293 @@
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
import org.redisson.ClusterRunner;
|
||||
import org.redisson.ClusterRunner.ClusterProcesses;
|
||||
import org.redisson.RedisRunner;
|
||||
import org.redisson.RedisRunner.FailedToStartRedisException;
|
||||
import org.redisson.Redisson;
|
||||
import org.redisson.api.RedissonClient;
|
||||
import org.redisson.config.Config;
|
||||
import org.redisson.config.SubscriptionMode;
|
||||
import org.redisson.connection.MasterSlaveConnectionManager;
|
||||
import org.redisson.connection.balancer.RandomLoadBalancer;
|
||||
import org.springframework.data.redis.connection.ClusterInfo;
|
||||
import org.springframework.data.redis.connection.RedisClusterNode;
|
||||
import org.springframework.data.redis.connection.RedisConnectionFactory;
|
||||
import org.springframework.data.redis.connection.RedisNode.NodeType;
|
||||
import org.springframework.data.redis.core.Cursor;
|
||||
import org.springframework.data.redis.core.ScanOptions;
|
||||
import org.springframework.data.redis.core.types.RedisClientInfo;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.*;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
public class RedissonClusterConnectionTest {
|
||||
|
||||
static RedissonClient redisson;
|
||||
static RedissonClusterConnection connection;
|
||||
static ClusterProcesses process;
|
||||
|
||||
@BeforeClass
|
||||
public static void before() throws FailedToStartRedisException, IOException, InterruptedException {
|
||||
RedisRunner master1 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
RedisRunner master2 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
RedisRunner master3 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
RedisRunner slave1 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
RedisRunner slave2 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
RedisRunner slave3 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
|
||||
|
||||
ClusterRunner clusterRunner = new ClusterRunner()
|
||||
.addNode(master1, slave1)
|
||||
.addNode(master2, slave2)
|
||||
.addNode(master3, slave3);
|
||||
process = clusterRunner.run();
|
||||
|
||||
Config config = new Config();
|
||||
config.useClusterServers()
|
||||
.setSubscriptionMode(SubscriptionMode.SLAVE)
|
||||
.setLoadBalancer(new RandomLoadBalancer())
|
||||
.addNodeAddress(process.getNodes().stream().findAny().get().getRedisServerAddressAndPort());
|
||||
|
||||
redisson = Redisson.create(config);
|
||||
connection = new RedissonClusterConnection(redisson);
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void after() {
|
||||
process.shutdown();
|
||||
redisson.shutdown();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDel() {
|
||||
List<byte[]> keys = new ArrayList<>();
|
||||
for (int i = 0; i < 10; i++) {
|
||||
byte[] key = ("test" + i).getBytes();
|
||||
keys.add(key);
|
||||
connection.set(key, ("test" + i).getBytes());
|
||||
}
|
||||
assertThat(connection.del(keys.toArray(new byte[0][]))).isEqualTo(10);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testScan() {
|
||||
for (int i = 0; i < 1000; i++) {
|
||||
connection.set(("" + i).getBytes(StandardCharsets.UTF_8), ("" + i).getBytes(StandardCharsets.UTF_8));
|
||||
}
|
||||
|
||||
Cursor<byte[]> b = connection.scan(ScanOptions.scanOptions().build());
|
||||
int counter = 0;
|
||||
while (b.hasNext()) {
|
||||
b.next();
|
||||
counter++;
|
||||
}
|
||||
assertThat(counter).isEqualTo(1000);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMSet() {
|
||||
Map<byte[], byte[]> map = new HashMap<>();
|
||||
for (int i = 0; i < 10; i++) {
|
||||
map.put(("test" + i).getBytes(), ("test" + i*100).getBytes());
|
||||
}
|
||||
connection.mSet(map);
|
||||
for (Map.Entry<byte[], byte[]> entry : map.entrySet()) {
|
||||
assertThat(connection.get(entry.getKey())).isEqualTo(entry.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMGet() {
|
||||
Map<byte[], byte[]> map = new HashMap<>();
|
||||
for (int i = 0; i < 10; i++) {
|
||||
map.put(("test" + i).getBytes(), ("test" + i*100).getBytes());
|
||||
}
|
||||
connection.mSet(map);
|
||||
List<byte[]> r = connection.mGet(map.keySet().toArray(new byte[0][]));
|
||||
assertThat(r).containsExactly(map.values().toArray(new byte[0][]));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testClusterGetNodes() {
|
||||
Iterable<RedisClusterNode> nodes = connection.clusterGetNodes();
|
||||
assertThat(nodes).hasSize(6);
|
||||
for (RedisClusterNode redisClusterNode : nodes) {
|
||||
assertThat(redisClusterNode.getLinkState()).isNotNull();
|
||||
assertThat(redisClusterNode.getFlags()).isNotEmpty();
|
||||
assertThat(redisClusterNode.getHost()).isNotNull();
|
||||
assertThat(redisClusterNode.getPort()).isNotNull();
|
||||
assertThat(redisClusterNode.getId()).isNotNull();
|
||||
assertThat(redisClusterNode.getType()).isNotNull();
|
||||
if (redisClusterNode.getType() == NodeType.MASTER) {
|
||||
assertThat(redisClusterNode.getSlotRange().getSlots()).isNotEmpty();
|
||||
} else {
|
||||
assertThat(redisClusterNode.getMasterId()).isNotNull();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testClusterGetNodesMaster() {
|
||||
Iterable<RedisClusterNode> nodes = connection.clusterGetNodes();
|
||||
for (RedisClusterNode redisClusterNode : nodes) {
|
||||
if (redisClusterNode.getType() == NodeType.MASTER) {
|
||||
Collection<RedisClusterNode> slaves = connection.clusterGetReplicas(redisClusterNode);
|
||||
assertThat(slaves).hasSize(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testClusterGetMasterSlaveMap() {
|
||||
Map<RedisClusterNode, Collection<RedisClusterNode>> map = connection.clusterGetMasterReplicaMap();
|
||||
assertThat(map).hasSize(3);
|
||||
for (Collection<RedisClusterNode> slaves : map.values()) {
|
||||
assertThat(slaves).hasSize(1);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testClusterGetSlotForKey() {
|
||||
Integer slot = connection.clusterGetSlotForKey("123".getBytes());
|
||||
assertThat(slot).isNotNull();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testClusterGetNodeForSlot() {
|
||||
RedisClusterNode node1 = connection.clusterGetNodeForSlot(1);
|
||||
RedisClusterNode node2 = connection.clusterGetNodeForSlot(16000);
|
||||
assertThat(node1.getId()).isNotEqualTo(node2.getId());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testClusterGetNodeForKey() {
|
||||
RedisClusterNode node = connection.clusterGetNodeForKey("123".getBytes());
|
||||
assertThat(node).isNotNull();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testClusterGetClusterInfo() {
|
||||
ClusterInfo info = connection.clusterGetClusterInfo();
|
||||
assertThat(info.getSlotsFail()).isEqualTo(0);
|
||||
assertThat(info.getSlotsOk()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT);
|
||||
assertThat(info.getSlotsAssigned()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testClusterAddRemoveSlots() {
|
||||
RedisClusterNode master = getFirstMaster();
|
||||
Integer slot = master.getSlotRange().getSlots().iterator().next();
|
||||
connection.clusterDeleteSlots(master, slot);
|
||||
connection.clusterAddSlots(master, slot);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testClusterCountKeysInSlot() {
|
||||
Long t = connection.clusterCountKeysInSlot(1);
|
||||
assertThat(t).isZero();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testClusterMeetForget() {
|
||||
RedisClusterNode master = getFirstMaster();
|
||||
connection.clusterForget(master);
|
||||
connection.clusterMeet(master);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testClusterGetKeysInSlot() {
|
||||
List<byte[]> keys = connection.clusterGetKeysInSlot(12, 10);
|
||||
assertThat(keys).isEmpty();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testClusterPing() {
|
||||
RedisClusterNode master = getFirstMaster();
|
||||
String res = connection.ping(master);
|
||||
assertThat(res).isEqualTo("PONG");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDbSize() {
|
||||
RedisClusterNode master = getFirstMaster();
|
||||
Long size = connection.dbSize(master);
|
||||
assertThat(size).isZero();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInfo() {
|
||||
RedisClusterNode master = getFirstMaster();
|
||||
Properties info = connection.info(master);
|
||||
assertThat(info.size()).isGreaterThan(10);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDelPipeline() {
|
||||
byte[] k = "key".getBytes();
|
||||
byte[] v = "val".getBytes();
|
||||
connection.set(k, v);
|
||||
|
||||
connection.openPipeline();
|
||||
connection.get(k);
|
||||
connection.del(k);
|
||||
List<Object> results = connection.closePipeline();
|
||||
byte[] val = (byte[])results.get(0);
|
||||
assertThat(val).isEqualTo(v);
|
||||
Long res = (Long) results.get(1);
|
||||
assertThat(res).isEqualTo(1);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testResetConfigStats() {
|
||||
RedisClusterNode master = getFirstMaster();
|
||||
connection.resetConfigStats(master);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTime() {
|
||||
RedisClusterNode master = getFirstMaster();
|
||||
Long time = connection.time(master);
|
||||
assertThat(time).isGreaterThan(1000);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetClientList() {
|
||||
RedisClusterNode master = getFirstMaster();
|
||||
List<RedisClientInfo> list = connection.getClientList(master);
|
||||
assertThat(list.size()).isGreaterThan(10);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSetConfig() {
|
||||
RedisClusterNode master = getFirstMaster();
|
||||
connection.setConfig(master, "timeout", "10");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetConfig() {
|
||||
RedisClusterNode master = getFirstMaster();
|
||||
Properties config = connection.getConfig(master, "*");
|
||||
assertThat(config.size()).isGreaterThan(20);
|
||||
}
|
||||
|
||||
protected RedisClusterNode getFirstMaster() {
|
||||
Map<RedisClusterNode, Collection<RedisClusterNode>> map = connection.clusterGetMasterReplicaMap();
|
||||
RedisClusterNode master = map.keySet().iterator().next();
|
||||
return master;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testConnectionFactoryReturnsClusterConnection() {
|
||||
RedisConnectionFactory connectionFactory = new RedissonConnectionFactory(redisson);
|
||||
|
||||
assertThat(connectionFactory.getConnection()).isInstanceOf(RedissonClusterConnection.class);
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,125 @@
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
import org.junit.Test;
|
||||
import org.springframework.data.geo.Circle;
|
||||
import org.springframework.data.geo.GeoResults;
|
||||
import org.springframework.data.geo.Point;
|
||||
import org.springframework.data.redis.connection.RedisGeoCommands;
|
||||
import org.springframework.data.redis.connection.RedisStringCommands.SetOption;
|
||||
import org.springframework.data.redis.connection.RedisZSetCommands;
|
||||
import org.springframework.data.redis.connection.zset.Tuple;
|
||||
import org.springframework.data.redis.core.Cursor;
|
||||
import org.springframework.data.redis.core.RedisTemplate;
|
||||
import org.springframework.data.redis.core.ScanOptions;
|
||||
import org.springframework.data.redis.core.SetOperations;
|
||||
import org.springframework.data.redis.core.types.Expiration;
|
||||
|
||||
import java.util.Set;
|
||||
|
||||
public class RedissonConnectionTest extends BaseConnectionTest {
|
||||
|
||||
@Test
|
||||
public void testExecute() {
|
||||
Long s = (Long) connection.execute("ttl", "key".getBytes());
|
||||
assertThat(s).isEqualTo(-2);
|
||||
connection.execute("flushDb");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRandomMembers() {
|
||||
RedisTemplate<String, Integer> redisTemplate = new RedisTemplate<>();
|
||||
redisTemplate.setConnectionFactory(new RedissonConnectionFactory(redisson));
|
||||
redisTemplate.afterPropertiesSet();
|
||||
|
||||
|
||||
SetOperations<String, Integer> ops = redisTemplate.opsForSet();
|
||||
ops.add("val", 1, 2, 3, 4);
|
||||
Set<Integer> values = redisTemplate.opsForSet().distinctRandomMembers("val", 1L);
|
||||
assertThat(values).containsAnyOf(1, 2, 3, 4);
|
||||
|
||||
Integer v = redisTemplate.opsForSet().randomMember("val");
|
||||
assertThat(v).isNotNull();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRangeByLex() {
|
||||
RedisTemplate<String, String> redisTemplate = new RedisTemplate<>();
|
||||
redisTemplate.setConnectionFactory(new RedissonConnectionFactory(redisson));
|
||||
redisTemplate.afterPropertiesSet();
|
||||
|
||||
RedisZSetCommands.Range range = new RedisZSetCommands.Range();
|
||||
range.lt("c");
|
||||
Set<String> zSetValue = redisTemplate.opsForZSet().rangeByLex("val", range);
|
||||
assertThat(zSetValue).isEmpty();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGeo() {
|
||||
RedisTemplate<String, String> redisTemplate = new RedisTemplate<>();
|
||||
redisTemplate.setConnectionFactory(new RedissonConnectionFactory(redisson));
|
||||
redisTemplate.afterPropertiesSet();
|
||||
|
||||
String key = "test_geo_key";
|
||||
Point point = new Point(116.401001, 40.119499);
|
||||
redisTemplate.opsForGeo().add(key, point, "a");
|
||||
|
||||
point = new Point(111.545998, 36.133499);
|
||||
redisTemplate.opsForGeo().add(key, point, "b");
|
||||
|
||||
point = new Point(111.483002, 36.030998);
|
||||
redisTemplate.opsForGeo().add(key, point, "c");
|
||||
Circle within = new Circle(116.401001, 40.119499, 80000);
|
||||
RedisGeoCommands.GeoRadiusCommandArgs args = RedisGeoCommands.GeoRadiusCommandArgs.newGeoRadiusArgs().includeCoordinates();
|
||||
GeoResults<RedisGeoCommands.GeoLocation<String>> res = redisTemplate.opsForGeo().radius(key, within, args);
|
||||
assertThat(res.getContent().get(0).getContent().getName()).isEqualTo("a");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testZSet() {
|
||||
connection.zAdd(new byte[] {1}, -1, new byte[] {1});
|
||||
connection.zAdd(new byte[] {1}, 2, new byte[] {2});
|
||||
connection.zAdd(new byte[] {1}, 10, new byte[] {3});
|
||||
|
||||
assertThat(connection.zRangeByScore(new byte[] {1}, Double.NEGATIVE_INFINITY, 5))
|
||||
.containsOnly(new byte[] {1}, new byte[] {2});
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEcho() {
|
||||
assertThat(connection.echo("test".getBytes())).isEqualTo("test".getBytes());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSetGet() {
|
||||
connection.set("key".getBytes(), "value".getBytes());
|
||||
assertThat(connection.get("key".getBytes())).isEqualTo("value".getBytes());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSetExpiration() {
|
||||
assertThat(connection.set("key".getBytes(), "value".getBytes(), Expiration.milliseconds(111122), SetOption.SET_IF_ABSENT)).isTrue();
|
||||
assertThat(connection.get("key".getBytes())).isEqualTo("value".getBytes());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testHSetGet() {
|
||||
assertThat(connection.hSet("key".getBytes(), "field".getBytes(), "value".getBytes())).isTrue();
|
||||
assertThat(connection.hGet("key".getBytes(), "field".getBytes())).isEqualTo("value".getBytes());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testZScan() {
|
||||
connection.zAdd("key".getBytes(), 1, "value1".getBytes());
|
||||
connection.zAdd("key".getBytes(), 2, "value2".getBytes());
|
||||
|
||||
Cursor<Tuple> t = connection.zScan("key".getBytes(), ScanOptions.scanOptions().build());
|
||||
assertThat(t.hasNext()).isTrue();
|
||||
assertThat(t.next().getValue()).isEqualTo("value1".getBytes());
|
||||
assertThat(t.hasNext()).isTrue();
|
||||
assertThat(t.next().getValue()).isEqualTo("value2".getBytes());
|
||||
}
|
||||
|
||||
|
||||
}
|
@ -0,0 +1,96 @@
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import static org.assertj.core.api.Assertions.*;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
import org.junit.Test;
|
||||
import org.redisson.BaseTest;
|
||||
import org.springframework.dao.DataAccessException;
|
||||
import org.springframework.data.redis.core.RedisOperations;
|
||||
import org.springframework.data.redis.core.SessionCallback;
|
||||
import org.springframework.data.redis.core.StringRedisTemplate;
|
||||
import org.springframework.data.redis.core.ValueOperations;
|
||||
|
||||
public class RedissonMultiConnectionTest extends BaseConnectionTest {
|
||||
|
||||
@Test
|
||||
public void testBroken() throws InterruptedException {
|
||||
StringRedisTemplate stringRedisTemplate = new StringRedisTemplate(new RedissonConnectionFactory(redisson));
|
||||
ExecutorService e = Executors.newFixedThreadPool(32);
|
||||
AtomicBoolean hasErrors = new AtomicBoolean();
|
||||
for (int i = 0; i < 10; i++) {
|
||||
e.submit(() -> {
|
||||
stringRedisTemplate.execute(new SessionCallback<Void>() {
|
||||
@Override
|
||||
public Void execute(RedisOperations operations) throws DataAccessException {
|
||||
try {
|
||||
ValueOperations<String, String> valueOps = operations.opsForValue();
|
||||
operations.multi();
|
||||
valueOps.set("test3", "value");
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
hasErrors.set(true);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
});
|
||||
stringRedisTemplate.execute(new SessionCallback<Void>() {
|
||||
@Override
|
||||
public Void execute(RedisOperations operations) throws DataAccessException {
|
||||
try {
|
||||
ValueOperations<String, String> valueOps = operations.opsForValue();
|
||||
valueOps.set("test1", "value");
|
||||
assertThat(valueOps.get("test1")).isEqualTo("value");
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
hasErrors.set(true);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
e.shutdown();
|
||||
e.awaitTermination(1, TimeUnit.MINUTES);
|
||||
assertThat(hasErrors).isFalse();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEcho() {
|
||||
RedissonConnection connection = new RedissonConnection(redisson);
|
||||
connection.multi();
|
||||
assertThat(connection.echo("test".getBytes())).isNull();
|
||||
assertThat(connection.exec().iterator().next()).isEqualTo("test".getBytes());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSetGet() {
|
||||
RedissonConnection connection = new RedissonConnection(redisson);
|
||||
connection.multi();
|
||||
assertThat(connection.isQueueing()).isTrue();
|
||||
connection.set("key".getBytes(), "value".getBytes());
|
||||
assertThat(connection.get("key".getBytes())).isNull();
|
||||
|
||||
List<Object> result = connection.exec();
|
||||
assertThat(connection.isQueueing()).isFalse();
|
||||
assertThat(result.get(0)).isEqualTo("value".getBytes());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testHSetGet() {
|
||||
RedissonConnection connection = new RedissonConnection(redisson);
|
||||
connection.multi();
|
||||
assertThat(connection.hSet("key".getBytes(), "field".getBytes(), "value".getBytes())).isNull();
|
||||
assertThat(connection.hGet("key".getBytes(), "field".getBytes())).isNull();
|
||||
|
||||
List<Object> result = connection.exec();
|
||||
assertThat((Boolean)result.get(0)).isTrue();
|
||||
assertThat(result.get(1)).isEqualTo("value".getBytes());
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,64 @@
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import static org.assertj.core.api.Assertions.*;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.junit.Test;
|
||||
import org.redisson.BaseTest;
|
||||
|
||||
public class RedissonPipelineConnectionTest extends BaseConnectionTest {
|
||||
|
||||
@Test
|
||||
public void testDel() {
|
||||
RedissonConnection connection = new RedissonConnection(redisson);
|
||||
byte[] key = "my_key".getBytes();
|
||||
byte[] value = "my_value".getBytes();
|
||||
connection.set(key, value);
|
||||
|
||||
connection.openPipeline();
|
||||
connection.get(key);
|
||||
connection.del(key);
|
||||
|
||||
List<Object> results = connection.closePipeline();
|
||||
byte[] val = (byte[])results.get(0);
|
||||
assertThat(val).isEqualTo(value);
|
||||
Long res = (Long) results.get(1);
|
||||
assertThat(res).isEqualTo(1);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEcho() {
|
||||
RedissonConnection connection = new RedissonConnection(redisson);
|
||||
connection.openPipeline();
|
||||
assertThat(connection.echo("test".getBytes())).isNull();
|
||||
assertThat(connection.closePipeline().iterator().next()).isEqualTo("test".getBytes());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSetGet() {
|
||||
RedissonConnection connection = new RedissonConnection(redisson);
|
||||
connection.openPipeline();
|
||||
assertThat(connection.isPipelined()).isTrue();
|
||||
connection.set("key".getBytes(), "value".getBytes());
|
||||
assertThat(connection.get("key".getBytes())).isNull();
|
||||
|
||||
List<Object> result = connection.closePipeline();
|
||||
assertThat(connection.isPipelined()).isFalse();
|
||||
assertThat(result.get(0)).isEqualTo("value".getBytes());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testHSetGet() {
|
||||
RedissonConnection connection = new RedissonConnection(redisson);
|
||||
connection.openPipeline();
|
||||
assertThat(connection.hSet("key".getBytes(), "field".getBytes(), "value".getBytes())).isNull();
|
||||
assertThat(connection.hGet("key".getBytes(), "field".getBytes())).isNull();
|
||||
|
||||
List<Object> result = connection.closePipeline();
|
||||
assertThat((Boolean)result.get(0)).isTrue();
|
||||
assertThat(result.get(1)).isEqualTo("value".getBytes());
|
||||
}
|
||||
|
||||
|
||||
}
|
@ -0,0 +1,193 @@
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.junit.After;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
import org.redisson.*;
|
||||
import org.redisson.ClusterRunner.ClusterProcesses;
|
||||
import org.redisson.RedisRunner.FailedToStartRedisException;
|
||||
import org.redisson.api.RedissonClient;
|
||||
import org.redisson.client.codec.StringCodec;
|
||||
import org.redisson.client.protocol.RedisCommands;
|
||||
import org.redisson.config.Config;
|
||||
import org.redisson.config.SubscriptionMode;
|
||||
import org.redisson.connection.balancer.RandomLoadBalancer;
|
||||
import org.redisson.reactive.CommandReactiveService;
|
||||
import org.springframework.data.redis.RedisSystemException;
|
||||
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.time.Duration;
|
||||
import java.util.Arrays;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.assertj.core.api.Assertions.assertThatThrownBy;
|
||||
import static org.redisson.connection.MasterSlaveConnectionManager.MAX_SLOT;
|
||||
|
||||
@RunWith(Parameterized.class)
|
||||
public class RedissonReactiveClusterKeyCommandsTest {
|
||||
|
||||
@Parameterized.Parameters(name= "{index} - same slot = {0}; has ttl = {1}")
|
||||
public static Iterable<Object[]> data() {
|
||||
return Arrays.asList(new Object[][] {
|
||||
{false, false},
|
||||
{true, false},
|
||||
{false, true},
|
||||
{true, true}
|
||||
});
|
||||
}
|
||||
|
||||
@Parameterized.Parameter(0)
|
||||
public boolean sameSlot;
|
||||
|
||||
@Parameterized.Parameter(1)
|
||||
public boolean hasTtl;
|
||||
|
||||
static RedissonClient redisson;
|
||||
static RedissonReactiveRedisClusterConnection connection;
|
||||
static ClusterProcesses process;
|
||||
|
||||
ByteBuffer originalKey = ByteBuffer.wrap("key".getBytes());
|
||||
ByteBuffer newKey = ByteBuffer.wrap("unset".getBytes());
|
||||
ByteBuffer value = ByteBuffer.wrap("value".getBytes());
|
||||
|
||||
@BeforeClass
|
||||
public static void before() throws FailedToStartRedisException, IOException, InterruptedException {
|
||||
RedisRunner master1 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
RedisRunner master2 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
RedisRunner master3 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
RedisRunner slave1 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
RedisRunner slave2 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
RedisRunner slave3 = new RedisRunner().randomPort().randomDir().nosave();
|
||||
|
||||
|
||||
ClusterRunner clusterRunner = new ClusterRunner()
|
||||
.addNode(master1, slave1)
|
||||
.addNode(master2, slave2)
|
||||
.addNode(master3, slave3);
|
||||
process = clusterRunner.run();
|
||||
|
||||
Config config = new Config();
|
||||
config.useClusterServers()
|
||||
.setSubscriptionMode(SubscriptionMode.SLAVE)
|
||||
.setLoadBalancer(new RandomLoadBalancer())
|
||||
.addNodeAddress(process.getNodes().stream().findAny().get().getRedisServerAddressAndPort());
|
||||
|
||||
redisson = Redisson.create(config);
|
||||
connection = new RedissonReactiveRedisClusterConnection(((RedissonReactive)redisson.reactive()).getCommandExecutor());
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void after() {
|
||||
process.shutdown();
|
||||
redisson.shutdown();
|
||||
}
|
||||
|
||||
@After
|
||||
public void cleanup() {
|
||||
connection.keyCommands().del(originalKey)
|
||||
.and(connection.keyCommands().del(newKey))
|
||||
.block();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRename() {
|
||||
connection.stringCommands().set(originalKey, value).block();
|
||||
|
||||
if (hasTtl) {
|
||||
connection.keyCommands().expire(originalKey, Duration.ofSeconds(1000)).block();
|
||||
}
|
||||
|
||||
Integer originalSlot = getSlotForKey(originalKey);
|
||||
newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot));
|
||||
|
||||
Boolean response = connection.keyCommands().rename(originalKey, newKey).block();
|
||||
|
||||
assertThat(response).isTrue();
|
||||
|
||||
final ByteBuffer newKeyValue = connection.stringCommands().get(newKey).block();
|
||||
assertThat(newKeyValue).isEqualTo(value);
|
||||
if (hasTtl) {
|
||||
assertThat(connection.keyCommands().ttl(newKey).block()).isGreaterThan(0);
|
||||
} else {
|
||||
assertThat(connection.keyCommands().ttl(newKey).block()).isEqualTo(-1);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRename_keyNotExist() {
|
||||
Integer originalSlot = getSlotForKey(originalKey);
|
||||
newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot));
|
||||
|
||||
if (sameSlot) {
|
||||
// This is a quirk of the implementation - since same-slot renames use the non-cluster version,
|
||||
// the result is a Redis error. This behavior matches other spring-data-redis implementations
|
||||
assertThatThrownBy(() -> connection.keyCommands().rename(originalKey, newKey).block())
|
||||
.isInstanceOf(RedisSystemException.class);
|
||||
|
||||
} else {
|
||||
Boolean response = connection.keyCommands().rename(originalKey, newKey).block();
|
||||
|
||||
assertThat(response).isTrue();
|
||||
|
||||
final ByteBuffer newKeyValue = connection.stringCommands().get(newKey).block();
|
||||
assertThat(newKeyValue).isEqualTo(null);
|
||||
}
|
||||
}
|
||||
|
||||
protected ByteBuffer getNewKeyForSlot(String originalKey, Integer targetSlot) {
|
||||
int counter = 0;
|
||||
|
||||
ByteBuffer newKey = ByteBuffer.wrap((originalKey + counter).getBytes());
|
||||
|
||||
Integer newKeySlot = getSlotForKey(newKey);
|
||||
|
||||
while(!newKeySlot.equals(targetSlot)) {
|
||||
counter++;
|
||||
newKey = ByteBuffer.wrap((originalKey + counter).getBytes());
|
||||
newKeySlot = getSlotForKey(newKey);
|
||||
}
|
||||
|
||||
return newKey;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRenameNX() {
|
||||
connection.stringCommands().set(originalKey, value).block();
|
||||
if (hasTtl) {
|
||||
connection.keyCommands().expire(originalKey, Duration.ofSeconds(1000)).block();
|
||||
}
|
||||
|
||||
Integer originalSlot = getSlotForKey(originalKey);
|
||||
newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot));
|
||||
|
||||
Boolean result = connection.keyCommands().renameNX(originalKey, newKey).block();
|
||||
|
||||
assertThat(result).isTrue();
|
||||
assertThat(connection.stringCommands().get(newKey).block()).isEqualTo(value);
|
||||
if (hasTtl) {
|
||||
assertThat(connection.keyCommands().ttl(newKey).block()).isGreaterThan(0);
|
||||
} else {
|
||||
assertThat(connection.keyCommands().ttl(newKey).block()).isEqualTo(-1);
|
||||
}
|
||||
|
||||
connection.stringCommands().set(originalKey, value).block();
|
||||
|
||||
result = connection.keyCommands().renameNX(originalKey, newKey).block();
|
||||
|
||||
assertThat(result).isFalse();
|
||||
}
|
||||
|
||||
private Integer getTargetSlot(Integer originalSlot) {
|
||||
return sameSlot ? originalSlot : MAX_SLOT - originalSlot - 1;
|
||||
}
|
||||
|
||||
private Integer getSlotForKey(ByteBuffer key) {
|
||||
return (Integer) connection.read(null, StringCodec.INSTANCE, RedisCommands.KEYSLOT, key.array()).block();
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,33 @@
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.junit.Test;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection;
|
||||
import org.springframework.data.redis.connection.ReturnType;
|
||||
import reactor.core.publisher.Flux;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.List;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
public class RedissonScriptReactiveTest extends BaseConnectionTest {
|
||||
|
||||
@Test
|
||||
public void testEval() {
|
||||
RedissonConnectionFactory factory = new RedissonConnectionFactory(redisson);
|
||||
ReactiveRedisConnection cc = factory.getReactiveConnection();
|
||||
|
||||
String s = "local ret = {}" +
|
||||
"local mysqlKeys = {}" +
|
||||
"table.insert(ret, 'test1')" +
|
||||
"table.insert(ret, 'test2')" +
|
||||
"table.insert(ret, 'test3')" +
|
||||
"table.insert(ret, mysqlKeys)" +
|
||||
"return ret";
|
||||
Flux<List<Object>> ss = cc.scriptingCommands().eval(ByteBuffer.wrap(s.getBytes()), ReturnType.MULTI, 0);
|
||||
List<Object> r = ss.blockFirst();
|
||||
assertThat(r.get(2)).isEqualTo(ByteBuffer.wrap("test3".getBytes()));
|
||||
assertThat((List) r.get(3)).isEmpty();
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,132 @@
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.redisson.RedisRunner;
|
||||
import org.redisson.RedisRunner.FailedToStartRedisException;
|
||||
import org.redisson.Redisson;
|
||||
import org.redisson.api.RedissonClient;
|
||||
import org.redisson.config.Config;
|
||||
import org.redisson.connection.balancer.RandomLoadBalancer;
|
||||
import org.springframework.data.redis.connection.RedisSentinelConnection;
|
||||
import org.springframework.data.redis.connection.RedisServer;
|
||||
|
||||
public class RedissonSentinelConnectionTest {
|
||||
|
||||
RedissonClient redisson;
|
||||
RedisSentinelConnection connection;
|
||||
RedisRunner.RedisProcess master;
|
||||
RedisRunner.RedisProcess slave1;
|
||||
RedisRunner.RedisProcess slave2;
|
||||
RedisRunner.RedisProcess sentinel1;
|
||||
RedisRunner.RedisProcess sentinel2;
|
||||
RedisRunner.RedisProcess sentinel3;
|
||||
|
||||
@Before
|
||||
public void before() throws FailedToStartRedisException, IOException, InterruptedException {
|
||||
master = new RedisRunner()
|
||||
.nosave()
|
||||
.randomDir()
|
||||
.run();
|
||||
slave1 = new RedisRunner()
|
||||
.port(6380)
|
||||
.nosave()
|
||||
.randomDir()
|
||||
.slaveof("127.0.0.1", 6379)
|
||||
.run();
|
||||
slave2 = new RedisRunner()
|
||||
.port(6381)
|
||||
.nosave()
|
||||
.randomDir()
|
||||
.slaveof("127.0.0.1", 6379)
|
||||
.run();
|
||||
sentinel1 = new RedisRunner()
|
||||
.nosave()
|
||||
.randomDir()
|
||||
.port(26379)
|
||||
.sentinel()
|
||||
.sentinelMonitor("myMaster", "127.0.0.1", 6379, 2)
|
||||
.run();
|
||||
sentinel2 = new RedisRunner()
|
||||
.nosave()
|
||||
.randomDir()
|
||||
.port(26380)
|
||||
.sentinel()
|
||||
.sentinelMonitor("myMaster", "127.0.0.1", 6379, 2)
|
||||
.run();
|
||||
sentinel3 = new RedisRunner()
|
||||
.nosave()
|
||||
.randomDir()
|
||||
.port(26381)
|
||||
.sentinel()
|
||||
.sentinelMonitor("myMaster", "127.0.0.1", 6379, 2)
|
||||
.run();
|
||||
|
||||
Thread.sleep(5000);
|
||||
|
||||
Config config = new Config();
|
||||
config.useSentinelServers()
|
||||
.setLoadBalancer(new RandomLoadBalancer())
|
||||
.addSentinelAddress(sentinel3.getRedisServerAddressAndPort()).setMasterName("myMaster");
|
||||
redisson = Redisson.create(config);
|
||||
|
||||
RedissonConnectionFactory factory = new RedissonConnectionFactory(redisson);
|
||||
connection = factory.getSentinelConnection();
|
||||
}
|
||||
|
||||
@After
|
||||
public void after() {
|
||||
sentinel1.stop();
|
||||
sentinel2.stop();
|
||||
sentinel3.stop();
|
||||
master.stop();
|
||||
slave1.stop();
|
||||
slave2.stop();
|
||||
|
||||
redisson.shutdown();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMasters() {
|
||||
Collection<RedisServer> masters = connection.masters();
|
||||
assertThat(masters).hasSize(1);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSlaves() {
|
||||
Collection<RedisServer> masters = connection.masters();
|
||||
Collection<RedisServer> slaves = connection.replicas(masters.iterator().next());
|
||||
assertThat(slaves).hasSize(2);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRemove() {
|
||||
Collection<RedisServer> masters = connection.masters();
|
||||
connection.remove(masters.iterator().next());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMonitor() {
|
||||
Collection<RedisServer> masters = connection.masters();
|
||||
RedisServer master = masters.iterator().next();
|
||||
master.setName(master.getName() + ":");
|
||||
connection.monitor(master);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFailover() throws InterruptedException {
|
||||
Collection<RedisServer> masters = connection.masters();
|
||||
connection.failover(masters.iterator().next());
|
||||
|
||||
Thread.sleep(10000);
|
||||
|
||||
RedisServer newMaster = connection.masters().iterator().next();
|
||||
assertThat(masters.iterator().next().getPort()).isNotEqualTo(newMaster.getPort());
|
||||
}
|
||||
}
|
@ -0,0 +1,76 @@
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import org.junit.Test;
|
||||
import org.springframework.data.redis.connection.stream.*;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
/**
|
||||
* @author Nikita Koksharov
|
||||
*/
|
||||
public class RedissonStreamTest extends BaseConnectionTest {
|
||||
|
||||
@Test
|
||||
public void testPending() {
|
||||
connection.streamCommands().xGroupCreate("test".getBytes(), "testGroup", ReadOffset.latest(), true);
|
||||
|
||||
PendingMessages p = connection.streamCommands().xPending("test".getBytes(), Consumer.from("testGroup", "test1"));
|
||||
assertThat(p.size()).isEqualTo(0);
|
||||
|
||||
connection.streamCommands().xAdd("test".getBytes(), Collections.singletonMap("1".getBytes(), "1".getBytes()));
|
||||
connection.streamCommands().xAdd("test".getBytes(), Collections.singletonMap("2".getBytes(), "2".getBytes()));
|
||||
connection.streamCommands().xAdd("test".getBytes(), Collections.singletonMap("3".getBytes(), "3".getBytes()));
|
||||
|
||||
List<ByteRecord> l = connection.streamCommands().xReadGroup(Consumer.from("testGroup", "test1"), StreamOffset.create("test".getBytes(), ReadOffset.from(">")));
|
||||
assertThat(l.size()).isEqualTo(3);
|
||||
|
||||
PendingMessages p2 = connection.streamCommands().xPending("test".getBytes(), Consumer.from("testGroup", "test1"));
|
||||
assertThat(p2.size()).isEqualTo(3);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGroups() {
|
||||
connection.streamCommands().xGroupCreate("test".getBytes(), "testGroup", ReadOffset.latest(), true);
|
||||
connection.streamCommands().xAdd("test".getBytes(), Collections.singletonMap("1".getBytes(), "1".getBytes()));
|
||||
connection.streamCommands().xAdd("test".getBytes(), Collections.singletonMap("2".getBytes(), "2".getBytes()));
|
||||
connection.streamCommands().xAdd("test".getBytes(), Collections.singletonMap("3".getBytes(), "3".getBytes()));
|
||||
|
||||
StreamInfo.XInfoGroups groups = connection.streamCommands().xInfoGroups("test".getBytes());
|
||||
assertThat(groups.size()).isEqualTo(1);
|
||||
assertThat(groups.get(0).groupName()).isEqualTo("testGroup");
|
||||
assertThat(groups.get(0).pendingCount()).isEqualTo(0);
|
||||
assertThat(groups.get(0).consumerCount()).isEqualTo(0);
|
||||
assertThat(groups.get(0).lastDeliveredId()).isEqualTo("0-0");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testConsumers() {
|
||||
connection.streamCommands().xGroupCreate("test".getBytes(), "testGroup", ReadOffset.latest(), true);
|
||||
connection.streamCommands().xAdd("test".getBytes(), Collections.singletonMap("1".getBytes(), "1".getBytes()));
|
||||
connection.streamCommands().xAdd("test".getBytes(), Collections.singletonMap("2".getBytes(), "2".getBytes()));
|
||||
connection.streamCommands().xAdd("test".getBytes(), Collections.singletonMap("3".getBytes(), "3".getBytes()));
|
||||
|
||||
connection.streamCommands().xGroupCreate("test".getBytes(), "testGroup2", ReadOffset.latest(), true);
|
||||
connection.streamCommands().xAdd("test".getBytes(), Collections.singletonMap("1".getBytes(), "1".getBytes()));
|
||||
connection.streamCommands().xAdd("test".getBytes(), Collections.singletonMap("2".getBytes(), "2".getBytes()));
|
||||
connection.streamCommands().xAdd("test".getBytes(), Collections.singletonMap("3".getBytes(), "3".getBytes()));
|
||||
|
||||
List<ByteRecord> list = connection.streamCommands().xReadGroup(Consumer.from("testGroup", "consumer1"),
|
||||
StreamOffset.create("test".getBytes(), ReadOffset.lastConsumed()));
|
||||
assertThat(list.size()).isEqualTo(6);
|
||||
|
||||
StreamInfo.XInfoStream info = connection.streamCommands().xInfo("test".getBytes());
|
||||
assertThat(info.streamLength()).isEqualTo(6);
|
||||
|
||||
StreamInfo.XInfoConsumers s1 = connection.streamCommands().xInfoConsumers("test".getBytes(), "testGroup");
|
||||
assertThat(s1.getConsumerCount()).isEqualTo(1);
|
||||
assertThat(s1.get(0).consumerName()).isEqualTo("consumer1");
|
||||
assertThat(s1.get(0).pendingCount()).isEqualTo(6);
|
||||
assertThat(s1.get(0).idleTimeMs()).isLessThan(100L);
|
||||
assertThat(s1.get(0).groupName()).isEqualTo("testGroup");
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,113 @@
|
||||
package org.redisson.spring.data.connection;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import org.awaitility.Awaitility;
|
||||
import org.awaitility.Duration;
|
||||
import org.junit.Test;
|
||||
import org.springframework.data.redis.connection.ReactiveRedisConnection;
|
||||
import org.springframework.data.redis.connection.ReactiveSubscription;
|
||||
|
||||
import org.springframework.data.redis.core.ReactiveStringRedisTemplate;
|
||||
import org.springframework.data.redis.listener.ChannelTopic;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
public class RedissonSubscribeReactiveTest extends BaseConnectionTest {
|
||||
|
||||
@Test
|
||||
public void testPubSub() {
|
||||
RedissonConnectionFactory factory = new RedissonConnectionFactory(redisson);
|
||||
AtomicLong counter = new AtomicLong();
|
||||
|
||||
ReactiveStringRedisTemplate template = new ReactiveStringRedisTemplate(factory);
|
||||
template.listenTo(ChannelTopic.of("test")).flatMap(message -> {
|
||||
counter.incrementAndGet();
|
||||
return Mono.empty();
|
||||
}).subscribe();
|
||||
|
||||
for (int i = 0; i < 40; i++) {
|
||||
ReactiveRedisConnection connection = factory.getReactiveConnection();
|
||||
connection.pubSubCommands().publish(ByteBuffer.wrap("test".getBytes()), ByteBuffer.wrap("msg".getBytes())).block();
|
||||
}
|
||||
|
||||
Awaitility.await().atMost(Duration.ONE_SECOND).untilAsserted(() -> {
|
||||
assertThat(counter.get()).isEqualTo(40);
|
||||
});
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTemplate() {
|
||||
RedissonConnectionFactory factory = new RedissonConnectionFactory(redisson);
|
||||
AtomicLong counter = new AtomicLong();
|
||||
|
||||
ReactiveStringRedisTemplate template = new ReactiveStringRedisTemplate(factory);
|
||||
template.listenTo(ChannelTopic.of("test")).flatMap(message -> {
|
||||
counter.incrementAndGet();
|
||||
return Mono.empty();
|
||||
}).subscribe();
|
||||
|
||||
template.listenTo(ChannelTopic.of("test2")).flatMap(message -> {
|
||||
counter.incrementAndGet();
|
||||
return Mono.empty();
|
||||
}).subscribe();
|
||||
|
||||
ReactiveRedisConnection connection = factory.getReactiveConnection();
|
||||
connection.pubSubCommands().publish(ByteBuffer.wrap("test".getBytes()), ByteBuffer.wrap("msg".getBytes())).block();
|
||||
|
||||
Awaitility.await().atMost(Duration.ONE_SECOND)
|
||||
.until(() -> counter.get() == 1);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSubscribe() {
|
||||
RedissonConnectionFactory factory = new RedissonConnectionFactory(redisson);
|
||||
ReactiveRedisConnection connection = factory.getReactiveConnection();
|
||||
Mono<ReactiveSubscription> s = connection.pubSubCommands().createSubscription();
|
||||
AtomicReference<byte[]> msg = new AtomicReference<byte[]>();
|
||||
ReactiveSubscription ss = s.block();
|
||||
|
||||
ss.subscribe(ByteBuffer.wrap("test".getBytes())).block();
|
||||
ss.receive().doOnEach(message -> {
|
||||
msg.set(message.get().getMessage().array());
|
||||
}).subscribe();
|
||||
|
||||
connection.pubSubCommands().publish(ByteBuffer.wrap("test".getBytes()), ByteBuffer.wrap("msg".getBytes())).block();
|
||||
|
||||
Awaitility.await().atMost(Duration.ONE_SECOND)
|
||||
.until(() -> Arrays.equals("msg".getBytes(), msg.get()));
|
||||
|
||||
ss.unsubscribe();
|
||||
|
||||
connection.pubSubCommands().publish(ByteBuffer.wrap("test".getBytes()), ByteBuffer.wrap("msg".getBytes())).block();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUnSubscribe() {
|
||||
RedissonConnectionFactory factory = new RedissonConnectionFactory(redisson);
|
||||
ReactiveRedisConnection connection = factory.getReactiveConnection();
|
||||
Mono<ReactiveSubscription> s = connection.pubSubCommands().createSubscription();
|
||||
AtomicReference<byte[]> msg = new AtomicReference<byte[]>();
|
||||
ReactiveSubscription ss = s.block();
|
||||
|
||||
ss.subscribe(ByteBuffer.wrap("test".getBytes())).block();
|
||||
ss.receive().doOnEach(message -> {
|
||||
msg.set(message.get().getMessage().array());
|
||||
}).subscribe();
|
||||
|
||||
connection.pubSubCommands().publish(ByteBuffer.wrap("test".getBytes()), ByteBuffer.wrap("msg".getBytes())).block();
|
||||
Awaitility.await().atMost(Duration.ONE_SECOND)
|
||||
.until(() -> Arrays.equals("msg".getBytes(), msg.get()));
|
||||
|
||||
ss.unsubscribe();
|
||||
|
||||
connection.pubSubCommands().publish(ByteBuffer.wrap("test".getBytes()), ByteBuffer.wrap("msg".getBytes())).block();
|
||||
|
||||
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,144 @@
package org.redisson.spring.data.connection;

import org.awaitility.Awaitility;
import org.awaitility.Duration;
import org.junit.Test;
import org.redisson.RedisRunner;
import org.redisson.Redisson;
import org.redisson.api.RedissonClient;
import org.redisson.config.Config;
import org.springframework.data.redis.connection.Message;
import org.springframework.data.redis.connection.MessageListener;
import org.springframework.data.redis.connection.RedisConnection;
import org.springframework.data.redis.listener.ChannelTopic;
import org.springframework.data.redis.listener.PatternTopic;
import org.springframework.data.redis.listener.RedisMessageListenerContainer;

import java.io.IOException;
import java.util.Arrays;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

import static org.assertj.core.api.Assertions.assertThat;

public class RedissonSubscribeTest extends BaseConnectionTest {

    @Test
    public void testListenersDuplication() {
        Queue<byte[]> msg = new ConcurrentLinkedQueue<>();
        MessageListener aListener = (message, pattern) -> {
            msg.add(message.getBody());
        };

        RedissonConnectionFactory factory = new RedissonConnectionFactory(redisson);
        RedisMessageListenerContainer container = new RedisMessageListenerContainer();
        container.setConnectionFactory(factory);
        container.addMessageListener(aListener,
                Arrays.asList(new ChannelTopic("a"), new ChannelTopic("b")));
        container.addMessageListener(aListener,
                Arrays.asList(new PatternTopic("c*")));
        container.afterPropertiesSet();
        container.start();

        RedisConnection c = factory.getConnection();
        c.publish("a".getBytes(), "msg".getBytes());

        Awaitility.await().atMost(Duration.ONE_SECOND)
                .untilAsserted(() -> {
                    assertThat(msg).containsExactly("msg".getBytes());
                });
    }

    @Test
    public void testPatterTopic() throws IOException, InterruptedException {
        RedisRunner.RedisProcess instance = new RedisRunner()
                .nosave()
                .randomPort()
                .randomDir()
                .notifyKeyspaceEvents(
                        RedisRunner.KEYSPACE_EVENTS_OPTIONS.K,
                        RedisRunner.KEYSPACE_EVENTS_OPTIONS.g,
                        RedisRunner.KEYSPACE_EVENTS_OPTIONS.E,
                        RedisRunner.KEYSPACE_EVENTS_OPTIONS.$)
                .run();

        Config config = new Config();
        config.useSingleServer().setAddress(instance.getRedisServerAddressAndPort()).setPingConnectionInterval(0);
        RedissonClient redisson = Redisson.create(config);

        RedissonConnectionFactory factory = new RedissonConnectionFactory(redisson);

        RedisMessageListenerContainer container = new RedisMessageListenerContainer();
        container.setConnectionFactory(factory);
        AtomicInteger counterTest = new AtomicInteger();
        container.addMessageListener(new MessageListener() {
            @Override
            public void onMessage(Message message, byte[] pattern) {
                counterTest.incrementAndGet();
            }
        }, new PatternTopic("__keyspace@0__:mykey"));
        container.addMessageListener(new MessageListener() {
            @Override
            public void onMessage(Message message, byte[] pattern) {
                counterTest.incrementAndGet();
            }
        }, new PatternTopic("__keyevent@0__:del"));
        container.afterPropertiesSet();
        container.start();
        assertThat(container.isRunning()).isTrue();

        RedisConnection c = factory.getConnection();
        c.set("mykey".getBytes(), "2".getBytes());
        c.del("mykey".getBytes());

        Awaitility.await().atMost(Duration.FIVE_SECONDS).until(() -> {
            return counterTest.get() == 3;
        });

        container.stop();
        redisson.shutdown();
    }

    @Test
    public void testSubscribe() {
        RedissonConnection connection = new RedissonConnection(redisson);
        AtomicReference<byte[]> msg = new AtomicReference<byte[]>();
        connection.subscribe(new MessageListener() {
            @Override
            public void onMessage(Message message, byte[] pattern) {
                msg.set(message.getBody());
            }
        }, "test".getBytes());

        connection.publish("test".getBytes(), "msg".getBytes());
        Awaitility.await().atMost(Duration.ONE_SECOND)
                .until(() -> Arrays.equals("msg".getBytes(), msg.get()));

        connection.getSubscription().unsubscribe();

        connection.publish("test".getBytes(), "msg".getBytes());
    }

    @Test
    public void testUnSubscribe() {
        RedissonConnection connection = new RedissonConnection(redisson);
        AtomicReference<byte[]> msg = new AtomicReference<byte[]>();
        connection.subscribe(new MessageListener() {
            @Override
            public void onMessage(Message message, byte[] pattern) {
                msg.set(message.getBody());
            }
        }, "test".getBytes());

        connection.publish("test".getBytes(), "msg".getBytes());
        Awaitility.await().atMost(Duration.ONE_SECOND)
                .until(() -> Arrays.equals("msg".getBytes(), msg.get()));

        connection.getSubscription().unsubscribe();

    }

}
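The container-based tests above wire RedisMessageListenerContainer by hand; in a Spring application the same pieces are normally registered as beans so the context starts and stops the container. A minimal sketch, assuming a local Redis at redis://127.0.0.1:6379; the PubSubConfig class name and the "orders" channel are placeholders.

import org.redisson.Redisson;
import org.redisson.api.RedissonClient;
import org.redisson.config.Config;
import org.redisson.spring.data.connection.RedissonConnectionFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.MessageListener;
import org.springframework.data.redis.listener.ChannelTopic;
import org.springframework.data.redis.listener.RedisMessageListenerContainer;

@Configuration
public class PubSubConfig {

    @Bean(destroyMethod = "shutdown")
    public RedissonClient redisson() {
        Config config = new Config();
        config.useSingleServer().setAddress("redis://127.0.0.1:6379");
        return Redisson.create(config);
    }

    @Bean
    public RedissonConnectionFactory redissonConnectionFactory(RedissonClient redisson) {
        return new RedissonConnectionFactory(redisson);
    }

    @Bean
    public RedisMessageListenerContainer listenerContainer(RedissonConnectionFactory factory) {
        // The container owns the subscription; Spring calls afterPropertiesSet()/start() for lifecycle beans
        RedisMessageListenerContainer container = new RedisMessageListenerContainer();
        container.setConnectionFactory(factory);
        MessageListener listener = (message, pattern) ->
                System.out.println("received: " + new String(message.getBody()));
        container.addMessageListener(listener, new ChannelTopic("orders"));
        return container;
    }
}

Registering the container as a bean avoids the explicit afterPropertiesSet()/start() calls seen in the tests, since the application context drives that lifecycle.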
@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--

    Copyright 2012 Nikita Koksharov

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.

-->
<configuration>

    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>%d{yyyy.MM.dd HH:mm:ss.SSS} %-5level %c{0} : %msg%n</pattern>
        </encoder>
    </appender>

    <logger name="org.redisson" additivity="true">
        <level value="trace"/>
    </logger>

    <root>
        <level value="info"/>
        <appender-ref ref="CONSOLE"/>
    </root>

</configuration>