mirror of
https://github.com/keycloak/keycloak.git
synced 2026-02-03 20:39:33 -05:00
ISPN16: Upgrade to Infinispan 16.0.5
Closes #45341 - Remove query modules - Remove unused config file - Update config file versions - Update jgroups attributes - Remove ISPN-16595 workaround - Call HotRodServer#postStart in HotRodServerRule to start caches as well as the server - Simplify cluster-ha.xml - Utilise org.infinispan.commons.util.TimeQuantity in CacheConfiguration - Cleanup when InfinispanContainer startup fails - RemoteUserSessionProvider remote query calls must not use negative values for offsets and maxResults - Remove use of deprecated org.infinispan.server.test.core.InfinispanContainer class - Use testcontainers-infinispan dependency - Explicitly utilise "legacy" metrics - Remove explicit `name-as-tags` configuration as Infinispan 16 defaults to true - Remove test configuration not required since #31807 Signed-off-by: Ryan Emerson <remerson@ibm.com> Signed-off-by: Alexander Schwartz <alexander.schwartz@ibm.com> Co-authored-by: Alexander Schwartz <alexander.schwartz@ibm.com>
This commit is contained in:
parent
0cff95581e
commit
c8635f9bf2
33 changed files with 127 additions and 668 deletions
|
|
@ -2,7 +2,6 @@
|
|||
|
||||
{jdgserver_name} exposes metrics in the endpoint `/metrics`.
|
||||
By default, they are enabled.
|
||||
We recommend enabling the attribute `name-as-tags` as it makes the metrics name independent of the cache name.
|
||||
|
||||
To configure metrics in the {jdgserver_name} server, enable them as shown in the XML below.
|
||||
|
||||
|
|
@ -11,7 +10,7 @@ To configure metrics in the {jdgserver_name} server, just enabled as shown in th
|
|||
----
|
||||
<infinispan>
|
||||
<cache-container statistics="true">
|
||||
<metrics gauges="true" histograms="false" name-as-tags="true" />
|
||||
<metrics gauges="true" histograms="false" />
|
||||
</cache-container>
|
||||
</infinispan>
|
||||
----
|
||||
|
|
|
|||
|
|
@ -74,16 +74,6 @@
|
|||
<groupId>org.infinispan</groupId>
|
||||
<artifactId>infinispan-cachestore-remote</artifactId>
|
||||
</dependency>
|
||||
<!-- required for query/search in the external Infinispan server -->
|
||||
<dependency>
|
||||
<groupId>org.infinispan</groupId>
|
||||
<artifactId>infinispan-remote-query-client</artifactId>
|
||||
</dependency>
|
||||
<!-- to be removed after https://issues.redhat.com/browse/ISPN-16220 -->
|
||||
<dependency>
|
||||
<groupId>org.infinispan</groupId>
|
||||
<artifactId>infinispan-query-dsl</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.infinispan</groupId>
|
||||
<artifactId>infinispan-component-annotations</artifactId>
|
||||
|
|
|
|||
|
|
@ -28,9 +28,7 @@ import org.infinispan.distribution.DistributionManager;
|
|||
import org.infinispan.factories.GlobalComponentRegistry;
|
||||
import org.infinispan.manager.EmbeddedCacheManager;
|
||||
import org.infinispan.remoting.transport.Address;
|
||||
import org.infinispan.remoting.transport.LocalModeAddress;
|
||||
import org.infinispan.remoting.transport.Transport;
|
||||
import org.infinispan.remoting.transport.jgroups.JGroupsAddress;
|
||||
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.jgroups.stack.IpAddress;
|
||||
|
|
@ -116,12 +114,9 @@ public class TopologyInfo {
|
|||
public boolean amIOwner(Cache<?, ?> cache, Object key) {
|
||||
Address myAddress = cache.getCacheManager().getAddress();
|
||||
Address objectOwnerAddress = getOwnerAddress(cache, key);
|
||||
|
||||
// NOTE: For scattered caches, this will always return true, which may not be correct. Need to review this if we add support for scattered caches
|
||||
return Objects.equals(myAddress, objectOwnerAddress);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Get route to be used as the identifier for sticky session. Return null if I am not able to find the appropriate route (or in case of local mode)
|
||||
* @deprecated Use {@link org.keycloak.sessions.StickySessionEncoderProvider#sessionIdRoute(String)} instead.
|
||||
|
|
@ -140,11 +135,11 @@ public class TopologyInfo {
|
|||
Address address = getOwnerAddress(cache, key);
|
||||
|
||||
// Local mode
|
||||
if (address == null || (address == LocalModeAddress.INSTANCE)) {
|
||||
if (address == null || (address == Address.LOCAL)) {
|
||||
return myNodeName;
|
||||
}
|
||||
|
||||
org.jgroups.Address jgroupsAddress = toJGroupsAddress(address);
|
||||
org.jgroups.Address jgroupsAddress = Address.toExtendedUUID(address);
|
||||
String name = NameCache.get(jgroupsAddress);
|
||||
|
||||
// If no logical name exists, create one using physical address
|
||||
|
|
@ -169,16 +164,4 @@ public class TopologyInfo {
|
|||
DistributionManager dist = cache.getAdvancedCache().getDistributionManager();
|
||||
return dist == null ? cache.getCacheManager().getAddress() : dist.getCacheTopology().getDistribution(key).primary();
|
||||
}
|
||||
|
||||
|
||||
// See org.wildfly.clustering.server.group.CacheGroup
|
||||
private static org.jgroups.Address toJGroupsAddress(Address address) {
|
||||
if ((address == null) || (address == LocalModeAddress.INSTANCE)) return null;
|
||||
if (address instanceof JGroupsAddress jgroupsAddress) {
|
||||
return jgroupsAddress.getJGroupsAddress();
|
||||
}
|
||||
throw new IllegalArgumentException(address.toString());
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,222 +18,18 @@
|
|||
package org.keycloak.jgroups.protocol;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.SQLException;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.keycloak.connections.jpa.JpaConnectionProviderFactory;
|
||||
|
||||
import org.jgroups.Address;
|
||||
import org.jgroups.Event;
|
||||
import org.jgroups.PhysicalAddress;
|
||||
import org.jgroups.View;
|
||||
import org.jgroups.protocols.JDBC_PING2;
|
||||
import org.jgroups.protocols.PingData;
|
||||
import org.jgroups.stack.Protocol;
|
||||
import org.jgroups.util.ExtendedUUID;
|
||||
import org.jgroups.util.NameCache;
|
||||
import org.jgroups.util.Responses;
|
||||
import org.jgroups.util.UUID;
|
||||
|
||||
/**
|
||||
* Enhanced JDBC_PING2 to handle entries transactionally.
|
||||
* <p>
|
||||
* Workaround for issue <a href="https://issues.redhat.com/browse/JGRP-2870">JGRP-2870</a>
|
||||
*/
|
||||
public class KEYCLOAK_JDBC_PING2 extends JDBC_PING2 {
|
||||
|
||||
private JpaConnectionProviderFactory factory;
|
||||
|
||||
@Override
|
||||
protected void handleView(View new_view, View old_view, boolean coord_changed) {
|
||||
// If we are the coordinator, it is good to learn about new entries that have been added before we delete them.
|
||||
// If we are not the coordinator, it is good to learn the new entries added by the coordinator.
|
||||
// This avoids a "JGRP000032: %s: no physical address for %s, dropping message" that leads to split clusters at concurrent startup.
|
||||
learnExistingAddresses();
|
||||
|
||||
// This is updated logic: instead of calling removeAll, we remove only the obsolete entries.
|
||||
// This avoids the short moment where the table is empty and a new node might not see any other node.
|
||||
if (is_coord) {
|
||||
if (remove_old_coords_on_view_change) {
|
||||
Address old_coord = old_view != null ? old_view.getCreator() : null;
|
||||
if (old_coord != null)
|
||||
remove(cluster_name, old_coord);
|
||||
}
|
||||
Address[] left = View.diff(old_view, new_view)[1];
|
||||
if (coord_changed || update_store_on_view_change || left.length > 0) {
|
||||
writeAll(left);
|
||||
if (remove_all_data_on_view_change) {
|
||||
removeAllNotInCurrentView();
|
||||
}
|
||||
if (remove_all_data_on_view_change || remove_old_coords_on_view_change) {
|
||||
startInfoWriter();
|
||||
}
|
||||
}
|
||||
} else if (coord_changed && !remove_all_data_on_view_change) {
|
||||
// I'm no longer the coordinator, usually due to a merge.
|
||||
// The new coordinator will update my status to non-coordinator, and remove me fully
|
||||
// if 'remove_all_data_on_view_change' is enabled and I'm no longer part of the view.
|
||||
// Maybe this branch can even be removed completely, but for JDBC_PING 'remove_all_data_on_view_change' is always set to true.
|
||||
PhysicalAddress physical_addr = (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr));
|
||||
PingData coord_data = new PingData(local_addr, true, NameCache.get(local_addr), physical_addr).coord(is_coord);
|
||||
write(Collections.singletonList(coord_data), cluster_name);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void removeAll(String clustername) {
|
||||
// This is unsafe as even if we would fill the table a moment later, a new node might see an empty table and become a coordinator
|
||||
throw new RuntimeException("Not implemented as it is unsafe");
|
||||
}
|
||||
|
||||
private void removeAllNotInCurrentView() {
|
||||
try {
|
||||
List<PingData> list = readFromDB(getClusterName());
|
||||
for (PingData data : list) {
|
||||
Address addr = data.getAddress();
|
||||
if (view != null && !view.containsMember(addr)) {
|
||||
addDiscoveryResponseToCaches(addr, data.getLogicalName(), data.getPhysicalAddr());
|
||||
remove(cluster_name, addr);
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error(String.format("%s: failed reading from the DB", local_addr), e);
|
||||
}
|
||||
}
|
||||
|
||||
protected void learnExistingAddresses() {
|
||||
try {
|
||||
List<PingData> list = readFromDB(getClusterName());
|
||||
for (PingData data : list) {
|
||||
Address addr = data.getAddress();
|
||||
if (local_addr != null && !local_addr.equals(addr)) {
|
||||
addDiscoveryResponseToCaches(addr, data.getLogicalName(), data.getPhysicalAddr());
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error(String.format("%s: failed reading from the DB", local_addr), e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized boolean isInfoWriterRunning() {
|
||||
// Do not rely on the InfoWriter; instead, always write the information during find if it is missing. Find is also triggered by MERGE.
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void findMembers(List<Address> members, boolean initial_discovery, Responses responses) {
|
||||
if (initial_discovery) {
|
||||
try {
|
||||
List<PingData> pingData = readFromDB(cluster_name);
|
||||
PhysicalAddress physical_addr = (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr));
|
||||
// Sending the discovery here, as parent class will not execute it once there is data in the table
|
||||
sendDiscoveryResponse(local_addr, physical_addr, NameCache.get(local_addr), null, is_coord);
|
||||
PingData coord_data = new PingData(local_addr, true, NameCache.get(local_addr), physical_addr).coord(is_coord);
|
||||
write(Collections.singletonList(coord_data), cluster_name);
|
||||
while (pingData.stream().noneMatch(PingData::isCoord)) {
|
||||
// Do a quick check if more nodes have arrived, to have a more complete list of nodes to start with.
|
||||
List<PingData> newPingData = readFromDB(cluster_name);
|
||||
if (newPingData.stream().map(PingData::getAddress).collect(Collectors.toSet()).equals(pingData.stream().map(PingData::getAddress).collect(Collectors.toSet()))
|
||||
|| pingData.stream().anyMatch(PingData::isCoord)) {
|
||||
break;
|
||||
}
|
||||
pingData = newPingData;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error(String.format("%s: failed reading from the DB", local_addr), e);
|
||||
}
|
||||
}
|
||||
|
||||
super.findMembers(members, initial_discovery, responses);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void writeToDB(PingData data, String clustername) throws SQLException {
|
||||
lock.lock();
|
||||
try (Connection connection = getConnection()) {
|
||||
if(call_insert_sp != null && insert_sp != null)
|
||||
callInsertStoredProcedure(connection, data, clustername);
|
||||
else {
|
||||
boolean isAutocommit = connection.getAutoCommit();
|
||||
try {
|
||||
if (isAutocommit) {
|
||||
// Always use a transaction for the delete+insert to make it atomic
|
||||
// to avoid the short moment where there is no entry in the table.
|
||||
connection.setAutoCommit(false);
|
||||
} else {
|
||||
log.warn("Autocommit is disabled. This indicates a transaction context that might batch statements and can lead to deadlocks.");
|
||||
}
|
||||
delete(connection, clustername, data.getAddress());
|
||||
insert(connection, data, clustername);
|
||||
if (isAutocommit) {
|
||||
connection.commit();
|
||||
}
|
||||
} catch (SQLException e) {
|
||||
if (isAutocommit) {
|
||||
connection.rollback();
|
||||
}
|
||||
throw e;
|
||||
} finally {
|
||||
if (isAutocommit) {
|
||||
connection.setAutoCommit(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* START: JDBC_PING2 does not handle ExtendedUUID yet, see
|
||||
https://github.com/belaban/JGroups/pull/901 - until this is backported, we convert all of them.
|
||||
*/
|
||||
|
||||
@Override
|
||||
public <T extends Protocol> T addr(Address addr) {
|
||||
addr = toUUID(addr);
|
||||
return super.addr(addr);
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T extends Protocol> T setAddress(Address addr) {
|
||||
addr = toUUID(addr);
|
||||
return super.setAddress(addr);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void delete(Connection conn, String clustername, Address addressToDelete) throws SQLException {
|
||||
super.delete(conn, clustername, toUUID(addressToDelete));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void delete(String clustername, Address addressToDelete) throws SQLException {
|
||||
super.delete(clustername, toUUID(addressToDelete));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void insert(Connection connection, PingData data, String clustername) throws SQLException {
|
||||
if (data.getAddress() instanceof ExtendedUUID) {
|
||||
data = new PingData(toUUID(data.getAddress()), data.isServer(), data.getLogicalName(), data.getPhysicalAddr()).coord(data.isCoord());
|
||||
}
|
||||
super.insert(connection, data, clustername);
|
||||
}
|
||||
|
||||
private static Address toUUID(Address addr) {
|
||||
if (addr instanceof ExtendedUUID eUUID) {
|
||||
addr = new UUID(eUUID.getMostSignificantBits(), eUUID.getLeastSignificantBits());
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
/* END: JDBC_PING2 does not handle ExtendedUUID yet, see
|
||||
https://github.com/belaban/JGroups/pull/901 - until this is backported, we convert all of them.
|
||||
*/
|
||||
|
||||
@Override
|
||||
protected void loadDriver() {
|
||||
//no-op, using JpaConnectionProviderFactory
|
||||
|
|
|
|||
|
|
@ -118,6 +118,7 @@ import org.infinispan.protostream.types.java.CommonTypes;
|
|||
schemaPackageName = Marshalling.PROTO_SCHEMA_PACKAGE,
|
||||
schemaFilePath = "proto/generated",
|
||||
allowNullFields = true,
|
||||
orderedMarshallers = true,
|
||||
|
||||
// common-types for UUID
|
||||
dependsOn = CommonTypes.class,
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ import org.keycloak.sessions.StickySessionEncoderProvider;
|
|||
import org.keycloak.sessions.StickySessionEncoderProviderFactory;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.remoting.transport.jgroups.JGroupsAddress;
|
||||
import org.infinispan.remoting.transport.Address;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.jgroups.util.NameCache;
|
||||
|
||||
|
|
@ -153,6 +153,6 @@ public class InfinispanStickySessionEncoderProviderFactory implements StickySess
|
|||
// Return null if the logical name is not available yet.
|
||||
// The following request may be redirected to the wrong instance, but that's ok.
|
||||
// In a healthy/stable cluster, the name cache is correctly populated.
|
||||
return primaryOwner instanceof JGroupsAddress jgrpAddr ? NameCache.get(jgrpAddr.getJGroupsAddress()) : null;
|
||||
return primaryOwner == null ? null : NameCache.get(Address.toExtendedUUID(primaryOwner));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -32,7 +32,7 @@ import java.util.stream.StreamSupport;
|
|||
|
||||
import org.infinispan.client.hotrod.impl.query.RemoteQuery;
|
||||
import org.infinispan.commons.api.query.Query;
|
||||
import org.infinispan.query.dsl.QueryResult;
|
||||
import org.infinispan.commons.api.query.QueryResult;
|
||||
|
||||
public final class QueryHelper {
|
||||
|
||||
|
|
@ -195,7 +195,7 @@ public final class QueryHelper {
|
|||
return;
|
||||
}
|
||||
currentOffset += resultList.size();
|
||||
if (rsp.count().isExact() && currentOffset >= rsp.count().value()) {
|
||||
if (rsp.count().exact() && currentOffset >= rsp.count().value()) {
|
||||
completed = true;
|
||||
return;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -439,10 +439,10 @@ public class RemoteUserSessionProvider implements UserSessionProvider {
|
|||
|
||||
private Stream<UserSessionModel> streamUserSessionByClientId(RealmModel realm, String clientId, boolean offline, Integer offset, Integer maxResults) {
|
||||
var userSessionIdQuery = ClientSessionQueries.fetchUserSessionIdForClientId(getClientSessionTransaction(offline).getCache(), realm.getId(), clientId);
|
||||
if (offset != null) {
|
||||
if (offset != null && offset > -1) {
|
||||
userSessionIdQuery.startOffset(offset);
|
||||
}
|
||||
userSessionIdQuery.maxResults(maxResults == null ? Integer.MAX_VALUE : maxResults);
|
||||
userSessionIdQuery.maxResults(maxResults == null || maxResults == -1 ? Integer.MAX_VALUE : maxResults);
|
||||
var userSessionTx = getUserSessionTransaction(offline);
|
||||
return Flowable.fromIterable(QueryHelper.toCollection(userSessionIdQuery, QueryHelper.SINGLE_PROJECTION_TO_STRING))
|
||||
.flatMapMaybe(userSessionTx::maybeGet, false, MAX_CONCURRENT_REQUESTS)
|
||||
|
|
|
|||
|
|
@ -33,6 +33,7 @@ import org.keycloak.models.sessions.infinispan.entities.RemoteUserSessionEntity;
|
|||
import org.keycloak.models.sessions.infinispan.entities.RootAuthenticationSessionEntity;
|
||||
|
||||
import org.infinispan.commons.dataconversion.MediaType;
|
||||
import org.infinispan.commons.util.TimeQuantity;
|
||||
import org.infinispan.configuration.cache.AbstractStoreConfiguration;
|
||||
import org.infinispan.configuration.cache.BackupConfiguration;
|
||||
import org.infinispan.configuration.cache.BackupFailurePolicy;
|
||||
|
|
@ -394,13 +395,17 @@ public final class CacheConfigurator {
|
|||
}
|
||||
|
||||
private static ConfigurationBuilder remoteCacheConfigurationBuilder(String name, Config.Scope config, String[] sites, Class<?> indexedEntity, long expirationWakeupPeriodMillis) {
|
||||
return remoteCacheConfigurationBuilder(name, config, sites, indexedEntity, TimeQuantity.valueOf(expirationWakeupPeriodMillis));
|
||||
}
|
||||
|
||||
private static ConfigurationBuilder remoteCacheConfigurationBuilder(String name, Config.Scope config, String[] sites, Class<?> indexedEntity, TimeQuantity expirationWakeupPeriod) {
|
||||
var builder = new ConfigurationBuilder();
|
||||
builder.clustering().cacheMode(CacheMode.DIST_SYNC);
|
||||
builder.clustering().hash().numOwners(Math.max(MIN_NUM_OWNERS_REMOTE_CACHE, config.getInt(numOwnerConfigKey(name), MIN_NUM_OWNERS_REMOTE_CACHE)));
|
||||
builder.clustering().stateTransfer().chunkSize(STATE_TRANSFER_CHUNK_SIZE);
|
||||
builder.encoding().mediaType(MediaType.APPLICATION_PROTOSTREAM);
|
||||
builder.statistics().enable();
|
||||
builder.expiration().enableReaper().wakeUpInterval(expirationWakeupPeriodMillis);
|
||||
builder.expiration().enableReaper().wakeUpInterval(expirationWakeupPeriod.longValue());
|
||||
|
||||
if (indexedEntity != null) {
|
||||
builder.indexing().enable().addIndexedEntities(Marshalling.protoEntity(indexedEntity));
|
||||
|
|
|
|||
|
|
@ -241,8 +241,8 @@ public class DefaultCacheEmbeddedConfigProviderFactory implements CacheEmbeddedC
|
|||
.meterRegistry(Metrics.globalRegistry);
|
||||
builder.cacheContainer().statistics(true);
|
||||
builder.metrics()
|
||||
.namesAsTags(true)
|
||||
.histograms(keycloakConfig.getBoolean(HISTOGRAMS, Boolean.FALSE));
|
||||
.histograms(keycloakConfig.getBoolean(HISTOGRAMS, Boolean.FALSE))
|
||||
.legacy(true);
|
||||
holder.getNamedConfigurationBuilders()
|
||||
.values()
|
||||
.stream()
|
||||
|
|
|
|||
|
|
@ -1,57 +0,0 @@
|
|||
<config xmlns="urn:org:jgroups"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="urn:org:jgroups http://www.jgroups.org/schema/jgroups-5.2.xsd">
|
||||
<!-- This file has been adopted from https://github.com/infinispan/infinispan/blob/master/core/src/main/resources/default-configs/default-jgroups-udp.xml -->
|
||||
<!-- jgroups.udp.address is deprecated and will be removed, see ISPN-11867 -->
|
||||
<UDP bind_addr="${jgroups.bind.address,jgroups.udp.address:127.0.0.1}"
|
||||
bind_port="${jgroups.bind.port,jgroups.udp.port:0}"
|
||||
mcast_addr="${jgroups.udp.mcast_addr,jgroups.mcast_addr:228.6.7.8}"
|
||||
mcast_port="${jgroups.udp.mcast_port,jgroups.mcast_port:46655}"
|
||||
tos="0"
|
||||
ucast_send_buf_size="1m"
|
||||
mcast_send_buf_size="1m"
|
||||
ucast_recv_buf_size="20m"
|
||||
mcast_recv_buf_size="25m"
|
||||
ip_ttl="${jgroups.ip_ttl:2}"
|
||||
thread_naming_pattern="pl"
|
||||
diag.enabled="${jgroups.diag.enabled:false}"
|
||||
bundler_type="transfer-queue"
|
||||
bundler.max_size="${jgroups.bundler.max_size:64000}"
|
||||
|
||||
thread_pool.min_threads="${jgroups.thread_pool.min_threads:0}"
|
||||
thread_pool.max_threads="${jgroups.thread_pool.max_threads:200}"
|
||||
thread_pool.keep_alive_time="60000"
|
||||
/>
|
||||
<RED/>
|
||||
<PING num_discovery_runs="3"/>
|
||||
<MERGE3 min_interval="10000"
|
||||
max_interval="30000"
|
||||
/>
|
||||
<FD_SOCK2 offset="${jgroups.fd.port-offset:50000}"/>
|
||||
<FD_ALL3/>
|
||||
<VERIFY_SUSPECT timeout="1000"/>
|
||||
<pbcast.NAKACK2 xmit_interval="100"
|
||||
xmit_table_num_rows="50"
|
||||
xmit_table_msgs_per_row="1024"
|
||||
xmit_table_max_compaction_time="30000"
|
||||
resend_last_seqno="true"
|
||||
/>
|
||||
<UNICAST3 xmit_interval="100"
|
||||
xmit_table_num_rows="50"
|
||||
xmit_table_msgs_per_row="1024"
|
||||
xmit_table_max_compaction_time="30000"
|
||||
/>
|
||||
<pbcast.STABLE desired_avg_gossip="5000"
|
||||
max_bytes="1M"
|
||||
/>
|
||||
<pbcast.GMS print_local_addr="false"
|
||||
join_timeout="${jgroups.join_timeout:500}"
|
||||
/>
|
||||
<UFC max_credits="${jgroups.max_credits:4m}"
|
||||
min_threshold="0.40"
|
||||
/>
|
||||
<MFC max_credits="${jgroups.max_credits:4m}"
|
||||
min_threshold="0.40"
|
||||
/>
|
||||
<FRAG4 frag_size="${jgroups.frag_size:60000}"/>
|
||||
</config>
|
||||
|
|
@ -18,8 +18,8 @@
|
|||
|
||||
<infinispan
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd"
|
||||
xmlns="urn:infinispan:config:15.0">
|
||||
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd"
|
||||
xmlns="urn:infinispan:config:16.0">
|
||||
|
||||
<cache-container name="keycloak">
|
||||
<transport lock-timeout="60000"/>
|
||||
|
|
|
|||
4
pom.xml
4
pom.xml
|
|
@ -91,9 +91,9 @@
|
|||
<h2.version>2.4.240</h2.version>
|
||||
<hibernate-orm.plugin.version>6.2.13.Final</hibernate-orm.plugin.version>
|
||||
<hibernate.c3p0.version>6.2.13.Final</hibernate.c3p0.version>
|
||||
<infinispan.version>15.0.19.Final</infinispan.version>
|
||||
<hibernate-validator.version>9.0.1.Final</hibernate-validator.version>
|
||||
<protostream.version>5.0.14.Final</protostream.version> <!-- For the annotation processor: keep in sync with the version shipped with Infinispan -->
|
||||
<infinispan.version>16.0.5</infinispan.version>
|
||||
<protostream.version>6.0.3</protostream.version> <!-- For the annotation processor: keep in sync with the version shipped with Infinispan -->
|
||||
<protostream.plugin.version>${protostream.version}</protostream.plugin.version>
|
||||
|
||||
<!--JAKARTA-->
|
||||
|
|
|
|||
|
|
@ -672,16 +672,6 @@
|
|||
<artifactId>infinispan-component-annotations</artifactId>
|
||||
<scope>provided</scope>
|
||||
</dependency>
|
||||
<!-- required for query/search in the external Infinispan server -->
|
||||
<dependency>
|
||||
<groupId>org.infinispan</groupId>
|
||||
<artifactId>infinispan-remote-query-client</artifactId>
|
||||
</dependency>
|
||||
<!-- to be removed after https://issues.redhat.com/browse/ISPN-16220 -->
|
||||
<dependency>
|
||||
<groupId>org.infinispan</groupId>
|
||||
<artifactId>infinispan-query-dsl</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>jakarta.xml.bind</groupId>
|
||||
<artifactId>jakarta.xml.bind-api</artifactId>
|
||||
|
|
|
|||
|
|
@ -18,8 +18,8 @@
|
|||
|
||||
<infinispan
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd"
|
||||
xmlns="urn:infinispan:config:15.0">
|
||||
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd"
|
||||
xmlns="urn:infinispan:config:16.0">
|
||||
|
||||
<cache-container name="keycloak">
|
||||
<transport lock-timeout="60000"/>
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@
|
|||
|
||||
<infinispan
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd"
|
||||
xmlns="urn:infinispan:config:15.0">
|
||||
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd"
|
||||
xmlns="urn:infinispan:config:16.0">
|
||||
<cache-container />
|
||||
</infinispan>
|
||||
|
|
|
|||
|
|
@ -148,9 +148,7 @@ public class ClusterConfigKeepAliveDistTest {
|
|||
.prettyPrint();
|
||||
|
||||
ConfigurationBuilderHolder configHolder = new ParserRegistry().parse(configJson, MediaType.APPLICATION_JSON);
|
||||
// Workaround for ISPN-16595
|
||||
String cacheName = CaseFormat.LOWER_CAMEL.to(CaseFormat.LOWER_HYPHEN, cache);
|
||||
return configHolder.getNamedConfigurationBuilders().get(cacheName).build();
|
||||
return configHolder.getNamedConfigurationBuilders().get(cache).build();
|
||||
}
|
||||
|
||||
private record CacheOwners(String name, int owners) {
|
||||
|
|
|
|||
|
|
@ -1,263 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!-- end::keycloak-ispn-configmap[] -->
|
||||
|
||||
<!--
|
||||
~ Copyright 2019 Red Hat, Inc. and/or its affiliates
|
||||
~ and other contributors as indicated by the @author tags.
|
||||
~
|
||||
~ Licensed under the Apache License, Version 2.0 (the "License");
|
||||
~ you may not use this file except in compliance with the License.
|
||||
~ You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!--tag::keycloak-ispn-configmap[] -->
|
||||
<infinispan
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="urn:infinispan:config:15.0 https://www.infinispan.org/schemas/infinispan-config-15.0.xsd
|
||||
urn:infinispan:config:store:remote:15.0 https://www.infinispan.org/schemas/infinispan-cachestore-remote-config-15.0.xsd"
|
||||
xmlns="urn:infinispan:config:15.0">
|
||||
<!--end::keycloak-ispn-configmap[] -->
|
||||
|
||||
<!-- the statistics="true" attribute is not part of the original KC config and was added by Keycloak Benchmark -->
|
||||
<cache-container name="keycloak" statistics="true">
|
||||
<transport lock-timeout="60000"/>
|
||||
<metrics names-as-tags="true" />
|
||||
<local-cache name="realms" simple-cache="true" statistics="true">
|
||||
<encoding>
|
||||
<key media-type="application/x-java-object"/>
|
||||
<value media-type="application/x-java-object"/>
|
||||
</encoding>
|
||||
<memory max-count="10000"/>
|
||||
</local-cache>
|
||||
<local-cache name="users" simple-cache="true" statistics="true">
|
||||
<encoding>
|
||||
<key media-type="application/x-java-object"/>
|
||||
<value media-type="application/x-java-object"/>
|
||||
</encoding>
|
||||
<memory max-count="10000"/>
|
||||
</local-cache>
|
||||
<!--tag::keycloak-ispn-remotestore[] -->
|
||||
<distributed-cache name="sessions" owners="2" statistics="true">
|
||||
<expiration lifespan="-1"/>
|
||||
<persistence passivation="false" availability-interval="500"> <!--1-->
|
||||
<remote-store xmlns="urn:infinispan:config:store:remote:15.0"
|
||||
cache="sessions"
|
||||
raw-values="true"
|
||||
shared="true"
|
||||
segmented="false">
|
||||
<remote-server host="127.0.0.1"
|
||||
port="11222"/> <!--2-->
|
||||
<connection-pool max-active="16"
|
||||
exhausted-action="CREATE_NEW"/>
|
||||
<security>
|
||||
<authentication server-name="infinispan">
|
||||
<digest username="keycloak"
|
||||
password="Password1!"
|
||||
realm="default"/> <!--3-->
|
||||
</authentication>
|
||||
</security>
|
||||
</remote-store>
|
||||
</persistence>
|
||||
<state-transfer enabled="false"/> <!--5-->
|
||||
</distributed-cache>
|
||||
<!--end::keycloak-ispn-remotestore[] -->
|
||||
<distributed-cache name="authenticationSessions" owners="2" statistics="true">
|
||||
<expiration lifespan="-1"/>
|
||||
<persistence passivation="false" availability-interval="500">
|
||||
<remote-store xmlns="urn:infinispan:config:store:remote:15.0"
|
||||
cache="authenticationSessions"
|
||||
raw-values="true"
|
||||
shared="true"
|
||||
segmented="false">
|
||||
<remote-server host="127.0.0.1"
|
||||
port="11222"/>
|
||||
<connection-pool max-active="16"
|
||||
exhausted-action="CREATE_NEW"/>
|
||||
<security>
|
||||
<authentication server-name="infinispan">
|
||||
<digest username="keycloak"
|
||||
password="Password1!"
|
||||
realm="default"/>
|
||||
</authentication>
|
||||
</security>
|
||||
</remote-store>
|
||||
</persistence>
|
||||
<state-transfer enabled="false"/>
|
||||
</distributed-cache>
|
||||
<distributed-cache name="offlineSessions" owners="2" statistics="true">
|
||||
<expiration lifespan="-1"/>
|
||||
<persistence passivation="false" availability-interval="500">
|
||||
<remote-store xmlns="urn:infinispan:config:store:remote:15.0"
|
||||
cache="offlineSessions"
|
||||
raw-values="true"
|
||||
shared="true"
|
||||
segmented="false">
|
||||
<remote-server host="127.0.0.1"
|
||||
port="11222"/>
|
||||
<connection-pool max-active="16"
|
||||
exhausted-action="CREATE_NEW"/>
|
||||
<security>
|
||||
<authentication server-name="infinispan">
|
||||
<digest username="keycloak"
|
||||
password="Password1!"
|
||||
realm="default"/>
|
||||
</authentication>
|
||||
</security>
|
||||
</remote-store>
|
||||
</persistence>
|
||||
<state-transfer enabled="false"/>
|
||||
</distributed-cache>
|
||||
<distributed-cache name="clientSessions" owners="2" statistics="true">
|
||||
<expiration lifespan="-1"/>
|
||||
<persistence passivation="false" availability-interval="500">
|
||||
<remote-store xmlns="urn:infinispan:config:store:remote:15.0"
|
||||
cache="clientSessions"
|
||||
raw-values="true"
|
||||
shared="true"
|
||||
segmented="false">
|
||||
<remote-server host="127.0.0.1"
|
||||
port="11222"/>
|
||||
<connection-pool max-active="16"
|
||||
exhausted-action="CREATE_NEW"/>
|
||||
<security>
|
||||
<authentication server-name="infinispan">
|
||||
<digest username="keycloak"
|
||||
password="Password1!"
|
||||
realm="default"/>
|
||||
</authentication>
|
||||
</security>
|
||||
</remote-store>
|
||||
</persistence>
|
||||
<state-transfer enabled="false"/>
|
||||
</distributed-cache>
|
||||
<distributed-cache name="offlineClientSessions" owners="2" statistics="true">
|
||||
<expiration lifespan="-1"/>
|
||||
<persistence passivation="false" availability-interval="500">
|
||||
<remote-store xmlns="urn:infinispan:config:store:remote:15.0"
|
||||
cache="offlineClientSessions"
|
||||
raw-values="true"
|
||||
shared="true"
|
||||
segmented="false">
|
||||
<remote-server host="127.0.0.1"
|
||||
port="11222"/>
|
||||
<connection-pool max-active="16"
|
||||
exhausted-action="CREATE_NEW"/>
|
||||
<security>
|
||||
<authentication server-name="infinispan">
|
||||
<digest username="keycloak"
|
||||
password="Password1!"
|
||||
realm="default"/>
|
||||
</authentication>
|
||||
</security>
|
||||
</remote-store>
|
||||
</persistence>
|
||||
<state-transfer enabled="false"/>
|
||||
</distributed-cache>
|
||||
<distributed-cache name="loginFailures" owners="2" statistics="true">
|
||||
<expiration lifespan="-1"/>
|
||||
<persistence passivation="false" availability-interval="500">
|
||||
<remote-store xmlns="urn:infinispan:config:store:remote:15.0"
|
||||
cache="loginFailures"
|
||||
raw-values="true"
|
||||
shared="true"
|
||||
segmented="false">
|
||||
<remote-server host="127.0.0.1"
|
||||
port="11222"/>
|
||||
<connection-pool max-active="16"
|
||||
exhausted-action="CREATE_NEW"/>
|
||||
<security>
|
||||
<authentication server-name="infinispan">
|
||||
<digest username="keycloak"
|
||||
password="Password1!"
|
||||
realm="default"/>
|
||||
</authentication>
|
||||
</security>
|
||||
</remote-store>
|
||||
</persistence>
|
||||
<state-transfer enabled="false"/>
|
||||
</distributed-cache>
|
||||
<local-cache name="authorization" simple-cache="true" statistics="true">
|
||||
<encoding>
|
||||
<key media-type="application/x-java-object"/>
|
||||
<value media-type="application/x-java-object"/>
|
||||
</encoding>
|
||||
<memory max-count="10000"/>
|
||||
</local-cache>
|
||||
<replicated-cache name="work" statistics="true">
|
||||
<expiration lifespan="-1"/>
|
||||
<persistence passivation="false" availability-interval="500">
|
||||
<remote-store xmlns="urn:infinispan:config:store:remote:15.0"
|
||||
cache="work"
|
||||
raw-values="true"
|
||||
shared="true"
|
||||
segmented="false">
|
||||
<remote-server host="127.0.0.1"
|
||||
port="11222"/>
|
||||
<connection-pool max-active="16"
|
||||
exhausted-action="CREATE_NEW"/>
|
||||
<security>
|
||||
<authentication server-name="infinispan">
|
||||
<digest username="keycloak"
|
||||
password="Password1!"
|
||||
realm="default"/>
|
||||
</authentication>
|
||||
</security>
|
||||
</remote-store>
|
||||
</persistence>
|
||||
|
||||
</replicated-cache>
|
||||
<local-cache name="keys" simple-cache="true" statistics="true">
|
||||
<encoding>
|
||||
<key media-type="application/x-java-object"/>
|
||||
<value media-type="application/x-java-object"/>
|
||||
</encoding>
|
||||
<expiration max-idle="3600000"/>
|
||||
<memory max-count="1000"/>
|
||||
</local-cache>
|
||||
<local-cache name="crl" simple-cache="true" statistics="true">
|
||||
<encoding>
|
||||
<key media-type="application/x-java-object"/>
|
||||
<value media-type="application/x-java-object"/>
|
||||
</encoding>
|
||||
<expiration lifespan="-1"/>
|
||||
<memory max-count="1000"/>
|
||||
</local-cache>
|
||||
<distributed-cache name="actionTokens" owners="2" statistics="true">
|
||||
<encoding>
|
||||
<key media-type="application/x-java-object"/>
|
||||
<value media-type="application/x-java-object"/>
|
||||
</encoding>
|
||||
<expiration max-idle="-1" lifespan="-1" interval="300000"/>
|
||||
<memory max-count="-1"/>
|
||||
<persistence passivation="false" availability-interval="500">
|
||||
<remote-store xmlns="urn:infinispan:config:store:remote:15.0"
|
||||
cache="actionTokens"
|
||||
raw-values="true"
|
||||
shared="true"
|
||||
segmented="false">
|
||||
<remote-server host="127.0.0.1"
|
||||
port="11222"/>
|
||||
<connection-pool max-active="16"
|
||||
exhausted-action="CREATE_NEW"/>
|
||||
<security>
|
||||
<authentication server-name="infinispan">
|
||||
<digest username="keycloak"
|
||||
password="Password1!"
|
||||
realm="default"/>
|
||||
</authentication>
|
||||
|
||||
</security>
|
||||
</remote-store>
|
||||
</persistence>
|
||||
<state-transfer enabled="false"/>
|
||||
</distributed-cache>
|
||||
</cache-container>
|
||||
</infinispan>
|
||||
|
|
@ -18,8 +18,8 @@
|
|||
|
||||
<infinispan
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd"
|
||||
xmlns="urn:infinispan:config:15.0">
|
||||
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd"
|
||||
xmlns="urn:infinispan:config:16.0">
|
||||
|
||||
<cache-container name="keycloak">
|
||||
<transport lock-timeout="60000"/>
|
||||
|
|
|
|||
|
|
@ -18,8 +18,8 @@
|
|||
|
||||
<infinispan
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd"
|
||||
xmlns="urn:infinispan:config:15.0">
|
||||
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd"
|
||||
xmlns="urn:infinispan:config:16.0">
|
||||
|
||||
<cache-container name="keycloak">
|
||||
<transport lock-timeout="60000"/>
|
||||
|
|
|
|||
|
|
@ -57,9 +57,8 @@
|
|||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.infinispan</groupId>
|
||||
<artifactId>infinispan-server-testdriver-core</artifactId>
|
||||
<artifactId>testcontainers-infinispan</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>junit</groupId>
|
||||
<artifactId>junit</artifactId>
|
||||
|
|
|
|||
|
|
@ -345,7 +345,12 @@ public class CLITestExtension extends QuarkusMainTestExtension {
|
|||
private static InfinispanContainer configureExternalInfinispan(ExtensionContext context) {
|
||||
if (getAnnotationFromTestContext(context, WithExternalInfinispan.class) != null) {
|
||||
InfinispanContainer infinispanContainer = new InfinispanContainer();
|
||||
infinispanContainer.start();
|
||||
try {
|
||||
infinispanContainer.start();
|
||||
} catch (RuntimeException e) {
|
||||
infinispanContainer.stop();
|
||||
throw e;
|
||||
}
|
||||
return infinispanContainer;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -21,12 +21,14 @@ import java.util.Arrays;
|
|||
|
||||
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
|
||||
|
||||
import com.github.dockerjava.api.command.InspectContainerResponse;
|
||||
import org.infinispan.client.hotrod.RemoteCacheManager;
|
||||
import org.infinispan.client.hotrod.configuration.ConfigurationBuilder;
|
||||
import org.infinispan.commons.configuration.StringConfiguration;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.testcontainers.images.PullPolicy;
|
||||
|
||||
public class InfinispanContainer extends org.infinispan.server.test.core.InfinispanContainer {
|
||||
public class InfinispanContainer extends org.infinispan.testcontainers.InfinispanContainer {
|
||||
|
||||
private final Logger LOG = Logger.getLogger(getClass());
|
||||
public static final String PORT = System.getProperty("keycloak.externalInfinispan.port", "11222");
|
||||
|
|
@ -50,7 +52,6 @@ public class InfinispanContainer extends org.infinispan.server.test.core.Infinis
|
|||
if (getImageName().startsWith("quay.io/infinispan-test")) {
|
||||
withImagePullPolicy(PullPolicy.alwaysPull());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private static String getImageName() {
|
||||
|
|
@ -72,17 +73,23 @@ public class InfinispanContainer extends org.infinispan.server.test.core.Infinis
|
|||
remoteCacheManager.administration().removeCache(cache);
|
||||
}
|
||||
|
||||
private void establishHotRodConnection() {
|
||||
remoteCacheManager = getRemoteCacheManager();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void start() {
|
||||
logger().info("Starting ISPN container");
|
||||
|
||||
super.start();
|
||||
|
||||
establishHotRodConnection();
|
||||
remoteCacheManager = new RemoteCacheManager(
|
||||
new ConfigurationBuilder()
|
||||
.addServer()
|
||||
.host(getHost())
|
||||
.port(getMappedPort(DEFAULT_HOTROD_PORT))
|
||||
.security()
|
||||
.authentication()
|
||||
.username(getEnvMap().get(USER))
|
||||
.password(getEnvMap().get(PASS))
|
||||
.build()
|
||||
);
|
||||
|
||||
Arrays.stream(InfinispanConnectionProvider.CLUSTERED_CACHE_NAMES)
|
||||
.forEach(cacheName -> {
|
||||
|
|
@ -91,6 +98,27 @@ public class InfinispanContainer extends org.infinispan.server.test.core.Infinis
|
|||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void containerIsStopping(InspectContainerResponse containerInfo) {
|
||||
// graceful shutdown
|
||||
if (containerInfo.getState() != null && Boolean.TRUE.equals(containerInfo.getState().getRunning())) {
|
||||
dockerClient.killContainerCmd(getContainerId()).withSignal("TERM").exec();
|
||||
}
|
||||
|
||||
while (true) {
|
||||
InspectContainerResponse info = dockerClient.inspectContainerCmd(getContainerId()).exec();
|
||||
if (!(info.getState() != null && Boolean.TRUE.equals(info.getState().getRunning()))) {
|
||||
break;
|
||||
}
|
||||
try {
|
||||
Thread.sleep(100);
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void stop() {
|
||||
logger().info("Stopping ISPN container");
|
||||
|
|
|
|||
|
|
@ -27,7 +27,6 @@ import org.keycloak.testframework.clustering.LoadBalancer;
|
|||
import org.keycloak.testframework.infinispan.CacheType;
|
||||
import org.keycloak.testframework.logging.JBossLogConsumer;
|
||||
|
||||
import org.infinispan.server.test.core.CountdownLatchLoggingConsumer;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.testcontainers.images.RemoteDockerImage;
|
||||
import org.testcontainers.utility.DockerImageName;
|
||||
|
|
|
|||
|
|
@ -0,0 +1,34 @@
|
|||
package org.keycloak.testframework.server;
|
||||
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.testcontainers.containers.output.BaseConsumer;
|
||||
import org.testcontainers.containers.output.OutputFrame;
|
||||
|
||||
class CountdownLatchLoggingConsumer extends BaseConsumer<CountdownLatchLoggingConsumer> {
|
||||
|
||||
private final CountDownLatch latch;
|
||||
private final Pattern pattern;
|
||||
|
||||
public CountdownLatchLoggingConsumer(int count, String regex) {
|
||||
this.latch = new CountDownLatch(count);
|
||||
this.pattern = Pattern.compile(regex, Pattern.DOTALL);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(OutputFrame outputFrame) {
|
||||
String log = outputFrame.getUtf8String();
|
||||
if (pattern.matcher(log).matches()) {
|
||||
latch.countDown();
|
||||
}
|
||||
}
|
||||
|
||||
public void await(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException {
|
||||
if (!latch.await(timeout, unit)) {
|
||||
throw new TimeoutException(String.format("After the await period %d %s the count down should be 0 and is %d", timeout, unit, latch.getCount()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -5,7 +5,7 @@ import java.util.Map;
|
|||
import org.keycloak.testframework.logging.JBossLogConsumer;
|
||||
import org.keycloak.testframework.util.ContainerImages;
|
||||
|
||||
import org.infinispan.server.test.core.InfinispanContainer;
|
||||
import org.infinispan.testcontainers.InfinispanContainer;
|
||||
import org.jboss.logging.Logger;
|
||||
|
||||
public class InfinispanExternalServer extends InfinispanContainer implements InfinispanServer {
|
||||
|
|
|
|||
|
|
@ -18,8 +18,8 @@
|
|||
|
||||
<infinispan
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd"
|
||||
xmlns="urn:infinispan:config:15.0">
|
||||
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd"
|
||||
xmlns="urn:infinispan:config:16.0">
|
||||
|
||||
<cache-container name="keycloak">
|
||||
<transport lock-timeout="60000"/>
|
||||
|
|
|
|||
|
|
@ -18,63 +18,17 @@
|
|||
|
||||
<infinispan
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="urn:infinispan:config:11.0 http://www.infinispan.org/schemas/infinispan-config-11.0.xsd"
|
||||
xmlns="urn:infinispan:config:11.0">
|
||||
xsi:schemaLocation="urn:infinispan:config:16.0 https://www.infinispan.org/schemas/infinispan-config-16.0.xsd"
|
||||
xmlns="urn:infinispan:config:16.0">
|
||||
|
||||
<!-- Distributed Cache Container Configuration -->
|
||||
<cache-container name="keycloak">
|
||||
<transport lock-timeout="60000" node-name="${jboss.node.name}" />
|
||||
<local-cache name="realms">
|
||||
<encoding>
|
||||
<key media-type="application/x-java-object"/>
|
||||
<value media-type="application/x-java-object"/>
|
||||
</encoding>
|
||||
<memory storage="HEAP" max-count="10000"/>
|
||||
</local-cache>
|
||||
<local-cache name="users">
|
||||
<encoding>
|
||||
<key media-type="application/x-java-object"/>
|
||||
<value media-type="application/x-java-object"/>
|
||||
</encoding>
|
||||
<memory storage="HEAP" max-count="10000"/>
|
||||
</local-cache>
|
||||
<distributed-cache name="sessions" owners="${session.cache.owners}"/>
|
||||
<distributed-cache name="authenticationSessions" owners="${session.cache.owners}"/>
|
||||
<distributed-cache name="offlineSessions" owners="${offline.session.cache.owners}"/>
|
||||
<distributed-cache name="clientSessions" owners="${session.cache.owners}"/>
|
||||
<distributed-cache name="offlineClientSessions" owners="${offline.session.cache.owners}"/>
|
||||
<distributed-cache name="loginFailures" owners="${login.failure.cache.owners}"/>
|
||||
<local-cache name="authorization">
|
||||
<encoding>
|
||||
<key media-type="application/x-java-object"/>
|
||||
<value media-type="application/x-java-object"/>
|
||||
</encoding>
|
||||
<memory storage="HEAP" max-count="10000"/>
|
||||
</local-cache>
|
||||
<replicated-cache name="work"/>
|
||||
<local-cache name="keys">
|
||||
<encoding>
|
||||
<key media-type="application/x-java-object"/>
|
||||
<value media-type="application/x-java-object"/>
|
||||
</encoding>
|
||||
<expiration max-idle="3600000"/>
|
||||
<memory storage="HEAP" max-count="1000"/>
|
||||
</local-cache>
|
||||
<local-cache name="crl" simple-cache="true">
|
||||
<encoding>
|
||||
<key media-type="application/x-java-object"/>
|
||||
<value media-type="application/x-java-object"/>
|
||||
</encoding>
|
||||
<expiration lifespan="-1"/>
|
||||
<memory max-count="1000"/>
|
||||
</local-cache>
|
||||
<distributed-cache name="actionTokens" owners="2">
|
||||
<encoding>
|
||||
<key media-type="application/x-java-object"/>
|
||||
<value media-type="application/x-java-object"/>
|
||||
</encoding>
|
||||
<expiration max-idle="-1" interval="300000"/>
|
||||
<memory storage="HEAP" max-count="-1"/>
|
||||
</distributed-cache>
|
||||
</cache-container>
|
||||
</infinispan>
|
||||
|
|
|
|||
|
|
@ -119,11 +119,6 @@
|
|||
<artifactId>infinispan-component-annotations</artifactId>
|
||||
<scope>provided</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.infinispan</groupId>
|
||||
<artifactId>infinispan-remote-query-server</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.awaitility</groupId>
|
||||
<artifactId>awaitility</artifactId>
|
||||
|
|
|
|||
|
|
@ -69,10 +69,13 @@ public class HotRodServerRule extends ExternalResource {
|
|||
HotRodServerConfiguration build = new HotRodServerConfigurationBuilder().build();
|
||||
hotRodServer = new HotRodServer();
|
||||
hotRodServer.start(build, hotRodCacheManager);
|
||||
hotRodServer.postStart();
|
||||
|
||||
HotRodServerConfiguration build2 = new HotRodServerConfigurationBuilder().port(11333).build();
|
||||
hotRodServer2 = new HotRodServer();
|
||||
hotRodServer2.start(build2, hotRodCacheManager2);
|
||||
hotRodServer2.postStart();
|
||||
|
||||
|
||||
// Create a Hot Rod client
|
||||
org.infinispan.client.hotrod.configuration.ConfigurationBuilder remoteBuilder = new org.infinispan.client.hotrod.configuration.ConfigurationBuilder();
|
||||
|
|
|
|||
|
|
@ -18,16 +18,16 @@
|
|||
|
||||
<infinispan
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd
|
||||
urn:org:jgroups http://www.jgroups.org/schema/jgroups-5.3.xsd"
|
||||
xmlns="urn:infinispan:config:15.0"
|
||||
xmlns:ispn="urn:infinispan:config:15.0">
|
||||
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd
|
||||
urn:org:jgroups http://www.jgroups.org/schema/jgroups-5.4.xsd"
|
||||
xmlns="urn:infinispan:config:16.0"
|
||||
xmlns:ispn="urn:infinispan:config:16.0">
|
||||
|
||||
<jgroups>
|
||||
<stack name="test" extends="tcp">
|
||||
<!-- no network traffic as all messages are handled inside the JVM -->
|
||||
<SHARED_LOOPBACK xmlns="urn:org:jgroups" ispn:stack.combine="REPLACE" ispn:stack.position="TCP"
|
||||
thread_pool.use_virtual_threads="true"
|
||||
use_vthreads="true"
|
||||
bundler_type="no-bundler"/>
|
||||
<SHARED_LOOPBACK_PING xmlns="urn:org:jgroups" ispn:stack.combine="REPLACE" ispn:stack.position="MPING"/>
|
||||
<!-- in JVM cluster, no failure detection, no flow control, no fragmentation. -->
|
||||
|
|
|
|||
|
|
@ -18,10 +18,10 @@
|
|||
|
||||
<infinispan
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd
|
||||
urn:org:jgroups http://www.jgroups.org/schema/jgroups-5.3.xsd"
|
||||
xmlns="urn:infinispan:config:15.0"
|
||||
xmlns:ispn="urn:infinispan:config:15.0">
|
||||
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd
|
||||
urn:org:jgroups http://www.jgroups.org/schema/jgroups-5.4.xsd"
|
||||
xmlns="urn:infinispan:config:16.0"
|
||||
xmlns:ispn="urn:infinispan:config:16.0">
|
||||
|
||||
<!-- used by auth-server-cluster-undertow profile -->
|
||||
<!-- all containers run in the same JVM, we can use the shared loopback and ping.-->
|
||||
|
|
@ -30,7 +30,7 @@
|
|||
<stack name="test" extends="tcp">
|
||||
<!-- no network traffic as all messages are handled inside the JVM -->
|
||||
<SHARED_LOOPBACK xmlns="urn:org:jgroups" ispn:stack.combine="REPLACE" ispn:stack.position="TCP"
|
||||
thread_pool.use_virtual_threads="true"
|
||||
use_vthreads="true"
|
||||
bundler_type="no-bundler"/>
|
||||
<SHARED_LOOPBACK_PING xmlns="urn:org:jgroups" ispn:stack.combine="REPLACE" ispn:stack.position="MPING"/>
|
||||
<!-- in JVM cluster, no failure detection, no flow control, no fragmentation. -->
|
||||
|
|
|
|||
|
|
@ -18,8 +18,8 @@
|
|||
|
||||
<infinispan
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd"
|
||||
xmlns="urn:infinispan:config:15.0">
|
||||
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd"
|
||||
xmlns="urn:infinispan:config:16.0">
|
||||
|
||||
<!-- used undertow deployment -->
|
||||
|
||||
|
|
|
|||
Loading…
Reference in a new issue