Compare revisions

Changes are shown as if the source revision was being merged into the target revision.
Commits on Source (60)
.gitlab-ci.yml

@@ -5,31 +5,53 @@ stages:
   - docs
   - mirror
-cache:
-  untracked: true
-  key: "$CI_PROJECT_ID"
-  paths:
-    - ./cache/
 variables:
   # production_repo: ubleipzig/folio-okapi
   production_repo: services.ub.uni-leipzig.de:11443/bdd_dev/folio/okapi
   staging_repo: services.ub.uni-leipzig.de:11443/bdd_dev/folio/okapi
   alpha_repo: services.ub.uni-leipzig.de:11443/bdd_dev/folio/okapi
   DOCKER_TLS_CERTDIR: ""
-  OKAPI_VERSION: 2-40-0
+  OKAPI_VERSION: v6.1.3
 docker_build:
   stage: build
-  image: ubleipzig/deployer:1.3.2
-  services:
-    - docker:dind
-  script: |
-    deployer build \
-      --build-arg HTTP_PROXY=${HTTP_PROXY} \
-      --build-arg HTTPS_PROXY=${HTTPS_PROXY} \
-      --build-arg NO_PROXY=${NO_PROXY} \
-      --output image.tar.gz
+  image:
+    name: gcr.io/kaniko-project/executor:v1.23.2-debug
+    entrypoint: [""]
+  before_script:
+    - export dockerconfig=${DOCKER_ALPHA_AUTH_CONFIG}
+    - echo -ne "Setting docker auth config ..."
+    - mkdir -p ${HOME}/.docker && echo "${dockerconfig}" >${HOME}/.docker/config.json
+    - mkdir -p /kaniko/.docker && echo "${dockerconfig}" >/kaniko/.docker/config.json
+  script:
+    - /kaniko/executor
+      --context "${CI_PROJECT_DIR}"
+      --dockerfile "${CI_PROJECT_DIR}/Dockerfile"
+      --destination "${alpha_repo}:${CI_COMMIT_BRANCH}"
+      --destination "${alpha_repo}:alpha"
+      --build-arg "HTTP_PROXY=${HTTP_PROXY}"
+      --build-arg "HTTPS_PROXY=${HTTPS_PROXY}"
+      --build-arg "NO_PROXY=${NO_PROXY}"
+      --no-push
+      --tarPath image.tar
+    # --output image.tar.gz
+    # deployer build \
   artifacts:
-    name: docker-image
+    # name: docker-image
     paths:
-      - image.tar.gz
+      - image.tar
   tags:
-    - docker
+    - kaniko
   only:
     changes:
       - Dockerfile
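
The rewritten job builds the image with kaniko inside the CI runner, skips the push, and hands the tarball to the later publish stage. The same build can be reproduced outside CI; a minimal sketch, assuming Docker on the workstation and the non-debug kaniko image (the okapi:local tag is illustrative):

  # Build image.tar from the project Dockerfile without pushing, as the CI job does.
  docker run --rm -v "$PWD":/workspace gcr.io/kaniko-project/executor:v1.23.2 \
    --context /workspace \
    --dockerfile /workspace/Dockerfile \
    --destination okapi:local \
    --no-push \
    --tarPath /workspace/image.tar
  # The tarball can then be imported into the local Docker daemon:
  docker load -i image.tar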
@@ -38,20 +60,20 @@ docker_build:
 docker_publish_alpha:
   stage: publish
-  image: ubleipzig/deployer:1.3.2
-  services:
-    - docker:dind
-  script: |
-    deployer publish \
-      --input image.tar.gz \
-      --docker-config "${DOCKER_ALPHA_AUTH_CONFIG}" \
-      --name ${alpha_repo} \
-      --tag ${CI_COMMIT_REF_SLUG} \
-      --tag alpha
+  image:
+    name: gcr.io/go-containerregistry/crane:debug
+    entrypoint: [""]
+  before_script:
+    - export dockerconfig=${DOCKER_ALPHA_AUTH_CONFIG}
+    - echo -ne "Setting docker auth config ..."
+    - mkdir -p ${HOME}/.docker && echo "${dockerconfig}" >${HOME}/.docker/config.json
+  script:
+    - crane push image.tar "${alpha_repo}:${CI_COMMIT_BRANCH}"
+    - crane tag "${alpha_repo}:${CI_COMMIT_BRANCH}" "alpha"
   dependencies:
     - docker_build
   tags:
-    - docker
+    - kaniko
   only:
     refs:
       - branches
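
The publish job swaps the deployer CLI for crane: it pushes the tarball under the branch tag and then aliases that image as alpha. Whether both tags point at the same image can be checked afterwards; a minimal sketch, assuming read access to the registry (REPO stands in for ${alpha_repo}):

  # Both tags should resolve to the same digest after the job has run.
  crane digest "REPO:my-branch"
  crane digest "REPO:alpha"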
@@ -64,7 +86,7 @@ docker_publish_alpha:
 docker_publish_staging:
   stage: publish
-  image: ubleipzig/deployer:1.3.2
+  image: ubleipzig/deployer:1.5.0-rc2
   services:
     - docker:dind
   script: |
@@ -89,7 +111,7 @@ docker_publish_staging:
 ## Pipeline to publish to docker-hub. Disabled at the moment.
 #docker_publish_production:
 #  stage: publish
-#  image: ubleipzig/deployer:1.3.2
+#  image: ubleipzig/deployer:1.5.0-rc2
 #  services:
 #    - docker:dind
 #  script: |
@@ -120,7 +142,7 @@ docker_publish_staging:
 ## Pipeline to publish to ub internal nexus.
 docker_publish_production:
   stage: publish
-  image: ubleipzig/deployer:1.3.2
+  image: ubleipzig/deployer:1.5.0-rc2
   services:
     - docker:dind
   script: |
@@ -145,4 +167,4 @@ docker_publish_production:
       - branches
   only:
     refs:
-      - /^release\/.*/
\ No newline at end of file
+      - /^release\/.*/
Dockerfile

-FROM folioorg/okapi:2.40.0
+FROM folioorg/okapi:6.1.3
 EXPOSE 9130
 ENTRYPOINT ["/docker-entrypoint"]
 CMD [ "dev" ]
-ENV POSTGRES_HOST='localhost' \
-    POSTGRES_PORT='5432' \
-    POSTGRES_USER='okapi' \
-    POSTGRES_PASSWORD='changeme' \
-    POSTGRES_DB='okapi' \
-    OKAPI_PORT='9130' \
+ENV OKAPI_PORT='9130' \
     OKAPI_URL='http://localhost:9130' \
     OKAPI_HOST='localhost' \
     OKAPI_CLUSTERHOST='localhost' \
@@ -19,9 +14,17 @@ ENV POSTGRES_HOST='localhost' \
     CLUSTER_HOST='localhost' \
     CLUSTER_PORT='5703' \
     HAZELCAST_CONFIG_FILE='/etc/hazelcast.xml' \
-    DATABASE_TRY_COUNT=120
+    OKAPI_CONFIG_FILE='/etc/okapi/config.json' \
+    DATABASE_TRY_COUNT=120 \
+    ENABLE_METRICS='false' \
+    METRICS_OPTIONS='-Dvertx.metrics.options.enabled=true -DprometheusOptions={\"embeddedServerOptions\":{\"port\":9930}} -DjmxMetricsOptions={\"domain\":\"org.folio\"} -DmetricsPrefixFilter=org.folio'
 USER root
 COPY assets/docker-entrypoint /
-COPY assets/hazelcast.xml /etc/
\ No newline at end of file
+COPY assets/hazelcast.xml /etc/
+COPY assets/okapi/config.json /etc/okapi/
+RUN apk add --no-cache jq
+##USER folio
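
The jq package added at the end is what lets the entrypoint below read the database coordinates out of the new OKAPI_CONFIG_FILE; a minimal sketch of that lookup, run inside the image:

  # Same keys as in assets/okapi/config.json.
  POSTGRES_HOST=$(jq -r '.postgres_host' /etc/okapi/config.json)
  POSTGRES_PORT=$(jq -r '.postgres_port' /etc/okapi/config.json)
  echo "database expected at ${POSTGRES_HOST}:${POSTGRES_PORT}"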
assets/docker-entrypoint

-#!/usr/bin/env /bin/bash
+#!/usr/bin/env /bin/ash
 #
 # entrypoint for okapi
@@ -21,7 +21,11 @@ get_ordinary_args() {
 }
 wait_for_database() {
-  echo -ne "waiting for database to come up and ready ..."
+  POSTGRES_HOST=$(jq -r '.postgres_host' ${OKAPI_CONFIG_FILE})
+  POSTGRES_PORT=$(jq -r '.postgres_port' ${OKAPI_CONFIG_FILE})
+  echo -e "waiting for database to come up and ready ..."
+  echo -ne "Will test connection to $POSTGRES_HOST on port $POSTGRES_PORT ... "
   while [ true ]; do
     if [ $DATABASE_TRY_COUNT = 0 ]; then break; fi
     result=`nc -z -w 1 $POSTGRES_HOST $POSTGRES_PORT 2>&1`
@@ -42,18 +46,19 @@ wait_for_database() {
 start_okapi() {
   local okapi_cmd=$1
   shift
   local java_args=$(get_java_args $@)
+  if [ "$ENABLE_METRICS" = true ] ; then
+    java_args="${java_args} ${METRICS_OPTIONS}"
+  fi
+  java_args="${java_args} --add-modules java.se --add-exports java.base/jdk.internal.ref=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED --add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.management/sun.management=ALL-UNNAMED --add-opens jdk.management/com.sun.management.internal=ALL-UNNAMED"
+  echo -e "These are the args: $java_args"
   local ordinary_args=$(get_ordinary_args $@)
   echo "starting okapi in ${okapi_cmd}-mode"
   /bin/su -s /bin/sh folio -c "java \
-    -Dstorage=\"postgres\" \
-    -Dpostgres_host=\"$POSTGRES_HOST\" \
-    -Dpostgres_port=\"$POSTGRES_PORT\" \
-    -Dpostgres_username=\"$POSTGRES_USER\" \
-    -Dpostgres_password=\"$POSTGRES_PASSWORD\" \
-    -Dpostgres_database=\"$POSTGRES_DB\" \
     -Dhost=\"$OKAPI_HOST\" \
     -Dport=\"$OKAPI_PORT\" \
     -Dokapiurl=\"$OKAPI_URL\" \
@@ -64,6 +69,7 @@ start_okapi() {
     $java_args \
     -jar \"$VERTICLE_HOME/$VERTICLE_FILE\" \
     \"$okapi_cmd\" \
+    -conf \"$OKAPI_CONFIG_FILE\" \
     -cluster-host \"$CLUSTER_HOST\" \
     -cluster-port \"$CLUSTER_PORT\" \
     -hazelcast-config-file \"$HAZELCAST_CONFIG_FILE\" \
...

assets/docker-entrypoint (full file as of the source revision):
#!/usr/bin/env /bin/ash
#
# entrypoint for okapi

set -o pipefail -o noclobber

get_java_args() {
  for i in "$@"; do
    if [ "${i#-D}" != "$i" ]; then
      echo -ne "$i "
    fi
  done
}

get_ordinary_args() {
  for i in "$@"; do
    if [ "${i#-D}" == "$i" ]; then
      echo -ne "$i "
    fi
  done
}
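
# Wait for PostgreSQL: read host and port from the Okapi JSON config via jq,
# then probe with nc once per second for up to DATABASE_TRY_COUNT attempts.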
wait_for_database() {
  POSTGRES_HOST=$(jq -r '.postgres_host' ${OKAPI_CONFIG_FILE})
  POSTGRES_PORT=$(jq -r '.postgres_port' ${OKAPI_CONFIG_FILE})
  echo -e "waiting for database to come up and ready ..."
  echo -ne "Will test connection to $POSTGRES_HOST on port $POSTGRES_PORT ... "
  while [ true ]; do
    if [ $DATABASE_TRY_COUNT = 0 ]; then break; fi
    result=`nc -z -w 1 $POSTGRES_HOST $POSTGRES_PORT 2>&1`
    if [ $? = 0 ]; then
      return 0
    fi
    let DATABASE_TRY_COUNT=$DATABASE_TRY_COUNT-1
    sleep 1
  done
  echo "error"
  echo $result
  return 1
}
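
# Launch Okapi as user "folio": -D arguments become JVM system properties,
# everything else is passed through to Okapi; metrics options and JDK module
# flags are appended to the JVM arguments.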
start_okapi() {
  local okapi_cmd=$1
  shift
  local java_args=$(get_java_args $@)
  if [ "$ENABLE_METRICS" = true ] ; then
    java_args="${java_args} ${METRICS_OPTIONS}"
  fi
  local kubesettings=""
  # Only pass kube flags whose variable is actually set; [ -n "${VAR+x}" ]
  # is true exactly when VAR has been defined.
  if [ -n "${KUBE_SERVER_URL+x}" ]; then
    kubesettings="-kube_server_url ${KUBE_SERVER_URL}"
  elif [ -n "${KUBE_CONFIG_FILE+x}" ]; then
    kubesettings="-kube_config ${KUBE_CONFIG_FILE}"
  fi
  java_args="${java_args} --add-modules java.se --add-exports java.base/jdk.internal.ref=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED --add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.management/sun.management=ALL-UNNAMED --add-opens jdk.management/com.sun.management.internal=ALL-UNNAMED"
  echo -e "These are the args: $java_args"
  local ordinary_args=$(get_ordinary_args $@)
  echo "starting okapi in ${okapi_cmd}-mode"
  /bin/su -s /bin/sh folio -c "java \
    -Dhost=\"$OKAPI_HOST\" \
    -Dport=\"$OKAPI_PORT\" \
    -Dokapiurl=\"$OKAPI_URL\" \
    -Dnodename=\"$OKAPI_NODENAME\" \
    -Dloglevel=\"$OKAPI_LOGLEVEL\" \
    -Dhazelcast.ip=\"$HAZELCAST_HOST\" \
    -Dhazelcast.port=\"$HAZELCAST_PORT\" \
    $java_args \
    -jar \"$VERTICLE_HOME/$VERTICLE_FILE\" \
    \"$okapi_cmd\" \
    -conf \"$OKAPI_CONFIG_FILE\" \
    -cluster-host \"$CLUSTER_HOST\" \
    -cluster-port \"$CLUSTER_PORT\" \
    -hazelcast-config-file \"$HAZELCAST_CONFIG_FILE\" \
    $kubesettings \
    $ordinary_args"
}
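
# Known Okapi run modes wait for the database first; any other argument is
# executed verbatim as user "folio".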
case $1 in
  cluster|dev|deployment|proxy|help|initdatabase|purgedatabase)
    wait_for_database && start_okapi "$@"
    exit $?
    ;;
  *)
    /bin/su -s /bin/sh folio -c "$*"
    ;;
esac
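
A quick end-to-end check of the entrypoint; a minimal sketch, assuming the image was built as okapi:test (the tag the compose file uses) and that a database matching /etc/okapi/config.json is reachable:

  # 9130 is the Okapi API, 9930 the Prometheus port set in METRICS_OPTIONS.
  docker run --rm -e ENABLE_METRICS=true \
    -p 127.0.0.1:9130:9130 -p 127.0.0.1:9930:9930 okapi:test dev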
assets/hazelcast.xml

 <?xml version="1.0" encoding="UTF-8"?>
 <!--
-~ Copyright (c) 2008-2017, Hazelcast, Inc. All Rights Reserved.
+~ Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
 ~
 ~ Licensed under the Apache License, Version 2.0 (the "License");
 ~ you may not use this file except in compliance with the License.
@@ -16,21 +16,25 @@
 -->
 <!--
-    The default Hazelcast configuration. This is used when no hazelcast.xml is present.
-    Please see the schema for how to configure Hazelcast at https://hazelcast.com/schema/config/hazelcast-config-3.8.xsd
-    or the documentation at https://hazelcast.org/documentation/
+    The default Hazelcast configuration.
+    This XML file is used when no hazelcast.xml is present.
+    To learn how to configure Hazelcast, please see the schema at
+    https://hazelcast.com/schema/config/hazelcast-config-4.2.xsd
+    or the Reference Manual at https://hazelcast.org/documentation/
 -->
-<hazelcast xsi:schemaLocation="http://www.hazelcast.com/schema/config hazelcast-config-3.8.xsd"
-           xmlns="http://www.hazelcast.com/schema/config"
-           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-    <group>
-        <name>dev</name>
-        <password>dev-pass</password>
-    </group>
-    <management-center enabled="false">http://localhost:8080/mancenter</management-center>
+<!--suppress XmlDefaultAttributeValue -->
+<hazelcast xmlns="http://www.hazelcast.com/schema/config"
+           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+           xsi:schemaLocation="http://www.hazelcast.com/schema/config
+           http://www.hazelcast.com/schema/config/hazelcast-config-4.2.xsd">
+
+    <cluster-name>dev</cluster-name>
+
     <network>
-        <public-address>${hazelcast.ip}:${hazelcast.port}</public-address>
-        <port auto-increment="true" port-count="100">${hazelcast.port}</port>
+        <port auto-increment="true" port-count="100">5701</port>
         <outbound-ports>
             <!--
             Allowed port range when connecting to other nodes.
@@ -39,11 +43,10 @@
             <ports>0</ports>
         </outbound-ports>
         <join>
-            <multicast enabled="true">
+            <auto-detection enabled="true"/>
+            <multicast enabled="false">
                 <multicast-group>224.2.2.3</multicast-group>
                 <multicast-port>54327</multicast-port>
-                <multicast-time-to-live>32</multicast-time-to-live>
-                <multicast-timeout-seconds>35</multicast-timeout-seconds>
             </multicast>
             <tcp-ip enabled="false">
                 <interface>127.0.0.1</interface>
@@ -52,17 +55,17 @@
                 </member-list>
             </tcp-ip>
             <aws enabled="false">
-                <access-key>my-access-key</access-key>
-                <secret-key>my-secret-key</secret-key>
-                <!--optional, default is us-east-1 -->
-                <region>us-west-1</region>
-                <!--optional, default is ec2.amazonaws.com. If set, region shouldn't be set as it will override this property -->
-                <host-header>ec2.amazonaws.com</host-header>
-                <!-- optional, only instances belonging to this group will be discovered, default will try all running instances -->
-                <security-group-name>hazelcast-sg</security-group-name>
-                <tag-key>type</tag-key>
-                <tag-value>hz-nodes</tag-value>
             </aws>
+            <gcp enabled="false">
+            </gcp>
+            <azure enabled="false">
+            </azure>
+            <kubernetes enabled="true">
+            </kubernetes>
+            <eureka enabled="false">
+                <self-registration>true</self-registration>
+                <namespace>hazelcast</namespace>
+            </eureka>
             <discovery-strategies>
             </discovery-strategies>
         </join>
@@ -88,13 +91,33 @@
             <!-- iteration count to use when generating the secret key -->
             <iteration-count>19</iteration-count>
         </symmetric-encryption>
+        <failure-detector>
+            <icmp enabled="false"/>
+        </failure-detector>
     </network>
     <partition-group enabled="false"/>
     <executor-service name="default">
-        <pool-size>16</pool-size>
         <!--Queue capacity. 0 means Integer.MAX_VALUE.-->
         <queue-capacity>0</queue-capacity>
+        <pool-size>16</pool-size>
+        <statistics-enabled>true</statistics-enabled>
     </executor-service>
+    <durable-executor-service name="default">
+        <capacity>100</capacity>
+        <durability>1</durability>
+        <pool-size>16</pool-size>
+        <statistics-enabled>true</statistics-enabled>
+    </durable-executor-service>
+    <scheduled-executor-service name="default">
+        <capacity>100</capacity>
+        <durability>1</durability>
+        <pool-size>16</pool-size>
+        <merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy>
+        <statistics-enabled>true</statistics-enabled>
+    </scheduled-executor-service>
+    <security>
+        <client-block-unmapped-actions>true</client-block-unmapped-actions>
+    </security>
     <queue name="default">
         <!--
             Maximum size of the queue. When a JVM's local queue size reaches the maximum,
@@ -117,6 +140,8 @@
         <async-backup-count>0</async-backup-count>
         <empty-queue-ttl>-1</empty-queue-ttl>
+        <merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy>
     </queue>
     <map name="default">
         <!--
@@ -128,6 +153,16 @@
         -->
         <in-memory-format>BINARY</in-memory-format>
+        <!--
+            Metadata creation policy for this map. Hazelcast may process objects of supported types ahead of time to
+            create additional metadata about them. This metadata then is used to make querying and indexing faster.
+            Metadata creation may decrease put throughput.
+            Valid values are:
+            CREATE_ON_UPDATE (default): Objects of supported types are pre-processed when they are created and updated.
+            OFF: No metadata is created.
+        -->
+        <metadata-policy>CREATE_ON_UPDATE</metadata-policy>
         <!--
             Number of backups. If 1 is set as the backup-count for example,
             then all entries of the map will be copied to another JVM for
@@ -139,46 +174,21 @@
         -->
         <async-backup-count>0</async-backup-count>
         <!--
             Maximum number of seconds for each entry to stay in the map. Entries that are
             older than <time-to-live-seconds> and not updated for <time-to-live-seconds>
             will get automatically evicted from the map.
-            Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0.
-        -->
-        <time-to-live-seconds>0</time-to-live-seconds>
-        <!--
-            Maximum number of seconds for each entry to stay idle in the map. Entries that are
-            idle(not touched) for more than <max-idle-seconds> will get
-            automatically evicted from the map. Entry is touched if get, put or containsKey is called.
-            Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0.
-        -->
-        <max-idle-seconds>0</max-idle-seconds>
-        <!--
-            Valid values are:
-            NONE (no eviction),
-            LRU (Least Recently Used),
-            LFU (Least Frequently Used).
-            NONE is the default.
-        -->
-        <eviction-policy>NONE</eviction-policy>
-        <!--
-            Maximum size of the map. When max size is reached,
-            map is evicted based on the policy defined.
-            Any integer between 0 and Integer.MAX_VALUE. 0 means
-            Integer.MAX_VALUE. Default is 0.
-        -->
-        <max-size policy="PER_NODE">0</max-size>
-        <!--
-            `eviction-percentage` property is deprecated and will be ignored when it is set.
-            As of version 3.7, eviction mechanism changed.
-            It uses a probabilistic algorithm based on sampling. Please see documentation for further details
-        -->
-        <eviction-percentage>25</eviction-percentage>
-        <!--
-            `min-eviction-check-millis` property is deprecated and will be ignored when it is set.
-            As of version 3.7, eviction mechanism changed.
-            It uses a probabilistic algorithm based on sampling. Please see documentation for further details
-        -->
-        <min-eviction-check-millis>100</min-eviction-check-millis>
+            Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0
+        -->
+        <time-to-live-seconds>0</time-to-live-seconds>
+        <!--
+            Maximum number of seconds for each entry to stay idle in the map. Entries that are
+            idle(not touched) for more than <max-idle-seconds> will get
+            automatically evicted from the map. Entry is touched if get, put or containsKey is called.
+            Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0.
+        -->
+        <max-idle-seconds>0</max-idle-seconds>
+        <eviction eviction-policy="NONE" max-size-policy="PER_NODE" size="0"/>
         <!--
             While recovering from split-brain (network partitioning),
             map entries in the small cluster will merge into the bigger cluster
@@ -187,13 +197,14 @@
             Values of these entries might be different for that same key.
             Which value should be set for the key? Conflict is resolved by
             the policy set here. Default policy is PutIfAbsentMapMergePolicy
             There are built-in merge policies such as
-            com.hazelcast.map.merge.PassThroughMergePolicy; entry will be overwritten if merging entry exists for the key.
-            com.hazelcast.map.merge.PutIfAbsentMapMergePolicy ; entry will be added if the merging entry doesn't exist in the cluster.
-            com.hazelcast.map.merge.HigherHitsMapMergePolicy ; entry with the higher hits wins.
-            com.hazelcast.map.merge.LatestUpdateMapMergePolicy ; entry with the latest update wins.
+            com.hazelcast.spi.merge.PassThroughMergePolicy; entry will be overwritten if merging entry exists for the key.
+            com.hazelcast.spi.merge.PutIfAbsentMergePolicy ; entry will be added if the merging entry doesn't exist in the cluster.
+            com.hazelcast.spi.merge.HigherHitsMergePolicy ; entry with the higher hits wins.
+            com.hazelcast.spi.merge.LatestUpdateMergePolicy ; entry with the latest update wins.
         -->
-        <merge-policy>com.hazelcast.map.merge.PutIfAbsentMapMergePolicy</merge-policy>
+        <merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy>
         <!--
             Control caching of de-serialized values. Caching makes query evaluation faster, but it cost memory.
@@ -204,37 +215,46 @@
         -->
         <cache-deserialized-values>INDEX-ONLY</cache-deserialized-values>
+        <!--
+            Whether map level statistical information (total
+            hits, memory-cost etc.) should be gathered and stored.
+        -->
+        <statistics-enabled>true</statistics-enabled>
+        <!--
+            Whether statistical information (hits, creation
+            time, last access time etc.) should be gathered
+            and stored. You have to enable this if you plan to
+            implement a custom eviction policy, out-of-the-box
+            eviction policies work regardless of this setting.
+        -->
+        <per-entry-stats-enabled>false</per-entry-stats-enabled>
     </map>
     <multimap name="default">
         <backup-count>1</backup-count>
         <value-collection-type>SET</value-collection-type>
+        <merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy>
     </multimap>
+    <replicatedmap name="default">
+        <in-memory-format>OBJECT</in-memory-format>
+        <async-fillup>true</async-fillup>
+        <statistics-enabled>true</statistics-enabled>
+        <merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy>
+    </replicatedmap>
     <list name="default">
         <backup-count>1</backup-count>
+        <merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy>
     </list>
     <set name="default">
         <backup-count>1</backup-count>
+        <merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy>
     </set>
-    <jobtracker name="default">
-        <max-thread-size>0</max-thread-size>
-        <!-- Queue size 0 means number of partitions * 2 -->
-        <queue-size>0</queue-size>
-        <retry-count>0</retry-count>
-        <chunk-size>1000</chunk-size>
-        <communicate-stats>true</communicate-stats>
-        <topology-changed-strategy>CANCEL_RUNNING_OPERATION</topology-changed-strategy>
-    </jobtracker>
-    <semaphore name="default">
-        <initial-permits>0</initial-permits>
-        <backup-count>1</backup-count>
-        <async-backup-count>0</async-backup-count>
-    </semaphore>
     <reliable-topic name="default">
         <read-batch-size>10</read-batch-size>
         <topic-overload-policy>BLOCK</topic-overload-policy>
@@ -247,14 +267,70 @@
         <async-backup-count>0</async-backup-count>
         <time-to-live-seconds>0</time-to-live-seconds>
         <in-memory-format>BINARY</in-memory-format>
+        <merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy>
     </ringbuffer>
+    <flake-id-generator name="default">
+        <prefetch-count>100</prefetch-count>
+        <prefetch-validity-millis>600000</prefetch-validity-millis>
+        <epoch-start>1514764800000</epoch-start>
+        <node-id-offset>0</node-id-offset>
+        <bits-sequence>6</bits-sequence>
+        <bits-node-id>16</bits-node-id>
+        <allowed-future-millis>15000</allowed-future-millis>
+        <statistics-enabled>true</statistics-enabled>
+    </flake-id-generator>
     <serialization>
         <portable-version>0</portable-version>
     </serialization>
-    <services enable-defaults="true"/>
     <lite-member enabled="false"/>
-</hazelcast>
\ No newline at end of file
+    <cardinality-estimator name="default">
+        <backup-count>1</backup-count>
+        <async-backup-count>0</async-backup-count>
+        <merge-policy batch-size="100">HyperLogLogMergePolicy</merge-policy>
+    </cardinality-estimator>
+    <crdt-replication>
+        <replication-period-millis>1000</replication-period-millis>
+        <max-concurrent-replication-targets>1</max-concurrent-replication-targets>
+    </crdt-replication>
+    <pn-counter name="default">
+        <replica-count>2147483647</replica-count>
+        <statistics-enabled>true</statistics-enabled>
+    </pn-counter>
+    <cp-subsystem>
+        <cp-member-count>0</cp-member-count>
+        <group-size>0</group-size>
+        <session-time-to-live-seconds>300</session-time-to-live-seconds>
+        <session-heartbeat-interval-seconds>5</session-heartbeat-interval-seconds>
+        <missing-cp-member-auto-removal-seconds>14400</missing-cp-member-auto-removal-seconds>
+        <fail-on-indeterminate-operation-state>false</fail-on-indeterminate-operation-state>
+        <raft-algorithm>
+            <leader-election-timeout-in-millis>2000</leader-election-timeout-in-millis>
+            <leader-heartbeat-period-in-millis>5000</leader-heartbeat-period-in-millis>
+            <max-missed-leader-heartbeat-count>5</max-missed-leader-heartbeat-count>
+            <append-request-max-entry-count>100</append-request-max-entry-count>
+            <commit-index-advance-count-to-snapshot>10000</commit-index-advance-count-to-snapshot>
+            <uncommitted-entry-count-to-reject-new-appends>100</uncommitted-entry-count-to-reject-new-appends>
+            <append-request-backoff-timeout-in-millis>100</append-request-backoff-timeout-in-millis>
+        </raft-algorithm>
+    </cp-subsystem>
+    <metrics enabled="true">
+        <management-center enabled="true">
+            <retention-seconds>5</retention-seconds>
+        </management-center>
+        <jmx enabled="true"/>
+        <collection-frequency-seconds>5</collection-frequency-seconds>
+    </metrics>
+    <sql>
+        <executor-pool-size>-1</executor-pool-size>
+        <statement-timeout-millis>0</statement-timeout-millis>
+    </sql>
+</hazelcast>
assets/okapi/config.json (new file):

{
"storage" : "postgres",
"postgres_host" : "okapi-db",
"postgres_port" : "5432",
"postgres_database" : "okapi",
"postgres_username" : "okapi",
"postgres_password" : "changeMe"
}
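
These keys reach Okapi through the -conf flag the entrypoint now passes; a minimal sketch of the equivalent manual invocation (the jar name is illustrative):

  java -jar okapi-core-fat.jar dev -conf /etc/okapi/config.json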
docker-compose.yml

@@ -4,17 +4,16 @@ services:
     build: .
     image: okapi:test
     environment:
-      POSTGRES_DB: okapi
-      POSTGRES_USER: okapi
-      POSTGRES_PASSWORD: changeMe
-      POSTGRES_HOST: okapi-db
       OKAPI_URL: http://okapi:9130
       OKAPI_HOST: okapi
       OKAPI_LOGLEVEL: DEBUG
+      ENABLE_METRICS: 'true'
     depends_on:
       - okapi-db
     ports:
       - "127.0.0.1:9130:9130"
+      - "127.0.0.1:9930:9930"
+    command: cluster
   okapi-db:
     image: postgres:11-alpine
@@ -36,4 +35,4 @@ networks:
     name: folio
 volumes:
-  pg-data: {}
\ No newline at end of file
+  pg-data: {}
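
With the compose changes applied, the new metrics wiring can be verified from the host; a minimal sketch (the /metrics path is the conventional Prometheus endpoint and an assumption here):

  docker compose up -d
  # Okapi API:
  curl -s http://127.0.0.1:9130/_/version
  # Prometheus metrics on the newly exposed 9930 port (path assumed):
  curl -s http://127.0.0.1:9930/metrics | head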