@coldxiangyu
2018-08-23T11:29:05.000000Z
字数 13268
阅读 1547
Spring
boot
参考:https://blog.csdn.net/educast/article/details/78315656
spring.hazelcast.config=classpath:config/hazelcast.xml
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Copyright (c) 2008-2016, Hazelcast, Inc. All Rights Reserved.
~
~ Licensed under the Apache License, Version 2.0 (the "License");
~ you may not use this file except in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!--
The default Hazelcast configuration. This is used when no hazelcast.xml is present.
Please see the schema for how to configure Hazelcast at https://hazelcast.com/schema/config/hazelcast-config-3.7.xsd
or the documentation at https://hazelcast.org/documentation/
-->
<hazelcast xsi:schemaLocation="http://www.hazelcast.com/schema/config hazelcast-config-3.7.xsd"
xmlns="http://www.hazelcast.com/schema/config"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<group>
<name>Coldxiangyu</name>
<password>passwd</password>
</group>
<network>
<port auto-increment="true" port-count="100">5701</port>
<outbound-ports>
<!--
Allowed port range when connecting to other nodes.
0 or * means use system provided port.
-->
<ports>0</ports>
</outbound-ports>
<join>
<tcp-ip enabled="false">
<interface>127.0.0.1</interface>
<member-list>
<member>127.0.0.1</member>
</member-list>
</tcp-ip>
</join>
<ssl enabled="false"/>
<socket-interceptor enabled="false"/>
<symmetric-encryption enabled="false">
<!--
encryption algorithm such as
DES/ECB/PKCS5Padding,
PBEWithMD5AndDES,
AES/CBC/PKCS5Padding,
Blowfish,
DESede
-->
<algorithm>PBEWithMD5AndDES</algorithm>
<!-- salt value to use when generating the secret key -->
<salt>thesalt</salt>
<!-- pass phrase to use when generating the secret key -->
<password>thepass</password>
<!-- iteration count to use when generating the secret key -->
<iteration-count>19</iteration-count>
</symmetric-encryption>
</network>
<partition-group enabled="false"/>
<executor-service name="default">
<pool-size>16</pool-size>
<!--Queue capacity. 0 means Integer.MAX_VALUE.-->
<queue-capacity>0</queue-capacity>
</executor-service>
<queue name="default">
<!--
Maximum size of the queue. When a JVM's local queue size reaches the maximum,
all put/offer operations will get blocked until the queue size
of the JVM goes down below the maximum.
Any integer between 0 and Integer.MAX_VALUE. 0 means
Integer.MAX_VALUE. Default is 0.
-->
<max-size>0</max-size>
<!--
Number of backups. If 1 is set as the backup-count for example,
then all entries of the map will be copied to another JVM for
fail-safety. 0 means no backup.
-->
<backup-count>1</backup-count>
<!--
Number of async backups. 0 means no backup.
-->
<async-backup-count>0</async-backup-count>
<empty-queue-ttl>-1</empty-queue-ttl>
</queue>
<map name="default">
<!--
Data type that will be used for storing recordMap.
Possible values:
BINARY (default): keys and values will be stored as binary data
OBJECT : values will be stored in their object forms
NATIVE : values will be stored in non-heap region of JVM
-->
<in-memory-format>BINARY</in-memory-format>
<!--
Number of backups. If 1 is set as the backup-count for example,
then all entries of the map will be copied to another JVM for
fail-safety. 0 means no backup.
-->
<backup-count>1</backup-count>
<!--
Number of async backups. 0 means no backup.
-->
<async-backup-count>0</async-backup-count>
<!--
Maximum number of seconds for each entry to stay in the map. Entries that are
older than <time-to-live-seconds> and not updated for <time-to-live-seconds>
will get automatically evicted from the map.
Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0.
-->
<time-to-live-seconds>0</time-to-live-seconds>
<!--
Maximum number of seconds for each entry to stay idle in the map. Entries that are
idle(not touched) for more than <max-idle-seconds> will get
automatically evicted from the map. Entry is touched if get, put or containsKey is called.
Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0.
-->
<max-idle-seconds>0</max-idle-seconds>
<!--
Valid values are:
NONE (no eviction),
LRU (Least Recently Used),
LFU (Least Frequently Used).
NONE is the default.
-->
<eviction-policy>NONE</eviction-policy>
<!--
Maximum size of the map. When max size is reached,
map is evicted based on the policy defined.
Any integer between 0 and Integer.MAX_VALUE. 0 means
Integer.MAX_VALUE. Default is 0.
-->
<max-size policy="PER_NODE">0</max-size>
<!--
`eviction-percentage` property is deprecated and will be ignored when it is set.
As of version 3.7, eviction mechanism changed.
It uses a probabilistic algorithm based on sampling. Please see documentation for further details
-->
<eviction-percentage>25</eviction-percentage>
<!--
`min-eviction-check-millis` property is deprecated and will be ignored when it is set.
As of version 3.7, eviction mechanism changed.
It uses a probabilistic algorithm based on sampling. Please see documentation for further details
-->
<min-eviction-check-millis>100</min-eviction-check-millis>
<!--
While recovering from split-brain (network partitioning),
map entries in the small cluster will merge into the bigger cluster
based on the policy set here. When an entry merge into the
cluster, there might an existing entry with the same key already.
Values of these entries might be different for that same key.
Which value should be set for the key? Conflict is resolved by
the policy set here. Default policy is PutIfAbsentMapMergePolicy
There are built-in merge policies such as
com.hazelcast.map.merge.PassThroughMergePolicy; entry will be overwritten if merging entry exists for the key.
com.hazelcast.map.merge.PutIfAbsentMapMergePolicy ; entry will be added if the merging entry doesn't exist in the cluster.
com.hazelcast.map.merge.HigherHitsMapMergePolicy ; entry with the higher hits wins.
com.hazelcast.map.merge.LatestUpdateMapMergePolicy ; entry with the latest update wins.
-->
<merge-policy>com.hazelcast.map.merge.PutIfAbsentMapMergePolicy</merge-policy>
<!--
Control caching of de-serialized values. Caching makes query evaluation faster, but it cost memory.
Possible Values:
NEVER: Never cache deserialized object
INDEX-ONLY: Caches values only when they are inserted into an index.
ALWAYS: Always cache deserialized values.
-->
<cache-deserialized-values>INDEX-ONLY</cache-deserialized-values>
</map>
<map name="hazlcast_online_cache">
<in-memory-format>BINARY</in-memory-format>
<backup-count>1</backup-count>
<async-backup-count>0</async-backup-count>
<!-- Maximum time-to-live: 30 minutes -->
<time-to-live-seconds>1800</time-to-live-seconds>
<!-- Maximum idle time: 10 minutes -->
<max-idle-seconds>600</max-idle-seconds>
<eviction-policy>LRU</eviction-policy>
<max-size policy="PER_NODE">100000</max-size>
<eviction-percentage>25</eviction-percentage>
<min-eviction-check-millis>100</min-eviction-check-millis>
<merge-policy>com.hazelcast.map.merge.PutIfAbsentMapMergePolicy</merge-policy>
<cache-deserialized-values>INDEX-ONLY</cache-deserialized-values>
</map>
<map name="hazlcast_callout_cache">
<in-memory-format>BINARY</in-memory-format>
<backup-count>1</backup-count>
<async-backup-count>0</async-backup-count>
<!-- Maximum time-to-live: 30 minutes -->
<time-to-live-seconds>1800</time-to-live-seconds>
<!-- Maximum idle time: 10 minutes -->
<max-idle-seconds>600</max-idle-seconds>
<eviction-policy>LRU</eviction-policy>
<max-size policy="PER_NODE">100000</max-size>
<eviction-percentage>25</eviction-percentage>
<min-eviction-check-millis>100</min-eviction-check-millis>
<merge-policy>com.hazelcast.map.merge.PutIfAbsentMapMergePolicy</merge-policy>
<cache-deserialized-values>INDEX-ONLY</cache-deserialized-values>
</map>
<map name="api_user_cache">
<in-memory-format>BINARY</in-memory-format>
<backup-count>1</backup-count>
<async-backup-count>0</async-backup-count>
<!-- Maximum time-to-live: 7 days -->
<time-to-live-seconds>604800</time-to-live-seconds>
<!-- Maximum idle time: 8 hours -->
<max-idle-seconds>28800</max-idle-seconds>
<eviction-policy>NONE</eviction-policy>
<max-size policy="PER_NODE">0</max-size>
<eviction-percentage>25</eviction-percentage>
<min-eviction-check-millis>100</min-eviction-check-millis>
<merge-policy>com.hazelcast.map.merge.PutIfAbsentMapMergePolicy</merge-policy>
<cache-deserialized-values>INDEX-ONLY</cache-deserialized-values>
</map>
<map name="callcenter_current_call">
<in-memory-format>BINARY</in-memory-format>
<backup-count>1</backup-count>
<async-backup-count>0</async-backup-count>
<!-- Maximum time-to-live: 4 hours (14400 s; original comment said 7 days, which did not match the value) -->
<time-to-live-seconds>14400</time-to-live-seconds>
<!-- Maximum idle time: 2 hours (7200 s; original comment said 8 hours, which did not match the value) -->
<max-idle-seconds>7200</max-idle-seconds>
<eviction-policy>NONE</eviction-policy>
<max-size policy="PER_NODE">0</max-size>
<eviction-percentage>25</eviction-percentage>
<min-eviction-check-millis>100</min-eviction-check-millis>
<merge-policy>com.hazelcast.map.merge.PutIfAbsentMapMergePolicy</merge-policy>
<cache-deserialized-values>INDEX-ONLY</cache-deserialized-values>
</map>
<multimap name="default">
<backup-count>1</backup-count>
<value-collection-type>SET</value-collection-type>
</multimap>
<list name="default">
<backup-count>1</backup-count>
</list>
<set name="default">
<backup-count>1</backup-count>
</set>
<jobtracker name="default">
<max-thread-size>10000</max-thread-size>
<!-- Queue size 0 means number of partitions * 2 -->
<queue-size>10000</queue-size>
<retry-count>0</retry-count>
<chunk-size>1000</chunk-size>
<communicate-stats>true</communicate-stats>
<topology-changed-strategy>CANCEL_RUNNING_OPERATION</topology-changed-strategy>
</jobtracker>
<semaphore name="default">
<initial-permits>0</initial-permits>
<backup-count>1</backup-count>
<async-backup-count>0</async-backup-count>
</semaphore>
<reliable-topic name="default">
<read-batch-size>10</read-batch-size>
<topic-overload-policy>BLOCK</topic-overload-policy>
<statistics-enabled>true</statistics-enabled>
</reliable-topic>
<ringbuffer name="default">
<capacity>10000</capacity>
<backup-count>1</backup-count>
<async-backup-count>0</async-backup-count>
<time-to-live-seconds>30</time-to-live-seconds>
<in-memory-format>BINARY</in-memory-format>
</ringbuffer>
<serialization>
<portable-version>0</portable-version>
</serialization>
<services enable-defaults="true"/>
<lite-member enabled="false"/>
</hazelcast>
/**
 * Generic cache contract used throughout the application.
 * The orgi parameter appears on every method; the SystemCache implementation
 * visible in this file ignores it — presumably a tenant/organization id; confirm with other implementations.
 */
public interface CacheBean {
/**
 * Stores a value in the cache under the given key.
 * @param key cache key
 * @param value value to store
 * @param orgi tenant/organization identifier (unused by SystemCache)
 */
public void put(String key , Object value , String orgi) ;
/**
 * Removes every entry from the cache.
 * @param orgi tenant/organization identifier (unused by SystemCache)
 */
public void clear(String orgi);
/**
 * Removes the entry for the given key.
 * @return the removed value, or null if the key was absent
 */
public Object delete(String key , String orgi) ;
/**
 * Replaces the value stored under the given key.
 */
public void update(String key , String orgi , Object object) ;
/**
 * Looks up a cached value.
 * @param key cache key
 * @param orgi tenant/organization identifier
 * @return the cached value, or null if absent
 */
public Object getCacheObject(String key, String orgi) ;
/**
 * Looks up a cached value with a fallback.
 * @param key cache key
 * @param orgi tenant/organization identifier
 * @return the cached value, or defaultValue when absent
 */
public Object getCacheObject(String key, String orgi,Object defaultValue) ;
/**
 * Returns all cached objects.
 * NOTE(review): the SystemCache implementation in this file returns the KEY set,
 * not the values — confirm which is intended before relying on this.
 * @param orgi tenant/organization identifier
 * @return collection of cached objects (or keys; see note)
 */
public Collection<?> getAllCacheObject(String orgi) ;
/**
 * Selects the named cache and returns a CacheBean bound to it.
 */
public CacheBean getCacheInstance(String cacheName);
/**
 * Exposes the underlying cache object (implementation-specific type).
 */
public Object getCache();
/**
 * Returns cache statistics as JSON (implementation-specific content).
 */
public JsonObject getStatics();
/**
 * Returns a distributed lock registered under the given name.
 */
public Lock getLock(String lock, String orgi);
/**
 * Returns the number of entries in the cache.
 */
public long getSize();
/**
 * Returns the value of the named distributed counter.
 * NOTE(review): SystemCache increments the counter on every call — confirm intent.
 */
public long getAtomicLong(String cacheName) ;
/**
 * Sets the named distributed counter to the given start value.
 */
public void setAtomicLong(String cacheName , long start) ;
}
/**
 * Factory contract that supplies the cache bean for a given scope.
 */
public interface CacheInstance {
/**
 * Returns the system-wide cache bean.
 * @return the CacheBean used for system-level caching
 */
public CacheBean getSystemCacheBean();
}
/**
 * Static facade that exposes the application's cache backend.
 * Holds an eagerly-created singleton of itself and a hard-wired
 * Hazelcast-based {@link CacheInstance}.
 */
public class CacheHelper {

    /** Eagerly-initialized singleton of this helper. */
    private static CacheHelper singleton = new CacheHelper();

    /** Cache backend; currently fixed to the Hazelcast implementation. */
    private static CacheInstance backend = new HazlcastCacheHelper();

    /**
     * Returns the shared CacheHelper instance.
     */
    public static CacheHelper getInstance() {
        return singleton;
    }

    /**
     * Returns the system-level cache bean.
     * @return the system CacheBean, or null when no backend is configured
     */
    public static CacheBean getSystemCacheBean() {
        if (backend == null) {
            return null;
        }
        return backend.getSystemCacheBean();
    }
}
/**
 * Hazelcast-backed {@link CacheInstance}: resolves the system cache bean
 * from the application context and binds it to the cluster cache name.
 * (Class name "Hazlcast" spelling kept as-is; other code references it.)
 * @author coldxiangyu
 */
public class HazlcastCacheHelper implements CacheInstance{
    /**
     * Names of the cache services this helper can address.
     * (Constant spelling "CULUSTER" kept as-is; its lower-cased form is the lookup key.)
     * @author admin
     */
    public enum CacheServiceEnum{
        HAZLCAST_CULUSTER_SYSTEM;

        /** Lower-cased constant name, used as the cache-instance key. */
        public String toString(){
            String constantName = super.toString();
            return constantName.toLowerCase();
        }
    }

    @Override
    public CacheBean getSystemCacheBean() {
        SystemCache systemCache = UKDataContext.getContext().getBean(SystemCache.class);
        String cacheKey = CacheServiceEnum.HAZLCAST_CULUSTER_SYSTEM.toString();
        return systemCache.getCacheInstance(cacheKey);
    }
}
/**
 * Hazelcast-backed {@link CacheBean} registered as the "system_cache" Spring bean.
 * Every operation delegates to the Hazelcast distributed map selected via
 * {@link #getCacheInstance(String)}. The orgi parameter is accepted for
 * interface compatibility but not used by this implementation.
 */
@Service("system_cache")
public class SystemCache implements CacheBean{

    @Autowired
    public HazelcastInstance hazelcastInstance;

    // Name of the Hazelcast map this bean currently targets; set by getCacheInstance().
    // NOTE(review): mutable state on a Spring singleton — concurrent callers selecting
    // different cache names can interfere with each other; confirm callers use one name.
    private String cacheName ;

    /** Returns the underlying Hazelcast instance. */
    public HazelcastInstance getInstance(){
        return hazelcastInstance ;
    }

    /**
     * Selects the named Hazelcast map and returns this bean for chaining.
     */
    public CacheBean getCacheInstance(String cacheName){
        this.cacheName = cacheName ;
        return this ;
    }

    /** Stores a value in the selected map (orgi ignored). */
    @Override
    public void put(String key, Object value, String orgi) {
        getInstance().getMap(getName()).put(key, value) ;
    }

    /** Removes every entry from the selected map (orgi ignored). */
    @Override
    public void clear(String orgi) {
        getInstance().getMap(getName()).clear();
    }

    /** Removes the entry for the key; returns the removed value or null. */
    @Override
    public Object delete(String key, String orgi) {
        return getInstance().getMap(getName()).remove(key) ;
    }

    /** Replaces the value for the key (same map put as {@link #put}). */
    @Override
    public void update(String key, String orgi, Object value) {
        getInstance().getMap(getName()).put(key, value);
    }

    /** Returns the cached value for the key, or null if absent. */
    @Override
    public Object getCacheObject(String key, String orgi) {
        return getInstance().getMap(getName()).get(key);
    }

    /** Name of the currently selected Hazelcast map. */
    public String getName() {
        return cacheName ;
    }

    // @Override
    public void service() throws Exception {
        // Intentionally a no-op; retained for compatibility with older callers.
    }

    /**
     * Returns the KEY set of the selected map.
     * NOTE(review): despite the name, this returns keys rather than values;
     * behavior kept unchanged because existing callers may rely on it — confirm intent.
     */
    @Override
    public Collection<?> getAllCacheObject(String orgi) {
        return getInstance().getMap(getName()).keySet();
    }

    /**
     * Returns the cached value for the key, falling back to defaultValue when absent.
     * FIX: the defaultValue parameter was previously ignored entirely.
     */
    @Override
    public Object getCacheObject(String key, String orgi, Object defaultValue) {
        Object value = getCacheObject(key, orgi);
        return value != null ? value : defaultValue;
    }

    /** Exposes the raw Hazelcast map for the selected cache name. */
    @Override
    public Object getCache() {
        return getInstance().getMap(cacheName);
    }

    /** Returns the cluster-wide lock registered under the given name (orgi ignored). */
    @Override
    public Lock getLock(String lock , String orgi) {
        return getInstance().getLock(lock);
    }

    /** Number of entries in the selected map. */
    @Override
    public long getSize() {
        return getInstance().getMap(getName()).size();
    }

    /**
     * Increments and returns the distributed counter named by {@link #getName()}.
     * NOTE(review): mutates the counter despite the "get" name, and ignores the
     * cacheName parameter in favor of getName() — confirm intent with callers.
     */
    @Override
    public long getAtomicLong(String cacheName) {
        return getInstance().getAtomicLong(getName()).incrementAndGet();
    }

    /** Sets the distributed counter to the given start value (parameter cacheName ignored). */
    @Override
    public void setAtomicLong(String cacheName, long start) {
        getInstance().getAtomicLong(getName()).set(start);
    }

    /** Local-member statistics of the selected map, rendered as JSON. */
    @Override
    public JsonObject getStatics() {
        return getInstance().getMap(getName()).getLocalMapStats().toJson();
    }
}
/**
 * Initializes the system area cache: removes the stale "system" entry,
 * then repopulates it with all area-type records loaded from the repository.
 * (The orgi argument "test" is hard-coded here — presumably a placeholder; verify.)
 */
public static void initSystemArea(){
CacheHelper.getSystemCacheBean().delete("system", "test") ;
AreaTypeRepository areaTypeRes = UKDataContext.getContext().getBean(AreaTypeRepository.class) ;
CacheHelper.getSystemCacheBean().put("system", areaTypeRes.findAll(), "test");
}