// Don't cache non-GET responses. We're technically allowed to cache
// HEAD requests and some POST requests, but the complexity of doing
// so is high and the benefit is low.
Note: OkHttp's cache cannot store POST responses; if POST caching is needed, it must be implemented with a separate mechanism.
public class MemoryCache {

  // Backing LRU store. Generic parameters restored: get(key) is assigned to
  // Entry below, so the original declaration was LruCache<String, Entry>.
  private final LruCache<String, Entry> cache;
  // Every key ever inserted, so the background sweep can find expired entries.
  // Guarded by "this" (get/put/remove are synchronized).
  private final List<String> keys = new ArrayList<>();

  public MemoryCache(int maxSize) {
    this.cache = new LruCache<>(maxSize);
  }

  /**
   * Asynchronously sweeps expired entries on a background scheduler.
   *
   * <p>Fix: the original iterated {@code keys} by index while {@code remove(key)}
   * shifted the list underneath it (skipping the element after each removal),
   * and it touched {@code keys}/{@code cache} from the worker thread without
   * holding the lock that {@code get}/{@code put} use. We now snapshot the key
   * list under the lock and re-check each entry under the lock before removing.
   */
  private void lookupExpired() {
    Completable.fromAction(
        () -> {
          final List<String> snapshot;
          synchronized (this) {
            snapshot = new ArrayList<>(keys);
          }
          for (String key : snapshot) {
            synchronized (this) {
              Entry value = cache.get(key);
              if (value != null && value.isExpired()) {
                remove(key);
              }
            }
          }
        })
        .subscribeOn(Schedulers.single())
        .subscribe();
  }

  /**
   * Returns the live entry for {@code key}, or null if absent or expired.
   * An expired entry is evicted eagerly; a sweep is scheduled either way.
   */
  @CheckForNull
  public synchronized Entry get(String key) {
    Entry value = cache.get(key);
    if (value != null && value.isExpired()) {
      remove(key);
      lookupExpired();
      return null;
    }
    lookupExpired();
    return value;
  }

  /** Stores {@code value}, records its key for sweeping, and returns the previous entry. */
  public synchronized Entry put(String key, Entry value) {
    if (!keys.contains(key)) {
      keys.add(key);
    }
    Entry oldValue = cache.put(key, value);
    lookupExpired();
    return oldValue;
  }

  // Fix: was unsynchronized while get/put are synchronized, so the keys list
  // could be mutated concurrently with the sweep. Now holds the same lock.
  public synchronized Entry remove(String key) {
    keys.remove(key);
    return cache.remove(key);
  }

  /** Returns a copy of the current cache contents (delegates to LruCache). */
  public Map<String, Entry> snapshot() {
    return cache.snapshot();
  }

  public void trimToSize(int maxSize) {
    cache.trimToSize(maxSize);
  }

  public int createCount() {
    return cache.createCount();
  }

  public void evictAll() {
    cache.evictAll();
  }

  public int evictionCount() {
    return cache.evictionCount();
  }

  public int hitCount() {
    return cache.hitCount();
  }

  public int maxSize() {
    return cache.maxSize();
  }

  public int missCount() {
    return cache.missCount();
  }

  public int putCount() {
    return cache.putCount();
  }

  public int size() {
    return cache.size();
  }

  /**
   * Immutable cache record: a data object plus its absolute expiry time.
   * Fix: the excerpt declared final fields with no constructor (uncompilable),
   * yet callers invoke {@code Entry.create(data, ttl)} and {@code isExpired()} —
   * both restored here.
   */
  @Immutable
  public static final class Entry {

    /** The cached data object. */
    @SerializedName("data")
    public final Object data;

    /** Absolute expiry deadline in epoch millis (not a relative duration). */
    @SerializedName("ttl")
    public final long ttl;

    private Entry(Object data, long ttl) {
      this.data = data;
      this.ttl = ttl;
    }

    /** Factory used by Repository to wrap fresh data with its expiry deadline. */
    public static Entry create(Object data, long ttl) {
      return new Entry(data, ttl);
    }

    /** True once the expiry deadline has passed. */
    public boolean isExpired() {
      return ttl < System.currentTimeMillis();
    }
  }
}
public final class DiskCache implements Closeable, Flushable {
/**
* Unlike {@link okhttp3.Cache} ENTRY_COUNT = 2
* We don't save the CacheHeader and Respond in two separate files
* Instead, we wrap them in {@link Entry}
*/
// One value stream per DiskLruCache entry: header and body are wrapped
// together in a single Entry JSON blob (okhttp3.Cache uses 2 streams).
private static final int ENTRY_COUNT = 1;
// On-disk format version (matches okhttp3.Cache's VERSION magic number).
private static final int VERSION = 201105;
// Index of the single value stream within each DiskLruCache entry.
private static final int ENTRY_METADATA = 0;
private final DiskLruCache cache;
// Creates (lazily — DiskLruCache.create does no I/O) a disk cache rooted at directory.
public DiskCache(File directory, long maxSize) {
cache = DiskLruCache.create(FileSystem.SYSTEM, directory, VERSION, ENTRY_COUNT, maxSize);
}
/**
 * Reads and deserializes the entry stored under {@code key}.
 *
 * @return the deserialized {@link Entry}, or null when the key is absent or
 *     any I/O error occurs (lookup errors are deliberately swallowed —
 *     a cache miss is always a safe answer).
 */
public Entry get(String key) {
  DiskLruCache.Snapshot snapshot;
  try {
    snapshot = cache.get(key);
    if (snapshot == null) {
      return null;
    }
  } catch (IOException e) {
    return null;
  }
  // Fix: the original leaked the buffered source when readUtf8() threw —
  // try-with-resources closes it on every path; the snapshot is released in
  // finally. Also uses ENTRY_METADATA instead of a magic 0 for consistency.
  try (BufferedSource source = Okio.buffer(snapshot.getSource(ENTRY_METADATA))) {
    String json = source.readUtf8();
    return DataLayerUtil.fromJson(json, null, Entry.class);
  } catch (IOException e) {
    return null;
  } finally {
    Util.closeQuietly(snapshot);
  }
}
/**
 * Serializes {@code entry} (via its JSON-ish {@code toString()}) under
 * {@code key}. Failures abort the edit quietly — a write error only costs us
 * a cache entry.
 */
public void put(String key, Entry entry) {
  DiskLruCache.Editor editor = null;
  try {
    editor = cache.edit(key);
    if (editor == null) {
      // Another edit is in flight for this key; skip.
      return;
    }
    // Fix: the original leaked the sink when writeUtf8() threw before close();
    // try-with-resources closes it before the commit on every path.
    try (BufferedSink sink = Okio.buffer(editor.newSink(ENTRY_METADATA))) {
      sink.writeUtf8(entry.toString()); // Entry.toString() is the JSON string
    }
    editor.commit();
  } catch (IOException e) {
    abortQuietly(editor);
  }
}
/** Removes the entry stored under {@code key}, if any. */
public void remove(String key) throws IOException {
cache.remove(key);
}
// Aborts a failed edit, swallowing the secondary IOException — there is
// nothing useful to do if the abort itself fails.
private void abortQuietly(DiskLruCache.Editor editor) {
try {
if (editor != null) {
editor.abort();
}
} catch (IOException ignored) {
}
}
/** Forces the underlying journal to be read; normally done lazily. */
public void initialize() throws IOException {
cache.initialize();
}
/** Deletes the entire cache directory. */
public void delete() throws IOException {
cache.delete();
}
/** Removes all entries but keeps the cache usable. */
public void evictAll() throws IOException {
cache.evictAll();
}
/** Current number of bytes used (may trigger journal initialization). */
public long size() throws IOException {
return cache.size();
}
public long maxSize() {
return cache.getMaxSize();
}
public File directory() {
return cache.getDirectory();
}
public boolean isClosed() {
return cache.isClosed();
}
@Override
public void flush() throws IOException {
cache.flush();
}
@Override
public void close() throws IOException {
cache.close();
}
/**
* Data and metadata for an entry returned by the cache.
* It's extracted from android Volley library.
* See {@code https://github.com/google/volley}
*/
@Immutable
public static final class Entry {
// NOTE(review): the fields below are final but no constructor is visible in
// this excerpt, while Repository calls DiskCache.Entry.create(byte[], long, long)
// — the factory/constructor presumably exists in the unexcerpted source; confirm.
/**
 * The data returned from cache.
 * Use {@link com.thepacific.data.common.DataLayerUtil#toJsonByteArray(Object, Gson)}
 * to serialize a data object
 */
@SerializedName("data")
public final byte[] data;
/**
 * Absolute expiry deadline for this record, in epoch millis (named TTL but
 * compared directly against System.currentTimeMillis() below).
 */
@SerializedName("ttl")
public final long ttl;
/**
 * Absolute soft-expiry deadline for this record, in epoch millis; past this
 * point the data is still served but a refresh is recommended.
 */
@SerializedName("softTtl")
public final long softTtl;
/**
 * @return To a json String
 */
// NOTE(review): this emits {data=[1, 2, ...], ttl=5, softTtl=3} — unquoted
// names and '=' separators, which is NOT strict JSON. DiskCache.get() parses
// it back with Gson; that round-trip only works if the reader is lenient.
// Verify, or switch to real Gson serialization in a coordinated change.
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("{")
.append("data=")
.append(Arrays.toString(data))
.append(", ttl=")
.append(ttl)
.append(", softTtl=")
.append(softTtl)
.append("}");
return builder.toString();
}
/**
 * True if the entry is expired (the hard deadline has passed).
 */
public boolean isExpired() {
return this.ttl < System.currentTimeMillis();
}
/**
 * True if a refresh is needed from the original data source (the soft
 * deadline has passed, even though the data is still servable).
 */
public boolean refreshNeeded() {
return this.softTtl < System.currentTimeMillis();
}
}
/**
* A repository can get cached data {@link Repository#get(Object)}, or force
* a call to network(skipping cache) {@link Repository#fetch(Object, boolean)}
*/
// NOTE(review): the generic parameters of this class were evidently stripped by
// HTML extraction — the bodies use T (the query type) and R (the data type), so
// the declaration was presumably Repository<T, R>, and every "Observable>" below
// was presumably Observable<Source<R>> (the bodies return Source.success/
// irrelevant/failure). dispatchNetwork()'s element type (something exposing
// isSuccess()/data()/message()/code()) is not recoverable from this excerpt.
// Restore against the original sources before compiling.
public abstract class Repository {
protected final Gson gson;
protected final DiskCache diskCache;
protected final MemoryCache memoryCache;
// Invoked on the main thread when the server rejects our access (see fetch()).
protected final OnAccessFailure onAccessFailure;
// Cache key of the most recent query; mutated by fetch/load/stream(query),
// read by stream()/memory()/clear*Cache(). Not thread-safe.
protected String key;
public Repository(Gson gson,
DiskCache diskCache,
MemoryCache memoryCache,
OnAccessFailure onAccessFailure) {
this.gson = gson;
this.diskCache = diskCache;
this.memoryCache = memoryCache;
this.onAccessFailure = onAccessFailure;
}
/**
 * Return an Observable of {@link Source } for request query
 * Data will be returned from oldest non expired source
 * Sources are memory cache, disk cache, finally network:
 * stream(query) -> load(query) -> fetch(query, true), stopping at the first
 * SUCCESS emission.
 */
@Nonnull
public final Observable> get(@Nonnull final T query) {
ExecutorUtil.requireWorkThread();
return stream(query)
.flatMap(it -> {
if (it.status == Status.SUCCESS) {
return Observable.just(it);
}
return load(query);
})
.flatMap(it -> {
if (it.status == Status.SUCCESS) {
return Observable.just(it);
}
return fetch(query, true);
});
}
/***
 * @param query query parameters
 * @param persist true for persisting data to disk
 * @return an Observable of R for requested query skipping Memory & Disk Cache
 */
@Nonnull
public final Observable> fetch(@Nonnull final T query, boolean persist) {
ExecutorUtil.requireWorkThread();
Preconditions.checkNotNull(query);
key = getKey(query);
return dispatchNetwork().flatMap(it -> {
if (it.isSuccess()) {
R newData = it.data();
if (isIrrelevant(newData)) {
return Observable.just(Source.irrelevant());
}
// ttl()/softTtl() are relative minutes; elapsedTimeMillis presumably
// converts them to absolute epoch-millis deadlines — the checkState
// below requires both to be in the future, with ttl >= softTtl.
long ttl = DataLayerUtil.elapsedTimeMillis(ttl());
long softTtl = DataLayerUtil.elapsedTimeMillis(softTtl());
long now = System.currentTimeMillis();
Preconditions.checkState(ttl > now && softTtl > now && ttl >= softTtl);
if (persist) {
byte[] bytes = DataLayerUtil.toJsonByteArray(newData, gson);
diskCache.put(key, DiskCache.Entry.create(bytes, ttl, softTtl));
} else {
// Not persisting: drop any stale disk copy so load() can't resurrect it.
clearDiskCache();
}
memoryCache.put(key, MemoryCache.Entry.create(newData, ttl));
return Observable.just(Source.success(newData));
}
IoError ioError = new IoError(it.message(), it.code());
if (isAccessFailure(it.code())) {
// Access revoked: wipe both caches, notify on the main thread, and
// complete without emitting (callers see neither success nor failure).
diskCache.evictAll();
memoryCache.evictAll();
ExecutorUtil.postToMainThread(() -> onAccessFailure.run(ioError));
return Observable.empty();
}
memoryCache.remove(key);
clearDiskCache();
return Observable.just(Source.failure(ioError));
});
}
/***
 * @param query query parameters
 * @return an Observable of R for requested from Disk Cache
 */
@Nonnull
public final Observable> load(@Nonnull final T query) {
ExecutorUtil.requireWorkThread();
Preconditions.checkNotNull(query);
key = getKey(query);
return Observable.defer(() -> {
DiskCache.Entry diskEntry = diskCache.get(key);
if (diskEntry == null) {
return Observable.just(Source.irrelevant());
}
R newData = gson.fromJson(DataLayerUtil.byteArray2String(diskEntry.data), dataType());
if (diskEntry.isExpired() || isIrrelevant(newData)) {
memoryCache.remove(key);
clearDiskCache();
return Observable.just(Source.irrelevant());
}
// Disk hit: promote the entry back into the memory cache with its
// remaining (absolute) ttl.
memoryCache.put(key, MemoryCache.Entry.create(newData, diskEntry.ttl));
return Observable.just(Source.success(newData));
});
}
/***
 * @param query query parameters
 * @return an Observable of R for requested from Memory Cache with refreshing query
 * It differs with {@link Repository#stream()}
 */
@Nonnull
public final Observable> stream(@Nonnull final T query) {
Preconditions.checkNotNull(query);
key = getKey(query);
return stream();
}
/***
 * @return an Observable of R for requested from Memory Cache without refreshing query
 * It differs with {@link Repository#stream(Object)}
 */
@Nonnull
public final Observable> stream() {
return Observable.defer(() -> {
MemoryCache.Entry memoryEntry = memoryCache.get(key);
// No need to check isExpired(): MemoryCache.get(key) already returns
// null for expired entries.
if (memoryEntry == null) {
return Observable.just(Source.irrelevant());
}
// Unchecked cast: MemoryCache stores Object; the key discipline is what
// guarantees the stored value is actually an R.
R newData = (R) memoryEntry.data;
if (isIrrelevant(newData)) {
return Observable.just(Source.irrelevant());
}
return Observable.just(Source.success(newData));
});
}
/***
 * @return an R from Memory Cache
 * @throws IllegalStateException when the entry is absent/expired or irrelevant
 */
@Nonnull
public final R memory() {
MemoryCache.Entry memoryEntry = memoryCache.get(key);
if (memoryEntry == null) {
throw new IllegalStateException("Not supported");
}
R newData = (R) memoryEntry.data;
if (isIrrelevant((R) memoryEntry.data)) {
throw new IllegalStateException("Not supported");
}
return newData;
}
/** Drops the memory-cache entry for the current key. */
public final void clearMemoryCache() {
memoryCache.remove(key);
}
/** Drops the disk-cache entry for the current key, ignoring I/O errors (best effort). */
public final void clearDiskCache() {
ExecutorUtil.requireWorkThread();
try {
diskCache.remove(key);
} catch (IOException ignored) {
}
}
/**
 * @return default network cache time is 10. It must be {@code TimeUnit.MINUTES}
 */
protected int ttl() {
return 10;
}
/**
 * @return default refresh cache time is 5. It must be {@code TimeUnit.MINUTES}
 */
protected final int softTtl() {
return 5;
}
/**
 * @param code HTTP/HTTPS error code
 * @return some server doesn't support standard authorize rules
 */
protected boolean isAccessFailure(final int code) {
return code == 403 || code == 405;
}
/**
 * @return to make sure never returning empty or null data
 */
protected abstract boolean isIrrelevant(R data);
/**
 * @return request HTTP/HTTPS API
 */
protected abstract Observable> dispatchNetwork();
/**
 * @return cache key
 */
protected abstract String getKey(T query);
/**
 * @return gson deserialize Class type for R {@code Type typeOfT = R.class} for List {@code
 * Type typeOfT = new TypeToken>() { }.getType()}
 */
protected abstract Type dataType();
}
Usage example:
// Demonstrates consuming Repository.get(): prepend an IN_PROGRESS state, map
// any stream error to a failure Source, then render each status as it arrives.
@Test
public void testGet() {
userRepo.get(userQuery)
.onErrorReturn(e -> Source.failure(e))
.startWith(Source.inProgress())
.subscribe(it -> {
switch (it.status) {
case IN_PROGRESS:
System.out.println("Show Loading Dialog===============");
break;
case IRRELEVANT:
System.out.println("Empty Data===============");
break;
case ERROR:
System.out.println("Error Occur===============");
break;
case SUCCESS:
System.out.println("Update UI===============");
break;
default:
throw new UnsupportedOperationException();
}
});
// After get() completes, the result is promoted into the memory cache;
// memory() returns it synchronously (here presumably a 2-element list).
assertEquals(2, userRepo.memory().size());
}
访问权限是Java中一个比较重要的知识点，它规定着哪些方法可以被访问，哪些不可以被访问。
一:包访问权限;
自定义包:
package com.wj.control;
//包
public class Demo {
//定义一个无参的方法
public void DemoPackage(){
System.out.println("调用
用户自定义聚合函数,用户提供的多个入参通过聚合计算(求和、求最大值、求最小值)得到一个聚合计算结果的函数。
问题:UDF也可以提供输入多个参数然后输出一个结果的运算,比如加法运算add(3,5),add这个UDF需要实现UDF的evaluate方法,那么UDF和UDAF的实质分别究竟是什么?
Double evaluate(Double a, Double b)
在利用tomcat-redis-session-manager做session同步时,遇到了在session保存一个自定义对象时,修改该对象中的某个属性,session未进行序列化,属性没有被存储到redis中。 在 tomcat-redis-session-manager的github上有如下说明: Session Change Tracking
As noted in the project README (the quoted passage is truncated in this excerpt):
关于Table Driven Approach的一篇非常好的文章:
http://www.codeproject.com/Articles/42732/Table-driven-Approach
package com.ljn.base;
import java.util.Random;
public class TableDriven {
public