精华内容
下载资源
问答
  • Redis API 必杀解读:引入RedisTemplate

    万次阅读 2017-08-18 16:20:34
    题记:在工作和学习中,比如 JAVA 开发,要使用 Redis,首先要引入一个 RedisTemplate 类。以下源码由 IntelliJ IDEA(Fernflower 反编译器)从 .class 文件重建,包为 org.springframework.data.redis.core。

    题记

    在工作和学习中啊,比如说JAVA开发,要使用Redis,首先要引入一个RedisTemplate类.
    在此,将所有的方法都已经注释出来了.

    //
    // Source code recreated from a .class file by IntelliJ IDEA
    // (powered by Fernflower decompiler)
    //
    
    package org.springframework.data.redis.core;
    
    import java.io.Closeable;
    import java.lang.reflect.Proxy;
    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.Collections;
    import java.util.Date;
    import java.util.Iterator;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;
    import java.util.concurrent.TimeUnit;
    import org.springframework.beans.factory.BeanClassLoaderAware;
    import org.springframework.dao.DataAccessException;
    import org.springframework.dao.InvalidDataAccessApiUsageException;
    import org.springframework.data.redis.connection.DataType;
    import org.springframework.data.redis.connection.RedisConnection;
    import org.springframework.data.redis.connection.RedisConnectionFactory;
    import org.springframework.data.redis.connection.SortParameters;
    import org.springframework.data.redis.connection.RedisZSetCommands.Tuple;
    import org.springframework.data.redis.core.ZSetOperations.TypedTuple;
    import org.springframework.data.redis.core.query.QueryUtils;
    import org.springframework.data.redis.core.query.SortQuery;
    import org.springframework.data.redis.core.script.DefaultScriptExecutor;
    import org.springframework.data.redis.core.script.RedisScript;
    import org.springframework.data.redis.core.script.ScriptExecutor;
    import org.springframework.data.redis.core.types.RedisClientInfo;
    import org.springframework.data.redis.serializer.JdkSerializationRedisSerializer;
    import org.springframework.data.redis.serializer.RedisSerializer;
    import org.springframework.data.redis.serializer.SerializationUtils;
    import org.springframework.data.redis.serializer.StringRedisSerializer;
    import org.springframework.transaction.support.TransactionSynchronizationManager;
    import org.springframework.util.Assert;
    import org.springframework.util.ClassUtils;
    import org.springframework.util.CollectionUtils;
    
    public class RedisTemplate<K, V> extends RedisAccessor implements RedisOperations<K, V>, BeanClassLoaderAware {
        // True once transaction support is requested; connections are then thread-bound
        // so MULTI/EXEC can span multiple template calls.
        private boolean enableTransactionSupport = false;
        // Whether execute(..) hands callbacks the native connection instead of a close-suppressing proxy.
        private boolean exposeConnection = false;
        // Set by afterPropertiesSet(); execute(..) refuses to run until then.
        private boolean initialized = false;
        // When true, any serializer slot left unset falls back to defaultSerializer during init.
        private boolean enableDefaultSerializer = true;
        // Fallback serializer (JDK serialization unless configured otherwise).
        private RedisSerializer<?> defaultSerializer;
        // Presumably injected via BeanClassLoaderAware#setBeanClassLoader (setter not visible
        // in this chunk); used when building the JDK serializer.
        private ClassLoader classLoader;
        // Per-slot serializers; null means "use the default" (resolved in afterPropertiesSet()).
        private RedisSerializer keySerializer = null;
        private RedisSerializer valueSerializer = null;
        private RedisSerializer hashKeySerializer = null;
        private RedisSerializer hashValueSerializer = null;
        // Always string-based; used for channel names and sort queries.
        private RedisSerializer<String> stringSerializer = new StringRedisSerializer();
        private ScriptExecutor<K> scriptExecutor;
        // Lazily-created operation facades, one per Redis data type.
        private ValueOperations<K, V> valueOps;
        private ListOperations<K, V> listOps;
        private SetOperations<K, V> setOps;
        private ZSetOperations<K, V> zSetOps;
        private GeoOperations<K, V> geoOps;
        private HyperLogLogOperations<K, V> hllOps;
    
        // Default constructor; the template must still be configured (connection factory,
        // serializers) and afterPropertiesSet() invoked before use.
        public RedisTemplate() {
        }
    
        // Initialization hook run after all bean properties have been set.
        // Fills in any serializers that were not configured explicitly, creates the
        // default script executor, then marks the template as usable.
        public void afterPropertiesSet() {
            super.afterPropertiesSet();
            boolean defaultUsed = false;
            // Fall back to JDK serialization when no default serializer was configured.
            if (this.defaultSerializer == null) {
                this.defaultSerializer = new JdkSerializationRedisSerializer(this.classLoader != null ? this.classLoader : this.getClass().getClassLoader());
            }
            // Unless default serialization is disabled, apply the default serializer
            // to every slot (key / value / hash key / hash value) left unset.
            if (this.enableDefaultSerializer) {
                if (this.keySerializer == null) {
                    this.keySerializer = this.defaultSerializer;
                    defaultUsed = true;
                }

                if (this.valueSerializer == null) {
                    this.valueSerializer = this.defaultSerializer;
                    defaultUsed = true;
                }

                if (this.hashKeySerializer == null) {
                    this.hashKeySerializer = this.defaultSerializer;
                    defaultUsed = true;
                }

                if (this.hashValueSerializer == null) {
                    this.hashValueSerializer = this.defaultSerializer;
                    defaultUsed = true;
                }
            }

            // If a default was actually used it must not be null.
            if (this.enableDefaultSerializer && defaultUsed) {
                Assert.notNull(this.defaultSerializer, "default serializer null and not all serializers initialized");
            }

            // Lazily create the Lua script executor if none was injected.
            if (this.scriptExecutor == null) {
                this.scriptExecutor = new DefaultScriptExecutor(this);
            }
            // From here on execute(..) accepts calls.
            this.initialized = true;
        }
    
        // Executes the action using the template's configured connection-exposure setting.
        public <T> T execute(RedisCallback<T> action) {
            return this.execute(action, this.isExposeConnection());
        }

        // Executes the action without pipelining.
        public <T> T execute(RedisCallback<T> action, boolean exposeConnection) {
            return this.execute(action, exposeConnection, false);
        }
    
        /**
         * Core execution method: obtains a connection, runs the callback, and releases
         * the connection afterwards, optionally exposing the native connection and
         * optionally wrapping the call in a single pipeline.
         *
         * Fix: the decompiled form stored the result in an untyped {@code Object var11}
         * and returned it where {@code T} is required, which does not type-check;
         * the typed result is now returned directly from the try block.
         */
        public <T> T execute(RedisCallback<T> action, boolean exposeConnection, boolean pipeline) {
            Assert.isTrue(this.initialized, "template not initialized; call afterPropertiesSet() before using it");
            Assert.notNull(action, "Callback object must not be null");
            RedisConnectionFactory factory = this.getConnectionFactory();
            RedisConnection conn = null;

            try {
                // With transaction support the connection is thread-bound so that
                // MULTI/EXEC can span multiple template calls.
                if (this.enableTransactionSupport) {
                    conn = RedisConnectionUtils.bindConnection(factory, this.enableTransactionSupport);
                } else {
                    conn = RedisConnectionUtils.getConnection(factory);
                }

                boolean existingConnection = TransactionSynchronizationManager.hasResource(factory);
                RedisConnection connToUse = this.preProcessConnection(conn, existingConnection);
                // Open a pipeline only if requested and none is already active.
                boolean pipelineStatus = connToUse.isPipelined();
                if (pipeline && !pipelineStatus) {
                    connToUse.openPipeline();
                }

                // Hide close() behind a proxy unless the caller explicitly wants the raw connection.
                RedisConnection connToExpose = exposeConnection ? connToUse : this.createRedisConnectionProxy(connToUse);
                T result = action.doInRedis(connToExpose);
                // Only close the pipeline this call opened.
                if (pipeline && !pipelineStatus) {
                    connToUse.closePipeline();
                }

                return this.postProcessResult(result, connToUse, existingConnection);
            } finally {
                // Always release (or decrement the binding of) the connection.
                RedisConnectionUtils.releaseConnection(conn, factory);
            }
        }
    
        /**
         * Executes a session callback on a single thread-bound connection so all
         * operations issued inside the session share the same connection.
         *
         * Fix: the decompiled form returned an untyped {@code Object var3} where
         * {@code T} is required, which does not type-check; the typed result is
         * now returned directly.
         */
        public <T> T execute(SessionCallback<T> session) {
            Assert.isTrue(this.initialized, "template not initialized; call afterPropertiesSet() before using it");
            Assert.notNull(session, "Callback object must not be null");
            RedisConnectionFactory factory = this.getConnectionFactory();
            // Bind the connection to the thread for the duration of the session.
            RedisConnectionUtils.bindConnection(factory, this.enableTransactionSupport);

            try {
                return session.execute(this);
            } finally {
                RedisConnectionUtils.unbindConnection(factory);
            }
        }
    
        // Runs the session in a pipeline, deserializing results with the value serializer.
        public List<Object> executePipelined(SessionCallback<?> session) {
            return this.executePipelined(session, this.valueSerializer);
        }
    
        /**
         * Runs the session callback inside a pipeline on a thread-bound connection and
         * returns the deserialized pipeline results. The session itself must return
         * null: any value it produced would be lost to the pipeline.
         *
         * Fix: removed the decompiler's raw {@code List var4} local and cast;
         * results are returned directly with proper generics.
         */
        public List<Object> executePipelined(final SessionCallback<?> session, final RedisSerializer<?> resultSerializer) {
            Assert.isTrue(this.initialized, "template not initialized; call afterPropertiesSet() before using it");
            Assert.notNull(session, "Callback object must not be null");
            RedisConnectionFactory factory = this.getConnectionFactory();
            RedisConnectionUtils.bindConnection(factory, this.enableTransactionSupport);

            try {
                return this.execute(new RedisCallback<List<Object>>() {
                    public List<Object> doInRedis(RedisConnection connection) throws DataAccessException {
                        connection.openPipeline();
                        boolean pipelinedClosed = false;

                        try {
                            Object result = RedisTemplate.this.executeSession(session);
                            if (result != null) {
                                throw new InvalidDataAccessApiUsageException("Callback cannot return a non-null value as it gets overwritten by the pipeline");
                            }

                            List<Object> closePipeline = connection.closePipeline();
                            pipelinedClosed = true;
                            return RedisTemplate.this.deserializeMixedResults(closePipeline, resultSerializer, RedisTemplate.this.hashKeySerializer, RedisTemplate.this.hashValueSerializer);
                        } finally {
                            // Ensure the pipeline is closed even when the session throws.
                            if (!pipelinedClosed) {
                                connection.closePipeline();
                            }
                        }
                    }
                });
            } finally {
                RedisConnectionUtils.unbindConnection(factory);
            }
        }
    
        // Runs the callback in a pipeline, deserializing results with the value serializer.
        public List<Object> executePipelined(RedisCallback<?> action) {
            return this.executePipelined(action, this.valueSerializer);
        }
    
        /**
         * Runs the callback inside a pipeline and returns the deserialized pipeline
         * results. The callback must return null: its return value would be
         * overwritten by the pipeline.
         *
         * Fix: removed the decompiler's raw {@code List var5} local; results are
         * returned directly with proper generics.
         */
        public List<Object> executePipelined(final RedisCallback<?> action, final RedisSerializer<?> resultSerializer) {
            return this.execute(new RedisCallback<List<Object>>() {
                public List<Object> doInRedis(RedisConnection connection) throws DataAccessException {
                    connection.openPipeline();
                    boolean pipelinedClosed = false;

                    try {
                        Object result = action.doInRedis(connection);
                        if (result != null) {
                            throw new InvalidDataAccessApiUsageException("Callback cannot return a non-null value as it gets overwritten by the pipeline");
                        }

                        List<Object> closePipeline = connection.closePipeline();
                        pipelinedClosed = true;
                        return RedisTemplate.this.deserializeMixedResults(closePipeline, resultSerializer, RedisTemplate.this.hashKeySerializer, RedisTemplate.this.hashValueSerializer);
                    } finally {
                        // Ensure the pipeline is closed even when the callback throws.
                        if (!pipelinedClosed) {
                            connection.closePipeline();
                        }
                    }
                }
            });
        }
    
        // Runs a Lua script through the configured script executor.
        public <T> T execute(RedisScript<T> script, List<K> keys, Object... args) {
            return this.scriptExecutor.execute(script, keys, args);
        }

        // Runs a Lua script with explicit serializers for the arguments and the result.
        public <T> T execute(RedisScript<T> script, RedisSerializer<?> argsSerializer, RedisSerializer<T> resultSerializer, List<K> keys, Object... args) {
            return this.scriptExecutor.execute(script, argsSerializer, resultSerializer, keys, args);
        }
    
        /**
         * Executes a callback that must keep its connection open past this method
         * call (e.g. cursor-style results); closing the returned resource is the
         * caller's responsibility, which also releases the connection.
         *
         * Fix: the decompiled form returned {@code (Closeable) callback.doInRedis(..)}
         * where {@code T} is required, which does not type-check; the typed result
         * is now returned directly.
         */
        public <T extends Closeable> T executeWithStickyConnection(RedisCallback<T> callback) {
            Assert.isTrue(this.initialized, "template not initialized; call afterPropertiesSet() before using it");
            Assert.notNull(callback, "Callback object must not be null");
            RedisConnectionFactory factory = this.getConnectionFactory();
            // Dedicated, non transaction-bound connection; deliberately not released here.
            RedisConnection connection = this.preProcessConnection(RedisConnectionUtils.doGetConnection(factory, true, false, false), false);
            return callback.doInRedis(connection);
        }
    
        // Runs a session callback against this template (connection binding is handled by callers).
        private Object executeSession(SessionCallback<?> session) {
            return session.execute(this);
        }

        // Wraps the connection in a JDK dynamic proxy that suppresses close() calls,
        // so callback code cannot prematurely release a template-managed connection.
        protected RedisConnection createRedisConnectionProxy(RedisConnection pm) {
            Class<?>[] ifcs = ClassUtils.getAllInterfacesForClass(pm.getClass(), this.getClass().getClassLoader());
            return (RedisConnection)Proxy.newProxyInstance(pm.getClass().getClassLoader(), ifcs, new CloseSuppressingInvocationHandler(pm));
        }
    
        // Hook for subclasses to decorate or replace the connection before use; pass-through by default.
        protected RedisConnection preProcessConnection(RedisConnection connection, boolean existingConnection) {
            return connection;
        }

        // Hook for subclasses to post-process a callback result; pass-through by default.
        protected <T> T postProcessResult(T result, RedisConnection conn, boolean existingConnection) {
            return result;
        }
    
        // Whether callbacks receive the native connection (true) or a close-suppressing proxy (false).
        public boolean isExposeConnection() {
            return this.exposeConnection;
        }

        public void setExposeConnection(boolean exposeConnection) {
            this.exposeConnection = exposeConnection;
        }

        // Whether unset serializer slots default to defaultSerializer during initialization.
        public boolean isEnableDefaultSerializer() {
            return this.enableDefaultSerializer;
        }

        public void setEnableDefaultSerializer(boolean enableDefaultSerializer) {
            this.enableDefaultSerializer = enableDefaultSerializer;
        }
    
        // Fallback serializer applied to any unset slot during initialization.
        public RedisSerializer<?> getDefaultSerializer() {
            return this.defaultSerializer;
        }

        public void setDefaultSerializer(RedisSerializer<?> serializer) {
            this.defaultSerializer = serializer;
        }

        // Accessors for the key / value / hash-key / hash-value / string serializers.
        public void setKeySerializer(RedisSerializer<?> serializer) {
            this.keySerializer = serializer;
        }

        public RedisSerializer<?> getKeySerializer() {
            return this.keySerializer;
        }

        public void setValueSerializer(RedisSerializer<?> serializer) {
            this.valueSerializer = serializer;
        }

        public RedisSerializer<?> getValueSerializer() {
            return this.valueSerializer;
        }

        public RedisSerializer<?> getHashKeySerializer() {
            return this.hashKeySerializer;
        }

        public void setHashKeySerializer(RedisSerializer<?> hashKeySerializer) {
            this.hashKeySerializer = hashKeySerializer;
        }

        public RedisSerializer<?> getHashValueSerializer() {
            return this.hashValueSerializer;
        }

        public void setHashValueSerializer(RedisSerializer<?> hashValueSerializer) {
            this.hashValueSerializer = hashValueSerializer;
        }

        public RedisSerializer<String> getStringSerializer() {
            return this.stringSerializer;
        }

        public void setStringSerializer(RedisSerializer<String> stringSerializer) {
            this.stringSerializer = stringSerializer;
        }

        // Replaces the executor used by the script execute(..) overloads.
        public void setScriptExecutor(ScriptExecutor<K> scriptExecutor) {
            this.scriptExecutor = scriptExecutor;
        }
    
        // Serializes a key to bytes; raw byte[] keys pass through untouched when no
        // key serializer is configured.
        private byte[] rawKey(Object key) {
            Assert.notNull(key, "non null key required");
            if (this.keySerializer == null && key instanceof byte[]) {
                return (byte[]) key;
            }
            return this.keySerializer.serialize(key);
        }
    
        // Serializes a channel name (or other plain string) via the string serializer.
        private byte[] rawString(String key) {
            return this.stringSerializer.serialize(key);
        }

        // Serializes a value to bytes; raw byte[] values pass through untouched when no
        // value serializer is configured.
        private byte[] rawValue(Object value) {
            if (this.valueSerializer == null && value instanceof byte[]) {
                return (byte[]) value;
            }
            return this.valueSerializer.serialize(value);
        }
    
        // Serializes every key in the collection into a byte[][] suitable for
        // multi-key commands such as DEL and WATCH.
        private byte[][] rawKeys(Collection<K> keys) {
            byte[][] serialized = new byte[keys.size()][];
            int index = 0;
            for (K key : keys) {
                serialized[index++] = this.rawKey(key);
            }
            return serialized;
        }
    
        // Deserializes a raw key with the key serializer; returns the raw bytes when
        // no key serializer is configured.
        // Fix: the decompiled form returned byte[] / Object where K is required,
        // which does not type-check; explicit casts restore compilable code.
        @SuppressWarnings("unchecked")
        private K deserializeKey(byte[] value) {
            return this.keySerializer != null ? (K) this.keySerializer.deserialize(value) : (K) value;
        }
    
        /**
         * Recursively deserializes a heterogeneous pipeline/transaction result list:
         * byte[] entries via the value serializer, nested lists recursively, sets via
         * deserializeSet, byte[]-valued maps via the hash serializers; anything else
         * is passed through unchanged. Returns null for a null input.
         *
         * Fix: removed the decompiler's pointless {@code while(true)} wrapper around
         * the iteration and replaced the raw Iterator with an enhanced for loop.
         */
        private List<Object> deserializeMixedResults(List<Object> rawValues, RedisSerializer valueSerializer, RedisSerializer hashKeySerializer, RedisSerializer hashValueSerializer) {
            if (rawValues == null) {
                return null;
            }

            List<Object> values = new ArrayList<Object>();
            for (Object rawValue : rawValues) {
                if (rawValue instanceof byte[] && valueSerializer != null) {
                    values.add(valueSerializer.deserialize((byte[]) rawValue));
                } else if (rawValue instanceof List) {
                    // Nested multi-bulk reply: recurse.
                    values.add(this.deserializeMixedResults((List) rawValue, valueSerializer, hashKeySerializer, hashValueSerializer));
                } else if (rawValue instanceof Set && !((Set) rawValue).isEmpty()) {
                    values.add(this.deserializeSet((Set) rawValue, valueSerializer));
                } else if (rawValue instanceof Map && !((Map) rawValue).isEmpty() && ((Map) rawValue).values().iterator().next() instanceof byte[]) {
                    // Hash reply: deserialize keys and values with the hash serializers.
                    values.add(SerializationUtils.deserialize((Map) rawValue, hashKeySerializer, hashValueSerializer));
                } else {
                    values.add(rawValue);
                }
            }

            return values;
        }
    
        // Deserializes a raw set reply: byte[] members via the value serializer,
        // ZSET tuples via convertTupleValues; anything else is returned unchanged.
        private Set<?> deserializeSet(Set rawSet, RedisSerializer valueSerializer) {
            if (rawSet.isEmpty()) {
                return rawSet;
            }
            // Inspect the first element to decide how to handle the whole set.
            Object first = rawSet.iterator().next();
            if (first instanceof byte[] && valueSerializer != null) {
                return SerializationUtils.deserialize(rawSet, valueSerializer);
            }
            if (first instanceof Tuple) {
                return this.convertTupleValues(rawSet, valueSerializer);
            }
            return rawSet;
        }
    
        /**
         * Converts raw ZSET tuples into typed tuples, deserializing each member value
         * with the given serializer (raw bytes are kept when the serializer is null).
         *
         * Fix: the decompiled loop hid {@code set.add(..)} inside the for-update
         * clause and used a raw Iterator; rewritten as a plain enhanced for loop.
         */
        private Set<TypedTuple<V>> convertTupleValues(Set<Tuple> rawValues, RedisSerializer valueSerializer) {
            Set<TypedTuple<V>> set = new LinkedHashSet<TypedTuple<V>>(rawValues.size());
            for (Tuple rawValue : rawValues) {
                Object value = rawValue.getValue();
                if (valueSerializer != null) {
                    value = valueSerializer.deserialize(rawValue.getValue());
                }
                set.add(new DefaultTypedTuple(value, rawValue.getScore()));
            }
            return set;
        }
    
        // Executes EXEC; results are deserialized only when the connection factory is
        // configured to convert pipeline/transaction results.
        public List<Object> exec() {
            List<Object> results = this.execRaw();
            return this.getConnectionFactory().getConvertPipelineAndTxResults() ? this.deserializeMixedResults(results, this.valueSerializer, this.hashKeySerializer, this.hashValueSerializer) : results;
        }

        // Executes EXEC, deserializing values, hash keys and hash values with the one given serializer.
        public List<Object> exec(RedisSerializer<?> valueSerializer) {
            return this.deserializeMixedResults(this.execRaw(), valueSerializer, valueSerializer, valueSerializer);
        }
    
        // Issues EXEC on the current connection and returns the raw, unconverted results.
        protected List<Object> execRaw() {
            return (List)this.execute(new RedisCallback<List<Object>>() {
                public List<Object> doInRedis(RedisConnection connection) throws DataAccessException {
                    return connection.exec();
                }
            });
        }
    
        // Deletes the entry for the given key (DEL).
        public void delete(K key) {
            final byte[] rawKey = this.rawKey(key);
            this.execute(new RedisCallback<Object>() {
                public Object doInRedis(RedisConnection connection) {
                    connection.del(new byte[][]{rawKey});
                    return null;
                }
            }, true);
        }

        // Deletes all entries for the given keys in one DEL call; no-op for an empty collection.
        public void delete(Collection<K> keys) {
            if (!CollectionUtils.isEmpty(keys)) {
                final byte[][] rawKeys = this.rawKeys(keys);
                this.execute(new RedisCallback<Object>() {
                    public Object doInRedis(RedisConnection connection) {
                        connection.del(rawKeys);
                        return null;
                    }
                }, true);
            }
        }

        // Returns whether the given key exists (EXISTS).
        public Boolean hasKey(K key) {
            final byte[] rawKey = this.rawKey(key);
            return (Boolean)this.execute(new RedisCallback<Boolean>() {
                public Boolean doInRedis(RedisConnection connection) {
                    return connection.exists(rawKey);
                }
            }, true);
        }
    
        // Sets a time-to-live on the key. Tries millisecond precision (PEXPIRE) first
        // and falls back to second precision (EXPIRE) when the call fails.
        public Boolean expire(K key, final long timeout, final TimeUnit unit) {
            final byte[] rawKey = this.rawKey(key);
            final long rawTimeout = TimeoutUtils.toMillis(timeout, unit);
            return (Boolean)this.execute(new RedisCallback<Boolean>() {
                public Boolean doInRedis(RedisConnection connection) {
                    try {
                        return connection.pExpire(rawKey, rawTimeout);
                    } catch (Exception var3) {
                        // NOTE(review): presumably thrown by servers/drivers lacking PEXPIRE
                        // support - falls back to second precision. Confirm against driver docs.
                        return connection.expire(rawKey, TimeoutUtils.toSeconds(timeout, unit));
                    }
                }
            }, true);
        }

        // Expires the key at the given absolute date, preferring millisecond
        // precision (PEXPIREAT) and falling back to EXPIREAT in seconds.
        public Boolean expireAt(K key, final Date date) {
            final byte[] rawKey = this.rawKey(key);
            return (Boolean)this.execute(new RedisCallback<Boolean>() {
                public Boolean doInRedis(RedisConnection connection) {
                    try {
                        return connection.pExpireAt(rawKey, date.getTime());
                    } catch (Exception var3) {
                        return connection.expireAt(rawKey, date.getTime() / 1000L);
                    }
                }
            }, true);
        }
    
        // Publishes the serialized message to the given channel (PUBLISH).
        public void convertAndSend(String channel, Object message) {
            Assert.hasText(channel, "a non-empty channel is required");
            final byte[] rawChannel = this.rawString(channel);
            final byte[] rawMessage = this.rawValue(message);
            this.execute(new RedisCallback<Object>() {
                public Object doInRedis(RedisConnection connection) {
                    connection.publish(rawChannel, rawMessage);
                    return null;
                }
            }, true);
        }

        // Returns the remaining time-to-live of the key in seconds (TTL).
        public Long getExpire(K key) {
            final byte[] rawKey = this.rawKey(key);
            return (Long)this.execute(new RedisCallback<Long>() {
                public Long doInRedis(RedisConnection connection) {
                    return connection.ttl(rawKey);
                }
            }, true);
        }
    
        // Returns the remaining time-to-live in the requested unit, preferring
        // millisecond precision (PTTL) and falling back to TTL on failure.
        public Long getExpire(K key, final TimeUnit timeUnit) {
            final byte[] rawKey = this.rawKey(key);
            return (Long)this.execute(new RedisCallback<Long>() {
                public Long doInRedis(RedisConnection connection) {
                    try {
                        return connection.pTtl(rawKey, timeUnit);
                    } catch (Exception var3) {
                        return connection.ttl(rawKey, timeUnit);
                    }
                }
            }, true);
        }
    
        // Returns all keys matching the given pattern (KEYS), deserialized with the
        // key serializer; raw bytes are returned when no key serializer is configured.
        // Fix: the decompiled form returned Set<byte[]> where Set<K> is required,
        // which does not type-check; an explicit cast restores compilable code.
        @SuppressWarnings("unchecked")
        public Set<K> keys(K pattern) {
            final byte[] rawKey = this.rawKey(pattern);
            Set<byte[]> rawKeys = (Set)this.execute(new RedisCallback<Set<byte[]>>() {
                public Set<byte[]> doInRedis(RedisConnection connection) {
                    return connection.keys(rawKey);
                }
            }, true);
            return this.keySerializer != null ? SerializationUtils.deserialize(rawKeys, this.keySerializer) : (Set<K>) rawKeys;
        }
    
        // Removes the key's expiration, making it persistent (PERSIST).
        public Boolean persist(K key) {
            final byte[] rawKey = this.rawKey(key);
            return (Boolean)this.execute(new RedisCallback<Boolean>() {
                public Boolean doInRedis(RedisConnection connection) {
                    return connection.persist(rawKey);
                }
            }, true);
        }

        // Moves the key to the database with the given index (MOVE).
        public Boolean move(K key, final int dbIndex) {
            final byte[] rawKey = this.rawKey(key);
            return (Boolean)this.execute(new RedisCallback<Boolean>() {
                public Boolean doInRedis(RedisConnection connection) {
                    return connection.move(rawKey, dbIndex);
                }
            }, true);
        }

        // Returns a random key from the keyspace (RANDOMKEY), deserialized with the key serializer.
        public K randomKey() {
            byte[] rawKey = (byte[])this.execute(new RedisCallback<byte[]>() {
                public byte[] doInRedis(RedisConnection connection) {
                    return connection.randomKey();
                }
            }, true);
            return this.deserializeKey(rawKey);
        }
    
        // Renames oldKey to newKey (RENAME); overwrites any existing value at newKey.
        public void rename(K oldKey, K newKey) {
            final byte[] rawOldKey = this.rawKey(oldKey);
            final byte[] rawNewKey = this.rawKey(newKey);
            this.execute(new RedisCallback<Object>() {
                public Object doInRedis(RedisConnection connection) {
                    connection.rename(rawOldKey, rawNewKey);
                    return null;
                }
            }, true);
        }

        // Renames oldKey to newKey only if newKey does not yet exist (RENAMENX).
        public Boolean renameIfAbsent(K oldKey, K newKey) {
            final byte[] rawOldKey = this.rawKey(oldKey);
            final byte[] rawNewKey = this.rawKey(newKey);
            return (Boolean)this.execute(new RedisCallback<Boolean>() {
                public Boolean doInRedis(RedisConnection connection) {
                    return connection.renameNX(rawOldKey, rawNewKey);
                }
            }, true);
        }

        // Returns the Redis data type stored at the key (TYPE).
        public DataType type(K key) {
            final byte[] rawKey = this.rawKey(key);
            return (DataType)this.execute(new RedisCallback<DataType>() {
                public DataType doInRedis(RedisConnection connection) {
                    return connection.type(rawKey);
                }
            }, true);
        }
    
        // Returns a serialized dump of the value stored at the key (DUMP).
        public byte[] dump(K key) {
            final byte[] rawKey = this.rawKey(key);
            return (byte[])this.execute(new RedisCallback<byte[]>() {
                public byte[] doInRedis(RedisConnection connection) {
                    return connection.dump(rawKey);
                }
            }, true);
        }

        // Recreates a key from a DUMP payload (RESTORE) with the given time-to-live
        // (converted to milliseconds).
        public void restore(K key, final byte[] value, long timeToLive, TimeUnit unit) {
            final byte[] rawKey = this.rawKey(key);
            final long rawTimeout = TimeoutUtils.toMillis(timeToLive, unit);
            this.execute(new RedisCallback<Object>() {
                public Boolean doInRedis(RedisConnection connection) {
                    connection.restore(rawKey, rawTimeout, value);
                    return null;
                }
            }, true);
        }
    
        // Starts a transaction block (MULTI).
        public void multi() {
            this.execute(new RedisCallback<Object>() {
                public Object doInRedis(RedisConnection connection) throws DataAccessException {
                    connection.multi();
                    return null;
                }
            }, true);
        }

        // Discards the current transaction (DISCARD).
        public void discard() {
            this.execute(new RedisCallback<Object>() {
                public Object doInRedis(RedisConnection connection) throws DataAccessException {
                    connection.discard();
                    return null;
                }
            }, true);
        }

        // Watches the given key for modification during the next transaction (WATCH).
        public void watch(K key) {
            final byte[] rawKey = this.rawKey(key);
            this.execute(new RedisCallback<Object>() {
                public Object doInRedis(RedisConnection connection) {
                    connection.watch(new byte[][]{rawKey});
                    return null;
                }
            }, true);
        }

        // Watches all given keys for modification during the next transaction (WATCH).
        public void watch(Collection<K> keys) {
            final byte[][] rawKeys = this.rawKeys(keys);
            this.execute(new RedisCallback<Object>() {
                public Object doInRedis(RedisConnection connection) {
                    connection.watch(rawKeys);
                    return null;
                }
            }, true);
        }

        // Flushes all previously watched keys (UNWATCH).
        public void unwatch() {
            this.execute(new RedisCallback<Object>() {
                public Object doInRedis(RedisConnection connection) throws DataAccessException {
                    connection.unwatch();
                    return null;
                }
            }, true);
        }
    
        // Sorts the elements for the given query, deserializing with the value serializer.
        public List<V> sort(SortQuery<K> query) {
            return this.sort(query, this.valueSerializer);
        }

        // Sorts the elements for the given query (SORT), deserializing the results
        // with the supplied serializer.
        public <T> List<T> sort(SortQuery<K> query, RedisSerializer<T> resultSerializer) {
            final byte[] rawKey = this.rawKey(query.getKey());
            final SortParameters params = QueryUtils.convertQuery(query, this.stringSerializer);
            List<byte[]> vals = (List)this.execute(new RedisCallback<List<byte[]>>() {
                public List<byte[]> doInRedis(RedisConnection connection) throws DataAccessException {
                    return connection.sort(rawKey, params);
                }
            }, true);
            return SerializationUtils.deserialize(vals, resultSerializer);
        }
    
        // Sorts with GET patterns and maps each bulk of results through the mapper,
        // using the value serializer for deserialization.
        public <T> List<T> sort(SortQuery<K> query, BulkMapper<T, V> bulkMapper) {
            return this.sort(query, bulkMapper, this.valueSerializer);
        }
    
        /**
         * Sorts with GET patterns, grouping the flat result list into bulks of
         * getGetPattern().size() elements and mapping each complete bulk through
         * the mapper. Returns an empty list for a null/empty sort result.
         * NOTE(review): a trailing partial bulk is silently dropped, matching the
         * original behavior.
         *
         * Fix: the decompiled raw {@code Iterator} loop assigned {@code next()} to
         * {@code S} without a cast (non-compiling); replaced with an enhanced for loop.
         */
        public <T, S> List<T> sort(SortQuery<K> query, BulkMapper<T, S> bulkMapper, RedisSerializer<S> resultSerializer) {
            List<S> values = this.sort(query, resultSerializer);
            if (values == null || values.isEmpty()) {
                return Collections.emptyList();
            }

            int bulkSize = query.getGetPattern().size();
            List<T> result = new ArrayList(values.size() / bulkSize + 1);
            List<S> bulk = new ArrayList(bulkSize);
            for (S value : values) {
                bulk.add(value);
                if (bulk.size() == bulkSize) {
                    result.add(bulkMapper.mapBulk(Collections.unmodifiableList(bulk)));
                    // Start a fresh bulk; the previous one is held by the mapper result.
                    bulk = new ArrayList(bulkSize);
                }
            }

            return result;
        }
    
        /**
         * Executes the given sort query and stores the result at {@code storeKey}
         * (SORT ... STORE), returning the number of elements stored.
         */
        public Long sort(SortQuery<K> query, K storeKey) {
            final byte[] destinationKey = this.rawKey(storeKey);
            final byte[] sourceKey = this.rawKey(query.getKey());
            final SortParameters sortParams = QueryUtils.convertQuery(query, this.stringSerializer);
            return (Long) this.execute(new RedisCallback<Long>() {
                public Long doInRedis(RedisConnection connection) throws DataAccessException {
                    return connection.sort(sourceKey, sortParams, destinationKey);
                }
            }, true);
        }
    
        /**
         * Returns value operations bound to the given key, so the key does not
         * need to be passed on every call.
         */
        public BoundValueOperations<K, V> boundValueOps(K key) {
            return new DefaultBoundValueOperations(key, this);
        }
    
        /**
         * Returns the operations view for entries with simple values, lazily
         * creating and caching it on first access.
         */
        public ValueOperations<K, V> opsForValue() {
            if (valueOps == null) {
                valueOps = new DefaultValueOperations(this);
            }
            return valueOps;
        }
    
        /**
         * Returns the operations view for entries with list values, lazily
         * creating and caching it on first access.
         */
        public ListOperations<K, V> opsForList() {
            if (listOps == null) {
                listOps = new DefaultListOperations(this);
            }
            return listOps;
        }
    
        /**
         * Returns list operations bound to the given key.
         */
        public BoundListOperations<K, V> boundListOps(K key) {
            return new DefaultBoundListOperations(key, this);
        }
    
        /**
         * Returns set operations bound to the given key.
         */
        public BoundSetOperations<K, V> boundSetOps(K key) {
            return new DefaultBoundSetOperations(key, this);
        }
    
        /**
         * Returns the operations view for entries with set values, lazily
         * creating and caching it on first access.
         */
        public SetOperations<K, V> opsForSet() {
            if (setOps == null) {
                setOps = new DefaultSetOperations(this);
            }
            return setOps;
        }
    
        /**
         * Returns sorted-set (ZSet) operations bound to the given key.
         */
        public BoundZSetOperations<K, V> boundZSetOps(K key) {
            return new DefaultBoundZSetOperations(key, this);
        }
    
        /**
         * Returns the operations view for entries with sorted-set (ZSet) values,
         * lazily creating and caching it on first access.
         */
        public ZSetOperations<K, V> opsForZSet() {
            if (zSetOps == null) {
                zSetOps = new DefaultZSetOperations(this);
            }
            return zSetOps;
        }
    
        /**
         * Returns the operations view for Redis geospatial commands, lazily
         * creating and caching it on first access.
         */
        public GeoOperations<K, V> opsForGeo() {
            if (geoOps == null) {
                geoOps = new DefaultGeoOperations(this);
            }
            return geoOps;
        }
    
        /**
         * Returns geospatial operations bound to the given key.
         */
        public BoundGeoOperations<K, V> boundGeoOps(K key) {
            return new DefaultBoundGeoOperations(key, this);
        }
    
        /**
         * Returns the operations view for Redis HyperLogLog commands (e.g.
         * PFADD, PFCOUNT), lazily creating and caching it on first access.
         */
        public HyperLogLogOperations<K, V> opsForHyperLogLog() {
            if (hllOps == null) {
                hllOps = new DefaultHyperLogLogOperations(this);
            }
            return hllOps;
        }
    
        /**
         * Returns hash operations bound to the given key.
         */
        public <HK, HV> BoundHashOperations<K, HK, HV> boundHashOps(K key) {
            return new DefaultBoundHashOperations(key, this);
        }
    
        /**
         * Returns the operations view for entries with hash values. Unlike the
         * other {@code opsFor*} accessors, a new instance is returned per call.
         */
        public <HK, HV> HashOperations<K, HK, HV> opsForHash() {
            return new DefaultHashOperations(this);
        }
    
        /**
         * Returns the operations view for Redis Cluster commands. A new
         * instance is returned per call.
         */
        public ClusterOperations<K, V> opsForCluster() {
            return new DefaultClusterOperations(this);
        }
    
        /**
         * Closes the connection of the Redis client identified by the given
         * host/port pair (CLIENT KILL).
         *
         * @param host address of the client connection to close
         * @param port port of the client connection to close
         */
        public void killClient(final String host, final int port) {
            this.execute(new RedisCallback<Void>() {
                public Void doInRedis(RedisConnection connection) throws DataAccessException {
                    connection.killClient(host, port);
                    return null;
                }
            });
        }
    
        /**
         * Returns information about the clients currently connected to the
         * Redis server (CLIENT LIST).
         */
        public List<RedisClientInfo> getClientList() {
            return (List)this.execute(new RedisCallback<List<RedisClientInfo>>() {
                public List<RedisClientInfo> doInRedis(RedisConnection connection) throws DataAccessException {
                    return connection.getClientList();
                }
            });
        }
    
        /**
         * SLAVEOF: dynamically changes replication behavior at runtime, making
         * this server a replica of the master at the given host/port.
         *
         * @param host master host to replicate from
         * @param port master port to replicate from
         */
        public void slaveOf(final String host, final int port) {
            this.execute(new RedisCallback<Void>() {
                public Void doInRedis(RedisConnection connection) throws DataAccessException {
                    connection.slaveOf(host, port);
                    return null;
                }
            });
        }
    
        /**
         * SLAVEOF NO ONE: stops replication, turning this server into a master.
         */
        public void slaveOfNoOne() {
            this.execute(new RedisCallback<Void>() {
                public Void doInRedis(RedisConnection connection) throws DataAccessException {
                    connection.slaveOfNoOne();
                    return null;
                }
            });
        }
    
        /**
         * Enables or disables transaction support for operations executed
         * through this template.
         *
         * @param enableTransactionSupport whether to participate in ongoing transactions
         */
        public void setEnableTransactionSupport(boolean enableTransactionSupport) {
            this.enableTransactionSupport = enableTransactionSupport;
        }
    
        /**
         * Stores the bean ClassLoader supplied by the Spring container
         * (BeanClassLoaderAware callback).
         *
         * @param classLoader class loader to use for this template
         */
        public void setBeanClassLoader(ClassLoader classLoader) {
            this.classLoader = classLoader;
        }
    }
    

    以上基本上就是 RedisTemplate 的所有方法,直接调用即可。
    例如:

    /**保存成功后同步到缓存中**/
                    redisTemplate.opsForList().rightPush(CachePrefix.CERT_BUNDLE_LIST + "_" + iosBundle.getCertId(), iosBundle.getId());
    展开全文
  • springboot maven引入本地jar包

    千次阅读 2018-09-05 17:57:33
    问题描述 最近尝试引入阿里云的jar包,idea是可以直接跑调用是没问题的。但是打成jar包部署的时候,项目能跑,但是到关键的调用短信sdk的时候就爆ClassNotFoundException错误。看了很多网上的帖子,很多都说用...

    问题描述

    最近尝试引入阿里云的jar包,idea是可以直接跑调用是没问题的。但是打成jar包部署的时候,项目能跑,但是到关键的调用短信sdk的时候就爆ClassNotFoundException错误。看了很多网上的帖子,很多都说用plugin 中resource来弄,对我的项目一点鸟用也没有。最后还是在stackoverflow上找到了答案,所以做了以下总结。


    解决

    如何引入本地jar包

    1.在resources下面新建lib文件夹,并把jar包文件放到这个目录下 
    这里写图片描述 
    2.在pom文件定义几个依赖指向刚才引入的文件

    <dependency>
                <groupId>com.aliyun.alicom</groupId>
                <artifactId>alicom-mns-receive-sdk</artifactId>
                <version>0.0.1-SNAPSHOT</version>
                <scope>system</scope>
                <systemPath>${project.basedir}/src/main/resources/lib/alicom-mns-receive-sdk-1.0.0.jar</systemPath>
            </dependency>
            <dependency>
                <groupId>com.aliyun.mns</groupId>
                <artifactId>aliyun-sdk-mns</artifactId>
                <version>1.1.8</version>
                <scope>system</scope>
                <systemPath>${project.basedir}/src/main/resources/lib/aliyun-sdk-mns-1.1.8.jar</systemPath>
            </dependency>

    注意:重点是systemPath这个路径必须得是你jar的路径。其他的按照套路填就行,要求不是太严格。${project.basedir}只是一个系统自己的常量,不用管它


    如何把项目打成jar,同时把本地jar包也引入进去

    直接在maven的pom里给springboot的打包插件引入一下参数就行

    <includeSystemScope>true</includeSystemScope>

    总体是这样的

    <build>
            <plugins>
                <plugin>
                    <groupId>org.springframework.boot</groupId>
                    <artifactId>spring-boot-maven-plugin</artifactId>
                    <configuration>
                        <includeSystemScope>true</includeSystemScope>
                    </configuration>
                </plugin>
            </plugins>
        </build>
    展开全文
  • 模型对触发的自击穿特性进行了描述,在PSpice软件中用一个表格宏模型和电压控制模块来反映触发的触发特性和正向导通过程,并且把影响触发自击穿特性的非电物理因素诸如管内气压、主电极距离和管内气体电离参数...
  • 此仓库包含WSSTG中引入的VID语句数据集的主要基线。 有关VID语句数据集的信息,请参阅和。 任务 描述:“一只棕色和白色的狗躺在草地上,然后站起来。” 提出的WSSTG任务旨在在视频中定位一个时空(即绿色边框的...
  • 针对这个问题的得失权衡,就出现了两种实现方案,Linux显然选择了保守的方案而不是激进的方案,我们在下面的这段代码中可以找到关于这个保守方案的身影,代码来自Linux 3.17版本: /* People celebrate: "We love ...

    又到了周末,本周把国庆假期遗留的一个问题进行一个总结。我把形而上的讨论放在本文的最后,这里将快速进入正题,只说一句,浙江温州皮鞋湿!

    我们先来看一个标准TCP最简单的AIMD CC过程,这里以Reno为例,简单直接:
    在这里插入图片描述

    但是,在Linux3.18rc5之后,如果在关闭SACK(后面会讲为什么要关闭SACK)的前提下重新模拟上述的AIMD过程,将会是下面的样子:
    在这里插入图片描述

    事实上,不管你用的是不是Reno算法,即便是Cubic,BIC这种,也依然是上面的结果,即在3.18rc5内核以后,ssthresh的值总是保持着初始值。

    出现这种奇怪的现象,就必须要解释一下为什么了。

    好,我先描述一下事情的来龙去脉。


    国庆节前,有网友Email给我,咨询一个问题,说是在使用Reno算法时出现了比较奇怪的现象,即:

    • 3.17内核:在模拟超时之后,cwnd会慢启动增加到ssthresh,之后执行AI过程。
    • 3.18内核:在模拟超时之后,cwnd始终保持慢启动状态,没有进入AI过程。

    确实诡异,这让我想起了两个月前有个微信好友咨询的另一个问题,即在他使用2.6.32或者3.10内核的时候,一切都正常,而在使用4.9内核的时候,cwnd总是不经意间从1开始,他问我 3.10以后到4.9之间,Linux关于TCP慢启动的实现是不是有什么变化 …当时由于在忙工作和小小出国旅游的事情,就有点心不在焉,忽略了。

    把这两个问题一起来看的话,似乎有些关联,不过国庆期间回深圳探亲一直没顾得上回复先前那位给我发Email的网友,休假结束后准备把这个问题一探究竟。


    感谢这位网友告诉我变化是从3.18rc5开始的。

    撸了一遍3.18rc5的patch,和TCP相关的有如下:
    [net] tcp: zero retrans_stamp if all retrans were ackedhttps://patchwork.ozlabs.org/patch/406624/

    先看一下这个patch是干嘛的。

    patch描述上说的非常清楚,我简单引用一下:

    Ueki Kohei reported that when we are using NewReno with connections that
    have a very low traffic, we may timeout the connection too early if a
    second loss occurs after the first one was successfully acked but no
    data was transfered later. Below is his description of it:


    When SACK is disabled, and a socket suffers multiple separate TCP
    retransmissions, that socket’s ETIMEDOUT value is calculated from the
    time of the first retransmission instead of the latest
    retransmission.


    This happens because the tcp_sock’s retrans_stamp is set once then never
    cleared. Take the following connection:

                     Linux                    remote-machine
                       |                           |
       send#1---->(*1)|--------> data#1 --------->|
                 |     |                           |
                RTO    :                           :
                |     |                           |
                ---(*2)|----> data#1(retrans) ---->|
                | (*3)|<---------- ACK <----------|
                 |     |                           |
                 |     :                           :
                |     :                           :
                 |     :                           :
               16 minutes (or more)                :
                 |     :                           :
                 |     :                           :
                 |     :                           :
                 |     |                           |
        send#2---->(*4)|--------> data#2 --------->|
                 |     |                           |
                RTO    :                           :
                 |     |                           |
                ---(*5)|----> data#2(retrans) ---->|
                 |     |                           |
                 |     |                           |
               RTO*2   :                           :
                 |     |                           |
                 |     |                           |
     ETIMEDOUT<----(*6)|                           |
    

    (*1) One data packet sent.
    (*2) Because no ACK packet is received, the packet is retransmitted.
    (*3) The ACK packet is received. The transmitted packet is acknowledged.

    At this point the first “retransmission event” has passed and been
    recovered from. Any future retransmission is a completely new “event”.

    (*4) After 16 minutes (to correspond with retries2=15), a new data
    packet is sent. Note: No data is transmitted between (*3) and (*4).

    The socket’s timeout SHOULD be calculated from this point in time, but
    instead it’s calculated from the prior “event” 16 minutes ago.

    (*5) Because no ACK packet is received, the packet is retransmitted.
    (*6) At the time of the 2nd retransmission, the socket returns
    ETIMEDOUT.

    Therefore, now we clear retrans_stamp as soon as all data during the
    loss window is fully acked.

    那么这个patch是如何影响本文一开始描述的问题的呢?这还得从实现上看起。


    我一直说TCP的代码像屎一样,确实是,所以我一向不推荐上来就分析代码,而是先看RFC。

    本文描述的ssthresh被重置问题背后是 反过来的一个花开两朵各表一枝的故事,我们一个一个说,先说一下相关的RFC,然后再说说上述3.18rc5的这个patch,最后两个合起来,就导致了ssthresh被重置的问题。

    和这个问题有关的RFC是RFC6582:https://tools.ietf.org/html/rfc6582
    不过也可以看RFC2582这个原始一点的版本:https://tools.ietf.org/html/rfc2582

    不管是哪个,和本文的问题相关的就一点,即 对重复ACK的处理

    1. Three duplicate ACKs:
      When the third duplicate ACK is received, the TCP sender first
      checks the value of recover to see if the Cumulative
      Acknowledgment field covers more than recover. If so, the value
      of recover is incremented to the value of the highest sequence
      number transmitted by the TCP so far. The TCP then enters fast
      retransmit (step 2 of Section 3.2 of [RFC5681]). If not, the TCP
      does not enter fast retransmit and does not reset ssthresh.

    总的来讲,NewReno对Reno的改进主要就是为了避免重复连续进入降cwnd的状态,从而保持pipe的尽可能满载,而导致降cwnd的事件,就是CC状态机进入了超时或者快速重传这些状态。

    所以说,不管是超时,还是快速重传,其状态退出的条件是ACK必须完全覆盖进入该状态时发送的最大包,否则就保持该状态不变。

    我们假设一次丢包被发现时,发送的最大包为P,那么如果ACK刚刚好覆盖到P这个临界包时,要不要退出丢包恢复状态呢?RFC2582里有这么一段描述:

    There are two separate scenarios in which the TCP sender could
    receive three duplicate acknowledgements acknowledging “send_high”
    but no more than “send_high”. One scenario would be that the data
    sender transmitted four packets with sequence numbers higher than
    “send_high”, that the first packet was dropped in the network, and
    the following three packets triggered three duplicate
    acknowledgements acknowledging “send_high”. The second scenario
    would be that the sender unnecessarily retransmitted three packets
    below “send_high”, and that these three packets triggered three
    duplicate acknowledgements acknowledging “send_high”. In the absence
    of SACK, the TCP sender in unable to distinguish between these two
    scenarios.

    针对这个问题的得失权衡,就出现了两种实现方案,Linux显然选择了保守的方案而不是激进的方案,我们在下面的这段代码中可以找到关于这个保守方案的身影,代码来自Linux 3.17版本:

    /* People celebrate: "We love our President!" */
    static bool tcp_try_undo_recovery(struct sock *sk)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    
    	if (tcp_may_undo(tp)) {
    		// 这里很重要,但不是现在.所以我先忽略!
    	}
    	//仅仅会影响未开启SACK的流,这一点在RFC中有描述.之所以会采用这种保守的措施,是因为在不支持SACK的情况下,TCP协议无法区分重复ACK的触发缘由.
    	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
    		/* Hold old state until something *above* high_seq
    		 * is ACKed. For Reno it is MUST to prevent false
    		 * fast retransmits (RFC2582). SACK TCP is safe. */
    		tcp_moderate_cwnd(tp);
    		// 如果刚刚覆盖到high_seq这个临界点,那么退出函数,暂且不将状态恢复到Open,而是保持丢包恢复状态.
    		return true;
    	}
    	tcp_set_ca_state(sk, TCP_CA_Open);
    	return false;
    }
    
    

    看到上述代码,这意味着 如果当前的TCP流的ACK刚刚等于high_seq,那么将会在下次更新的ACK到来时恢复到Open状态,这是显然的。 还有一个显然的事实是,下次依然会进入到这个tcp_try_undo_recovery函数中,下次将不再进入if分支而退出,进而将状态设置为Open
    在这里插入图片描述

    第一个故事到此结束,我们已经把代码流程理清楚了。

    好,现在来看3.18rc5的那个patch:

    diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
    index a12b455928e52211efdc6b471ef54de6218f5df0..65686efeaaf3c36706390d3bfd260fd1fb942b7f 100644
    --- a/net/ipv4/tcp_input.c
    +++ b/net/ipv4/tcp_input.c
    @@ -2410,6 +2410,8 @@  static bool tcp_try_undo_recovery(struct sock *sk)
     		 * is ACKed. For Reno it is MUST to prevent false
     		 * fast retransmits (RFC2582). SACK TCP is safe. */
     		tcp_moderate_cwnd(tp);
    +		if (!tcp_any_retrans_done(sk))
    +			tp->retrans_stamp = 0;
     		return true;
     	}
     	tcp_set_ca_state(sk, TCP_CA_Open);
    

    修改的正是函数 tcp_try_undo_recovery,在恰好ACK临界包high_seq的时候,退出tcp_try_undo_recovery前,将retrans_stamp 进行了清零处理,从而解决了ETIMEDOUT的问题,但是,现在我们看一下同为tcp_try_undo_recovery函数逻辑的最开始的tcp_may_undo分支。

    tcp_may_undo的实现如下:

    static inline bool tcp_may_undo(const struct tcp_sock *tp)
    {
    	return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
    }
    static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
    {
    	return !tp->retrans_stamp ||
    		(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
    		 before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp));
    }
    

    显然,第一次进入tcp_try_undo_recovery时,undo条件是不满足的,可是第一次进入tcp_try_undo_recovery时却把retrans_stamp 给置为0了!

    这意味着第二次进入tcp_try_undo_recovery的时候,会进入undo分支,后果就是tcp_undo_cwnd_reduction被调用:

    #define TCP_INFINITE_SSTHRESH	0x7fffffff
    static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
    {
    	...
    	// prior_ssthresh最开始进入丢包状态时保存初始值TCP_INFINITE_SSTHRESH	
    	if (tp->prior_ssthresh) {
    		...
    		if (tp->prior_ssthresh > tp->snd_ssthresh) {
    			tp->snd_ssthresh = tp->prior_ssthresh;
    			tcp_ecn_withdraw_cwr(tp);
    		}
    	} 
    	...
    }
    
    

    一切成了下面的样子:
    在这里插入图片描述


    问题以及问题的成因就是这样子,然而3.18发布很久了,几乎没有人发现这个问题,我觉得原因大概有这么几点:

    • 如今不开启SACK的很少了;
    • 很少有需要注意到细节的场景;
    • 这其实并不是问题。

    我仔细想了一下上述第三个,反问,这是问题吗?

    引一篇很早以前写的文章:
    TCP核心概念-慢启动,ssthresh,拥塞避免,公平性的真实含义:https://blog.csdn.net/dog250/article/details/51439747

    ssthresh是什么?

    ssthresh本质就是一个 “公平性下界” 的度量,如果把丢包视为拥塞的信号,那么发生超时或快速重传时的cwnd正是一个撑爆管道的BDP,那么一个下界相当于从当前BDP的1/2处开始CA(拥塞避免)就是正确的,这个之前我有过数学证明。

    3.18后的内核TCP实现把超时恢复后的ssthresh恢复成了 ***“上一个下界”***,这是不合理的,然而这可能是无心之过。

    3.18rc5的这个patch是为了解决ETIMEDOUT这个bug的,我想作者应该是解决了该bug,但是却引入了本文描述的ssthresh被重置这另外一个问题,这个问题虽然不影响TCP的正确性,但确实是不合理的。


    最后给出两个我的模拟问题的packetdrill脚本,首先一个是模拟超时的:

    +0.000 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
    +0.000 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
    +0.000 bind(3, ..., ...) = 0
    +0.000 listen(3, 1) = 0
    
    +0.000 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
    +0.000 > S. 0:0(0) ack 1 <...>
    +0.000 < . 1:1(0) ack 1 win 2000
    +0.000 accept(3, ..., ...) = 4
    
    +0.000 %{
    print "init ssthresh:",tcpi_snd_ssthresh
    print "init cwnd:",tcpi_snd_cwnd
    }%
    +0.000 write(4, ..., 10000) = 10000
    +0.000 < . 1:1(0) ack 4001 win 2000
    +0.000 %{
    print "brto cwnd:",tcpi_snd_cwnd
    }%
    +0.250 %{
    print "ssthresh timeout", tcpi_snd_ssthresh
    print "cwnd:",tcpi_snd_cwnd
    print "ca_state:", tcpi_ca_state
    print "lost:", tcpi_lost
    
    }%
    +0 < . 1:1(0) ack 6001 win 2000
    +0 %{
    print "ssthresh ack 6001", tcpi_snd_ssthresh
    print "ca_state:", tcpi_ca_state
    print "lost:", tcpi_lost
    }%
    +0 < . 1:1(0) ack 7001 win 2000
    +0 %{
    print "ssthresh ack 7001", tcpi_snd_ssthresh
    print "ca_state:", tcpi_ca_state
    print "lost:", tcpi_lost
    }%
    +0 < . 1:1(0) ack 8001 win 2000
    +0.100 %{
    print "ssthresh ack 8001", tcpi_snd_ssthresh
    print "ca_state:", tcpi_ca_state
    print "lost:", tcpi_lost
    }%
    
    +0 < . 1:1(0) ack 9001 win 2000
    +0.100 %{
    print "ssthresh ack 9001", tcpi_snd_ssthresh
    print "ca_state:", tcpi_ca_state
    print "lost:", tcpi_lost
    }%
    
    +0 < . 1:1(0) ack 10001 win 2000
    +0.100 %{
    print "ssthresh ack 10001", tcpi_snd_ssthresh
    print "ca_state:", tcpi_ca_state
    print "lost:", tcpi_lost
    }%
    
    
    +0.000 write(4, ..., 1000) = 1000
    +0 < . 1:1(0) ack 11001 win 2000
    +0.100 %{
    print "ssthresh ack 11001", tcpi_snd_ssthresh
    print "ca_state:", tcpi_ca_state
    print "lost:", tcpi_lost
    }%
    
    +0.000 write(4, ..., 1000) = 1000
    +0 < . 1:1(0) ack 12001 win 2000
    +0.100 %{
    print "ssthresh ack 11001", tcpi_snd_ssthresh
    print "ca_state:", tcpi_ca_state
    print "lost:", tcpi_lost
    }%
    
    +0.000 write(4, ..., 1000) = 1000
    +0 < . 1:1(0) ack 13001 win 2000
    +0.100 %{
    print "ssthresh ack 11001", tcpi_snd_ssthresh
    print "ca_state:", tcpi_ca_state
    print "lost:", tcpi_lost
    }%
    // done!
    

    然后一个是模拟快速重传的:

    +0.000 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
    +0.000 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
    +0.000 bind(3, ..., ...) = 0
    +0.000 listen(3, 1) = 0
    
    +0.000 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
    +0.000 > S. 0:0(0) ack 1 <...>
    +0.000 < . 1:1(0) ack 1 win 2000
    +0.000 accept(3, ..., ...) = 4
    
    +0.000 %{
    print "init ssthresh:",tcpi_snd_ssthresh
    print "init cwnd:",tcpi_snd_cwnd
    }%
    +0.000 write(4, ..., 10000) = 10000
    +0.000 < . 1:1(0) ack 4001 win 2000
    +0.000 %{
    print "brto cwnd:",tcpi_snd_cwnd
    }%
    
    +0.000 < . 1:1(0) ack 4001 win 2000
    +0.000 < . 1:1(0) ack 4001 win 2000
    +0.000 < . 1:1(0) ack 4001 win 2000
    +0.000 %{
    print "ssthresh timeout", tcpi_snd_ssthresh
    print "cwnd:",tcpi_snd_cwnd
    print "ca_state:", tcpi_ca_state
    print "lost:", tcpi_lost
    
    }%
    +0 < . 1:1(0) ack 6001 win 2000
    +0 %{
    print "ssthresh ack 6001", tcpi_snd_ssthresh
    print "ca_state:", tcpi_ca_state
    print "lost:", tcpi_lost
    }%
    
    +0 < . 1:1(0) ack 10001 win 2000
    +0.100 %{
    print "ssthresh ack 10001", tcpi_snd_ssthresh
    print "ca_state:", tcpi_ca_state
    print "lost:", tcpi_lost
    }%
    
    
    +0.000 write(4, ..., 1000) = 1000
    +0 < . 1:1(0) ack 11001 win 2000
    +0.100 %{
    print "ssthresh ack 11001", tcpi_snd_ssthresh
    print "ca_state:", tcpi_ca_state
    print "lost:", tcpi_lost
    }%
    
    +0.000 write(4, ..., 1000) = 1000
    +0 < . 1:1(0) ack 12001 win 2000
    +0.100 %{
    print "ssthresh ack 11001", tcpi_snd_ssthresh
    print "ca_state:", tcpi_ca_state
    print "lost:", tcpi_lost
    }%
    
    +0.000 write(4, ..., 1000) = 1000
    +0 < . 1:1(0) ack 13001 win 2000
    +0.100 %{
    print "ssthresh ack 11001", tcpi_snd_ssthresh
    print "ca_state:", tcpi_ca_state
    print "lost:", tcpi_lost
    }%
    // done!
    

    有破要有立,方成正道。

    那么怎么解决这个问题呢?其实也简单,在确认是丢包状态自然恢复而不是undo恢复时,将tcp_sock对象的prior_ssthresh清除即可。

    我们知道,这个地点就是本文最开始那个patch的地方,只需要加一行代码,将:

    		if (!tcp_any_retrans_done(sk))
    			tp->retrans_stamp = 0;
    
    

    改为:

    		if (!tcp_any_retrans_done(sk)) {
    			tp->retrans_stamp = 0;
    			tp->prior_ssthresh = 0;
    		}
    

    即可!


    每写一篇技术文章,背后都是有一个连贯的小故事,这记载了我自己的一些经历或者记录了我和另外一些同好进行交流的细节。显然这类文章并不能算是技术文档,而只能算是随笔或者技术散文一类。

    本就是性情中人,喝酒吃肉舞文弄墨算是还可以,然而思路却还是比较跳跃,被很多人说是没有逻辑,可能个中逻辑也只有我自己能串起来吧,比如皮鞋,比如经理,比如座椅爆炸…这就是我为什么连一篇简单的技术白皮书都懒得写,却可以写十年博客的原因吧,在这十年之前,我还有将近二十年的日记。

    不会倒酒,不会干杯,不会敬酒,不会划拳,却有时可以喝倒一桌人,也许个中原因和写博客而不写文档有些类似吧。


    最后,我想说说关于 选择 的话题。

    为什么TCP的代码像屎一样,因为有太多的逻辑分支不得不采用if-else来不断迭代,处处穿插这微妙的trick!

    很多女的衣服鞋子非常多,这就导致她们出门的效率极其低下,不仅仅是纠结穿哪件上衣,还要纠结穿哪条裙子,还要纠结哪双鞋子更好看,甚至还有发型,但这不是最要命的,最要命的是上述这些如何搭配,这可是一个叉乘啊!

    像我就不用纠结,光头长发roundrobin,一件上衣一条短裤一双鞋,没得选择,自然就可以说走就走。

    每一个if语句都会带来性能的损失,选择了一个就意味着放弃了其它,而你必须选择一个,所以你必须有所放弃,放弃意味着失落,失落意味着损耗,不管是损耗你的心情,还是CPU的指令周期,所以,选择并不是一件好事。

    选择意味着低效!

    TCP的CC同样是复杂而令人恶心的,原因在于有太多的选择,看下面的一篇Wiki:
    TCP congestion controlhttps://en.wikipedia.org/wiki/TCP_congestion_control
    请看完它。

    太多了太多了。如果我用Cubic,你用BBR,那么ICCRG就要考虑Cubic如何和BBR协调公平性,收敛自己的优势,平滑同伴的劣势,这便在算法实现的时候,增加了一个trick,表现为一条或者多条if-else语句,ICCRG不得不考虑所有这些算法共存的时候,互联网如何看起来和声称的一样优秀。

    不幸的是,即便是ICCRG也不知道这些算法分别在整个互联网的占比情况和地域部署的信息,这便很难开展全局优化这样的工作。

    更为不幸的是,很多人并不按照章法出牌,类似一个包发两边以侵略性争抢带宽的 算法 层出不穷,这便是端到端自主拥塞控制固有的缺陷带来的永远无法解决的问题,随之,TCP拥塞控制变成了一个社会学博弈问题,而不再是一个技术问题。

    不幸中的万幸,早就有人意识到了这一点,并且采取了行动。

    这便是CAAI所做的工作,CAAI的全称是 TCP Congestion Avoidance Algorithm
    Identification

    关于CAAI的详情,这里有一篇文档:http://digitalcommons.unl.edu/cgi/viewcontent.cgi?article=1159&context=cseconfwork
    在综述中,作者进行了一个相当形象的类比:

    As an analogy, if we consider the Internet as a country, an Internet node as a house, and a TCP algorithm running at a node as a person living at a house, the process of obtaining the TCP deployment information can be considered as the TCP algorithm census in the country of the Internet. Just like the population census is vital for the study and planning of the society, the TCP algorithm census is vital for the study and planning of the Internet.

    是的,CAAI就是在做 互联网上的‘人口普查’ 工作。这是一个非常好的开始,但是,我对其是否能持续下去持悲观态度。

    我们回望我们的500年,有多少类似的失败。《乌托邦》始终停留在幻想中,巴黎公社失败了,孙中山失败了,…这所有的失败,其根源只有一个,即 把容器里的东西看成了整齐划一的同质的东西,事实上,最终它们实质的 异构性无法完美的相似相溶。每个独立的个体都是与众不同的个体,不相似,则不相容,而社会学的任务就是研究这背后的模式。

    回到我们的TCP拥塞控制工程学上,几乎所有的CC算法在设计的时候都有一种假设,即 互联网上所有的节点都在运行同一个算法,在这个基本原则之后,才会做 如果有不运行该算法的节点,我该怎么办 这种Bugfix,然后引入一系列的trick,让事情趋于复杂。

    这便解释了为什么Google的BBR算法在其SDN全局控制的B4网络上为什么如鱼得水而到了国内三大运营商的网络里却是一塌糊涂。因为国内的网络没有全局控制,也没有全部部署BBR。

    我们从BBR 2.0中可以看到,BBR已经开始在引入trick了,然而这并不是一件好事。

    怎么办?穿上皮鞋吧。


    皮鞋进水不会胖,旋转座椅会爆炸。
    这是一篇没有喝真露而写好的文章。

    展开全文
  • android studio引入最新版银联支付功能

    千次阅读 2015-09-23 15:00:32
    昨天leader说银联支付SDK更新了,两个客户端同步更新一下,iOS我管不着,还是管好我的Android吧,废话不多说看效果: 通过支付控件进行交易的流程如下图: 具体描述: (1)用户在客户端中点击购买商品,客户端...

    昨天leader说银联支付SDK更新了,两个客户端同步更新一下,IOS我管不着,还是管好我的Android吧,废话不多说看效果:
    这里写图片描述

    通过支付控件进行交易的流程如下图:

    这里写图片描述

    具体描述:
    (1)用户在客户端中点击购买商品,客户端发起订单生成请求到商户后台;
    (2)商户后台收到订单生成请求后,按照《手机控件支付产品接口规范》组织并推送订单信息至银联后台;
    (3)银联后台接收订单信息并检查通过后,生成对应交易流水号(即TN),并回复交易流水号至商户后台(应答要素:交易流水号等);
    (4)商户后台接收到交易流水号,将交易流水号返回给客户端;
    (5)客户端通过交易流水号(TN)调用支付控件;
    (6)用户在支付控件中输入相关支付信息后,由支付控件向银联后台发起支付请求;
    (7)支付成功后,银联后台将支付结果通知给商户后台;
    (8)银联将支付结果通知支付控件;
    (9)支付控件显示支付结果并将支付结果返回给客户端;

    注: 本文档主要关注上述流程中(5)、(9)部分的实现

    目前各个平台支持的设备情况如下:
    Android平台SDK主要适用于Android 2.1及以上版本的终端设备;
    iOS版本支付控件适用iOS 6.0及以上版本终端设备。

    本例子仅以静态库集成进行演示:

    • 引入jar,放在libs文件夹下:
      这里写图片描述
    • 引入.so文件,AS IDE .so默认放在src/main/jniLibs文件夹下,jniLibs得手动创建;
      这里写图片描述
    • 引入data.bin 放入assets文件夹下:
      这里写图片描述

    来看一下build.gradle文件:

    apply plugin: 'com.android.application'
    
    android {
        compileSdkVersion 23
        buildToolsVersion "23.0.1"
    
        defaultConfig {
            applicationId "com.dashentao.yinliandemo"
            minSdkVersion 11
            targetSdkVersion 23
            versionCode 1
            versionName "1.0"
        }
        buildTypes {
            release {
                minifyEnabled false
                proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
            }
        }
    }
    
    dependencies {
    //    compile fileTree(dir: 'libs', include: ['.jar','.so'])
        compile fileTree(include: ['*.jar'], dir: 'libs')
        compile 'com.android.support:appcompat-v7:23.0.1'
        compile 'com.android.support:design:22.2.0'
    }
    

    compile fileTree(include: [‘*.jar’], dir: ‘libs’)表示编译Libs下面的jar文件,很关键;

    当然AndroidManifest.xml 也得进行一些修改:
    权限:

     <uses-permission android:name="android.permission.INTERNET" />
        <uses-permission android:name="android.permission.ACCESS_NETWORK_STATE" />
        <uses-permission android:name="android.permission.CHANGE_NETWORK_STATE" />
        <uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
        <uses-permission android:name="android.permission.READ_PHONE_STATE" />
        <uses-permission android:name="android.permission.ACCESS_WIFI_STATE" />
        <uses-permission android:name="android.permission.NFC" />

    注册PayActivity:

    <activity
                android:name="com.unionpay.uppay.PayActivity"
                android:configChanges="orientation|keyboardHidden"
                android:excludeFromRecents="true"
                android:label="@string/app_name"
                android:screenOrientation="portrait"
                android:windowSoftInputMode="adjustResize" />

    基本的引入功能大概这么一些:

    我们来看下:具体逻辑代码:
    activity_main.xml代码太简单我就不贴了,MainActivity代码如下:

    package com.dashentao.yinliandemo;
    
    import android.content.Intent;
    import android.os.Bundle;
    import android.os.Handler;
    import android.os.Message;
    import android.support.design.widget.Snackbar;
    import android.support.v7.app.AppCompatActivity;
    import android.text.TextUtils;
    import android.view.Menu;
    import android.view.MenuItem;
    import android.view.View;
    import android.widget.Button;
    import android.widget.RelativeLayout;
    
    import com.unionpay.UPPayAssistEx;
    import com.unionpay.uppay.PayActivity;
    
    import java.io.ByteArrayOutputStream;
    import java.io.InputStream;
    import java.net.URL;
    import java.net.URLConnection;
    
    /**
     * @author dashentao
     * @date 2015 9-23
     * @since V 1.0
     */
    public class MainActivity extends AppCompatActivity {
        private Button button1;
        private RelativeLayout container;
        private static final String TN_URL_01 = "http://101.231.204.84:8091/sim/getacptn";
        private static final String R_SUCCESS = "success";
        private static final String R_FAIL = "fail";
        private static final String R_CANCEL = "cancel";
        private Handler mHandler = new Handler() {
            public void handleMessage(Message msg) {
                // “00” – 银联正式环境
                // “01” – 银联测试环境,该环境中不发生真实交易
                String tn = (String) msg.obj;
                if (!TextUtils.isEmpty(tn)) {
                    // 测试环境
                    String serverMode = "01";
                    UPPayAssistEx.startPayByJAR(MainActivity.this,
                            PayActivity.class, null, null, tn, serverMode);
                }
            }
        };
    
        @Override
        protected void onCreate(Bundle savedInstanceState) {
            super.onCreate(savedInstanceState);
            setContentView(R.layout.activity_main);
            button1 = (Button) findViewById(R.id.button1);
            container = (RelativeLayout) findViewById(R.id.container1);
            button1.setOnClickListener(new View.OnClickListener() {
                @Override
                public void onClick(View v) {
                    new MyThread().start();
                }
            });
        }
    
        @Override
        public boolean onCreateOptionsMenu(Menu menu) {
            // Inflate the menu; this adds items to the action bar if it is present.
            getMenuInflater().inflate(R.menu.menu_main, menu);
            return true;
        }
    
        @Override
        public boolean onOptionsItemSelected(MenuItem item) {
            // Handle action bar item clicks here. The action bar will
            // automatically handle clicks on the Home/Up button, so long
            // as you specify a parent activity in AndroidManifest.xml.
            int id = item.getItemId();
    
            //noinspection SimplifiableIfStatement
            if (id == R.id.action_settings) {
                return true;
            }
    
            return super.onOptionsItemSelected(item);
        }
    
        @Override
        protected void onActivityResult(int requestCode, int resultCode, Intent data) {
            super.onActivityResult(requestCode, resultCode, data);
            if (data == null) {
                return;
            }
    
            String str = data.getExtras().getString("pay_result");
            if (str.equalsIgnoreCase(R_SUCCESS)) {
                Snackbar.make(container, R.string.pay_success, Snackbar.LENGTH_LONG).show();
            } else if (str.equalsIgnoreCase(R_FAIL)) {
                Snackbar.make(container, R.string.pay_fail, Snackbar.LENGTH_LONG).show();
            } else if (str.equalsIgnoreCase(R_CANCEL)) {
                Snackbar.make(container, R.string.pay_cancel, Snackbar.LENGTH_LONG).show();
            }
        }
    
        /**
         * Worker thread that fetches the transaction number (TN) for the payment
         * order over HTTP from {@code TN_URL_01} and delivers it to {@code mHandler}.
         *
         * <p>The message's {@code obj} carries the TN string, or {@code null} when
         * the request failed — the handler must treat null as failure.
         *
         * @author JamesTao
         */
        private class MyThread extends Thread {
            public MyThread() {
            }

            @Override
            public void run() {
                super.run();
                String tn = null;
                try {
                    URLConnection ucon = new URL(TN_URL_01).openConnection();
                    ucon.setConnectTimeout(120 * 1000); // 2-minute connect timeout
                    // try-with-resources closes both streams even when read()
                    // throws — the original leaked the InputStream on exception.
                    try (InputStream is = ucon.getInputStream();
                         ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
                        int b;
                        while ((b = is.read()) != -1) {
                            baos.write(b);
                        }
                        tn = baos.toString();
                    }
                } catch (Exception e) {
                    // Best-effort fetch: log and fall through with tn == null.
                    e.printStackTrace();
                }

                Message msg = mHandler.obtainMessage();
                msg.obj = tn; // null signals failure to the UI handler
                mHandler.sendMessage(msg);
            }
        }
    }
    

    逻辑分析如下:
    * 首先会去请求订单信息TN;
    * 拿到TN之后就会带上订单信息调用UPPayAssistEx.startPayByJAR(xxx);调起支付页面。
    * 支付完成之后会在onActivityResult(xxx)得到支付回调信息。

    github代码链接

    展开全文
  • 我曾经解过很多关于这方面的内核bug: nat模块复制tso结构不完全导致SSL握手弹证书慢。 IP路由neighbour系统对pointopoint设备的处理不合理导致争锁。 IPv6路由缓存设计不合理导致争锁。 Overlayfs的mount设计不...
  • 长瘦管道的MSS对TCP性能的影响

    万次阅读 2016-08-14 11:03:25
    除了我国领土辽阔以及光速极限带来的物理限制之外,认为引入的时延在我国也不乏见,比如运营商的整形限速,或者说往光缆里掺廉价元素影响波导以及折射率,更常见的是铜网线质量低劣... 另外,请注意上面的那个解析...
  • 问题描述: 最近尝试引入淘宝授权以及阿里云物联网网关,sdk-core-java、taobao-sdk-java是maven就有的,但是测试环境下taobao-sdk-java这个jar包却不是放在maven的,所以得引入本地的下载回来的jar包。本地开发直接...
  • 问题描述最近尝试引入阿里云的短信验证码,阿里云的core sdk是maven就有的,但是短信相关的jar包却不是放在maven的,所以得引入本地的下载回来的jar包。本地开发直接引入,idea是可以直接跑调用是没问题的。但是打成...
  • linux无名管道和有名管道

    万次阅读 多人点赞 2017-04-07 16:27:04
    为了实现命名管道,引入了一种新的文件类型——FIFO文件(遵循先进先出的原则)。实现一个命名管道实际上就是实现一个FIFO文件。命名管道一旦建立,之后它的读、写以及关闭操作都与普通管道完全相同。虽然FIFO文件的...
  • 进程控制块PCB结构 task_struct 描述

    千次阅读 多人点赞 2013-09-16 10:46:21
    一、task_struct 结构描述 1.进程状态(State) 进程执行时,它会根据具体情况改变状态。进程状态是调度和对换的依据。Linux 中的进程主要有如下状态,如表4.1 所示。 (1)可运行状态 处于这种状态的进程...
  • linux文件描述符-标准输入输出

    千次阅读 2014-11-24 16:36:11
    当某个进程打开文件时,操作系统返回相应的文件描述符,进程为了处理该文件必须引用此描述符。 所谓的文件描述符是一个低级的正整数。最前面的三个文件描述符(0,1,2)分别与标准输入(stdin),标准输出(stdout...
  • Mongodb中数据聚合之聚合管道aggregate

    万次阅读 2016-05-22 11:05:08
    在之前的两篇文章Mongodb中数据聚合之基本聚合函数count、distinct、group >和Mongodb中数据聚合之MapReduce ...面对着广大用户对数据统计的需求,Mongodb从2.2版本之后便引入了新的功能聚合框架(a
  • linux 无名管道pipe和有名管道FIFO

    千次阅读 2012-06-17 17:29:09
    进程打开FIFO后,就可以根据open时指定的选项对其进行相应的读/写操作(请参考open的帮助文档中关于选项的说明)。 #include  ssize_t read(int fildes, void *buf, size_t nbyte);  ssize_t write(int ...
  • 一维激波(Lax shock tube)问题的数值求解

    千次阅读 多人点赞 2019-05-28 11:52:59
    问题描述一般的守恒算法格式精确解常用守恒格式(先介绍标量)Lax-Friedrichs格式Roe格式Lax-Wendroff格式ENO格式简介(先介绍标量)5阶WENO格式空间离散Lax-Friedrichs通量分裂原函数方法重构左值右值空间算子和...
  • 另一个是紧挨着进程描述符的小数据结构thread_info,叫做线程描述符。 Linux把thread_info(线程描述符)和内核态的线程堆栈存放在一起,这块区域通常是8192K(占两个页框),其实地址必须是8192的整数倍。 ...
  • 问题描述:引用ElasticSearch的某些类时出现红色提示,ctrl+单击这个类无法跳转到Jar中,但是项目可以正常运行 第一种方法:https://blog.csdn.net/qq_27922171/article/details/103736214 原因分析:这个时idea...
  • 关于 它处理 的 信息 不同复杂 的 任务。 一个 简单,但 不灵活 的方式来实施 这个应用程序 可以 执行此 处理 为 单一 模块。 然而,这种方法 有可能减少 用于 重构代码 , 对其进行优化 , 或者 重新使用 它,如果 ...
  • 3. 打开的描述符号可以读写(two-way双工)(建议用只读或只写打开) 或者用shutdown函数关闭读或写关闭,变成单工 4. 管道文件关闭后,数据不持久.(程序如不删除管道,程序结束后,无法读到数据) 5. 管道的数据...
  • Linux 管道(pipe)原理及使用

    千次阅读 2013-12-20 17:44:53
    可以引入信号量,有效的保护临界区代码,就可以避免这些问题。在单任务环境下,也可以通过采取适当的措施来避免信号量的使用,从而提高程序的执行效率。 4.linux 内核中 pipe 的读写实现 Linux 内核中采用 ...
  • 通过引入高阶变形梯度,合理地修正了传统Cauchy-Born准则在描述纳米变形几何关系时所存在的缺陷。利用原子间相互作用势以及能量等效原理,得到了基于广义连续介质模型的单壁碳纳米的本构关系。由此得到的本构...
  • 进程通讯之无名管道

    万次阅读 2018-03-25 15:55:17
    关于无名管道的使用要求是:只能用于两个有关联的进程之间的通信(如父、子进程的通信),而对于父子进程之间的通信,创建管道文件和打开管道文件必须是在fork之前完成。如果是在fork之后完成,那么子进程与父进程将...
  • 文章目录系列文章链接目录前言一、题目要求二、作品实物图展示三、题目总体分析四、总体方案描述与系统框图总结 前言 《简易电路特性测试仪》这一题是我们实验室做的第二道国赛训练题,同时也是我个人非常喜欢的...
  • 2.从晶体到“1+1=2” 2.1.晶体如何表示0和1 2.2.从晶体到门电路 2.3.从门电路到半加器 3.完成一次真正的计算 3.1.穿孔纸带 3.2.演练一番 3.3.编程语言的本质 1.理论先行 1.1.二进制思...
  • 进程的描述与控制 进程的描述 程序的顺序执行和并发执行 顺序执行的特点 顺序性:每一操作都在下一操作开始前结束,严格按照顺序; 封闭性:程序在封闭环境下执行,程序运行时占全机资源,资源的状态只有...
  • 游戏程序设计之渲染管道

    千次阅读 2016-01-15 15:15:21
    网格中的三角形是物体的组成部分,通常下列属于都是描述网格中三角形:多边形,图元,网格几何体。我们可以通过指定三角形的三个顶点来描述三角形。  另外在DX中顶点还可以有颜色属性以及法线向量属性;

空空如也

空空如也

1 2 3 4 5 ... 20
收藏数 66,631
精华内容 26,652
关键字:

关于引入管的描述