
Java ReentrantLock basics

· 2 min read

Background

Java fundamentals.

Notes

Java's AQS (AbstractQueuedSynchronizer) mainly consists of a state field and a blocking queue.

AQS

  • state: a volatile int field
  • blocking (park/unpark): implemented on Linux on top of pthread_cond_wait, i.e. a condition variable (a minimal park/unpark sketch follows this list)
  • queue: why a queue? Because the condition variable wakes up the waiters held in the queue, and the queue generally holds the blocked threads
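
To make the park/unpark part concrete, here is a minimal sketch (my own example, not from the original note) using java.util.concurrent.locks.LockSupport, the primitive AQS uses to block and wake threads:

import java.util.concurrent.locks.LockSupport;

public class ParkDemo {
    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            System.out.println("worker: parking");
            LockSupport.park();            // blocks here; on Linux this bottoms out in pthread condition waits
            System.out.println("worker: unparked");
        });
        worker.start();
        Thread.sleep(500);                 // give the worker time to park
        LockSupport.unpark(worker);        // wake it up
        worker.join();
    }
}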

Reentrancy

Reentrancy here means that the same thread can lock the same ReentrantLock multiple times.

So how is this implemented?

When state == 0, the current thread is recorded via setExclusiveOwnerThread in the ReentrantLock's synchronizer (Sync); on re-entry the owner check passes and state is simply incremented.

    abstract static class Sync extends AbstractQueuedSynchronizer {
        private static final long serialVersionUID = -5179523762034025860L;

        /**
         * Performs non-fair tryLock. tryAcquire is implemented in
         * subclasses, but both need nonfair try for trylock method.
         */
        @ReservedStackAccess
        final boolean nonfairTryAcquire(int acquires) {
            final Thread current = Thread.currentThread();
            int c = getState();
            if (c == 0) {
                if (compareAndSetState(0, acquires)) {
                    setExclusiveOwnerThread(current); // core step: record the current thread as the owner in the lock's synchronizer
                    return true;
                }
            }
            else if (current == getExclusiveOwnerThread()) {
                int nextc = c + acquires;
                if (nextc < 0) // overflow
                    throw new Error("Maximum lock count exceeded");
                setState(nextc);
                return true;
            }
            return false;
        }
    }
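
As a quick caller-side check of the reentrancy described above, the following small demo (my own sketch, not part of the original note) locks the same ReentrantLock twice on one thread; each lock() bumps state and each unlock() decrements it until the lock is released:

import java.util.concurrent.locks.ReentrantLock;

public class ReentrancyDemo {
    private static final ReentrantLock LOCK = new ReentrantLock();

    public static void main(String[] args) {
        LOCK.lock();                                    // state: 0 -> 1, owner = main
        try {
            LOCK.lock();                                // same owner, state: 1 -> 2
            try {
                System.out.println("hold count = " + LOCK.getHoldCount());  // prints 2
            } finally {
                LOCK.unlock();                          // state: 2 -> 1
            }
        } finally {
            LOCK.unlock();                              // state: 1 -> 0, lock released
        }
    }
}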

Spring Boot startup flow

· 7 min read

Background

Understand the Spring Boot startup flow.

Flow analysis

The core of the startup flow is the call to the refresh() method.

// org\springframework\spring-context\5.2.15.RELEASE\spring-context-5.2.15.RELEASE-sources.jar!\org\springframework\context\support\AbstractApplicationContext.java
@Override
public void refresh() throws BeansException, IllegalStateException {
    synchronized (this.startupShutdownMonitor) {
        // Prepare this context for refreshing.
        prepareRefresh();

        // Tell the subclass to refresh the internal bean factory.
        ConfigurableListableBeanFactory beanFactory = obtainFreshBeanFactory();

        // Prepare the bean factory for use in this context.
        prepareBeanFactory(beanFactory);

        try {
            // Allows post-processing of the bean factory in context subclasses.
            postProcessBeanFactory(beanFactory);

            // Invoke factory processors registered as beans in the context.
            invokeBeanFactoryPostProcessors(beanFactory);

            // Register bean processors that intercept bean creation.
            registerBeanPostProcessors(beanFactory);

            // Initialize message source for this context.
            initMessageSource();

            // Initialize event multicaster for this context.
            initApplicationEventMulticaster();

            // Initialize other special beans in specific context subclasses.
            onRefresh();

            // Check for listener beans and register them.
            registerListeners();

            // Instantiate all remaining (non-lazy-init) singletons.
            finishBeanFactoryInitialization(beanFactory);

            // Last step: publish corresponding event.
            finishRefresh();
        }

        catch (BeansException ex) {
            if (logger.isWarnEnabled()) {
                logger.warn("Exception encountered during context initialization - " +
                        "cancelling refresh attempt: " + ex);
            }

            // Destroy already created singletons to avoid dangling resources.
            destroyBeans();

            // Reset 'active' flag.
            cancelRefresh(ex);

            // Propagate exception to caller.
            throw ex;
        }

        finally {
            // Reset common introspection caches in Spring's core, since we
            // might not ever need metadata for singleton beans anymore...
            resetCommonCaches();
        }
    }
}
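
For orientation, refresh() is reached from a plain Spring Boot entry point like the hypothetical one below; SpringApplication.run() calls refreshContext(), which ends up in the refresh() method shown above (the call stack later in this post confirms this path):

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@SpringBootApplication
public class DemoApplication {
    public static void main(String[] args) {
        // run() -> refreshContext() -> AbstractApplicationContext.refresh()
        SpringApplication.run(DemoApplication.class, args);
    }
}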

BeanFactory inheritance chain

AbstractBeanFactory  --->  implements ConfigurableBeanFactory  --->  extends HierarchicalBeanFactory  --->  extends BeanFactory

The core BeanFactory interface

public interface BeanFactory {

	Object getBean(String name) throws BeansException;

	<T> T getBean(String name, Class<T> requiredType) throws BeansException;

	Object getBean(String name, Object... args) throws BeansException;

	<T> T getBean(Class<T> requiredType) throws BeansException;

	<T> T getBean(Class<T> requiredType, Object... args) throws BeansException;

}

The actual lookup is implemented by AbstractBeanFactory#doGetBean:

	/**
* Return an instance, which may be shared or independent, of the specified bean.
* @param name the name of the bean to retrieve
* @param requiredType the required type of the bean to retrieve
* @param args arguments to use when creating a bean instance using explicit arguments
* (only applied when creating a new instance as opposed to retrieving an existing one)
* @param typeCheckOnly whether the instance is obtained for a type check,
* not for actual use
* @return an instance of the bean
* @throws BeansException if the bean could not be created
*/
@SuppressWarnings("unchecked")
protected <T> T doGetBean(
String name, @Nullable Class<T> requiredType, @Nullable Object[] args, boolean typeCheckOnly)
throws BeansException {

String beanName = transformedBeanName(name);
Object bean;

// Eagerly check singleton cache for manually registered singletons.
Object sharedInstance = getSingleton(beanName);
if (sharedInstance != null && args == null) {
if (logger.isTraceEnabled()) {
if (isSingletonCurrentlyInCreation(beanName)) {
logger.trace("Returning eagerly cached instance of singleton bean '" + beanName +
"' that is not fully initialized yet - a consequence of a circular reference");
}
else {
logger.trace("Returning cached instance of singleton bean '" + beanName + "'");
}
}
bean = getObjectForBeanInstance(sharedInstance, name, beanName, null);
}

else {
...

if (!typeCheckOnly) {
markBeanAsCreated(beanName);
}

try {
RootBeanDefinition mbd = getMergedLocalBeanDefinition(beanName);
checkMergedBeanDefinition(mbd, beanName, args);

// Guarantee initialization of beans that the current bean depends on.
String[] dependsOn = mbd.getDependsOn();
if (dependsOn != null) {
for (String dep : dependsOn) {
if (isDependent(beanName, dep)) {
throw new BeanCreationException(mbd.getResourceDescription(), beanName,
"Circular depends-on relationship between '" + beanName + "' and '" + dep + "'");
}
registerDependentBean(dep, beanName);
try {
getBean(dep);
}
catch (NoSuchBeanDefinitionException ex) {
throw new BeanCreationException(mbd.getResourceDescription(), beanName,
"'" + beanName + "' depends on missing bean '" + dep + "'", ex);
}
}
}

// Create bean instance.
if (mbd.isSingleton()) {
sharedInstance = getSingleton(beanName, () -> {
try {
return createBean(beanName, mbd, args);
}
catch (BeansException ex) {
// Explicitly remove instance from singleton cache: It might have been put there
// eagerly by the creation process, to allow for circular reference resolution.
// Also remove any beans that received a temporary reference to the bean.
destroySingleton(beanName);
throw ex;
}
});
bean = getObjectForBeanInstance(sharedInstance, name, beanName, mbd);
}

else if (mbd.isPrototype()) {
// It's a prototype -> create a new instance.
Object prototypeInstance = null;
try {
beforePrototypeCreation(beanName);
prototypeInstance = createBean(beanName, mbd, args);
}
finally {
afterPrototypeCreation(beanName);
}
bean = getObjectForBeanInstance(prototypeInstance, name, beanName, mbd);
}

else {
String scopeName = mbd.getScope();
if (!StringUtils.hasLength(scopeName)) {
throw new IllegalStateException("No scope name defined for bean ´" + beanName + "'");
}
Scope scope = this.scopes.get(scopeName);
if (scope == null) {
throw new IllegalStateException("No Scope registered for scope name '" + scopeName + "'");
}
try {
Object scopedInstance = scope.get(beanName, () -> {
beforePrototypeCreation(beanName);
try {
return createBean(beanName, mbd, args);
}
finally {
afterPrototypeCreation(beanName);
}
});
bean = getObjectForBeanInstance(scopedInstance, name, beanName, mbd);
}
catch (IllegalStateException ex) {
throw new BeanCreationException(beanName,
"Scope '" + scopeName + "' is not active for the current thread; consider " +
"defining a scoped proxy for this bean if you intend to refer to it from a singleton",
ex);
}
}
}
catch (BeansException ex) {
cleanupAfterBeanCreationFailure(beanName);
throw ex;
}
}

// Check if required type matches the type of the actual bean instance.
if (requiredType != null && !requiredType.isInstance(bean)) {
try {
T convertedBean = getTypeConverter().convertIfNecessary(bean, requiredType);
if (convertedBean == null) {
throw new BeanNotOfRequiredTypeException(name, requiredType, bean.getClass());
}
return convertedBean;
}
catch (TypeMismatchException ex) {
if (logger.isTraceEnabled()) {
logger.trace("Failed to convert bean '" + name + "' to required type '" +
ClassUtils.getQualifiedName(requiredType) + "'", ex);
}
throw new BeanNotOfRequiredTypeException(name, requiredType, bean.getClass());
}
}
return (T) bean;
}

Callbacks

createBean is where BeanPostProcessor callbacks get their chance to run; note resolveBeforeInstantiation below, which may return a proxy instead of the target bean:

	@Override
protected Object createBean(String beanName, RootBeanDefinition mbd, @Nullable Object[] args)
throws BeanCreationException {

if (logger.isTraceEnabled()) {
logger.trace("Creating instance of bean '" + beanName + "'");
}
RootBeanDefinition mbdToUse = mbd;

// Make sure bean class is actually resolved at this point, and
// clone the bean definition in case of a dynamically resolved Class
// which cannot be stored in the shared merged bean definition.
Class<?> resolvedClass = resolveBeanClass(mbd, beanName);
if (resolvedClass != null && !mbd.hasBeanClass() && mbd.getBeanClassName() != null) {
mbdToUse = new RootBeanDefinition(mbd);
mbdToUse.setBeanClass(resolvedClass);
}

// Prepare method overrides.
try {
mbdToUse.prepareMethodOverrides();
}
catch (BeanDefinitionValidationException ex) {
throw new BeanDefinitionStoreException(mbdToUse.getResourceDescription(),
beanName, "Validation of method overrides failed", ex);
}

try {
// Give BeanPostProcessors a chance to return a proxy instead of the target bean instance.
Object bean = resolveBeforeInstantiation(beanName, mbdToUse);
if (bean != null) {
return bean;
}
}
catch (Throwable ex) {
throw new BeanCreationException(mbdToUse.getResourceDescription(), beanName,
"BeanPostProcessor before instantiation of bean failed", ex);
}

try {
Object beanInstance = doCreateBean(beanName, mbdToUse, args);
if (logger.isTraceEnabled()) {
logger.trace("Finished creating instance of bean '" + beanName + "'");
}
return beanInstance;
}
catch (BeanCreationException | ImplicitlyAppearedSingletonException ex) {
// A previously detected exception with proper bean creation context already,
// or illegal singleton state to be communicated up to DefaultSingletonBeanRegistry.
throw ex;
}
catch (Throwable ex) {
throw new BeanCreationException(
mbdToUse.getResourceDescription(), beanName, "Unexpected exception during bean creation", ex);
}
}
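
To illustrate the resolveBeforeInstantiation hook used above, here is a minimal sketch (my own assumption, not from the original post) of an InstantiationAwareBeanPostProcessor; returning a non-null object from postProcessBeforeInstantiation short-circuits normal creation, which is how proxy-producing infrastructure can plug in:

import org.springframework.beans.BeansException;
import org.springframework.beans.factory.config.InstantiationAwareBeanPostProcessor;
import org.springframework.stereotype.Component;

@Component
public class ShortCircuitPostProcessor implements InstantiationAwareBeanPostProcessor {

    // Called from resolveBeforeInstantiation(...) above; returning a non-null
    // object replaces normal bean creation with the returned instance.
    @Override
    public Object postProcessBeforeInstantiation(Class<?> beanClass, String beanName) throws BeansException {
        return null;   // returning null lets the regular doCreateBean(...) path run
    }
}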

Bean-related concepts

  • BeanDefinitionRegistry

BeanDefinitionRegistry holds BeanDefinitions.

Interface for registries that hold bean definitions, for example RootBeanDefinition and ChildBeanDefinition instances. The core method is BeanDefinition getBeanDefinition(String beanName) throws NoSuchBeanDefinitionException;

  • BeanDefinition

    A BeanDefinition describes a bean instance, which has property values, constructor argument values, and further information supplied by concrete implementations. In other words, a BeanDefinition describes a bean's properties and constructor arguments, much like a Class object does for plain Java types (see the sketch below).
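
A minimal sketch (my own example, not from the original post) tying BeanDefinitionRegistry, BeanDefinition, and getBean together; DefaultListableBeanFactory implements both BeanFactory and BeanDefinitionRegistry:

import org.springframework.beans.factory.support.DefaultListableBeanFactory;
import org.springframework.beans.factory.support.GenericBeanDefinition;

public class BeanDefinitionDemo {

    public static class Greeter {
        public String hello() { return "hello"; }
    }

    public static void main(String[] args) {
        // DefaultListableBeanFactory is both a BeanFactory and a BeanDefinitionRegistry.
        DefaultListableBeanFactory factory = new DefaultListableBeanFactory();

        GenericBeanDefinition bd = new GenericBeanDefinition();
        bd.setBeanClass(Greeter.class);                   // the "recipe" for the bean
        factory.registerBeanDefinition("greeter", bd);    // BeanDefinitionRegistry API

        Greeter greeter = factory.getBean(Greeter.class); // goes through doGetBean(...)
        System.out.println(greeter.hello());
    }
}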

Finally, the call stack when the bean is actually instantiated:

instantiateClass:204, BeanUtils (org.springframework.beans)
instantiate:87, SimpleInstantiationStrategy (org.springframework.beans.factory.support)
instantiateBean:1315, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBeanInstance:1218, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
getObject:-1, 1771040410 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$324)
getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
getBean:207, AbstractBeanFactory (org.springframework.beans.factory.support)
invokeBeanFactoryPostProcessors:90, PostProcessorRegistrationDelegate (org.springframework.context.support)
invokeBeanFactoryPostProcessors:707, AbstractApplicationContext (org.springframework.context.support)
refresh:533, AbstractApplicationContext (org.springframework.context.support)
refresh:755, SpringApplication (org.springframework.boot)
refresh:747, SpringApplication (org.springframework.boot)
refreshContext:402, SpringApplication (org.springframework.boot)
run:312, SpringApplication (org.springframework.boot)
run:140, SpringApplicationBuilder (org.springframework.boot.builder)
bootstrapServiceContext:212, BootstrapApplicationListener (org.springframework.cloud.bootstrap)
onApplicationEvent:117, BootstrapApplicationListener (org.springframework.cloud.bootstrap)
onApplicationEvent:74, BootstrapApplicationListener (org.springframework.cloud.bootstrap)
doInvokeListener:172, SimpleApplicationEventMulticaster (org.springframework.context.event)
invokeListener:165, SimpleApplicationEventMulticaster (org.springframework.context.event)
multicastEvent:139, SimpleApplicationEventMulticaster (org.springframework.context.event)
multicastEvent:127, SimpleApplicationEventMulticaster (org.springframework.context.event)
environmentPrepared:80, EventPublishingRunListener (org.springframework.boot.context.event)
environmentPrepared:53, SpringApplicationRunListeners (org.springframework.boot)
prepareEnvironment:342, SpringApplication (org.springframework.boot)
run:307, SpringApplication (org.springframework.boot)
main:40, DatacenterApplication (com.patpat.datacenter)
	public static <T> T instantiateClass(Constructor<T> ctor, Object... args) throws BeanInstantiationException {
		Assert.notNull(ctor, "Constructor must not be null");
		try {
			...
			return ctor.newInstance(argsWithDefaultValues);
		}
		...
	}

Java class file format

· 2 min read

Background

Understand the Java class file format.

Introduction

Most class loading ultimately relies on defineClass(byte[] b, int off, int len).

The flow is: b is the byte stream of your class file; the method reads the bytes, defines the class, and the class metadata ends up in the metaspace.
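
A minimal sketch of that flow (my own example), assuming the A.class file produced later in this post sits in the working directory; defineClass(String, byte[], int, int) is protected on ClassLoader, so the sketch exposes it through a small subclass:

import java.nio.file.Files;
import java.nio.file.Paths;

public class ByteClassLoader extends ClassLoader {

    // expose the protected defineClass so we can feed it raw bytes
    public Class<?> defineFromBytes(String name, byte[] bytes) {
        return defineClass(name, bytes, 0, bytes.length);
    }

    public static void main(String[] args) throws Exception {
        byte[] bytes = Files.readAllBytes(Paths.get("A.class")); // the class file compiled below
        Class<?> clazz = new ByteClassLoader().defineFromBytes("A", bytes);
        System.out.println(clazz.getName());                     // prints: A
    }
}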

Scope of this post

Why I needed this article:

  • While reading about Java dynamic proxies, I realized that dynamic proxies and statically compiled classes only differ at the entry point: both end up calling this defineClass(byte[] b, int off, int len) method.

I want to write a ProxyGenerator-style demo whose only job is to produce a class byte stream.

Getting started

Let's look at what a compiled class file looks like:

//   touch A.java
public class A{
}

Then compile it and dump the bytes:

javac A.java
hexdump A.class
0000000 feca beba 0000 4100 0d00 000a 0002 0703
0000010 0400 000c 0005 0106 1000 616a 6176 6c2f
0000020 6e61 2f67 624f 656a 7463 0001 3c06 6e69
0000030 7469 013e 0300 2928 0756 0800 0001 4101
0000040 0001 4304 646f 0165 0f00 694c 656e 754e
0000050 626d 7265 6154 6c62 0165 0a00 6f53 7275
0000060 6563 6946 656c 0001 4106 6a2e 7661 0061
0000070 0021 0007 0002 0000 0000 0001 0001 0005
0000080 0006 0001 0009 0000 001d 0001 0001 0000
0000090 2a05 00b7 b101 0000 0100 0a00 0000 0600
00000a0 0100 0000 0100 0100 0b00 0000 0200 0c00
00000b0

Now let's look at the ClassFile structure:

ClassFile {
    u4             magic;
    u2             minor_version;
    u2             major_version;
    u2             constant_pool_count;
    cp_info        constant_pool[constant_pool_count-1];
    u2             access_flags;
    u2             this_class;
    u2             super_class;
    u2             interfaces_count;
    u2             interfaces[interfaces_count];
    u2             fields_count;
    field_info     fields[fields_count];
    u2             methods_count;
    method_info    methods[methods_count];
    u2             attributes_count;
    attribute_info attributes[attributes_count];
}

The first item is the magic number, 0xCAFEBABE (it shows up as feca beba in the hexdump above because hexdump prints 16-bit little-endian words by default).

magic

The magic item supplies the magic number identifying the class file format; it has the value 0xCAFEBABE.
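
To verify this against the A.class produced above, a small reader (my own sketch) can pull out the first few fields of the ClassFile structure:

import java.io.DataInputStream;
import java.io.FileInputStream;

public class MagicCheck {
    public static void main(String[] args) throws Exception {
        try (DataInputStream in = new DataInputStream(new FileInputStream("A.class"))) {
            int magic = in.readInt();            // u4 magic
            int minor = in.readUnsignedShort();  // u2 minor_version
            int major = in.readUnsignedShort();  // u2 major_version
            System.out.printf("magic=0x%X minor=%d major=%d%n", magic, minor, major);
            System.out.println("valid class file: " + (magic == 0xCAFEBABE));
        }
    }
}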

Further reading

Nebula installation

· 2 min read

Background

Set up a simple Nebula instance.

Introduction

Nebula is a graph database.

Setting up the backend

Start it with Docker Swarm.

## clone the repo
git clone git@github.com:vesoft-inc/nebula-docker-compose.git

## change into the directory
cd nebula-docker-compose
## create the logs and data directories
mkdir logs data
mkdir data/storage0 logs/storage0 data/meta0 logs/meta0 logs/graph

## start; the stack is named nebula
docker compose up -d

## check the startup status
docker stack ps nebula

Problem encountered:

A "No such image: vesoft/nebula" error, caused by DNS resolution being blocked.

ID             NAME                     IMAGE                            NODE      DESIRED STATE   CURRENT STATE              ERROR                              PORTS
92hscx92ugte nebula_graphd.1 vesoft/nebula-graphd:nightly dai Ready Preparing 4 seconds ago
730ke91w1lil \_ nebula_graphd.1 vesoft/nebula-graphd:nightly dai Shutdown Rejected 4 seconds ago "No such image: vesoft/nebula-…"

Error response from daemon: Get "https://registry-1.docker.io/v2/": dial tcp: lookup registry-1.docker.io on 192.168.1.1:53: read udp 192.168.1.108:44332->192.168.1.1:53: read: connection refused

It is a Docker DNS problem; related discussion was found here.

Solution:

## DNS issue: adding 8.8.8.8 as a nameserver fixes it
echo "nameserver 8.8.8.8" | sudo tee /etc/resolv.conf > /dev/null

Setting up the frontend

## clone the repo
git clone git@github.com:vesoft-inc/nebula-studio.git
## change into the directory
cd nebula-studio/
## install dependencies
npm install
## start the frontend

Problem encountered

studio.go:22:2: github.com/facebook/fbthrift@v0.31.1-0.20211129061412-801ed7f9f295: Get "https://proxy.golang.org/github.com/facebook/fbthrift/@v/v0.31.1-0.20211129061412-801ed7f9f295.mod": dial tcp 142.250.204.145:443: i/o timeout
studio.go:24:2: github.com/facebook/fbthrift@v0.31.1-0.20211129061412-801ed7f9f295: Get "https://proxy.golang.org/github.com/facebook/fbthrift/@v/v0.31.1-0.20211129061412-801ed7f9f295.mod": dial tcp 142.250.204.145:443: i/o timeout

Solution:

Point the Go module proxy at goproxy.io, e.g. go env -w GOPROXY=https://goproxy.io,direct (see https://goproxy.io/zh/).

InvocationHandler and dynamic proxies

· 4 min read

Background

Understand Java's InvocationHandler.

In Java, Proxy.newProxyInstance and the bytecode of the dynamically generated proxy class are produced by the JDK's ProxyGenerator class.

Introduction

A dynamic proxy consists of two parts:

  • the InvocationHandler interface, which implements the callback
  • Proxy.newProxyInstance, which generates the proxy class

Proxy.newProxyInstance

The process is: 1. reflect over all the interfaces; 2. generate a byte[] holding a class file; 3. define the proxy class from those bytes.
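
Before looking at the JDK internals, here is a minimal user-level example (my own sketch) of the two parts working together; every call on the returned proxy is routed to the InvocationHandler. The JDK's defineProxyClass below is where the three steps above actually happen:

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;

public class ProxyDemo {

    interface Greeter {
        String hello(String name);
    }

    public static void main(String[] args) {
        InvocationHandler handler = (proxy, method, methodArgs) -> {
            // every call on the proxy lands here
            System.out.println("invoking " + method.getName());
            return "hello, " + methodArgs[0];
        };

        Greeter greeter = (Greeter) Proxy.newProxyInstance(
                Greeter.class.getClassLoader(),
                new Class<?>[] { Greeter.class },
                handler);

        System.out.println(greeter.hello("world"));   // prints: invoking hello / hello, world
    }
}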

  private static final class ProxyBuilder {
private static final Unsafe UNSAFE = Unsafe.getUnsafe();

// prefix for all proxy class names
private static final String proxyClassNamePrefix = "$Proxy";

// next number to use for generation of unique proxy class names
private static final AtomicLong nextUniqueNumber = new AtomicLong();

// a reverse cache of defined proxy classes
private static final ClassLoaderValue<Boolean> reverseProxyCache =
new ClassLoaderValue<>();

private static Class<?> defineProxyClass(Module m, List<Class<?>> interfaces) {
String proxyPkg = null; // package to define proxy class in
int accessFlags = Modifier.PUBLIC | Modifier.FINAL;

/*
* Record the package of a non-public proxy interface so that the
* proxy class will be defined in the same package. Verify that
* all non-public proxy interfaces are in the same package.
*/
for (Class<?> intf : interfaces) {
int flags = intf.getModifiers();
if (!Modifier.isPublic(flags)) {
accessFlags = Modifier.FINAL; // non-public, final
String pkg = intf.getPackageName();
if (proxyPkg == null) {
proxyPkg = pkg;
} else if (!pkg.equals(proxyPkg)) {
throw new IllegalArgumentException(
"non-public interfaces from different packages");
}
}
}

if (proxyPkg == null) {
// all proxy interfaces are public
proxyPkg = m.isNamed() ? PROXY_PACKAGE_PREFIX + "." + m.getName()
: PROXY_PACKAGE_PREFIX;
} else if (proxyPkg.isEmpty() && m.isNamed()) {
throw new IllegalArgumentException(
"Unnamed package cannot be added to " + m);
}

if (m.isNamed()) {
if (!m.getDescriptor().packages().contains(proxyPkg)) {
throw new InternalError(proxyPkg + " not exist in " + m.getName());
}
}

/*
* Choose a name for the proxy class to generate.
*/
long num = nextUniqueNumber.getAndIncrement();
String proxyName = proxyPkg.isEmpty()
? proxyClassNamePrefix + num
: proxyPkg + "." + proxyClassNamePrefix + num;

ClassLoader loader = getLoader(m);
trace(proxyName, m, loader, interfaces);

/*
* Generate the specified proxy class.
*/
byte[] proxyClassFile = ProxyGenerator.generateProxyClass(
proxyName, interfaces.toArray(EMPTY_CLASS_ARRAY), accessFlags);
try {
Class<?> pc = UNSAFE.defineClass(proxyName, proxyClassFile,
0, proxyClassFile.length,
loader, null);
reverseProxyCache.sub(pc).putIfAbsent(loader, Boolean.TRUE);
return pc;
} catch (ClassFormatError e) {
/*
* A ClassFormatError here means that (barring bugs in the
* proxy class generation code) there was some other
* invalid aspect of the arguments supplied to the proxy
* class creation (such as virtual machine limitations
* exceeded).
*/
throw new IllegalArgumentException(e.toString());
}
}

The corresponding call stack:

proxyClassContext:844, Proxy$ProxyBuilder (java.lang.reflect)
<init>:638, Proxy$ProxyBuilder (java.lang.reflect)
<init>:643, Proxy$ProxyBuilder (java.lang.reflect)
lambda$getProxyConstructor$0:429, Proxy (java.lang.reflect)
apply:-1, Proxy$$Lambda/0x00007fd0dc066718 (java.lang.reflect)
get:329, AbstractClassLoaderValue$Memoizer (jdk.internal.loader)
computeIfAbsent:205, AbstractClassLoaderValue (jdk.internal.loader)
getProxyConstructor:427, Proxy (java.lang.reflect)
newProxyInstance:1034, Proxy (java.lang.reflect)
run:301, AnnotationParser$1 (sun.reflect.annotation)
run:299, AnnotationParser$1 (sun.reflect.annotation)
executePrivileged:778, AccessController (java.security)
doPrivileged:319, AccessController (java.security)
annotationForMap:299, AnnotationParser (sun.reflect.annotation)
parseAnnotation2:288, AnnotationParser (sun.reflect.annotation)
parseAnnotations2:121, AnnotationParser (sun.reflect.annotation)
parseSelectAnnotations:102, AnnotationParser (sun.reflect.annotation)
<init>:146, AnnotationType (sun.reflect.annotation)
getInstance:85, AnnotationType (sun.reflect.annotation)
parseAnnotation2:262, AnnotationParser (sun.reflect.annotation)
parseAnnotations2:121, AnnotationParser (sun.reflect.annotation)
parseAnnotations:73, AnnotationParser (sun.reflect.annotation)
createAnnotationData:4246, Class (java.lang)
annotationData:4235, Class (java.lang)
getDeclaredAnnotations:4202, Class (java.lang)
getDeclaredAnnotations:449, AnnotationsScanner (org.springframework.core.annotation)
processClassHierarchy:188, AnnotationsScanner (org.springframework.core.annotation)
processClassHierarchy:196, AnnotationsScanner (org.springframework.core.annotation)
processClassHierarchy:171, AnnotationsScanner (org.springframework.core.annotation)
processClass:108, AnnotationsScanner (org.springframework.core.annotation)
process:92, AnnotationsScanner (org.springframework.core.annotation)
scan:82, AnnotationsScanner (org.springframework.core.annotation)
scan:248, TypeMappedAnnotations (org.springframework.core.annotation)
get:155, TypeMappedAnnotations (org.springframework.core.annotation)
get:137, TypeMappedAnnotations (org.springframework.core.annotation)
findOrder:123, OrderUtils (org.springframework.core.annotation)
getOrderFromAnnotations:116, OrderUtils (org.springframework.core.annotation)
findOrderFromAnnotation:75, AnnotationAwareOrderComparator (org.springframework.core.annotation)
findOrder:68, AnnotationAwareOrderComparator (org.springframework.core.annotation)
getOrder:128, OrderComparator (org.springframework.core)
getOrder:116, OrderComparator (org.springframework.core)
doCompare:86, OrderComparator (org.springframework.core)
compare:73, OrderComparator (org.springframework.core)
countRunAndMakeAscending:355, TimSort (java.util)
sort:220, TimSort (java.util)
sort:1308, Arrays (java.util)
sort:1804, ArrayList (java.util)
sort:111, AnnotationAwareOrderComparator (org.springframework.core.annotation)
load:211, SpringFactoriesLoader (org.springframework.core.io.support)
load:160, SpringFactoriesLoader (org.springframework.core.io.support)
getSpringFactoriesInstances:482, SpringApplication (org.springframework.boot)
getSpringFactoriesInstances:478, SpringApplication (org.springframework.boot)
<init>:283, SpringApplication (org.springframework.boot)
<init>:262, SpringApplication (org.springframework.boot)
run:1342, SpringApplication (org.springframework.boot)
run:1331, SpringApplication (org.springframework.boot)
main:10, DemoApplication (com.example.demo)

Further reading

spring-aop transaction

· One min read

Background

See how Spring AOP applies transactions: the call enters through a CGLIB proxy and is routed to TransactionInterceptor.

Introduction

invoke:118, TransactionInterceptor (org.springframework.transaction.interceptor)
proceed:186, ReflectiveMethodInvocation (org.springframework.aop.framework)
proceed:750, CglibAopProxy$CglibMethodInvocation (org.springframework.aop.framework)
intercept:692, CglibAopProxy$DynamicAdvisedInterceptor (org.springframework.aop.framework)
dealWithOneRow:-1, IdMappingCommonServiceImpl$$EnhancerBySpringCGLIB$$4b7d8558 (com.patpat.datacenter.service.idmapping.impl.common)
testTrans:33, IdmappingConverterTest (com.patpat.datacenter.idmapping)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
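
For context, a bean shaped roughly like the following (a hypothetical reconstruction, not taken from the actual project) produces such a stack: the CGLIB proxy intercepts the call and hands it to TransactionInterceptor before the real method runs.

import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

@Service
public class IdMappingDemoService {

    // Entering through the Spring CGLIB proxy routes the call via
    // DynamicAdvisedInterceptor -> TransactionInterceptor, as in the trace above.
    @Transactional
    public void dealWithOneRow() {
        // business logic runs inside the transaction
    }
}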

rocksdb

· 7 min read

Walking through rocksdb get
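
The backtrace below was captured from RocksDB's bundled C example (c_simple_example.c). For reference, the same read path can be exercised from Java through RocksJava, roughly as in this sketch (my own example; the path and keys are made up):

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class RocksGetDemo {
    static { RocksDB.loadLibrary(); }

    public static void main(String[] args) throws RocksDBException {
        try (Options opts = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(opts, "/tmp/rocks-demo")) {
            db.put("key".getBytes(), "value".getBytes());
            byte[] v = db.get("key".getBytes()); // ends up in DBImpl::GetImpl -> MemTable::Get
            System.out.println(new String(v));   // prints: value
        }
    }
}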

(gdb) bt
#0 std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >::assign (__n=6, __s=0x555555db8135 "value", this=0x7fffffffde90) at /usr/include/c++/12/bits/basic_string.h:1063
#1 rocksdb::SaveValue (arg=0x7fffffffd690, entry=<optimized out>) at db/memtable.cc:1068
#2 0x000055555588eed2 in rocksdb::(anonymous namespace)::SkipListRep::Get (this=<optimized out>, k=..., callback_args=0x7fffffffd690, callback_func=0x555555769fe0 <rocksdb::SaveValue(void*, char const*)>)
at memtable/skiplistrep.cc:90
#3 0x00005555557683c3 in rocksdb::MemTable::GetFromTable (this=this@entry=0x555555db7d70, key=..., max_covering_tombstone_seq=<optimized out>, do_merge=do_merge@entry=true, callback=callback@entry=0x0,
is_blob_index=is_blob_index@entry=0x0, value=0x7fffffffde90, columns=0x0, timestamp=0x0, s=0x7fffffffd950, merge_context=0x7fffffffd990, seq=0x7fffffffd8f8, found_final_value=0x7fffffffd7b6,
merge_in_progress=0x7fffffffd7b7) at db/memtable.cc:1329
#4 0x0000555555769215 in rocksdb::MemTable::Get (this=0x555555db7d70, key=..., value=0x7fffffffde90, columns=0x0, timestamp=timestamp@entry=0x0, s=s@entry=0x7fffffffd950, merge_context=<optimized out>,
max_covering_tombstone_seq=<optimized out>, seq=<optimized out>, read_opts=..., immutable_memtable=<optimized out>, callback=<optimized out>, is_blob_index=<optimized out>, do_merge=<optimized out>)
at db/memtable.cc:1285
#5 0x000055555565e189 in rocksdb::MemTable::Get (do_merge=true, is_blob_index=<optimized out>, callback=<optimized out>, immutable_memtable=false, read_opts=..., max_covering_tombstone_seq=0x7fffffffd8f0,
merge_context=0x7fffffffd990, s=0x7fffffffd950, timestamp=0x0, columns=<optimized out>, value=<optimized out>, key=..., this=<optimized out>) at ./db/memtable.h:279
#6 rocksdb::DBImpl::GetImpl (this=0x555555d7db40, read_options=..., key=..., get_impl_options=...) at db/db_impl/db_impl.cc:2293
#7 0x000055555565522b in rocksdb::DBImpl::GetImpl (this=this@entry=0x555555d7db40, read_options=..., column_family=column_family@entry=0x555555da3ba0, key=..., value=value@entry=0x7fffffffddc0,
timestamp=<optimized out>) at db/db_impl/db_impl.cc:2025
#8 0x0000555555655492 in rocksdb::DBImpl::Get (this=0x555555d7db40, _read_options=..., column_family=0x555555da3ba0, key=..., value=0x7fffffffddc0, timestamp=<optimized out>) at db/db_impl/db_impl.cc:2013
#9 0x000055555564cc57 in rocksdb::DBImpl::Get (this=<optimized out>, read_options=..., column_family=<optimized out>, key=..., value=<optimized out>) at db/db_impl/db_impl.cc:1985
#10 0x0000555555642bf2 in rocksdb::DB::Get (this=this@entry=0x555555d7db40, options=..., column_family=0x555555da3ba0, key=..., value=value@entry=0x7fffffffde90) at ./include/rocksdb/db.h:562
#11 0x0000555555626ed2 in rocksdb::DB::Get (value=0x7fffffffde90, key=..., options=..., this=0x555555d7db40) at ./include/rocksdb/db.h:573
#12 rocksdb_get (db=<optimized out>, options=0x555555dbdc70, key=<optimized out>, keylen=<optimized out>, vallen=0x7fffffffdf00, errptr=0x7fffffffdf10) at db/c.cc:1293
#13 0x00005555556240d7 in main (argc=1, argv=0x7fffffffe078) at c_simple_example.c:67
(gdb) up
#1 rocksdb::SaveValue (arg=0x7fffffffd690, entry=<optimized out>) at db/memtable.cc:1068

Execution reaches this branch in SaveValue:

      case kTypeValue: {                                             // (1) a plain key/value entry
if (s->inplace_update_support) {
s->mem->GetLock(s->key->user_key())->ReadLock();
}

Slice v = GetLengthPrefixedSlice(key_ptr + key_length); // (2) read the value out of the memtable entry

*(s->status) = Status::OK();

if (!s->do_merge) {
// Preserve the value with the goal of returning it as part of
// raw merge operands to the user
// TODO(yanqin) update MergeContext so that timestamps information
// can also be retained.

merge_context->PushOperand(
v, s->inplace_update_support == false /* operand_pinned */);
} else if (*(s->merge_in_progress)) {
assert(s->do_merge);

if (s->value || s->columns) {
// `op_failure_scope` (an output parameter) is not provided (set to
// nullptr) since a failure must be propagated regardless of its
// value.
*(s->status) = MergeHelper::TimedFullMerge(
merge_operator, s->key->user_key(),
MergeHelper::kPlainBaseValue, v, merge_context->GetOperands(),
s->logger, s->statistics, s->clock,
/* update_num_ops_stats */ true, s->value, s->columns,
/* op_failure_scope */ nullptr);
}
} else if (s->value) {
s->value->assign(v.data(), v.size()); // (3) copy the value into s->value (the caller's output buffer)
} else if (s->columns) {
s->columns->SetPlainValue(v);
}

For context, SaveValue is invoked from the memtable lookup inside DBImpl::GetImpl (frame #6 in the backtrace above):

Status DBImpl::GetImpl(const ReadOptions& read_options, const Slice& key,
GetImplOptions& get_impl_options) {
assert(get_impl_options.value != nullptr ||
get_impl_options.merge_operands != nullptr ||
get_impl_options.columns != nullptr);

assert(get_impl_options.column_family);

if (read_options.timestamp) {
const Status s = FailIfTsMismatchCf(get_impl_options.column_family,
*(read_options.timestamp));
if (!s.ok()) {
return s;
}
} else {
const Status s = FailIfCfHasTs(get_impl_options.column_family);
if (!s.ok()) {
return s;
}
}

// Clear the timestamps for returning results so that we can distinguish
// between tombstone or key that has never been written
if (get_impl_options.timestamp) {
get_impl_options.timestamp->clear();
}

GetWithTimestampReadCallback read_cb(0); // Will call Refresh

PERF_CPU_TIMER_GUARD(get_cpu_nanos, immutable_db_options_.clock);
StopWatch sw(immutable_db_options_.clock, stats_, DB_GET);
PERF_TIMER_GUARD(get_snapshot_time);

auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(
get_impl_options.column_family);
auto cfd = cfh->cfd();

if (tracer_) {
// TODO: This mutex should be removed later, to improve performance when
// tracing is enabled.
InstrumentedMutexLock lock(&trace_mutex_);
if (tracer_) {
// TODO: maybe handle the tracing status?
tracer_->Get(get_impl_options.column_family, key).PermitUncheckedError();
}
}

if (get_impl_options.get_merge_operands_options != nullptr) {
for (int i = 0; i < get_impl_options.get_merge_operands_options
->expected_max_number_of_operands;
++i) {
get_impl_options.merge_operands[i].Reset();
}
}

// Acquire SuperVersion
SuperVersion* sv = GetAndRefSuperVersion(cfd);
if (read_options.timestamp && read_options.timestamp->size() > 0) {
const Status s =
FailIfReadCollapsedHistory(cfd, sv, *(read_options.timestamp));
if (!s.ok()) {
ReturnAndCleanupSuperVersion(cfd, sv);
return s;
}
}

TEST_SYNC_POINT_CALLBACK("DBImpl::GetImpl:AfterAcquireSv", nullptr);
TEST_SYNC_POINT("DBImpl::GetImpl:1");
TEST_SYNC_POINT("DBImpl::GetImpl:2");

SequenceNumber snapshot;
if (read_options.snapshot != nullptr) {
if (get_impl_options.callback) {
// Already calculated based on read_options.snapshot
snapshot = get_impl_options.callback->max_visible_seq();
} else {
snapshot =
reinterpret_cast<const SnapshotImpl*>(read_options.snapshot)->number_;
}
} else {
// Note that the snapshot is assigned AFTER referencing the super
// version because otherwise a flush happening in between may compact away
// data for the snapshot, so the reader would see neither data that was be
// visible to the snapshot before compaction nor the newer data inserted
// afterwards.
snapshot = GetLastPublishedSequence();
if (get_impl_options.callback) {
// The unprep_seqs are not published for write unprepared, so it could be
// that max_visible_seq is larger. Seek to the std::max of the two.
// However, we still want our callback to contain the actual snapshot so
// that it can do the correct visibility filtering.
get_impl_options.callback->Refresh(snapshot);

// Internally, WriteUnpreparedTxnReadCallback::Refresh would set
// max_visible_seq = max(max_visible_seq, snapshot)
//
// Currently, the commented out assert is broken by
// InvalidSnapshotReadCallback, but if write unprepared recovery followed
// the regular transaction flow, then this special read callback would not
// be needed.
//
// assert(callback->max_visible_seq() >= snapshot);
snapshot = get_impl_options.callback->max_visible_seq();
}
}
// If timestamp is used, we use read callback to ensure <key,t,s> is returned
// only if t <= read_opts.timestamp and s <= snapshot.
// HACK: temporarily overwrite input struct field but restore
SaveAndRestore<ReadCallback*> restore_callback(&get_impl_options.callback);
const Comparator* ucmp = get_impl_options.column_family->GetComparator();
assert(ucmp);
if (ucmp->timestamp_size() > 0) {
assert(!get_impl_options
.callback); // timestamp with callback is not supported
read_cb.Refresh(snapshot);
get_impl_options.callback = &read_cb;
}
TEST_SYNC_POINT("DBImpl::GetImpl:3");
TEST_SYNC_POINT("DBImpl::GetImpl:4");

// Prepare to store a list of merge operations if merge occurs.
MergeContext merge_context;
SequenceNumber max_covering_tombstone_seq = 0;

Status s;
// First look in the memtable, then in the immutable memtable (if any).
// s is both in/out. When in, s could either be OK or MergeInProgress.
// merge_operands will contain the sequence of merges in the latter case.
LookupKey lkey(key, snapshot, read_options.timestamp);
PERF_TIMER_STOP(get_snapshot_time);

bool skip_memtable = (read_options.read_tier == kPersistedTier &&
has_unpersisted_data_.load(std::memory_order_relaxed));
bool done = false;
std::string* timestamp =
ucmp->timestamp_size() > 0 ? get_impl_options.timestamp : nullptr;
if (!skip_memtable) {
// Get value associated with key
if (get_impl_options.get_value) {
if (sv->mem->Get(
lkey,
get_impl_options.value ? get_impl_options.value->GetSelf()
: nullptr,
get_impl_options.columns, timestamp, &s, &merge_context,
&max_covering_tombstone_seq, read_options,
false /* immutable_memtable */, get_impl_options.callback,
get_impl_options.is_blob_index)) {
done = true;

if (get_impl_options.value) {
get_impl_options.value->PinSelf();
}

RecordTick(stats_, MEMTABLE_HIT);
} else if ((s.ok() || s.IsMergeInProgress()) &&
sv->imm->Get(lkey,
get_impl_options.value
? get_impl_options.value->GetSelf()
: nullptr,
get_impl_options.columns, timestamp, &s,
&merge_context, &max_covering_tombstone_seq,
read_options, get_impl_options.callback,
get_impl_options.is_blob_index)) {
done = true;

if (get_impl_options.value) {
get_impl_options.value->PinSelf();
}

RecordTick(stats_, MEMTABLE_HIT);
}
} else {
// Get Merge Operands associated with key, Merge Operands should not be
// merged and raw values should be returned to the user.
if (sv->mem->Get(lkey, /*value=*/nullptr, /*columns=*/nullptr,
/*timestamp=*/nullptr, &s, &merge_context,
&max_covering_tombstone_seq, read_options,
false /* immutable_memtable */, nullptr, nullptr,
false)) {
done = true;
RecordTick(stats_, MEMTABLE_HIT);
} else if ((s.ok() || s.IsMergeInProgress()) &&
sv->imm->GetMergeOperands(lkey, &s, &merge_context,
&max_covering_tombstone_seq,
read_options)) {
done = true;
RecordTick(stats_, MEMTABLE_HIT);
}
}
if (!done && !s.ok() && !s.IsMergeInProgress()) {
ReturnAndCleanupSuperVersion(cfd, sv);
return s;
}
}
TEST_SYNC_POINT("DBImpl::GetImpl:PostMemTableGet:0");
TEST_SYNC_POINT("DBImpl::GetImpl:PostMemTableGet:1");
PinnedIteratorsManager pinned_iters_mgr;
if (!done) {
PERF_TIMER_GUARD(get_from_output_files_time);
sv->current->Get(
read_options, lkey, get_impl_options.value, get_impl_options.columns,
timestamp, &s, &merge_context, &max_covering_tombstone_seq,
&pinned_iters_mgr,
get_impl_options.get_value ? get_impl_options.value_found : nullptr,
nullptr, nullptr,
get_impl_options.get_value ? get_impl_options.callback : nullptr,
get_impl_options.get_value ? get_impl_options.is_blob_index : nullptr,
get_impl_options.get_value);
RecordTick(stats_, MEMTABLE_MISS);
}

{
PERF_TIMER_GUARD(get_post_process_time);

RecordTick(stats_, NUMBER_KEYS_READ);
size_t size = 0;
if (s.ok()) {
const auto& merge_threshold = read_options.merge_operand_count_threshold;
if (merge_threshold.has_value() &&
merge_context.GetNumOperands() > merge_threshold.value()) {
s = Status::OkMergeOperandThresholdExceeded();
}

if (get_impl_options.get_value) {
if (get_impl_options.value) {
size = get_impl_options.value->size();
} else if (get_impl_options.columns) {
size = get_impl_options.columns->serialized_size();
}
} else {
// Return all merge operands for get_impl_options.key
*get_impl_options.number_of_operands =
static_cast<int>(merge_context.GetNumOperands());
if (*get_impl_options.number_of_operands >
get_impl_options.get_merge_operands_options
->expected_max_number_of_operands) {
s = Status::Incomplete(
Status::SubCode::KMergeOperandsInsufficientCapacity);
} else {
// Each operand depends on one of the following resources: `sv`,
// `pinned_iters_mgr`, or `merge_context`. It would be crazy expensive
// to reference `sv` for each operand relying on it because `sv` is
// (un)ref'd in all threads using the DB. Furthermore, we do not track
// on which resource each operand depends.
//
// To solve this, we bundle the resources in a `GetMergeOperandsState`
// and manage them with a `SharedCleanablePtr` shared among the
// `PinnableSlice`s we return. This bundle includes one `sv` reference
// and ownership of the `merge_context` and `pinned_iters_mgr`
// objects.
bool ref_sv = ShouldReferenceSuperVersion(merge_context);
if (ref_sv) {
assert(!merge_context.GetOperands().empty());
SharedCleanablePtr shared_cleanable;
GetMergeOperandsState* state = nullptr;
state = new GetMergeOperandsState();
state->merge_context = std::move(merge_context);
state->pinned_iters_mgr = std::move(pinned_iters_mgr);

sv->Ref();

state->sv_handle = new SuperVersionHandle(
this, &mutex_, sv,
immutable_db_options_.avoid_unnecessary_blocking_io);

shared_cleanable.Allocate();
shared_cleanable->RegisterCleanup(CleanupGetMergeOperandsState,
state /* arg1 */,
nullptr /* arg2 */);
for (size_t i = 0; i < state->merge_context.GetOperands().size();
++i) {
const Slice& sl = state->merge_context.GetOperands()[i];
size += sl.size();

get_impl_options.merge_operands->PinSlice(
sl, nullptr /* cleanable */);
if (i == state->merge_context.GetOperands().size() - 1) {
shared_cleanable.MoveAsCleanupTo(
get_impl_options.merge_operands);
} else {
shared_cleanable.RegisterCopyWith(
get_impl_options.merge_operands);
}
get_impl_options.merge_operands++;
}
} else {
for (const Slice& sl : merge_context.GetOperands()) {
size += sl.size();
get_impl_options.merge_operands->PinSelf(sl);
get_impl_options.merge_operands++;
}
}
}
}
RecordTick(stats_, BYTES_READ, size);
PERF_COUNTER_ADD(get_read_bytes, size);
}

ReturnAndCleanupSuperVersion(cfd, sv);

RecordInHistogram(stats_, BYTES_PER_READ, size);
}
return s;
}

Further reading