准备工作
1 开发环境:window,idea,maven,spring boot,mybatis,druid(淘宝数据库连接池)
2 数据库服务器:linux,mysql master(192.168.203.135),mysql slave(192.168.203.139)(注:后文代码中拼写为 salve,属笔误,配置键保持一致即可)
3 读写分离之前必须先做好数据库的主从复制,关于主从复制不是该篇幅的主要叙述重点,关于主从复制读者可以自行google或者百度,教程基本都是一样,可行
注意以下几点:
a:做主从复制时,首先确定两台服务器的mysql没有任何自定义库(否则配置完成后,之前已存在的数据无法同步;若两个库中已有完全相同的数据,应该也可以正常同步)
b:server_id必须配置不一样
c:防火墙不能把mysql服务端口给拦截了(默认3306)
d:确保两台mysql可以相互访问
e:重置master,slave。Reset master;reset slave;开启关闭slave,start slave;stop slave;
f:主DB server和从DB server数据库的版本一致
4 读写分离方式:
4-1 基于程序代码内部实现: 在代码中根据select 、insert进行路由分类,这类方法也是目前生产环境下应用最广泛的。优点是性能较好,因为程序在代码中实现,不需要增加额外的硬件开支,缺点是需要开发人员来实现,运维人员无从下手。
4-2 基于中间代理层实现: 代理一般介于应用服务器和数据库服务器之间,代理数据库服务器接收到应用服务器的请求后,根据判断转发到后端数据库。有以下代表性的程序。
本文基于两种方式的叙述:
基于应用层代码实现方式(内容都是通过代码体现,必要的说明存在代码中)
1 配置pom.xml,导入需要的jar包
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.lishun</groupId>
    <artifactId>mysql_master_salve</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <packaging>jar</packaging>

    <name>mysql_master_salve</name>
    <description>Demo project for Spring Boot</description>

    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>1.5.10.RELEASE</version>
        <relativePath/> <!-- lookup parent from repository -->
    </parent>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
        <java.version>1.8</java.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.mybatis.spring.boot</groupId>
            <artifactId>mybatis-spring-boot-starter</artifactId>
            <version>1.3.1</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <scope>runtime</scope>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>
        <!-- 版本由 spring-boot-starter-parent 统一管理;
             不要写 <version>RELEASE</version>,否则会绕过父 POM 的依赖管理,
             导致构建不可复现且可能与 Boot 1.5.x 不兼容 -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>druid</artifactId>
            <version>1.0.18</version>
        </dependency>
        <!-- AOP 支持:用于通过切面按方法切换数据源 -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-aop</artifactId>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>
            <plugin>
                <groupId>org.mybatis.generator</groupId>
                <artifactId>mybatis-generator-maven-plugin</artifactId>
                <version>1.3.2</version>
                <dependencies>
                    <dependency>
                        <groupId>mysql</groupId>
                        <artifactId>mysql-connector-java</artifactId>
                        <version>5.1.43</version>
                    </dependency>
                </dependencies>
                <configuration>
                    <overwrite>true</overwrite>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>
2 配置application.properties
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
|
server.port= 9022 #mybatis配置*mapper.xml文件和实体别名 mybatis.mapper-locations=classpath:mapper/*.xml mybatis.type-aliases- package =com.lishun.entity spring.datasource.driver- class -name=com.mysql.jdbc.Driver spring.datasource.password= 123456 spring.datasource.username=root #写节点 spring.datasource.master.url=jdbc:mysql: //192.168.203.135:3306/worldmap #两个个读节点(为了方便测试这里用的是同一个服务器数据库,生产环境应该不使用) spring.datasource.salve1.url=jdbc:mysql: //192.168.203.139:3306/worldmap spring.datasource.salve2.url=jdbc:mysql: //192.168.203.139:3306/worldmap # druid 连接池 Setting # 初始化大小,最小,最大 spring.datasource.type=com.alibaba.druid.pool.DruidDataSource spring.datasource.initialSize= 5 spring.datasource.minIdle= 5 spring.datasource.maxActive= 20 # 配置获取连接等待超时的时间 spring.datasource.maxWait= 60000 # 配置间隔多久才进行一次检测,检测需要关闭的空闲连接,单位是毫秒 spring.datasource.timeBetweenEvictionRunsMillis= 60000 # 配置一个连接在池中最小生存的时间,单位是毫秒 spring.datasource.minEvictableIdleTimeMillis= 300000 spring.datasource.validationQuery=SELECT 1 FROM rscipc_sys_user spring.datasource.testWhileIdle= true spring.datasource.testOnBorrow= false spring.datasource.testOnReturn= false # 打开PSCache,并且指定每个连接上PSCache的大小 spring.datasource.poolPreparedStatements= true spring.datasource.maxPoolPreparedStatementPerConnectionSize= 20 # 配置监控统计拦截的filters,去掉后监控界面sql无法统计, \'wall\' 用于防火墙 spring.datasource.filters=stat,wall,log4j # 通过connectProperties属性来打开mergeSql功能;慢SQL记录 spring.datasource.connectionProperties=druid.stat.mergeSql= true ;druid.stat.slowSqlMillis= 5000 spring.datasource.logSlowSql= true #End |
3 启动类(注意:其他需要spring管理的bean(service,config等)必须放在该启动类的子包下,不然会扫描不到bean,导致注入失败)
1
2
3
4
5
6
7
|
@SpringBootApplication @MapperScan ( "com.lishun.mapper" ) //!!!!!! 注意:扫描所有mapper public class MysqlMasterSalveApplication { public static void main(String[] args) { SpringApplication.run(MysqlMasterSalveApplication. class , args); } } |
4 动态数据源 DynamicDataSource
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
|
/** * @author lishun * @Description:动态数据源, 继承AbstractRoutingDataSource * @date 2017/8/9 */ public class DynamicDataSource extends AbstractRoutingDataSource { public static final Logger log = LoggerFactory.getLogger(DynamicDataSource. class ); /** * 默认数据源 */ public static final String DEFAULT_DS = "read_ds" ; private static final ThreadLocal<String> contextHolder = new ThreadLocal<>(); public static void setDB(String dbType) { // 设置数据源名 log.info( "切换到{}数据源" , dbType); contextHolder.set(dbType); } public static void clearDB() { contextHolder.remove(); } // 清除数据源名 @Override protected Object determineCurrentLookupKey() { return contextHolder.get(); } } |
5 线程池配置数据源
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
|
@Configuration public class DruidConfig { private Logger logger = LoggerFactory.getLogger(DruidConfig. class ); @Value ( "${spring.datasource.master.url}" ) private String masterUrl; @Value ( "${spring.datasource.salve1.url}" ) private String salve1Url; @Value ( "${spring.datasource.salve2.url}" ) private String salve2Url; @Value ( "${spring.datasource.username}" ) private String username; @Value ( "${spring.datasource.password}" ) private String password; @Value ( "${spring.datasource.driver-class-name}" ) private String driverClassName; @Value ( "${spring.datasource.initialSize}" ) private int initialSize; @Value ( "${spring.datasource.minIdle}" ) private int minIdle; @Value ( "${spring.datasource.maxActive}" ) private int maxActive; @Value ( "${spring.datasource.maxWait}" ) private int maxWait; @Value ( "${spring.datasource.timeBetweenEvictionRunsMillis}" ) private int timeBetweenEvictionRunsMillis; @Value ( "${spring.datasource.minEvictableIdleTimeMillis}" ) private int minEvictableIdleTimeMillis; @Value ( "${spring.datasource.validationQuery}" ) private String validationQuery; @Value ( "${spring.datasource.testWhileIdle}" ) private boolean testWhileIdle; @Value ( "${spring.datasource.testOnBorrow}" ) private boolean testOnBorrow; @Value ( "${spring.datasource.testOnReturn}" ) private boolean testOnReturn; @Value ( "${spring.datasource.filters}" ) private String filters; @Value ( "${spring.datasource.logSlowSql}" ) private String logSlowSql; @Bean public ServletRegistrationBean druidServlet() { logger.info( "init Druid Servlet Configuration " ); ServletRegistrationBean reg = new ServletRegistrationBean(); reg.setServlet( new StatViewServlet()); reg.addUrlMappings( "/druid/*" ); reg.addInitParameter( "loginUsername" , username); reg.addInitParameter( "loginPassword" , password); reg.addInitParameter( "logSlowSql" , logSlowSql); return reg; } @Bean public FilterRegistrationBean filterRegistrationBean() { FilterRegistrationBean filterRegistrationBean = new 
FilterRegistrationBean(); filterRegistrationBean.setFilter( new WebStatFilter()); filterRegistrationBean.addUrlPatterns( "/*" ); filterRegistrationBean.addInitParameter( "exclusions" , "*.js,*.gif,*.jpg,*.png,*.css,*.ico,/druid/*" ); filterRegistrationBean.addInitParameter( "profileEnable" , "true" ); return filterRegistrationBean; } @Bean public DataSource druidDataSource() { DruidDataSource datasource = new DruidDataSource(); datasource.setUrl(masterUrl); datasource.setUsername(username); datasource.setPassword(password); datasource.setDriverClassName(driverClassName); datasource.setInitialSize(initialSize); datasource.setMinIdle(minIdle); datasource.setMaxActive(maxActive); datasource.setMaxWait(maxWait); datasource.setTimeBetweenEvictionRunsMillis(timeBetweenEvictionRunsMillis); datasource.setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis); datasource.setValidationQuery(validationQuery); datasource.setTestWhileIdle(testWhileIdle); datasource.setTestOnBorrow(testOnBorrow); datasource.setTestOnReturn(testOnReturn); try { datasource.setFilters(filters); } catch (SQLException e) { logger.error( "druid configuration initialization filter" , e); } Map<Object, Object> dsMap = new HashMap(); dsMap.put( "read_ds_1" , druidDataSource_read1()); dsMap.put( "read_ds_2" , druidDataSource_read2()); dsMap.put( "write_ds" , datasource); DynamicDataSource dynamicDataSource = new DynamicDataSource(); dynamicDataSource.setTargetDataSources(dsMap); return dynamicDataSource; } public DataSource druidDataSource_read1() { DruidDataSource datasource = new DruidDataSource(); datasource.setUrl(salve1Url); datasource.setUsername(username); datasource.setPassword(password); datasource.setDriverClassName(driverClassName); datasource.setInitialSize(initialSize); datasource.setMinIdle(minIdle); datasource.setMaxActive(maxActive); datasource.setMaxWait(maxWait); datasource.setTimeBetweenEvictionRunsMillis(timeBetweenEvictionRunsMillis); 
datasource.setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis); datasource.setValidationQuery(validationQuery); datasource.setTestWhileIdle(testWhileIdle); datasource.setTestOnBorrow(testOnBorrow); datasource.setTestOnReturn(testOnReturn); try { datasource.setFilters(filters); } catch (SQLException e) { logger.error( "druid configuration initialization filter" , e); } return datasource; } public DataSource druidDataSource_read2() { DruidDataSource datasource = new DruidDataSource(); datasource.setUrl(salve2Url); datasource.setUsername(username); datasource.setPassword(password); datasource.setDriverClassName(driverClassName); datasource.setInitialSize(initialSize); datasource.setMinIdle(minIdle); datasource.setMaxActive(maxActive); datasource.setMaxWait(maxWait); datasource.setTimeBetweenEvictionRunsMillis(timeBetweenEvictionRunsMillis); datasource.setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis); datasource.setValidationQuery(validationQuery); datasource.setTestWhileIdle(testWhileIdle); datasource.setTestOnBorrow(testOnBorrow); datasource.setTestOnReturn(testOnReturn); try { datasource.setFilters(filters); } catch (SQLException e) { logger.error( "druid configuration initialization filter" , e); } return datasource; } } |
6 数据源注解:在service层通过数据源注解来指定数据源
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
|