MySQL Clustering Solutions

Overview

MySQL clustering addresses the performance bottlenecks and availability problems that a single-server database hits under high concurrency and large data volumes. The main approaches are master-slave replication, read/write splitting, and sharding (splitting databases and tables); each comes with its own applicable scenarios and implementation complexity.

Master-Slave Replication

Replication Principles

MySQL replication is asynchronous by default and built on the binlog: the master writes data changes to its binary log, and replicas pull and replay them. Typical my.cnf settings for the master and a replica:

ini
# Master configuration (my.cnf)
[mysqld]
server-id = 1
log-bin = mysql-bin
binlog-format = ROW
binlog-do-db = myapp    # note: binlog filtering has cross-database pitfalls; use with care

# Replica configuration (my.cnf)
[mysqld]
server-id = 2
relay-log = relay-bin
read-only = 1

Replication Flow

java
// Outline of the master-slave replication flow (pseudocode: master/slave
// are illustrative objects, not a real API)
public class MasterSlaveReplication {
    
    public void replicationProcess() {
        // 1. The master executes the transaction and writes the change to its binlog
        master.writeBinlog(sqlStatement);
        
        // 2. The replica's IO thread connects to the master and reads binlog events
        slave.ioThread.readBinlogFromMaster();
        
        // 3. The IO thread appends the events to the local relay log
        slave.ioThread.writeRelayLog(binlogEvent);
        
        // 4. The replica's SQL thread reads the relay log
        slave.sqlThread.readRelayLog();
        
        // 5. The SQL thread replays the statements, bringing the replica in sync
        slave.sqlThread.executeSql(sqlStatement);
    }
}

Configuring Master-Slave Replication

sql
-- 1. Create a replication user on the master
CREATE USER 'repl'@'%' IDENTIFIED BY 'password';
GRANT REPLICATION SLAVE ON *.* TO 'repl'@'%';
FLUSH PRIVILEGES;

-- 2. Get the master's status and note the File and Position values
SHOW MASTER STATUS;

-- 3. Point the replica at the master
CHANGE MASTER TO
    MASTER_HOST='192.168.1.100',
    MASTER_USER='repl',
    MASTER_PASSWORD='password',
    MASTER_LOG_FILE='mysql-bin.000001',
    MASTER_LOG_POS=154;

-- 4. Start replication on the replica
START SLAVE;

-- 5. Check replication status; both threads should show Yes
SHOW SLAVE STATUS\G
-- Note: MySQL 8.0.22+ offers the newer START REPLICA / SHOW REPLICA STATUS
-- syntax; the older statements still work
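
If GTIDs are enabled on both master and replica (gtid_mode=ON, enforce_gtid_consistency=ON), the replica can use auto-positioning instead, which avoids recording binlog file names and positions by hand. A minimal sketch, reusing the repl account from above:

sql
-- GTID-based replication setup
CHANGE MASTER TO
    MASTER_HOST='192.168.1.100',
    MASTER_USER='repl',
    MASTER_PASSWORD='password',
    MASTER_AUTO_POSITION=1;
START SLAVE;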

Replication Modes

Asynchronous Replication (Default)

The master returns as soon as the transaction commits locally, without waiting for any replica to acknowledge. This gives the best performance, but transactions that have not yet reached a replica can be lost if the master crashes. Suitable when strict consistency is not required.

Semi-Synchronous Replication

sql
-- Install the semi-synchronous replication plugins
INSTALL PLUGIN rpl_semi_sync_master SONAME 'semisync_master.so';
INSTALL PLUGIN rpl_semi_sync_slave SONAME 'semisync_slave.so';

-- Enable semi-sync on the master; after the timeout (ms) it degrades to async
SET GLOBAL rpl_semi_sync_master_enabled = 1;
SET GLOBAL rpl_semi_sync_master_timeout = 1000;

-- Enable semi-sync on the replica
SET GLOBAL rpl_semi_sync_slave_enabled = 1;
-- Note: MySQL 5.7+ defaults to rpl_semi_sync_master_wait_point = AFTER_SYNC
-- ("lossless" semi-sync)
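
Whether semi-sync is actually in effect can be verified through status variables; once it times out and degrades, Rpl_semi_sync_master_status flips to OFF:

sql
-- Inspect semi-sync settings and runtime status
SHOW VARIABLES LIKE 'rpl_semi_sync%';
SHOW STATUS LIKE 'Rpl_semi_sync_master_status';
-- Number of transactions committed without a replica acknowledgment
SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx';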

Parallel Replication

sql
-- MySQL 5.7+ supports group-commit-based parallel replication
-- (STOP SLAVE before changing these, then START SLAVE again)
SET GLOBAL slave_parallel_type = 'LOGICAL_CLOCK';
SET GLOBAL slave_parallel_workers = 4;
-- Commit in the master's commit order, so the replica never exposes
-- an intermediate state the master never had
SET GLOBAL slave_preserve_commit_order = 1;
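
With parallel replication enabled, the state of each worker thread can be observed in performance_schema:

sql
-- Worker thread state and most recent error, per applier worker
SELECT WORKER_ID, SERVICE_STATE, LAST_ERROR_MESSAGE
FROM performance_schema.replication_applier_status_by_worker;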

Read/Write Splitting

Application-Layer Read/Write Splitting

java
@Configuration
public class DataSourceConfig {
    
    @Bean
    public DataSource masterDataSource() {
        HikariDataSource dataSource = new HikariDataSource();
        dataSource.setJdbcUrl("jdbc:mysql://master:3306/myapp");
        dataSource.setUsername("app_user");
        dataSource.setPassword("password");
        return dataSource;
    }
    
    @Bean
    public DataSource slaveDataSource() {
        HikariDataSource dataSource = new HikariDataSource();
        dataSource.setJdbcUrl("jdbc:mysql://slave:3306/myapp");
        dataSource.setUsername("app_user");
        dataSource.setPassword("password");
        return dataSource;
    }
    
    // The routing data source is @Primary so the ORM layer
    // gets the routable data source rather than a fixed one
    @Bean
    @Primary
    public DataSource routingDataSource() {
        RoutingDataSource routingDataSource = new RoutingDataSource();
        Map<Object, Object> dataSourceMap = new HashMap<>();
        dataSourceMap.put("master", masterDataSource());
        dataSourceMap.put("slave", slaveDataSource());
        
        routingDataSource.setTargetDataSources(dataSourceMap);
        routingDataSource.setDefaultTargetDataSource(masterDataSource());
        return routingDataSource;
    }
}

// Dynamic routing: each connection lookup consults the thread-local context
public class RoutingDataSource extends AbstractRoutingDataSource {
    
    @Override
    protected Object determineCurrentLookupKey() {
        return DataSourceContextHolder.getDataSourceType();
    }
}

// Data source context: holds the current thread's routing key
public class DataSourceContextHolder {
    private static final ThreadLocal<String> contextHolder = new ThreadLocal<>();
    
    public static void setDataSourceType(String dataSourceType) {
        contextHolder.set(dataSourceType);
    }
    
    public static String getDataSourceType() {
        return contextHolder.get();
    }
    
    public static void clearDataSourceType() {
        contextHolder.remove();
    }
}

// Service layer usage.
// Caveat: Spring binds the connection when the transaction starts, i.e. before
// the method body runs, so setting the key inside a @Transactional method as
// shown here is illustrative only; in production the key is usually set by an
// AOP aspect ordered ahead of the transaction aspect (see the sketch below)
@Service
public class UserService {
    
    @Autowired
    private UserRepository userRepository;
    
    // Writes go to the master
    @Transactional
    public void createUser(User user) {
        DataSourceContextHolder.setDataSourceType("master");
        try {
            userRepository.save(user);
        } finally {
            DataSourceContextHolder.clearDataSourceType();
        }
    }
    
    // Reads go to the replica
    @Transactional(readOnly = true)
    public User getUser(Long id) {
        DataSourceContextHolder.setDataSourceType("slave");
        try {
            return userRepository.findById(id).orElse(null);
        } finally {
            DataSourceContextHolder.clearDataSourceType();
        }
    }
}
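
Setting the ThreadLocal by hand in every method is easy to forget and, as noted above, must happen before the transaction begins. A custom annotation plus an aspect handles both concerns in one place. A sketch; the annotation name, aspect, and ordering are illustrative assumptions, not part of the original design:

java
// Marks methods that should be routed to the replica (hypothetical annotation)
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface ReadOnlyDataSource {
}

// @Order(0) runs this aspect before the transaction aspect,
// whose default order is lowest precedence
@Aspect
@Component
@Order(0)
public class DataSourceRoutingAspect {
    
    // In a real project, use the annotation's fully qualified name in the pointcut
    @Around("@annotation(ReadOnlyDataSource)")
    public Object routeToSlave(ProceedingJoinPoint pjp) throws Throwable {
        DataSourceContextHolder.setDataSourceType("slave");
        try {
            return pjp.proceed();
        } finally {
            DataSourceContextHolder.clearDataSourceType();
        }
    }
}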

Middleware-Based Read/Write Splitting

yaml
# MyCAT configuration example
# Note: MyCAT actually uses schema.xml (XML); YAML is used here only to
# sketch the structure
schema:
  name: myapp
  checkSQLschema: false
  sqlMaxLimit: 100
  dataNode: dn1

dataNode:
  name: dn1
  dataHost: dh1
  database: myapp

dataHost:
  name: dh1
  maxCon: 1000
  minCon: 10
  balance: 1  # read/write split policy: 1 = readHosts and standby writeHosts share reads
  writeType: 0
  dbType: mysql
  dbDriver: native
  writeHost:
    host: master
    url: 192.168.1.100:3306
    user: mycat
    password: password
  readHost:
    host: slave1
    url: 192.168.1.101:3306
    user: mycat
    password: password

Database and Table Sharding

Vertical Splitting

Vertical Database Splitting

sql
-- Split databases by business domain
-- User database
CREATE DATABASE user_db;
USE user_db;
CREATE TABLE users (...);
CREATE TABLE user_profiles (...);

-- Order database
CREATE DATABASE order_db;
USE order_db;
CREATE TABLE orders (...);
CREATE TABLE order_items (...);

-- Product database
CREATE DATABASE product_db;
USE product_db;
CREATE TABLE products (...);
CREATE TABLE categories (...);

Vertical Table Splitting

sql
-- Split a wide table by columns
-- Core user data
CREATE TABLE users (
    id BIGINT PRIMARY KEY,
    username VARCHAR(50),
    email VARCHAR(100),
    created_at TIMESTAMP
);

-- Extended user data
CREATE TABLE user_profiles (
    user_id BIGINT PRIMARY KEY,
    avatar VARCHAR(255),
    bio TEXT,
    preferences JSON,
    FOREIGN KEY (user_id) REFERENCES users(id)
);
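
After the split, hot queries touch only the narrow users table; when the full record is needed, one join brings the halves back together:

sql
-- Read the complete user record
SELECT u.id, u.username, u.email, p.avatar, p.bio
FROM users u
LEFT JOIN user_profiles p ON p.user_id = u.id
WHERE u.id = 12345;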

Horizontal Splitting

Horizontal Database Sharding

java
// Database sharding strategy: route to one of 4 physical databases by user ID
@Component
public class DatabaseShardingStrategy {
    
    private static final int DB_COUNT = 4;
    
    public String determineDatabase(Long userId) {
        int shardIndex = (int) (userId % DB_COUNT);
        return "user_db_" + shardIndex;
    }
}

// Sharding data source configuration
@Configuration
public class ShardingDataSourceConfig {
    
    @Bean
    public DataSource shardingDataSource() throws SQLException {
        Map<String, DataSource> dataSourceMap = new HashMap<>();
        
        for (int i = 0; i < 4; i++) {
            HikariDataSource dataSource = new HikariDataSource();
            dataSource.setJdbcUrl("jdbc:mysql://db" + i + ":3306/user_db_" + i);
            dataSource.setUsername("app_user");
            dataSource.setPassword("password");
            dataSourceMap.put("user_db_" + i, dataSource);
        }
        
        // Configure the sharding rules via ShardingSphere.
        // Note: the exact createDataSource signature varies across ShardingSphere
        // versions; buildShardingRules() is a hypothetical helper standing in
        // for the rule construction, which is omitted here
        Collection<RuleConfiguration> rules = buildShardingRules();
        Properties props = new Properties();
        return ShardingSphereDataSourceFactory.createDataSource(dataSourceMap, rules, props);
    }
}

Horizontal Table Sharding

sql
-- Option 1: native MySQL range partitioning by date
-- (partitioning splits one table internally; related to, but distinct from,
-- splitting into separate physical tables)
CREATE TABLE orders (
    id BIGINT,
    user_id BIGINT,
    order_date DATE,
    amount DECIMAL(10,2),
    -- the primary key of a partitioned table must include the partitioning column
    PRIMARY KEY (id, order_date)
) PARTITION BY RANGE (TO_DAYS(order_date)) (
    PARTITION p202301 VALUES LESS THAN (TO_DAYS('2023-02-01')),
    PARTITION p202302 VALUES LESS THAN (TO_DAYS('2023-03-01')),
    PARTITION p202303 VALUES LESS THAN (TO_DAYS('2023-04-01'))
);

-- Option 2: split into multiple physical tables, routed by user ID
CREATE TABLE user_orders_0 LIKE user_orders;
CREATE TABLE user_orders_1 LIKE user_orders;
CREATE TABLE user_orders_2 LIKE user_orders;
CREATE TABLE user_orders_3 LIKE user_orders;

Sharding Strategies

Range Sharding

java
// Shard by ID range
public class RangeShardingStrategy {
    
    public String determineTable(Long id) {
        if (id <= 1000000) {
            return "users_0";
        } else if (id <= 2000000) {
            return "users_1";
        } else {
            return "users_2";
        }
    }
}

Hash Sharding

java
// Shard by hash (modulo) of the key
public class HashShardingStrategy {
    
    private static final int TABLE_COUNT = 4;
    
    public String determineTable(Long id) {
        int shardIndex = (int) (id % TABLE_COUNT);
        return "users_" + shardIndex;
    }
}

Consistent Hashing

java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Map;
import java.util.TreeMap;

// Consistent hash sharding: adding or removing a node only remaps
// the keys on the adjacent arc of the ring
public class ConsistentHashSharding {
    
    private final TreeMap<Long, String> ring = new TreeMap<>();
    private final int virtualNodes = 150;  // virtual nodes smooth out data skew
    
    public void addNode(String node) {
        for (int i = 0; i < virtualNodes; i++) {
            long hash = hash(node + ":" + i);
            ring.put(hash, node);
        }
    }
    
    public String getNode(String key) {
        long hash = hash(key);
        // walk clockwise to the first virtual node; wrap around past the end
        Map.Entry<Long, String> entry = ring.ceilingEntry(hash);
        if (entry == null) {
            entry = ring.firstEntry();
        }
        return entry.getValue();
    }
    
    // Simplified hash: first 8 bytes of an MD5 digest
    // (MurmurHash and similar are also common in production)
    private long hash(String key) {
        try {
            MessageDigest md5 = MessageDigest.getInstance("MD5");
            byte[] digest = md5.digest(key.getBytes(StandardCharsets.UTF_8));
            long h = 0;
            for (int i = 0; i < 8; i++) {
                h = (h << 8) | (digest[i] & 0xFF);
            }
            return h;
        } catch (NoSuchAlgorithmException e) {
            throw new IllegalStateException(e);
        }
    }
}
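
A usage sketch (the node names are hypothetical shard databases):

java
public class ConsistentHashDemo {
    public static void main(String[] args) {
        ConsistentHashSharding sharding = new ConsistentHashSharding();
        sharding.addNode("user_db_0");
        sharding.addNode("user_db_1");
        sharding.addNode("user_db_2");

        // The same key always lands on the same node; adding a node
        // only remaps the keys on one small arc of the ring
        System.out.println(sharding.getNode("user:12345"));
    }
}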

High Availability Solutions

MySQL Group Replication

ini
# Group Replication configuration (my.cnf)
[mysqld]
server-id = 1
gtid-mode = ON
enforce-gtid-consistency = ON
binlog-checksum = NONE
log-slave-updates = ON
log-bin = binlog
binlog-format = ROW

plugin-load-add = group_replication.so
group_replication_group_name = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
group_replication_start_on_boot = OFF
group_replication_local_address = "192.168.1.100:33061"
group_replication_group_seeds = "192.168.1.100:33061,192.168.1.101:33061,192.168.1.102:33061"

sql
-- Start Group Replication (bootstrap only once, on the first node)
SET GLOBAL group_replication_bootstrap_group = ON;
START GROUP_REPLICATION;
SET GLOBAL group_replication_bootstrap_group = OFF;
-- On the remaining nodes, simply run START GROUP_REPLICATION;
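
Once the group is formed, member state can be checked through performance_schema (the MEMBER_ROLE column is a MySQL 8.0 addition):

sql
-- List group members; MEMBER_STATE should be ONLINE
SELECT MEMBER_HOST, MEMBER_PORT, MEMBER_STATE, MEMBER_ROLE
FROM performance_schema.replication_group_members;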

MySQL InnoDB Cluster

javascript
// Configure InnoDB Cluster from MySQL Shell (JS mode)
// 1. Create the cluster on the first node
var cluster = dba.createCluster('myCluster');

// 2. Add the remaining instances (example IPs matching the
//    Group Replication nodes above)
cluster.addInstance('root@192.168.1.101:3306');
cluster.addInstance('root@192.168.1.102:3306');

// 3. Check cluster status
cluster.status();

// 4. Bootstrap MySQL Router (run in the operating system shell,
//    not in MySQL Shell)
// mysqlrouter --bootstrap root@192.168.1.100:3306 --user=mysqlrouter

Failover

java
// Automatic failover: periodic health checks, switching to the first
// healthy data source in priority order
@Component
public class FailoverManager {
    
    private static final Logger log = LoggerFactory.getLogger(FailoverManager.class);
    
    private final List<DataSource> dataSources;  // candidates in priority order
    private volatile DataSource currentDataSource;
    
    public FailoverManager(List<DataSource> dataSources) {
        this.dataSources = dataSources;
    }
    
    @Scheduled(fixedDelay = 5000)
    public void healthCheck() {
        for (DataSource dataSource : dataSources) {
            if (isHealthy(dataSource)) {
                if (currentDataSource != dataSource) {
                    switchDataSource(dataSource);
                }
                return;
            }
        }
        
        // No data source is available
        handleAllDataSourcesDown();
    }
    
    private boolean isHealthy(DataSource dataSource) {
        try (Connection conn = dataSource.getConnection()) {
            return conn.isValid(3);  // 3-second validation timeout
        } catch (SQLException e) {
            return false;
        }
    }
    
    private void switchDataSource(DataSource newDataSource) {
        log.info("Switching to new data source: {}", newDataSource);
        currentDataSource = newDataSource;
        // Tell the application layer that the data source changed
        publishDataSourceSwitchEvent(newDataSource);
    }
    
    private void publishDataSourceSwitchEvent(DataSource newDataSource) {
        // e.g. broadcast via Spring's ApplicationEventPublisher; omitted here
    }
    
    private void handleAllDataSourcesDown() {
        log.error("All data sources are down, manual intervention required");
    }
}

Performance Optimization

Connection Pool Optimization

java
// Tuned HikariCP configuration
@Configuration
public class DataSourceOptimization {
    
    @Bean
    public DataSource optimizedDataSource() {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl("jdbc:mysql://localhost:3306/myapp");
        config.setUsername("app_user");
        config.setPassword("password");
        
        // Pool size: bigger is not better; size it to CPU cores and disk capability
        config.setMaximumPoolSize(20);
        config.setMinimumIdle(5);
        
        // Timeouts and lifetimes (ms); keep maxLifetime a bit below MySQL's wait_timeout
        config.setConnectionTimeout(30000);
        config.setIdleTimeout(600000);
        config.setMaxLifetime(1800000);
        
        // Connection validation.
        // Note: with a JDBC4 driver HikariCP uses Connection.isValid() by default,
        // so setting connectionTestQuery is usually unnecessary
        config.setConnectionTestQuery("SELECT 1");
        config.setValidationTimeout(5000);
        
        return new HikariDataSource(config);
    }
}

Query Optimization

sql
-- Query optimization under sharding
-- Prefer single-shard queries
SELECT * FROM users WHERE user_id = 12345;  -- hits exactly one shard

-- Avoid scanning every shard: always include the shard key
SELECT * FROM orders WHERE order_date BETWEEN '2023-01-01' AND '2023-01-31'
AND shard_key = 'value';  -- query carries the shard key

-- JOIN on the shard key so both sides live on the same shard
SELECT u.*, o.* 
FROM users u 
JOIN orders o ON u.user_id = o.user_id  -- same shard key
WHERE u.user_id = 12345;
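
Cross-shard pagination is especially expensive: every shard must return all rows up to the requested page, and the middleware or application then merge-sorts them. For example:

sql
-- Logical SQL: page 2, 10 rows per page
SELECT * FROM orders ORDER BY order_date DESC LIMIT 10 OFFSET 10;
-- Each shard is effectively asked for LIMIT 20, and the results are merged
-- and trimmed; the deeper the page, the more every shard scans and ships.
-- Prefer cursor-style paging (WHERE order_date < last_seen) where possible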

Monitoring and Operations

Replication Monitoring

sql
-- Monitor replication lag
SHOW SLAVE STATUS\G

-- Key fields:
--   Seconds_Behind_Master  replication lag in seconds (estimated from binlog
--                          timestamps, not perfectly accurate)
--   Slave_IO_Running       IO thread state
--   Slave_SQL_Running      SQL thread state
--   Last_Error             most recent error message

-- Monitor replication positions
SHOW MASTER STATUS;     -- master position
SHOW SLAVE STATUS\G     -- replica position

Performance Monitoring

java
// Custom metrics via Micrometer (assumes a QueryExecutionEvent that carries
// the database, table, duration, and outcome of each query)
@Component
public class DatabaseMetrics {
    
    private final MeterRegistry meterRegistry;
    
    public DatabaseMetrics(MeterRegistry meterRegistry) {
        this.meterRegistry = meterRegistry;
    }
    
    @EventListener
    public void handleQueryEvent(QueryExecutionEvent event) {
        // The event is published after the query finishes, so record its duration
        Timer.builder("database.query.time")
            .tag("database", event.getDatabase())
            .tag("table", event.getTable())
            .register(meterRegistry)
            .record(event.getDuration());
            
        meterRegistry.counter("database.query.count",
            "database", event.getDatabase(),
            "status", event.isSuccess() ? "success" : "error")
            .increment();
    }
}

Best Practices

  1. Architecture: choose the clustering approach that fits the business.
  2. Shard key: pick it carefully to avoid hot spots.
  3. Consistency: strike a deliberate balance between performance and consistency.
  4. Monitoring and alerting: build out a complete monitoring system.
  5. Capacity planning: plan ahead to avoid emergency scale-outs.
  6. Backups: define a solid backup and recovery strategy.
  7. Failure drills: rehearse failures regularly to validate the HA design.

Summary

Choosing a MySQL clustering approach is a trade-off across business requirements, data volume, and concurrency. From simple master-slave replication to full database-and-table sharding, every approach has the scenarios it fits. In practice, several approaches are usually combined to reach the best mix of performance and availability.