master
dengwendi 2023-11-15 15:02:14 +08:00
commit 7a23a8660b
12 changed files with 1618 additions and 0 deletions

Makefile (new file)
@@ -0,0 +1,847 @@
BUILD_DIR = $(shell pwd)/build
CN_CONF = $(BUILD_DIR)/run/polardbx-sql/conf/server.properties
DN_BASE_DIR = $(BUILD_DIR)/run/polardbx-engine
DN_DATA_DIR = $(DN_BASE_DIR)/data
DN_CONF = $(DN_DATA_DIR)/my.cnf
CDC_CONF = $(BUILD_DIR)/run/polardbx-cdc/polardbx-binlog.standalone/conf/config.properties
CN_STARTUP = $(BUILD_DIR)/run/polardbx-sql/bin/startup.sh
CDC_STARTUP = $(BUILD_DIR)/run/polardbx-cdc/polardbx-binlog.standalone/bin/start.sh
UNAME_S = $(shell uname -s)
OS = $(shell lsb_release -si)
V = $(shell lsb_release -r | awk '{print $$2}'|awk -F"." '{print $$1}')
CPU_CORES = $(shell cat /proc/cpuinfo | grep processor| wc -l)
export CFLAGS := -O3 -g -fexceptions -static-libgcc -fno-omit-frame-pointer -fno-strict-aliasing
export CXXFLAGS := -O3 -g -fexceptions -static-libgcc -fno-omit-frame-pointer -fno-strict-aliasing
.PHONY: polardb-x
polardb-x: gms dn cn cdc configs
cd $(BUILD_DIR)/run ; \
if [ -d "bin" ]; then \
rm -rf bin; \
fi; \
mkdir bin; \
echo "$$START_SCRIPT" > bin/polardb-x.sh; \
chmod +x bin/polardb-x.sh
chmod +x $(BUILD_DIR)/run/polardbx-sql/bin/startup.sh
chmod +x $(BUILD_DIR)/run/polardbx-cdc/polardbx-binlog.standalone/bin/daemon.sh
.PHONY: gms
gms: sources deps
. /etc/profile; \
cd $(BUILD_DIR)/polardbx-engine; \
cmake . \
-DFORCE_INSOURCE_BUILD=ON \
-DSYSCONFDIR:PATH="$(BUILD_DIR)/run/polardbx-engine/u01/mysql" \
-DCMAKE_INSTALL_PREFIX:PATH="$(BUILD_DIR)/run/polardbx-engine/u01/mysql" \
-DCMAKE_BUILD_TYPE:STRING=RelWithDebInfo \
-DWITH_NORMANDY_CLUSTER=ON \
-DWITH_7U:BOOL=OFF \
-DWITH_PROTOBUF:STRING=bundled \
-DINSTALL_LAYOUT=STANDALONE \
-DMYSQL_MAINTAINER_MODE=0 \
-DWITH_EMBEDDED_SERVER=0 \
-DWITH_SSL=openssl \
-DWITH_ZLIB=bundled \
-DWITH_MYISAM_STORAGE_ENGINE=1 \
-DWITH_INNOBASE_STORAGE_ENGINE=1 \
-DWITH_PARTITION_STORAGE_ENGINE=1 \
-DWITH_CSV_STORAGE_ENGINE=1 \
-DWITH_ARCHIVE_STORAGE_ENGINE=1 \
-DWITH_BLACKHOLE_STORAGE_ENGINE=1 \
-DWITH_FEDERATED_STORAGE_ENGINE=1 \
-DWITH_PERFSCHEMA_STORAGE_ENGINE=1 \
-DWITH_EXAMPLE_STORAGE_ENGINE=0 \
-DWITH_TEMPTABLE_STORAGE_ENGINE=1 \
-DWITH_XENGINE_STORAGE_ENGINE=0 \
-DUSE_CTAGS=0 \
-DWITH_EXTRA_CHARSETS=all \
-DWITH_DEBUG=0 \
-DENABLE_DEBUG_SYNC=0 \
-DENABLE_DTRACE=0 \
-DENABLED_PROFILING=1 \
-DENABLED_LOCAL_INFILE=1 \
-DWITH_BOOST="extra/boost/boost_1_70_0.tar.gz"; \
make -j $(CPU_CORES) && make install
rm -rf $(BUILD_DIR)/run/polardbx-engine/u01/mysql/mysql-test
.PHONY: dn
dn: gms
.PHONY: cdc
cdc: sources deps cn
. /etc/profile; \
cd $(BUILD_DIR)/polardbx-cdc; \
mvn -U clean install -Dmaven.test.skip=true -DfailIfNoTests=false -e -P release; \
mkdir $(BUILD_DIR)/run/polardbx-cdc; \
cp polardbx-cdc-assemble/target/polardbx-binlog.tar.gz $(BUILD_DIR)/run/polardbx-cdc/; \
cd $(BUILD_DIR)/run/polardbx-cdc/; \
tar xzvf polardbx-binlog.tar.gz; \
rm -f polardbx-binlog.tar.gz
.PHONY: cn
cn: sources deps
. /etc/profile; \
cd $(BUILD_DIR)/polardbx-sql; \
mvn install -DskipTests -D env=release; \
mkdir $(BUILD_DIR)/run/polardbx-sql; \
cp target/polardbx-server-*.tar.gz $(BUILD_DIR)/run/polardbx-sql/; \
cd $(BUILD_DIR)/run/polardbx-sql; \
tar xzvf polardbx-server-*.tar.gz; \
rm -f polardbx-server-*.tar.gz
.PHONY: configs
configs: gms dn cdc cn
# config gms & dn
mkdir -p $(DN_DATA_DIR)
echo "$$MY_CNF" > $(DN_CONF)
mkdir -p $(DN_DATA_DIR)/data
mkdir -p $(DN_DATA_DIR)/log
mkdir -p $(DN_DATA_DIR)/run
mkdir -p $(DN_DATA_DIR)/tmp
mkdir -p $(DN_DATA_DIR)/mysql
# start gms
if [ -e "$(DN_DATA_DIR)/data/auto.cnf" ]; then \
echo "gms root account already initialized."; \
else \
$(BUILD_DIR)/run/polardbx-engine/u01/mysql/bin/mysqld --defaults-file=$(DN_CONF) --initialize-insecure; \
fi ; \
$(BUILD_DIR)/run/polardbx-engine/u01/mysql/bin/mysqld --defaults-file=$(DN_CONF) -D
# config cn
awk -F"=" '/^serverPort/{$$2="=8527";print;next}1' $(CN_CONF) > tmp && mv tmp $(CN_CONF)
awk -F"=" '/^metaDbAddr/{$$2="=127.0.0.1:4886";print;next}1' $(CN_CONF) > tmp && mv tmp $(CN_CONF)
awk -F"=" '/^metaDbXprotoPort/{$$2="=34886";print;next}1' $(CN_CONF) > tmp && mv tmp $(CN_CONF)
awk -F"=" '/^galaxyXProtocol/{$$2="=2";print;next}1' $(CN_CONF) > tmp && mv tmp $(CN_CONF)
cd $(BUILD_DIR)/run/polardbx-sql/; \
META=`bin/startup.sh -I -P asdf1234ghjk5678 -d 127.0.0.1:4886:34886 -u polardbx_root -S "123456" 2>&1`; \
echo "meta: $${META}"; \
echo "$${META}" | grep "metaDbPass" >> meta.tmp; \
META_DB_PASS=`cat meta.tmp | grep "metaDbPass"`; \
echo "metadb password: $${META_DB_PASS}"; \
ps aux|grep "$(BUILD_DIR)/run/polardbx-engine/u01/mysql/bin/mysqld" | grep -v "grep" | awk '{print $$2}' |xargs kill -9; \
if [ "" = "$${META_DB_PASS}" ]; then \
echo "meta db init failed."; \
exit 1; \
fi; \
cat meta.tmp >> $(CN_CONF)
# config cdc
cd $(BUILD_DIR)/run/polardbx-sql/; \
META_DB_PASS=`cat meta.tmp | awk -F"=" '{print $$2}'`; \
awk -F"=" '/^useEncryptedPassword/{$$2="=true";print;next}1' $(CDC_CONF) > tmp && mv tmp $(CDC_CONF); \
awk -F"=" '/^polardbx.instance.id/{$$2="=polardbx-polardbx";print;next}1' $(CDC_CONF) > tmp && mv tmp $(CDC_CONF); \
awk -F"=" '/^metaDb_url/{$$2="=jdbc:mysql://127.0.0.1:4886/polardbx_meta_db_polardbx?useSSL=false";print;next}1' $(CDC_CONF) > tmp && mv tmp $(CDC_CONF); \
awk -F"=" '/^metaDb_username/{$$2="=my_polarx";print;next}1' $(CDC_CONF) > tmp && mv tmp $(CDC_CONF); \
sed 's/metaDb_password.*//g' $(CDC_CONF) > tmp && mv tmp $(CDC_CONF); \
cat meta.tmp >> $(CDC_CONF); \
sed 's/metaDbPasswd/metaDb_password/g' $(CDC_CONF) > tmp && mv tmp $(CDC_CONF); \
awk -F"=" '/^polarx_url/{$$2="=jdbc:mysql://127.0.0.1:8527/__cdc__";print;next}1' $(CDC_CONF) > tmp && mv tmp $(CDC_CONF); \
awk -F"=" '/^polarx_username/{$$2="=polardbx_root";print;next}1' $(CDC_CONF) > tmp && mv tmp $(CDC_CONF); \
awk -F"=" '/^polarx_password/{$$2="=UY1tQsgNvP8GJGGP8vHKKA==";print;next}1' $(CDC_CONF) > tmp && mv tmp $(CDC_CONF); \
sed -i 's/admin/polarx/g' $(CDC_CONF); \
rm meta.tmp
.PHONY: sources
sources: deps
mkdir -p $(BUILD_DIR)
cd $(BUILD_DIR); \
if [ -d "polardbx-sql" ]; then \
echo "polardbx-sql exsits."; \
else \
git clone https://github.com/polardb/polardbx-sql.git; \
cd polardbx-sql; \
git submodule update --init; \
fi
cd $(BUILD_DIR); \
if [ -d "polardbx-engine" ]; then \
echo "polardbx-engine exists."; \
else \
git clone https://github.com/polardb/polardbx-engine.git; \
cd polardbx-engine; \
wget https://boostorg.jfrog.io/artifactory/main/release/1.70.0/source/boost_1_70_0.tar.gz; \
mkdir -p extra/boost; \
cp boost_1_70_0.tar.gz extra/boost/; \
if [ "$(UNAME_S)" = "Darwin" ]; then \
echo "$${VERSION_PATCH}" >> macos.patch; \
git apply macos.patch; \
rm macos.patch; \
fi ; \
fi
cd $(BUILD_DIR); \
if [ -d "polardbx-cdc" ]; then \
echo "polardbx-cdc exists."; \
else \
git clone https://github.com/polardb/polardbx-cdc.git; \
fi
.PHONY: deps
deps:
ifeq ($(UNAME_S), Darwin)
@echo "Install the following tools and libraries before your building.\n"
@echo "tools : cmake3, make, automake, gcc, g++, bison, git, jdk11+, maven3"
@echo "libraries : openssl1.1 \n\n"
@echo "Press any key to continue..."
@read -n 1
else
ifeq ($(OS), CentOS)
sudo yum remove -y cmake
sudo yum install -y epel-release
sudo yum install -y wget java-11-openjdk-devel cmake3 automake bison openssl-devel ncurses-devel libaio-devel mysql
ifeq ($(V), 8)
sudo yum install -y libtirpc-devel dnf-plugins-core
sudo yum config-manager --set-enabled PowerTools
sudo yum install -y rpcgen
sudo yum groupinstall -y "Development Tools"
sudo yum install -y gcc gcc-c++
endif
ifeq ($(V), 7)
if [ -e "/usr/bin/cmake" ]; then \
sudo rm /usr/bin/cmake -f ; \
fi
sudo ln -s /usr/bin/cmake3 /usr/bin/cmake
sudo yum install -y centos-release-scl
sudo yum install -y devtoolset-7-gcc devtoolset-7-gcc-c++ devtoolset-7-binutils
if ! grep "source /opt/rh/devtoolset-7/enable" /etc/profile; then \
echo "source /opt/rh/devtoolset-7/enable" | sudo tee -a /etc/profile ; \
fi
endif
endif
ifneq ($(filter $(OS), Ubuntu CentOS),)
if [ ! -d /opt/apache-maven-3.9.1 ]; then \
sudo wget https://mirrors.aliyun.com/apache/maven/maven-3/3.9.1/binaries/apache-maven-3.9.1-bin.tar.gz -P /tmp && \
sudo tar xf /tmp/apache-maven-3.9.1-bin.tar.gz -C /opt && \
sudo rm -f /tmp/apache-maven-3.9.1-bin.tar.gz && \
sudo ln -fs /opt/apache-maven-3.9.1 /opt/maven && \
echo 'export M2_HOME=/opt/maven' | sudo tee /etc/profile.d/maven.sh && \
echo 'export PATH=$${M2_HOME}/bin:$${PATH}' | sudo tee -a /etc/profile.d/maven.sh && \
sudo chmod +x /etc/profile.d/maven.sh && \
echo '<settings><mirrors><mirror>' | sudo tee /opt/maven/conf/settings.xml && \
echo '<id>aliyunmaven</id>' | sudo tee -a /opt/maven/conf/settings.xml && \
echo '<mirrorOf>*</mirrorOf>' | sudo tee -a /opt/maven/conf/settings.xml && \
echo '<name>aliyun public</name>' | sudo tee -a /opt/maven/conf/settings.xml && \
echo '<url>https://maven.aliyun.com/repository/public</url>' | sudo tee -a /opt/maven/conf/settings.xml && \
echo '</mirror></mirrors></settings>' | sudo tee -a /opt/maven/conf/settings.xml; \
fi
if ! grep "source /etc/profile.d/maven.sh" /etc/profile; then \
echo "source /etc/profile.d/maven.sh" | sudo tee -a /etc/profile ; \
fi
endif
ifeq ($(OS), Ubuntu)
sudo apt-get update
sudo apt-get install -y git openjdk-8-jdk make automake cmake bison pkg-config libaio-dev libncurses5-dev \
libsasl2-dev libldap2-dev libssl-dev gcc-7 g++-7 mysql-client
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-7 60 \
--slave /usr/bin/g++ g++ /usr/bin/g++-7
sudo update-alternatives --config gcc
endif
endif
clean:
rm -rf $(BUILD_DIR)/run
cleanAll:
rm -rf $(BUILD_DIR)
# long variables
define START_SCRIPT
#!/bin/bash
PROG_NAME=$$0
ACTION=$$1
usage() {
echo "Usage: $${PROG_NAME} [start | restart | stop]"
exit 1;
}
if [ $$# -lt 1 ]; then
usage
fi
if [ x"$$mem_size" = "x" ]; then
export mem_size=4096
fi
buffer_pool_size_byte=$$(echo "scale=0; $$mem_size*1024*1024*0.3/1" | bc)
awk -v size=$$buffer_pool_size_byte -F"=" '/^innodb_buffer_pool_size/{$$2="="size;print;next}1' $(DN_CONF) > tmp && mv tmp $(DN_CONF)
retry() {
retry_interval=5
retry_cnt=0
retry_limit=10
succeed=0
while [ $${retry_cnt} -lt $${retry_limit} ]; do
if eval "$$1"; then
succeed=1
return 0
fi
echo "Fail to $$1, retry..."
((retry_cnt++))
sleep "$${retry_interval}"
done
if [ $${succeed} -eq 0 ]; then
echo "$$1 failed."
return 1
fi
return 0
}
start() {
start_dn
echo "start cn..."
$(BUILD_DIR)/run/polardbx-sql/bin/startup.sh -P asdf1234ghjk5678
echo "cn starts."
echo "start cdc..."
$(BUILD_DIR)/run/polardbx-cdc/polardbx-binlog.standalone/bin/daemon.sh start
echo "cdc starts."
if ! retry "mysql -h127.1 -P8527 -upolardbx_root -e 'SELECT 1'"; then
echo "cn starts failed."
exit 1
fi
echo "try polardb-x by:"
echo "mysql -h127.1 -P8527 -upolardbx_root"
}
start_dn() {
echo "start gms & dn..."
$(BUILD_DIR)/run/polardbx-engine/u01/mysql/bin/mysqld --defaults-file=$(DN_CONF) -D
if ! retry "mysql -h127.1 -P4886 -uroot -e 'SELECT 1'"; then
echo "gms and dn start failed."
exit 1
fi
echo "gms and dn are running."
}
stop() {
echo "stop cdc..."
ps aux | grep "DaemonBootStrap" | grep -v "grep" | awk '{print $$2}'| xargs kill -9
ps aux | grep "TaskBootStrap" | grep -v "grep" | awk '{print $$2}'| xargs kill -9
ps aux | grep "DumperBootStrap" | grep -v "grep" | awk '{print $$2}'| xargs kill -9
echo "cdc is stopped."
echo "stop cn..."
ps aux | grep "TddlLauncher" | grep -v "grep" | awk '{print $$2}' | xargs kill -9
if [ -f "$(BUILD_DIR)/run/polardbx-sql/bin/tddl.pid" ]; then
rm $(BUILD_DIR)/run/polardbx-sql/bin/tddl.pid
fi
echo "cn is stopped."
echo "stop dn & gms..."
ps aux | grep "$(BUILD_DIR)/run/polardbx-engine/u01/mysql/bin/mysqld" | grep -v "grep" | awk '{print $$2}'| xargs kill -15
sleep 10
echo "dn & gms are stopped."
}
case "$${ACTION}" in
start)
start
;;
stop)
stop
;;
restart)
stop
sleep 1
start
;;
start_dn)
start_dn
;;
*)
usage
;;
esac
endef
export START_SCRIPT
define MY_CNF
[mysqld]
auto_increment_increment = 1
auto_increment_offset = 1
autocommit = ON
automatic_sp_privileges = ON
avoid_temporal_upgrade = OFF
back_log = 3000
binlog_cache_size = 1048576
binlog_checksum = CRC32
binlog_order_commits = OFF
binlog_row_image = full
binlog_rows_query_log_events = ON
binlog_stmt_cache_size = 32768
binlog_transaction_dependency_tracking = WRITESET
block_encryption_mode = "aes-128-ecb"
bulk_insert_buffer_size = 4194304
character_set_server = utf8
concurrent_insert = 2
connect_timeout = 10
datadir = $(DN_DATA_DIR)/data
default_authentication_plugin = mysql_native_password
default_storage_engine = InnoDB
default_time_zone = +8:00
default_week_format = 0
delay_key_write = ON
delayed_insert_limit = 100
delayed_insert_timeout = 300
delayed_queue_size = 1000
disconnect_on_expired_password = ON
div_precision_increment = 4
end_markers_in_json = OFF
enforce_gtid_consistency = ON
eq_range_index_dive_limit = 200
event_scheduler = OFF
expire_logs_days = 0
explicit_defaults_for_timestamp = OFF
flush_time = 0
ft_max_word_len = 84
ft_min_word_len = 4
ft_query_expansion_limit = 20
general_log = OFF
general_log_file = $(DN_DATA_DIR)/log/general.log
group_concat_max_len = 1024
gtid_mode = ON
host_cache_size = 644
init_connect = ''
innodb_adaptive_flushing = ON
innodb_adaptive_flushing_lwm = 10
innodb_adaptive_hash_index = OFF
innodb_adaptive_max_sleep_delay = 150000
innodb_autoextend_increment = 64
innodb_autoinc_lock_mode = 2
innodb_buffer_pool_chunk_size = 33554432
innodb_buffer_pool_dump_at_shutdown = ON
innodb_buffer_pool_dump_pct = 25
innodb_buffer_pool_instances = 8
innodb_buffer_pool_load_at_startup = ON
innodb_change_buffer_max_size = 25
innodb_change_buffering = none
innodb_checksum_algorithm = crc32
innodb_cmp_per_index_enabled = OFF
innodb_commit_concurrency = 0
innodb_compression_failure_threshold_pct = 5
innodb_compression_level = 6
innodb_compression_pad_pct_max = 50
innodb_concurrency_tickets = 5000
innodb_data_file_purge = ON
innodb_data_file_purge_interval = 100
innodb_data_file_purge_max_size = 128
innodb_data_home_dir = $(DN_DATA_DIR)/mysql
innodb_deadlock_detect = ON
innodb_disable_sort_file_cache = ON
innodb_equal_gcn_visible = 0
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
innodb_flush_neighbors = 0
innodb_flush_sync = ON
innodb_ft_cache_size = 8000000
innodb_ft_enable_diag_print = OFF
innodb_ft_enable_stopword = ON
innodb_ft_max_token_size = 84
innodb_ft_min_token_size = 3
innodb_ft_num_word_optimize = 2000
innodb_ft_result_cache_limit = 2000000000
innodb_ft_sort_pll_degree = 2
innodb_ft_total_cache_size = 640000000
innodb_io_capacity = 20000
innodb_io_capacity_max = 40000
innodb_lock_wait_timeout = 50
innodb_log_buffer_size = 16777216
innodb_log_checksums = ON
innodb_log_file_size = 134217728
innodb_log_group_home_dir = $(DN_DATA_DIR)/mysql
innodb_lru_scan_depth = 8192
innodb_max_dirty_pages_pct = 75
innodb_max_dirty_pages_pct_lwm = 0
innodb_max_purge_lag = 0
innodb_max_purge_lag_delay = 0
innodb_max_undo_log_size = 1073741824
innodb_monitor_disable =
innodb_monitor_enable =
innodb_old_blocks_pct = 37
innodb_old_blocks_time = 1000
innodb_online_alter_log_max_size = 134217728
innodb_open_files = 20000
innodb_optimize_fulltext_only = OFF
innodb_page_cleaners = 4
innodb_print_all_deadlocks = ON
innodb_purge_batch_size = 300
innodb_purge_rseg_truncate_frequency = 128
innodb_purge_threads = 4
innodb_random_read_ahead = OFF
innodb_read_ahead_threshold = 0
innodb_read_io_threads = 4
innodb_rollback_on_timeout = OFF
innodb_rollback_segments = 128
innodb_snapshot_update_gcn = 1
innodb_sort_buffer_size = 1048576
innodb_spin_wait_delay = 6
innodb_stats_auto_recalc = ON
innodb_stats_method = nulls_equal
innodb_stats_on_metadata = OFF
innodb_stats_persistent = ON
innodb_stats_persistent_sample_pages = 20
innodb_stats_transient_sample_pages = 8
innodb_status_output = OFF
innodb_status_output_locks = OFF
innodb_strict_mode = ON
innodb_sync_array_size = 16
innodb_sync_spin_loops = 30
innodb_table_locks = ON
innodb_tcn_cache_level = block
innodb_thread_concurrency = 0
innodb_thread_sleep_delay = 0
innodb_write_io_threads = 4
interactive_timeout = 7200
key_buffer_size = 16777216
key_cache_age_threshold = 300
key_cache_block_size = 1024
key_cache_division_limit = 100
lc_time_names = en_US
local_infile = OFF
lock_wait_timeout = 1800
log-bin-index = $(DN_DATA_DIR)/mysql/mysql-bin.index
log_bin = $(DN_DATA_DIR)/mysql/mysql-bin.log
log_bin_trust_function_creators = ON
log_bin_use_v1_row_events = 0
log_error = $(DN_DATA_DIR)/log/alert.log
log_error_verbosity = 2
log_queries_not_using_indexes = OFF
log_slave_updates = 0
log_slow_admin_statements = ON
log_slow_slave_statements = ON
log_throttle_queries_not_using_indexes = 0
long_query_time = 1
loose_ccl_max_waiting_count = 0
loose_ccl_queue_bucket_count = 4
loose_ccl_queue_bucket_size = 64
loose_ccl_wait_timeout = 86400
loose_cluster-id = 1234
loose_cluster-info = 127.0.0.1:14886@1
loose_consensus_auto_leader_transfer = ON
loose_consensus_auto_reset_match_index = ON
loose_consensus_election_timeout = 10000
loose_consensus_io_thread_cnt = 8
loose_consensus_large_trx = ON
loose_consensus_log_cache_size = 536870912
loose_consensus_max_delay_index = 10000
loose_consensus_max_log_size = 20971520
loose_consensus_max_packet_size = 131072
loose_consensus_prefetch_cache_size = 268435456
loose_consensus_worker_thread_cnt = 8
loose_galaxyx_port = 32886
loose_implicit_primary_key = 1
loose_information_schema_stats_expiry = 86400
loose_innodb_buffer_pool_in_core_file = OFF
loose_innodb_commit_cleanout_max_rows = 9999999999
loose_innodb_doublewrite_pages = 64
loose_innodb_lizard_stat_enabled = OFF
loose_innodb_log_compressed_pages = ON
loose_innodb_log_optimize_ddl = OFF
loose_innodb_log_write_ahead_size = 4096
loose_innodb_multi_blocks_enabled = ON
loose_innodb_numa_interleave = OFF
loose_innodb_parallel_read_threads = 1
loose_innodb_undo_retention = 1800
loose_innodb_undo_space_reserved_size = 1024
loose_innodb_undo_space_supremum_size = 102400
loose_internal_tmp_mem_storage_engine = TempTable
loose_new_rpc = ON
loose_optimizer_switch = index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,engine_condition_pushdown=on,index_condition_pushdown=on,mrr=on,mrr_cost_based=on,block_nested_loop=on,batched_key_access=off,materialization=on,semijoin=on,loosescan=on,firstmatch=on,subquery_materialization_cost_based=on,use_index_extensions=on
loose_optimizer_trace = enabled=off,one_line=off
loose_optimizer_trace_features = greedy_search=on,range_optimizer=on,dynamic_range=on,repeated_subselect=on
loose_performance-schema_instrument = 'wait/lock/metadata/sql/mdl=ON'
loose_performance_point_lock_rwlock_enabled = ON
loose_performance_schema-instrument = 'memory/%%=COUNTED'
loose_performance_schema_accounts_size = 10000
loose_performance_schema_consumer_events_stages_current = ON
loose_performance_schema_consumer_events_stages_history = ON
loose_performance_schema_consumer_events_stages_history_long = ON
loose_performance_schema_consumer_events_statements_current = OFF
loose_performance_schema_consumer_events_statements_history = OFF
loose_performance_schema_consumer_events_statements_history_long = OFF
loose_performance_schema_consumer_events_transactions_current = OFF
loose_performance_schema_consumer_events_transactions_history = OFF
loose_performance_schema_consumer_events_transactions_history_long = OFF
loose_performance_schema_consumer_events_waits_current = OFF
loose_performance_schema_consumer_events_waits_history = OFF
loose_performance_schema_consumer_events_waits_history_long = OFF
loose_performance_schema_consumer_global_instrumentation = OFF
loose_performance_schema_consumer_statements_digest = OFF
loose_performance_schema_consumer_thread_instrumentation = OFF
loose_performance_schema_digests_size = 10000
loose_performance_schema_error_size = 0
loose_performance_schema_events_stages_history_long_size = 0
loose_performance_schema_events_stages_history_size = 0
loose_performance_schema_events_statements_history_long_size = 0
loose_performance_schema_events_statements_history_size = 0
loose_performance_schema_events_transactions_history_long_size = 0
loose_performance_schema_events_transactions_history_size = 0
loose_performance_schema_events_waits_history_long_size = 0
loose_performance_schema_events_waits_history_size = 0
loose_performance_schema_hosts_size = 10000
loose_performance_schema_instrument = '%%=OFF'
loose_performance_schema_max_cond_classes = 0
loose_performance_schema_max_cond_instances = 10000
loose_performance_schema_max_digest_length = 0
loose_performance_schema_max_digest_sample_age = 0
loose_performance_schema_max_file_classes = 0
loose_performance_schema_max_file_handles = 0
loose_performance_schema_max_file_instances = 1000
loose_performance_schema_max_index_stat = 10000
loose_performance_schema_max_memory_classes = 0
loose_performance_schema_max_metadata_locks = 10000
loose_performance_schema_max_mutex_classes = 0
loose_performance_schema_max_mutex_instances = 10000
loose_performance_schema_max_prepared_statements_instances = 1000
loose_performance_schema_max_program_instances = 10000
loose_performance_schema_max_rwlock_classes = 0
loose_performance_schema_max_rwlock_instances = 10000
loose_performance_schema_max_socket_classes = 0
loose_performance_schema_max_socket_instances = 1000
loose_performance_schema_max_sql_text_length = 0
loose_performance_schema_max_stage_classes = 0
loose_performance_schema_max_statement_classes = 0
loose_performance_schema_max_statement_stack = 1
loose_performance_schema_max_table_handles = 10000
loose_performance_schema_max_table_instances = 1000
loose_performance_schema_max_table_lock_stat = 10000
loose_performance_schema_max_thread_classes = 0
loose_performance_schema_max_thread_instances = 10000
loose_performance_schema_session_connect_attrs_size = 0
loose_performance_schema_setup_actors_size = 10000
loose_performance_schema_setup_objects_size = 10000
loose_performance_schema_users_size = 10000
loose_persist_binlog_to_redo = OFF
loose_persist_binlog_to_redo_size_limit = 1048576
loose_rds_audit_log_buffer_size = 16777216
loose_rds_audit_log_enabled = OFF
loose_rds_audit_log_event_buffer_size = 8192
loose_rds_audit_log_row_limit = 100000
loose_rds_audit_log_version = MYSQL_V1
loose_recovery_apply_binlog = OFF
loose_replica_read_timeout = 3000
loose_rpc_port = 34886
loose_session_track_system_variables = "*"
loose_session_track_transaction_info = OFF
loose_slave_parallel_workers = 32
low_priority_updates = 0
lower_case_table_names = 1
master_info_file = $(DN_DATA_DIR)/mysql/master.info
master_info_repository = TABLE
master_verify_checksum = OFF
max_allowed_packet = 1073741824
max_binlog_cache_size = 18446744073709551615
max_binlog_stmt_cache_size = 18446744073709551615
max_connect_errors = 65536
max_connections = 5532
max_error_count = 1024
max_execution_time = 0
max_heap_table_size = 67108864
max_join_size = 18446744073709551615
max_length_for_sort_data = 4096
max_points_in_geometry = 65536
max_prepared_stmt_count = 16382
max_seeks_for_key = 18446744073709551615
max_sort_length = 1024
max_sp_recursion_depth = 0
max_user_connections = 5000
max_write_lock_count = 102400
min_examined_row_limit = 0
myisam_sort_buffer_size = 262144
mysql_native_password_proxy_users = OFF
net_buffer_length = 16384
net_read_timeout = 30
net_retry_count = 10
net_write_timeout = 60
ngram_token_size = 2
open_files_limit = 65535
opt_indexstat = ON
opt_tablestat = ON
optimizer_prune_level = 1
optimizer_search_depth = 62
optimizer_trace_limit = 1
optimizer_trace_max_mem_size = 1048576
optimizer_trace_offset = -1
performance_schema = ON
port = 4886
preload_buffer_size = 32768
query_alloc_block_size = 8192
query_prealloc_size = 8192
range_alloc_block_size = 4096
range_optimizer_max_mem_size = 8388608
read_rnd_buffer_size = 442368
relay_log = $(DN_DATA_DIR)/mysql/slave-relay.log
relay_log_index = $(DN_DATA_DIR)/mysql/slave-relay-log.index
relay_log_info_file = $(DN_DATA_DIR)/mysql/slave-relay-log.info
relay_log_info_repository = TABLE
relay_log_purge = OFF
relay_log_recovery = OFF
replicate_same_server_id = OFF
loose_rotate_log_table_last_name =
server_id = 1234
session_track_gtids = OFF
session_track_schema = ON
session_track_state_change = OFF
sha256_password_proxy_users = OFF
show_old_temporals = OFF
skip_slave_start = OFF
skip_ssl = ON
slave_exec_mode = strict
slave_load_tmpdir = $(DN_DATA_DIR)/tmp
slave_net_timeout = 4
slave_parallel_type = LOGICAL_CLOCK
slave_pending_jobs_size_max = 1073741824
slave_sql_verify_checksum = OFF
slave_type_conversions =
slow_launch_time = 2
slow_query_log = OFF
slow_query_log_file = $(DN_DATA_DIR)/mysql/slow_query.log
socket = $(DN_DATA_DIR)/run/mysql.sock
sort_buffer_size = 868352
sql_mode = NO_ENGINE_SUBSTITUTION
stored_program_cache = 256
sync_binlog = 1
sync_master_info = 10000
sync_relay_log = 1
sync_relay_log_info = 10000
table_open_cache_instances = 16
temptable_max_ram = 1073741824
thread_cache_size = 100
thread_stack = 262144
tls_version = TLSv1,TLSv1.1,TLSv1.2
tmp_table_size = 2097152
tmpdir = $(DN_DATA_DIR)/tmp
transaction_alloc_block_size = 8192
transaction_isolation = REPEATABLE-READ
transaction_prealloc_size = 4096
transaction_write_set_extraction = XXHASH64
updatable_views_with_limit = YES
wait_timeout = 28800
innodb_buffer_pool_size = 644245094
[mysqld_safe]
pid_file = $(DN_DATA_DIR)/run/mysql.pid
endef
export MY_CNF
define VERSION_PATCH
diff --git a/VERSION b/MYSQL_VERSION
similarity index 100%
rename from VERSION
rename to MYSQL_VERSION
diff --git a/cmake/mysql_version.cmake b/cmake/mysql_version.cmake
index bed6e9f0..b76b7ba4 100644
--- a/cmake/mysql_version.cmake
+++ b/cmake/mysql_version.cmake
@@ -28,17 +28,17 @@ SET(SHARED_LIB_MAJOR_VERSION "21")
SET(SHARED_LIB_MINOR_VERSION "1")
SET(PROTOCOL_VERSION "10")
-# Generate "something" to trigger cmake rerun when VERSION changes
+# Generate "something" to trigger cmake rerun when MYSQL_VERSION changes
CONFIGURE_FILE(
- $${CMAKE_SOURCE_DIR}/VERSION
+ $${CMAKE_SOURCE_DIR}/MYSQL_VERSION
$${CMAKE_BINARY_DIR}/VERSION.dep
)
-# Read value for a variable from VERSION.
+# Read value for a variable from MYSQL_VERSION.
MACRO(MYSQL_GET_CONFIG_VALUE keyword var)
IF(NOT $${var})
- FILE (STRINGS $${CMAKE_SOURCE_DIR}/VERSION str REGEX "^[ ]*$${keyword}=")
+ FILE (STRINGS $${CMAKE_SOURCE_DIR}/MYSQL_VERSION str REGEX "^[ ]*$${keyword}=")
IF(str)
STRING(REPLACE "$${keyword}=" "" str $${str})
STRING(REGEX REPLACE "[ ].*" "" str "$${str}")
@@ -59,7 +59,7 @@ MACRO(GET_MYSQL_VERSION)
IF(NOT DEFINED MAJOR_VERSION OR
NOT DEFINED MINOR_VERSION OR
NOT DEFINED PATCH_VERSION)
- MESSAGE(FATAL_ERROR "VERSION file cannot be parsed.")
+ MESSAGE(FATAL_ERROR "MYSQL_VERSION file cannot be parsed.")
ENDIF()
SET(VERSION
@@ -80,7 +80,7 @@ MACRO(GET_MYSQL_VERSION)
SET(CPACK_PACKAGE_VERSION_PATCH $${PATCH_VERSION})
IF(WITH_NDBCLUSTER)
- # Read MySQL Cluster version values from VERSION, these are optional
+ # Read MySQL Cluster version values from MYSQL_VERSION, these are optional
# as by default MySQL Cluster is using the MySQL Server version
MYSQL_GET_CONFIG_VALUE("MYSQL_CLUSTER_VERSION_MAJOR" CLUSTER_MAJOR_VERSION)
MYSQL_GET_CONFIG_VALUE("MYSQL_CLUSTER_VERSION_MINOR" CLUSTER_MINOR_VERSION)
@@ -89,12 +89,12 @@ MACRO(GET_MYSQL_VERSION)
# Set MySQL Cluster version same as the MySQL Server version
# unless a specific MySQL Cluster version has been specified
- # in the VERSION file. This is the version used when creating
+ # in the MYSQL_VERSION file. This is the version used when creating
# the cluster package names as well as by all the NDB binaries.
IF(DEFINED CLUSTER_MAJOR_VERSION AND
DEFINED CLUSTER_MINOR_VERSION AND
DEFINED CLUSTER_PATCH_VERSION)
- # Set MySQL Cluster version to the specific version defined in VERSION
+ # Set MySQL Cluster version to the specific version defined in MYSQL_VERSION
SET(MYSQL_CLUSTER_VERSION "$${CLUSTER_MAJOR_VERSION}")
SET(MYSQL_CLUSTER_VERSION
"$${MYSQL_CLUSTER_VERSION}.$${CLUSTER_MINOR_VERSION}")
@@ -106,7 +106,7 @@ MACRO(GET_MYSQL_VERSION)
ENDIF()
ELSE()
# Set MySQL Cluster version to the same as MySQL Server, possibly
- # overriding the extra version with value specified in VERSION
+ # overriding the extra version with value specified in MYSQL_VERSION
# This might be used when MySQL Cluster is still released as DMR
# while MySQL Server is already GA.
SET(MYSQL_CLUSTER_VERSION
diff --git a/plugin/galaxy/CMakeLists.txt b/plugin/galaxy/CMakeLists.txt.bak
similarity index 100%
rename from plugin/galaxy/CMakeLists.txt
rename to plugin/galaxy/CMakeLists.txt.bak
diff --git a/plugin/performance_point/CMakeLists.txt b/plugin/performance_point/CMakeLists.txt.bak
similarity index 100%
rename from plugin/performance_point/CMakeLists.txt
rename to plugin/performance_point/CMakeLists.txt.bak
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 9fe6d12d..eea38fa7 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -869,6 +869,8 @@ bool opt_large_files = sizeof(my_off_t) > 4;
static bool opt_autocommit; ///< for --autocommit command-line option
static get_opt_arg_source source_autocommit;
+
+bool opt_performance_point_enabled = false;
/*
Used with --help for detailed option
*/
diff --git a/sql/package/package_cache.cc b/sql/package/package_cache.cc
index 8a81734e..30ec6a08 100644
--- a/sql/package/package_cache.cc
+++ b/sql/package/package_cache.cc
@@ -76,7 +76,7 @@ static const T *find_package_element(const std::string &schema_name,
return Package::instance()->lookup_element<T>(schema_name, element_name);
}
/* Template instantiation */
-template static const Proc *find_package_element(
+template const Proc *find_package_element(
const std::string &schema_name, const std::string &element_name);
/**
endef
export VERSION_PATCH

README.md (new file)
@@ -0,0 +1,177 @@
[![LICENSE](https://img.shields.io/badge/License-Apache%202.0-green.svg)](https://github.com/polardb/polardbx-sql/blob/main/LICENSE)
[![Language](https://img.shields.io/badge/Language-Java-blue.svg)](https://www.java.com/)
[Documentation in Chinese](https://github.com/polardb/polardbx-sql/blob/main/docs/zh_CN/README.md)
[Compiling and Running PolarDB-X on Windows](compile_and_run_polardbx_on_windows.md)
## What is PolarDB-X
PolarDB-X is a cloud-native distributed SQL database designed for high-concurrency, massive-storage, and complex-query scenarios. It has a shared-nothing architecture in which computing is decoupled from storage. It supports horizontal scaling, distributed transactions, and Hybrid Transactional/Analytical Processing (HTAP) workloads, and is characterized by enterprise-grade features, cloud-native operation, high availability, and high compatibility with MySQL and its ecosystem.
PolarDB-X was originally created to solve the scalability bottleneck of the database behind Alibaba Tmall's "Double Eleven" core transaction system. It has grown up with AliCloud along the way, and is a mature, stable database system verified by the core business systems of many customers.
The core features of PolarDB-X include:
- Horizontal Scalability
PolarDB-X is designed with a shared-nothing architecture, supports multiple hash and range data sharding algorithms, and achieves transparent horizontal scaling through implicit primary key sharding and dynamic scheduling of data shards (see the SQL sketch after this list).
- Distributed Transactions
PolarDB-X adopts an MVCC + TSO approach and the 2PC protocol to implement distributed transactions. Transactions satisfy the ACID properties, support the RC/RR isolation levels, and achieve high performance through optimizations such as one-stage commit, read-only transactions, and asynchronous commit.
- HTAP
PolarDB-X supports analytical queries through its native MPP capability, and achieves strong isolation between OLTP and OLAP traffic through CPU quota constraints, memory pooling, storage resource separation, etc.
- Enterprise-class
PolarDB-X has many capabilities designed for enterprise scenarios, such as SQL Concurrency Control, SQL Advisor, TDE, Triple Authority Separation, and Flashback Query.
- Cloud Native
PolarDB-X has years of cloud-native practice on AliCloud; it supports managing cluster resources via a K8S operator, and supports public cloud, hybrid cloud, private cloud, and other deployment forms.
- High Availability
PolarDB-X achieves strong data consistency through the Multi-Paxos protocol, supports cross-data-center deployment, and improves system availability through Table Group, Geo-locality, etc.
- Compatible with MySQL and Its Ecosystem
The goal of PolarDB-X is to be fully compatible with MySQL, which currently covers the MySQL protocol, most of the MySQL SQL syntax, collations, transaction isolation levels, binary logging, etc.
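For a first taste of transparent sharding, here is a minimal sketch (adapted from the docker/README.md example in this repo; it assumes a local instance started as described in the Quick Start below):
```shell
mysql -h127.0.0.1 -P8527 -upolardbx_root <<'EOF'
create database polarx_example mode='auto';
use polarx_example;
create table example (
  `id` bigint(11) auto_increment NOT NULL,
  `name` varchar(255) DEFAULT NULL,
  primary key (`id`)
) engine=InnoDB default charset=utf8
partition by hash(id) partitions 8;
-- list the physical shards behind the logical table:
show topology from example;
EOF
```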
## Quick Start
### To quickly start with PolarDB-X
PolarDB-X supports one-click installation via the PXD tool, through which you can quickly try out the functions of PolarDB-X.
See the [PXD Quick Start](https://github.com/polardb/polardbx-sql/blob/main/docs/en/quickstart.md).
### To quickly start with PolarDB-X on Kubernetes
PolarDB-X also provides a K8S deployment mode, through which you can customize the configuration of the PolarDB-X cluster.
See the [K8S Quick Start](https://github.com/polardb/polardbx-operator#quick-start).
### To start developing PolarDB-X
Try the following steps under a non-root user to build and run PolarDB-X from the source code.
>NOTE: CentOS 7&8 and Ubuntu 18 and higher are currently supported.
0. Prerequisites
Make sure the following packages are installed (in most cases, they are already):
```shell
# For CentOS
yum install -y sudo git make redhat-lsb-core
# For Ubuntu
apt-get update && apt-get install -y sudo git make lsb-release
```
Create and switch to a non-root user with sudo privilege.
```shell
useradd -ms /bin/bash polarx
echo "polarx:polarx" | chpasswd
echo "polarx ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
su - polarx
git clone https://github.com/polardb/polardbx.git
cd polardbx
```
1. Build
```shell
make
```
`make` will download all repositories, install build tools and libraries, and build and install PolarDB-X.
Repositories are downloaded to the `./build` dir and binaries are installed to `./build/run`.
You can run `make clean` to remove the installation and try building PolarDB-X again.
You can also run `make cleanAll` to remove everything under `./build`.
This process may take more than half an hour, depending on your network bandwidth.
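A typical rebuild cycle, for reference:
```shell
make clean && make      # rebuild, keeping the already-downloaded sources
make cleanAll && make   # start from scratch, re-downloading everything
```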
2. Run
```shell
./build/run/bin/polardb-x.sh start
```
Wait about 1 minute and use `mysql -h127.0.0.1 -P8527 -upolardbx_root` to connect to PolarDB-X.
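If you prefer polling over a fixed wait, you can loop on the same liveness check that the bundled `polardb-x.sh` script uses internally (a sketch):
```shell
until mysql -h127.0.0.1 -P8527 -upolardbx_root -e 'SELECT 1' >/dev/null 2>&1; do
  echo "waiting for CN..."; sleep 5
done
```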
3. Stop
```shell
./build/run/bin/polardb-x.sh stop
```
The core features of the PolarDB-X community version will remain consistent with the commercial version, and more manuals can be found in [the documentation of the commercial version](https://www.alibabacloud.com/help/doc-detail/71252.htm). The documentation of the community version is being compiled and will be released to the public in the near future.
## Architecture
![image.png](https://github.com/polardb/polardbx-sql/blob/main/docs/architecture.png)
PolarDB-X has a shared-nothing architecture in which compute and storage are decoupled, and the system consists of four core components.
- CN (Compute Node)
The Compute Node is the stateless entry point of the system; it includes modules such as the SQL parser, optimizer, and executor. It is responsible for distributed data routing, 2PC coordination, global secondary index maintenance, etc., and also provides enterprise features such as SQL concurrency control and triple authority separation.
- DN (Data Node)
The Data Node is responsible for data persistence, providing strong data consistency based on the Multi-Paxos protocol, while maintaining distributed transaction visibility through MVCC.
- GMS (Global Meta Service)
The Global Meta Service is responsible for maintaining globally consistent meta information such as Table/Schema and statistics, maintaining security information such as accounts and permissions, and providing the global timestamp service (i.e. TSO).
- CDC (Change Data Capture)
The CDC Node provides change data capture capability that is fully compatible with the MySQL binary log format and MySQL DUMP protocol, and master-slave replication capability that is compatible with the MySQL Replication protocol.
PolarDB-X provides a tool to manage the above components through a K8S operator, and the RPC between the CN and DN goes through a private protocol component. The corresponding repositories of these components are as follows.
| **Component Name** | **Repository** |
| --- | --- |
| CN (Compute Node) | [polardbx-sql](https://github.com/polardb/polardbx-sql) |
| GMS (Global Meta Service) | [polardbx-engine](https://github.com/polardb/polardbx-engine) |
| DN (Data Node) | [polardbx-engine](https://github.com/polardb/polardbx-engine) |
| CDC (Change Data Capture) | [polardbx-cdc](https://github.com/polardb/polardbx-cdc) |
| RPC | [polardbx-glue](https://github.com/polardb/polardbx-glue) |
| K8S Operator | [polardbx-operator](https://github.com/polardb/polardbx-operator) |
## What is polardbx-sql
polardbx-sql is one component of PolarDB-X, namely CN (Compute Node).
## Licensing
polardbx-sql is under the Apache License 2.0. See the [License](https://github.com/polardb/polardbx-sql/blob/main/LICENSE) file for details.
## Contributing
You are welcome to make contributions to PolarDB-X. We appreciate all the contributions. For more information about how to start development and pull requests, see [contributing](https://github.com/polardb/polardbx-sql/blob/main/CONTRIBUTING.md).
## Community
You can join these groups and chats to discuss and ask PolarDB-X related questions:
- DingTalk Group: [32432897](https://h5.dingtalk.com/circle/healthCheckin.html?dtaction=os&corpId=dingc5456617ca6ab502e1cc01e222598659&1b3d4=1ec1b&cbdbhh=qwertyuiop#/)
![DingTalk Group](docs/images/dingtalk_group.jpg)
- WeChat Group: 阿里云 PolarDB-X 开源交流群 (Aliyun PolarDB-X open source discussion group; contact a group manager to join. Manager IDs: oldbread3, hustfxj, agapple0002)
![WeChat Manager 1](docs/images/wechat_manager_a.jpg) ![WeChat Manager 2](docs/images/wechat_manager_b.jpg) ![WeChat Manager 3](docs/images/wechat_manager_c.jpg)
## Acknowledgements
polardbx-sql draws on many open source projects, such as Calcite and Presto. Sincere thanks to these projects and their contributors.
compile_and_run_polardbx_on_windows.md (new file)
@@ -0,0 +1,236 @@
# Compiling and Running PolarDB-X on Windows
## Install WSL
See the official documentation: https://docs.microsoft.com/en-us/windows/wsl/install
## Install CentOS 7
The Microsoft Store does not offer CentOS 7; it can be downloaded from https://github.com/mishamosher/CentOS-WSL/releases
The verified release is https://github.com/mishamosher/CentOS-WSL/releases/tag/7.9-2111
After unpacking, run CentOS7.exe (right-click and run as administrator) to complete the installation; run CentOS7.exe again to open a terminal.
## Prepare the environment
1. Install wget
```
yum install wget -y
```
2. Use the Aliyun yum repository
```
cd /etc/yum.repos.d/ && \
wget -O CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo && \
yum clean all
cd /root
```
3. Install the toolchain
```
yum install -y git
yum install -y centos-release-scl
yum install -y mysql
yum install -y java-1.8.0-openjdk-devel
yum install -y make automake openssl-devel ncurses-devel bison libaio-devel
yum install -y devtoolset-7-gcc devtoolset-7-gcc-c++ devtoolset-7-binutils
echo "source /opt/rh/devtoolset-7/enable" >>/etc/profile && source /etc/profile
```
4. Install cmake
The yum repository does not provide cmake3, so build and install it from source:
```
wget https://cmake.org/files/v3.23/cmake-3.23.2.tar.gz && tar -zxvf cmake-3.23.2.tar.gz && cd cmake-3.23.2/
./bootstrap && gmake && gmake install
```
5. Create the admin user
Neither the CN nor the DN can be started as root, so create a dedicated user:
```
useradd -ms /bin/bash admin && \
echo "admin:admin" | chpasswd && \
echo "admin ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers && \
su admin
cd /home/admin
```
6. Install maven
The maven shipped in the repository is too old; install a recent version:
```
wget https://dlcdn.apache.org/maven/maven-3/3.8.6/binaries/apache-maven-3.8.6-bin.tar.gz && tar -zxvf apache-maven-3.8.6-bin.tar.gz
echo 'PATH=/home/admin/apache-maven-3.8.6/bin:$PATH' >> /home/admin/.bashrc && \
echo "export PATH" >> /home/admin/.bashrc && \
source /home/admin/.bashrc
```
Inside mainland China, the Aliyun maven mirror is much faster: https://developer.aliyun.com/mvn/guide
Edit the maven configuration file:
```
vi /home/admin/apache-maven-3.8.6/conf/settings.xml
```
Add a mirror child node inside the `<mirrors></mirrors>` tag:
```
<mirror>
<id>aliyunmaven</id>
<mirrorOf>*</mirrorOf>
<name>aliyun public</name>
<url>https://maven.aliyun.com/repository/public</url>
</mirror>
```
## Build PolarDB-X
1. Download the build project
```
git clone https://github.com/ApsaraDB/PolarDB-X.git
cd PolarDB-X
```
2. Build
```
make
```
Note: if the machine has 16 GB of RAM or less, reduce the build parallelism by changing `CPU_CORES` to 2 in PolarDB-X/Makefile, otherwise the build can easily run out of memory. Alternatively, override it on the command line as shown below:
![image](https://user-images.githubusercontent.com/2645985/173988137-dc514bdc-342f-4a4e-ae05-88f0ff44898a.png)
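Assuming GNU make, where a command-line variable assignment overrides any `=` assignment inside the Makefile, the parallelism can be capped without editing the file:
```
make CPU_CORES=2
```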
3. Run
```
./build/run/bin/polardb-x.sh start
```
4. Stop
```
./build/run/bin/polardb-x.sh stop
```
## Developing GalaxySQL (CN) with IDEA
We use IDEA to run the CN code, with the GalaxyEngine node started inside CentOS serving as the GMS and DN node.
1. WSL and Windows are two IPs on the same LAN. In the demo above, the DN IP recorded in GMS is `127.0.0.1`, which Windows cannot use to reach the GMS/DN, so you need to obtain the WSL LAN IP and substitute it accordingly.
Run `ip addr` inside CentOS and note the IP on eth0; in this example it is `172.27.47.106`:
```
ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: bond0: <BROADCAST,MULTICAST,MASTER> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether 7a:44:78:58:e8:32 brd ff:ff:ff:ff:ff:ff
3: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether 7a:e5:bd:1e:a3:ba brd ff:ff:ff:ff:ff:ff
4: tunl0@NONE: <NOARP> mtu 1480 qdisc noop state DOWN group default qlen 1000
link/ipip 0.0.0.0 brd 0.0.0.0
5: sit0@NONE: <NOARP> mtu 1480 qdisc noop state DOWN group default qlen 1000
link/sit 0.0.0.0 brd 0.0.0.0
6: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 00:15:5d:05:db:9b brd ff:ff:ff:ff:ff:ff
inet 172.27.47.106/20 brd 172.27.47.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::215:5dff:fe05:db9b/64 scope link
valid_lft forever preferred_lft forever
```
2. Log in to GMS on CentOS and update the DN IP:
```
mysql -h127.1 -P4886 -uroot polardbx_meta_db_polardbx -e 'update storage_info set ip="172.27.47.106";'
```
3. Kill the CN process inside CentOS to avoid conflicts:
```
[admin@DESKTOP-UGTN860 PolarDB-X]$ jps
13174 Jps
15625 DaemonBootStrap
16105 DumperBootStrap
16139 TaskBootStrap
17334 TddlLauncher
[admin@DESKTOP-UGTN860 PolarDB-X]$ kill -9 17334
```
4. Clone the code on Windows:
```
git clone https://github.com/ApsaraDB/galaxysql.git
cd galaxysql
git submodule update --init
```
5. Open the maven project in IDEA and switch IDEA's maven repository to the Aliyun mirror:
![image](https://user-images.githubusercontent.com/2645985/173986060-a0cdba7e-04b6-46bf-a76c-66582c83d630.png)
6. Raise the memory limit for IDEA's build process:
![image](https://user-images.githubusercontent.com/2645985/173986190-f647d8d7-4188-4f29-854c-a906f0686ca2.png)
7. Overwrite the contents of `galaxysql\polardbx-server\src\main\resources` in IDEA with `PolarDB-X/build/run/galaxysql/conf/server.properties` from CentOS, and change the `127.0.0.1` in `metaDbAddr` to the WSL IP:
![image](https://user-images.githubusercontent.com/2645985/173987557-9b2f72aa-25a9-4149-b1c9-8a05cd26c19d.png)
Also edit the polardbx-server/src/main/resources/server.properties file in the code:
a. Change `metaDbAddr` to `127.0.0.1:4886`.
b. Change `metaDbXprotoPort` to `34886`.
c. Change `galaxyXProtocol` to `2`.
d. Run this command in a shell to obtain `metaDbPasswd`: `mysql -h127.1 -P4886 -uroot -padmin -D polardbx_meta_db_polardbx -e "select passwd_enc from storage_info where inst_kind=2"`
e. Add `metaDbPasswd=<the queried password>`.
8. Run `com.alibaba.polardbx.server.TddlLauncher` once; this first run will fail.
9. Edit the `Run/Debug Configurations` of `TddlLauncher` and add `dnPasswordKey=asdf1234ghjk5678` to the environment variables:
![image](https://user-images.githubusercontent.com/2645985/173987036-5aa9560f-c1b7-4451-b164-82c457b0b597.png)
![image](https://user-images.githubusercontent.com/2645985/173987081-4767f56b-20ce-43a7-9cff-f35fa01ab5ca.png)
![image](https://user-images.githubusercontent.com/2645985/173987109-7ca46936-7f87-4c16-a0fe-73c7d5ab9bde.png)
10. Run `com.alibaba.polardbx.server.TddlLauncher` again; it should now start.
11. A mysql terminal can now connect to the local CN:
```
mysql -h 127.0.0.1 -upolardbx_root -p123456 -P8527
```
## Pin the WSL IP address
Note: WSL2 is assigned a new IP on every restart, which invalidates the IPs recorded in GMS and in `server.properties`. As a workaround, the following commands assign a fixed IP to WSL2 and to Windows respectively:
```
wsl -d CentOS7 -u root ip addr add 192.168.50.2/24 broadcast 192.168.50.255 dev eth0 label eth0:1
netsh interface ip add address "vEthernet (WSL)" 192.168.50.1 255.255.255.0
```
This pins the IP recorded in GMS and in `server.properties` to `192.168.50.2`.
Be sure to rerun these commands after every WSL restart.

docker/Dockerfile (new file)
@@ -0,0 +1,25 @@
FROM centos:7
ARG BUILD_PATH=/home/polarx/polardbx/build
RUN yum install sudo hostname telnet net-tools vim tree less file java-11-openjdk-devel -y && \
yum install openssl-devel ncurses-devel libaio-devel mysql -y && \
yum clean all && rm -rf /var/cache/yum && rm -rf /var/tmp/yum-*
RUN useradd -ms /bin/bash polarx && \
echo "polarx:polarx" | chpasswd && \
echo "polarx ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers && \
echo "export BUILD_PATH=$BUILD_PATH" >> /etc/profile && \
echo 'PATH="$BUILD_PATH/run/polardbx-engine/u01/mysql/bin:$BUILD_PATH/run/bin:$PATH"' >> /etc/profile && \
echo "export PATH" >> /etc/profile
USER polarx
WORKDIR /home/polarx
ADD --chown=polarx run.tar.gz $BUILD_PATH
COPY --chown=polarx entrypoint.sh entrypoint.sh
ENV TZ Asia/Shanghai
ENV BUILD_PATH=$BUILD_PATH
ENTRYPOINT /home/polarx/entrypoint.sh $BUILD_PATH/run

docker/README.md (new file)
@@ -0,0 +1,165 @@
## Introduction
PolarDB-X is a distributed database system whose core components are the CN, DN, GMS, and CDC; at runtime, each component is a separate process.
This document covers three scenarios for using the PolarDB-X image.
## Scenario 1. Quickly experience PolarDB-X with docker
With the PolarDB-X Docker image, you can quickly run a PolarDB-X instance locally.
First, pull the image:
```shell
docker pull polardbx/polardb-x
```
Then run the following command to start a PolarDB-X container (>= 12 GB of docker memory is recommended: CN, DN, and CDC each get mem_size=4096):
```shell
docker run -d --name polardb-x -m 12GB -p 3306:8527 -v /etc/localtime:/etc/localtime polardbx/polardb-x
```
After a short wait, you can connect to PolarDB-X with the MySQL client:
```shell
mysql -h127.0.0.1 -upolardbx_root -p123456
```
PolarDB-X is highly compatible with MySQL syntax and extends SQL for its distributed features; the following SQL statements give a first taste of PolarDB-X:
```mysql
# check GMS
select * from information_schema.schemata;
# create a partitioned table
create database polarx_example mode='auto';
use polarx_example;
create table example (
`id` bigint(11) auto_increment NOT NULL,
`name` varchar(255) DEFAULT NULL,
`score` bigint(11) DEFAULT NULL,
primary key (`id`)
) engine=InnoDB default charset=utf8
partition by hash(id)
partitions 8;
insert into example values(null,'lily',375),(null,'lisa',400),(null,'ljh',500);
select * from example;
show topology from example;
# check CDC
show master status;
show binlog events in 'binlog.000001' from 4;
# check DN and CN
show storage;
show mpp;
```
The steps above run a PolarDB-X container locally. The container runs one CN process, one DN process (which also plays the GMS role), and one CDC process; the system is initialized with default parameters and, once initialization completes, serves on port 8527.
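A quick sanity check (a sketch; the process names are the ones this repo's scripts look for: TddlLauncher is the CN, mysqld the DN/GMS, DaemonBootStrap the CDC daemon):
```shell
docker exec polardb-x ps aux | grep -E 'TddlLauncher|mysqld|DaemonBootStrap' | grep -v grep
```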
## Scenario 2. Manually adjusting component configuration inside docker
You can control the memory usage of the CN and CDC via the environment variable `mem_size`; the CN and CDC will ***each*** use no more than `mem_size` (MB) of memory.
Meanwhile, the DN's buffer pool size is set to `0.3*mem_size`. In addition, the DN's my.cnf and data files live under `/home/polarx/polardbx/build/run/polardbx-engine/data` inside the container.
You can mount that directory locally, stop the container, edit my.cnf, and then start the container again. The following example walks through these settings:
1. First run the polardb-x container, passing the mem_size environment variable and mounting the data directory locally:
```shell
docker run -d --name polardb-x -p 3306:8527 --env mem_size=8192 -v /etc/localtime:/etc/localtime -v polardbx-data:/home/polarx/polardbx/build/run/polardbx-engine/data polardbx/polardb-x
```
With the command above, the CN, DN, and CDC each use no more than 8 GB of memory, i.e. no more than 24 GB in total.
Meanwhile, the DN's `innodb_buffer_pool_size` is set to `0.3*8192 MB`, which is finally rounded up to 2560 MB (see the arithmetic sketch below).
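The rounding follows from the defaults in this repo's my.cnf (`innodb_buffer_pool_chunk_size` = 32 MB, `innodb_buffer_pool_instances` = 8): InnoDB rounds the pool up to a multiple of 32 MB * 8 = 256 MB. A sketch of the arithmetic:
```shell
# Same formula the bundled start script applies, with mem_size=8192:
echo "scale=0; 8192*1024*1024*0.3/1" | bc   # 2576980377 bytes, about 2457.6 MB
# InnoDB rounds up to a multiple of chunk_size * instances = 256 MB:
# 2457.6 MB rounds up to 10 * 256 MB = 2560 MB
```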
2. To modify my.cnf, stop the container once it is running:
```shell
docker stop polardb-x
```
3. Locate the locally mounted directory:
```shell
docker volume inspect polardbx-data
```
Find the `Mountpoint` in the output of the command above, enter that directory, edit `my.cnf`, and save.
4. Finally, restart the container:
```shell
docker start polardb-x
```
## Scenario 3. Developing based on polardbx-sql
polardbx-engine (i.e. the DN) is a branch of MySQL 8.x; refer to the official MySQL documentation for related development work.
This section mainly explains how to set up a polardbx-sql (i.e. CN) development environment with IntelliJ IDEA and the PolarDB-X Docker image.
### Start the DN & GMS container
The CN depends on the DN and the GMS. The GMS can be seen as a DN playing a special role, so for CN development a single container can play both the DN and GMS roles. The command to run such a container is:
```shell
docker run -d --name polardb-x --env mode=dev -p 4886:4886 -p 34886:34886 -v /etc/localtime:/etc/localtime -v polardb-x-data:/home/polarx/polardbx/build/run/polardbx-engine/data polardbx/polardb-x
```
This command starts a container named polardb-x, sets its run mode to development mode via the environment variable `mode` (i.e. `mode=dev`), and exposes the MySQL protocol port and the private protocol port for the CN to use.
The volume mapping persists the data for later use.
All possible values of `mode` are listed in the last section.
Next, configure the CN side.
### Configure server.properties
First, edit the polardbx-server/src/main/resources/server.properties file in the code:
1. Change `serverPort` to `8527`.
2. Change `metaDbAddr` to `127.0.0.1:4886`.
3. Change `metaDbXprotoPort` to `34886`.
4. Change `galaxyXProtocol` to `2`.
5. Run the following command to obtain `metaDbPasswd`: `docker exec polardb-x bash -c 'mysql -h127.1 -P4886 -uroot -D polardbx_meta_db_polardbx -e "select passwd_enc from storage_info where inst_kind=2"'`
6. Add `metaDbPasswd=<the queried password>` (the resulting entries are sketched below).
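After these steps, the edited entries in server.properties should look roughly like this (a sketch; `<queried password>` stands for the passwd_enc value returned in step 5):
```shell
grep -E '^(serverPort|metaDbAddr|metaDbXprotoPort|galaxyXProtocol|metaDbPasswd)' \
  polardbx-server/src/main/resources/server.properties
# serverPort=8527
# metaDbAddr=127.0.0.1:4886
# metaDbXprotoPort=34886
# galaxyXProtocol=2
# metaDbPasswd=<queried password>
```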
Then configure the IntelliJ IDEA parameters.
### Configure IntelliJ IDEA
Set the environment variable `dnPasswordKey=asdf1234ghjk5678`.
Since compiling the CN is memory-hungry, also set the following two build parameters:
1. Preferences - Compiler - Build process heap size: set to 4096.
2. Preferences - Build, Execution, Deployment - Build Tools - Maven - Importing - VM options for importer: set to -Xmx2048m -Xms2048m.
The CN runtime environment is now ready; you can start `TddlLauncher` for development and debugging.
### Deploy the DN remotely, develop the CN locally
Of course, the process above can also run polardbx-engine in a docker container deployed on a remote machine,
with the relevant ports (4886, 34886) opened externally; then:
1. Change the corresponding IPs in the local resources/server.properties file,
replacing 127.0.0.1 with the remote machine's IP.
2. Log in to the remote machine and run `docker exec -it polardb-x bash` to enter the DN container.
3. Change the IP in storage_info to the remote machine's IP: `mysql -h127.1 -P4886 -uroot -Dpolardbx_meta_db_polardbx -e "update storage_info set ip='<remote machine IP>'"`.
4. Start `TddlLauncher` locally.
## Appendix
### 1. mode values and their meanings
| mode | meaning |
|----------|------------------------------------------------|
| play | Default. Experience mode: initializes and starts a complete PolarDB-X instance |
| dev | Development mode: initializes and starts a DN process inside the container, which also plays the GMS role |
| dev-dist | (not yet supported) Distributed development mode: starts multiple DNs, for features that need to be developed and tested against multiple DNs |
| cn | (not yet supported) CN mode for production: the container starts only a CN process |
| dn | (not yet supported) DN mode for production: the container starts only a DN process |
| gms | (not yet supported) GMS mode for production: the container starts only a GMS process |
| cdc | (not yet supported) CDC mode for production: the container starts only a CDC process |
### 2. docker build
```shell
git clone https://github.com/polardb/polardbx.git
make
cd docker && sh image-build.sh /home/polarx/polardbx/build
```

docker/entrypoint.sh (new executable file)
@@ -0,0 +1,152 @@
#!/bin/bash
# Copyright 2021 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source /etc/profile
sudo chown -R polarx:polarx $BUILD_PATH
RUN_PATH=$1
POLARDBX_SQL_HOME="$RUN_PATH"/polardbx-sql
POLARDBX_CDC_HOME="$RUN_PATH"/polardbx-cdc/polardbx-binlog.standalone
if [ x"$mode" = "x" ]; then
mode="play"
fi
function cn_pid() {
# PID is the second column of ps output
ps auxf | grep java | grep TddlLauncher | grep -v grep | awk '{print $2}'
}
function cdc_pid() {
ps auxf | grep java | grep DaemonBootStrap | grep -v grep | awk '{print $2}'
}
function dn_pid() {
ps aux | grep mysqld | grep -v "grep" | awk '{print $2}'
}
function get_pid() {
if [ x"$mode" = x"play" ]; then
cn_pid
elif [ x"$mode" = x"dev" ]; then
dn_pid
else
echo "mode=$mode does not support yet."
echo ""
fi
}
function stop_all() {
polardb-x.sh stop
rm -f $POLARDBX_SQL_HOME/bin/*.pid
rm -f $POLARDBX_CDC_HOME/bin/*.pid
}
function start_polardb_x() {
echo "start polardb-x"
polardb-x.sh start
}
function start_gms_and_dn() {
echo "start gms and dn"
polardb-x.sh start_dn
}
function start_process() {
echo "start with mode=$mode"
if [ x"$mode" = x"play" ]; then
start_polardb_x
elif [ x"$mode" = x"dev" ]; then
start_gms_and_dn
else
echo "mode=$mode does not support yet."
fi
}
last_pid=0
function report_pid() {
pid=$(get_pid)
if [ -z "$pid" ]; then
echo "Process dead. Exit."
last_pid=0
return 1
else
if [[ $pid -ne $last_pid ]]; then
echo "Process alive: " "$pid"
fi
last_pid=$pid
fi
return 0
}
function watch() {
while report_pid; do
sleep 5
done
}
function start() {
# Start
stop_all
start_process
}
function waitterm() {
local PID
# any process to block
tail -f /dev/null &
PID="$!"
# setup trap, could do nothing, or just kill the blocker
trap "kill -TERM ${PID}" TERM INT
# wait for signal, ignore wait exit code
wait "${PID}" || true
# clear trap
trap - TERM INT
# wait blocker, ignore blocker exit code
wait "${PID}" 2>/dev/null || true
}
# Retry start and watch
retry_interval=30
retry_cnt=0
retry_limit=10
if [[ "$#" -ge 2 ]]; then
retry_limit=$2
fi
while [[ $retry_cnt -lt $retry_limit ]]; do
start
if report_pid; then
break
fi
((retry_cnt++))
if [[ $retry_cnt -lt $retry_limit ]]; then
sleep $retry_interval
fi
done
waitterm
stop_all
# Abort.
exit 1

docker/image-build.sh (new file)
@@ -0,0 +1,16 @@
#!/bin/bash
set -eux
echo "Make sure you have already run make successfully"
build_path=$1
rm -rf tmp && mkdir -p tmp
cp -f entrypoint.sh tmp/
cp -f Dockerfile tmp/
old_pwd=$(pwd)
cd "$build_path"
tar -zcf run.tar.gz run
mv run.tar.gz "$old_pwd"/tmp/run.tar.gz
cd "$old_pwd"/tmp
sudo docker build -t all-in-one --network host . -f Dockerfile --build-arg BUILD_PATH="$build_path"
cd "$old_pwd" && rm -rf tmp

5 binary image files added (not shown): 44 KiB, 45 KiB, 19 KiB, 22 KiB, 21 KiB.