Relay log MySQL server - mysql

MySQL replication: I don't know why my MySQL replication is not working.
Here is the config:
[mysqld_safe]
socket = /var/run/mysqld/mysqld.sock
nice = 0
[mysqld]
port = 3306
basedir = /usr
lc-messages-dir =/usr/share/mysql
skip-external-locking
gtid_mode = ON
enforce-gtid-consistency
log-slave-updates
binlog_format = mixed
CHANGE MASTER TO MASTER_HOST='', MASTER_PORT=3306, MASTER_USER='', MASTER_PASSWORD='',
MASTER_AUTO_POSITION=1, GET_MASTER_PUBLIC_KEY=1;
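A quick way to see why a GTID replica is not applying changes (a diagnostic sketch; run on the replica):
-- show whether the replication I/O and SQL threads are running and why they stopped
SHOW SLAVE STATUS\G
-- fields worth reading: Slave_IO_Running, Slave_SQL_Running,
-- Last_IO_Error, Last_SQL_Error, Retrieved_Gtid_Set, Executed_Gtid_Set
-- once the reported error is fixed, restart the threads:
START SLAVE;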


Mysql query error: Fail to read any response from the server, the underlying connection might get lost unexpectedly

I use Vert.x (Java) to build an app and am writing tests for MySQL queries. I could successfully get results before, but after I modified my.cnf for other reasons I no longer get results, only "Fail to read any response from the server, the underlying connection might get lost unexpectedly". I searched Google, but I cannot figure out exactly what is wrong or how to fix it.
Here is my my.cnf file:
[client]
port = 3306
socket = /tmp/mysql.sock
# This was formerly known as [safe_mysqld]. Both versions are currently parsed.
[mysqld_safe]
socket = /tmp/mysql.sock
nice = 0
syslog
[mysqld]
# Basic Settings
port = 3306
user = root
pid-file = /usr/local/mysql/data/mysqld.local.pid
socket = /tmp/mysql.sock
port = 3306
basedir = /usr/local/mysql/
datadir = /usr/local/mysql/data
tmpdir = /tmp
lc-messages-dir = /usr/local/mysql/share
skip-external-locking
# Engine
default-storage-engine = InnoDB
# Listening IP
bind-address = 0.0.0.0
# Safety
max-connect-errors = 1000000
max_allowed_packet = 64M
skip-name-resolve
sysdate-is-now = 1
innodb = FORCE
innodb-strict-mode = 1
tls_version = TLSv1,TLSv1.1,TLSv1.2
wait_timeout = 28800
interactive_timeout = 10000
# Buffers
sort_buffer_size = 4M
read_buffer_size = 2M
join_buffer_size = 8M
read_rnd_buffer_size = 16M
# MyISAM
key-buffer-size = 32M
# CACHES AND LIMITS #
tmp-table-size = 128M
max-heap-table-size = 128M
# Bin logs
binlog-format = ROW
log_bin = /tmp/binlog
max_binlog_size = 100M
server-id = 1 # randomize it in case of multiple servers
# InnoDB
innodb-buffer-pool-size = 2048M
innodb_buffer_pool_instances = 8
innodb_log_buffer_size = 8M
innodb-log-files-in-group = 2
innodb-log-file-size = 256M
innodb-file-per-table = 1
innodb-flush-log-at-trx-commit = 1
innodb-flush-method = O_DIRECT
# With virtual synchrony redundancy, make write queries faster
innodb_doublewrite = 1
# LOGGING
general_log_file = /usr/local/mysql/mysql.log
log-error = /usr/local/mysql/data/mysqld.local.err
log-queries-not-using-indexes = 1
[mysqldump]
quick
quote-names
max_allowed_packet = 64M
[isamchk]
key_buffer = 16M
#
# * IMPORTANT: Additional settings that can override those from this file!
# The files must end with '.cnf', otherwise they'll be ignored.
#
Can you tell me what's wrong with the configuration and what I should change? Thanks.
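A first diagnostic step here (a sketch using standard server counters) is to check whether the server itself is dropping connections, and which limits it is actually running with:
-- counters that grow when the server kills or loses client connections
SHOW GLOBAL STATUS LIKE 'Aborted_clients';
SHOW GLOBAL STATUS LIKE 'Aborted_connects';
-- the effective values of the settings from the config above
SHOW VARIABLES LIKE 'max_allowed_packet';
SHOW VARIABLES LIKE 'wait_timeout';
SHOW VARIABLES LIKE 'tls_version';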

tokudb insert speed with binlog on way slower than innodb?

TokuDB alone beat InnoDB in our benchmarks by about 25%, but when I turn master-slave replication on, InnoDB beats TokuDB by about 20%.
Any idea what's going on?
Here's the conf that's running on an r4.8xlarge AWS machine:
[mysqld_safe]
malloc-lib=/usr/include/jemalloc
[mysqld]
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
port = 3306
server-id = 1
log_bin = /var/log/mysql/mysql-bin.log
binlog_do_db = mt
sync_binlog=1
binlog_format=ROW
binlog_row_image=FULL
basedir = /usr
datadir = /var/lib/mysql
tmpdir = /tmp
lc-messages-dir = /usr/share/mysql
explicit_defaults_for_timestamp
#malloc-lib=/usr/include/jemalloc
key_buffer_size= 250G
read_buffer_size=2G
read_rnd_buffer_size=50M
join_buffer_size=25M
tmp_table_size = 5G
sort_buffer_size = 2G
query_cache_limit = 10M
query_cache_size = 100M
innodb_buffer_pool_instances=64
tokudb_fanout = 128
tokudb_commit_sync = 0
tokudb_fsync_log_period = 1000
tokudb_directio = 1
# Instead of skip-networking the default is now to listen only on
# localhost which is more compatible and is not less secure.
innodb-flush-method = O_DIRECT
innodb-log-files-in-group = 2
innodb-log-file-size = 512M
innodb-flush-log-at-trx-commit = 2
innodb-file-per-table = 1
innodb-buffer-pool-size = 200G
log-error = /var/log/mysql/error.log
# Recommended in standard MySQL setup
sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_ALL_TABLES
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
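One variable worth isolating (a diagnostic sketch, not a durability recommendation): sync_binlog = 1 makes the server fsync the binary log at commit time, while tokudb_commit_sync = 0 means TokuDB alone was not paying a comparable cost. Relaxing it temporarily shows how much of the slowdown is binlog flushing:
-- diagnostic only: let the OS decide when to flush the binlog
SET GLOBAL sync_binlog = 0;
-- re-run the insert benchmark, then restore the durable setting:
SET GLOBAL sync_binlog = 1;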

Import large table in mysql

I'm doing an import in MySQL with this bash script:
ddl="set names utf8; "
ddl="$ddl set global net_buffer_length=1000000;"
ddl="$ddl set global net_write_timeout=1000000;"
ddl="$ddl set global max_allowed_packet=1000000000; "
ddl="$ddl set global key_buffer_size=1000000000; "
ddl="$ddl set global connect_timeout=100000; "
ddl="$ddl set global wait_timeout=100000; "
ddl="$ddl SET foreign_key_checks = 0; "
ddl="$ddl SET UNIQUE_CHECKS = 0; "
ddl="$ddl SET AUTOCOMMIT = 0; "
ddl="$ddl USE ${database}; "
ddl="$ddl source $reducedfile; "
ddl="$ddl SET foreign_key_checks = 1; "
ddl="$ddl SET UNIQUE_CHECKS = 1; "
ddl="$ddl SET AUTOCOMMIT = 1; "
ddl="$ddl COMMIT ; "
echo "Import started"
time mysql -h 127.0.0.1 -u root -proot -e "$ddl"
I have a table with 15 columns, no indexes (only a primary key), and around 350k records; a simple log table. The info is very basic, just ints and dates.
When I try to import this table I get the famous message 'Lost connection to MySQL server during query'. Does anybody know where I can find the reason why MySQL is aborting? There is nothing useful in /var/log/mysql/error.log, only that it's restarting.
When I reduce the number of records to 20 it imports just fine. I'm not sure where to look now to find out what the problem is. That the table is too large for my config is my only conclusion so far, but I've set all the params before the import, and this is my my.cnf:
[mysqld]
max_connections = 1000
#
# * Basic Settings
#
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /var/lib/mysql
tmpdir = /tmp
lc-messages-dir = /usr/share/mysql
skip-external-locking
#
# Instead of skip-networking the default is now to listen only on
# localhost which is more compatible and is not less secure.
bind-address = 127.0.0.1
#
# * Fine Tuning
#
key_buffer = 16M
max_allowed_packet = 256M
thread_stack = 192K
thread_cache_size = 8
# This replaces the startup script and checks MyISAM tables if needed
# the first time they are touched
myisam-recover = BACKUP
#max_connections = 100
#table_cache = 64
#thread_concurrency = 10
#
# * Query Cache Configuration
#
query_cache_limit = 1M
query_cache_size = 16M
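One detail about how the script above behaves: SET GLOBAL max_allowed_packet only applies to connections opened after the change, and the session copy of max_allowed_packet is read-only, so the single mysql -e invocation still imports under the old limit. Splitting the work into two connections avoids that (a sketch reusing the script's own variable names):
-- connection 1: raise the server-side limit; it takes effect only for
-- connections opened AFTER this statement
SET GLOBAL max_allowed_packet = 1000000000;
-- connection 2: a second mysql invocation, started with the client-side
-- option --max-allowed-packet=1G as well; do the actual import here
SET foreign_key_checks = 0;
SET unique_checks = 0;
source $reducedfile
SET foreign_key_checks = 1;
SET unique_checks = 1;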

24 cores and MySQL is using 1 on INSERT

Hi, my server has 24 cores and 32GB of memory.
I'm doing multiple "INSERT INTO ... SELECT" statements of 50 million rows at a time.
Each query takes about 15 hours, but it is ticking along at 100% of only one CPU. I'm trying to get MySQL (5.5, InnoDB) to use more of the resources.
I have read multiple threads about it, but I cannot get it to work.
Most of the advice is about adding innodb_thread_concurrency = 0, but I still see no improvement.
[client]
port = 3306
socket = /var/run/mysqld/mysqld.sock
[mysqld_safe]
socket = /var/run/mysqld/mysqld.sock
nice = 0
[mysqld]
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /media/ssd/db
tmpdir = /tmp
lc-messages-dir = /usr/share/mysql
skip-external-locking
innodb_buffer_pool_size=26G
innodb_thread_concurrency = 0
bind-address = 127.0.0.1
#
# * Fine Tuning
#
key_buffer = 1000M
max_allowed_packet = 160M
thread_stack = 192K
thread_cache_size = 8
# This replaces the startup script and checks MyISAM tables if needed
# the first time they are touched
myisam-recover = BACKUP
table_cache = 800
query_cache_limit = 5000M
query_cache_size = 1600M
join_buffer_size = 1000M
log_error = /var/log/mysql/error.log
# Here you can see queries with especially long duration
log_slow_queries = /var/log/mysql/mysql-slow.log
long_query_time = 2
Try these parameters:
innodb_io_capacity=5000 (or even 20000 depending on your IO subsystem)
innodb_buffer_pool_size=4G (for example)
innodb_log_file_size=1G
innodb_write_io_threads = 64
innodb_read_io_threads = 64
innodb_thread_concurrency = 0
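Those settings help I/O, but they cannot make one statement use more cores: a single INSERT INTO ... SELECT executes on one thread in MySQL 5.5, and innodb_thread_concurrency = 0 only removes the cap on concurrently running threads. To use more cores, split the job by primary-key range and run the chunks on separate connections, along these lines (a sketch with hypothetical table and column names):
-- connection 1
INSERT INTO target SELECT * FROM source WHERE id >= 1 AND id < 10000000;
-- connection 2, running at the same time
INSERT INTO target SELECT * FROM source WHERE id >= 10000000 AND id < 20000000;
-- ...one chunk per connection; each statement then gets its own CPU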

Django, nginx and uWSGI caching results until uWSGI/MySQL restart

I've written a server app in Django that serves an API to a mobile app with Tastypie, backed by a local MySQL server.
It seems like queries are cached until the process is killed or restarted. If I create a new user in the backend, it only appears in the list after I restart uWSGI or MySQL, or if I log into the backend from a different browser.
MySQL process list:
Id  User     Host             db       Time  Command
41  example  localhost:58747  example  13    Sleep
42  example  localhost:58748  example  16    Sleep
Also, if I kill the connections that are in Sleep state, that too triggers a refresh of the data.
uWSGI config
[uwsgi]
vhost = true
plugins = python
socket = /tmp/example.com.sock
master = true
enable-threads = true
processes = 2
wsgi-file = /var/sites/example-server/example/example/wsgi.py
virtualenv = /var/sites/example-server/PYTHON_ENV
chdir = /var/sites/example-server/example
touch-reload = /var/sites/example-server/example/reload
nginx config
server {
    client_max_body_size 20M;
    listen 80;
    server_name example.com;
    access_log /var/log/nginx/example.com_access.log;
    error_log /var/log/nginx/example.com_error.log;
    location / {
        uwsgi_pass unix:///tmp/example.com.sock;
        include uwsgi_params;
    }
    location /media/ {
        alias /var/sites/example-server/example/example/media/;
    }
    location /static/ {
        alias /var/sites/example-server/example/example/static/;
    }
}
my.cnf
[client]
port = 3306
socket = /var/run/mysqld/mysqld.sock
[mysqld_safe]
socket = /var/run/mysqld/mysqld.sock
nice = 0
[mysqld]
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /var/lib/mysql
tmpdir = /tmp
lc-messages-dir = /usr/share/mysql
skip-external-locking
bind-address = 127.0.0.1
key_buffer = 16M
max_allowed_packet = 16M
thread_stack = 192K
thread_cache_size = 8
myisam-recover = BACKUP
query_cache_limit = 1M
query_cache_size = 16M
log_error = /var/log/mysql/error.log
expire_logs_days = 10
max_binlog_size = 100M
[mysqldump]
quick
quote-names
max_allowed_packet = 16M
[mysql]
[isamchk]
key_buffer = 16M
transaction-isolation = READ-COMMITTED
!includedir /etc/mysql/conf.d/
What can I do to make this problem go away?
Cheers
Morten
I had the same behavior and found this post: https://plus.google.com/u/0/101898908470597791359/posts/AuMJdgEo93k
Adding this (only the OPTIONS key) to DATABASES in Django's settings.py:
DATABASES = {
    'default': {
        'OPTIONS': {
            "init_command": "SET storage_engine=INNODB, SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED",
        },
    }
}
seems to have resolved the problem.
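That setting works because InnoDB's default isolation level is REPEATABLE READ: a transaction reads from the snapshot taken at its first read, so a long-lived connection that never commits keeps seeing stale data, which matches the Sleep'ed connections above. READ COMMITTED takes a fresh snapshot for every statement. A minimal demonstration of the difference (using Django's default auth_user table as an example):
-- connection A, default REPEATABLE READ
START TRANSACTION;
SELECT COUNT(*) FROM auth_user;  -- consistent snapshot established here
-- connection B: INSERT INTO auth_user ...; COMMIT;
SELECT COUNT(*) FROM auth_user;  -- connection A still sees the old count
COMMIT;
SELECT COUNT(*) FROM auth_user;  -- the new row is now visible
Note that the storage_engine variable in that init_command was removed in MySQL 5.7; on newer servers the equivalent is default_storage_engine.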