user@host:~# mysql -V - mysql Ver 14.14 Distrib 5.7.25-28, for debian-linux-gnu (x86_64) using 7.0 running under debian-9.9
user@host:~# uname -a - Linux 4.9.0-8-amd64 #1 SMP Debian 4.9.144-3.1 (2019-02-19) x86_64 GNU/Linux
user#host:~# perl -MDBI -e 'print $DBI::VERSION ."\n";' - 1.636
user#host:~# perl -v This is perl 5, version 24, subversion 1 (v5.24.1) built for x86_64-linux-gnu-thread-multi
mysql> SHOW CREATE TABLE tbl1;
tbl1 | CREATE TABLE `tbl1` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`main_id` bigint(20) NOT NULL DEFAULT '0',
`debet` varchar(255) NOT NULL DEFAULT '',
`kurs` double(20,4) NOT NULL DEFAULT '0.0000',
`summ` double(20,2) NOT NULL DEFAULT '0.00',
`is_sync` int(11) NOT NULL DEFAULT '0',
PRIMARY KEY (`id`),
KEY `main_id` (`main_id`)
) ENGINE=InnoDB AUTO_INCREMENT=70013000018275 DEFAULT CHARSET=utf8
mysql> SELECT * FROM tbl1 WHERE id=70003020040132;
+----------------+----------------+-------+--------+---------+---------+
| id | main_id | debet | kurs | summ | is_sync |
+----------------+----------------+-------+--------+---------+---------+
| 70003020040132 | 70003020038511 | | 0.0000 | 1798.00 | 0 |
+----------------+----------------+-------+--------+---------+---------+
But when I get this data by perl::DBI module I lose precisions, and values 0.0000 and 1798.00 becomes 0 and 1798.
Code is next:
####
#These 3 subs are connecting to DB, executing query and get data by fetchall_arrayref and coverting undef to NULL.
####
sub DB_connect {
# Opens a connection to the MySQL database named in the package global
# $DBNAME, using credentials from $DBUSER/$DBPWD (assumed set by caller).
# RaiseError and PrintError are both disabled, so every subsequent DBI
# call must be checked manually; only a failed connect dies here.
# mysql_enable_utf8 makes the driver decode result data as UTF-8.
# Returns: a connected DBI database handle.
# DataBase Handler
my $dbh = DBI->connect("DBI:mysql:$DBNAME", $DBUSER, $DBPWD,{RaiseError => 0, PrintError => 0, mysql_enable_utf8 => 1}) or die "Error connecting to database: $DBI::errstr";
return $dbh;
}
sub DB_executeQuery {
# Executes an SQL query and fetches the whole result set.
# argv[0] - "A" returns a flat array of all values, "R" - reference to
#           an array of row arrayrefs (as returned by fetchall_arrayref)
# argv[1] - DB handle from DB_connect
# argv[2] - query to execute
# Dies on prepare/execute failure (RaiseError is off on this handle, so
# the checks must be explicit). Returns nothing for any other choice.
my $choice=shift @_;
my $dbh=shift @_;
my $query=shift @_;
print "$query\n" if $DEBUG>2;
my $sth=$dbh->prepare($query) or die "Error preparing $query for execution: $DBI::errstr";
# execute must be checked too: with RaiseError=0 a failed execute would
# otherwise be silently followed by fetchall_arrayref on a dead handle.
$sth->execute or die "Error executing $query: $DBI::errstr";
my $retval = $sth->fetchall_arrayref;
if ($choice eq "A" ) {
# Flatten the rows into one list: (row1col1, row1col2, ..., rowNcolM)
my @ret_arr=();
foreach my $value (@{ $retval }) {
push @ret_arr,@{ $value };
}
return @ret_arr;
}
elsif ($choice eq "R") {
return $retval;
}
}
sub undef2null {
# argv[0] - reference to an array of row arrayrefs; undef values are
#           converted to the unquoted SQL literal NULL, all other
#           values are single-quoted with quotes backslash-escaped.
# Returns an array of prepared VALUES groups: ("(...)", "(...)", ...)
# NOTE: $val aliases the caller's data, so the s/// below mutates the
# rows in place; harmless here because the fetched rows are discarded.
my $ref=shift @_;
my @array=();
foreach my $row (@{ $ref }) {
my $str="";
foreach my $val ( @{ $row} ) {
if (! defined ( $val )) {
$str="$str, NULL";
}
else {
# Escape quotes and other symbols listed in square brackets
$val =~ s/([\"\'])/\\$1/g;
$str="$str, \'$val\'";
}
}
# Remove ', ' at the beginning of each VALUES substring
$str=substr($str,2);
push @array,"($str)";
} # End foreach my $row (@{ $ref_values })
return @array;
} # End undef2null
#### Main call
#...
# Somewhere in code I get data from DB and print it to out file
# Fetch column names, unsynced row ids, then the rows themselves, and
# emit a single REPLACE INTO statement to the FOUT output file.
my @arr_values=();
my @arr_col_names=DB_executeQuery("A",$dbh,qq(SELECT column_name FROM `information_schema`.`columns` WHERE `table_schema` = '$DBNAME' AND `table_name` = '$table'));
my @arr_ids=DB_executeQuery("A",$dbh,qq(SELECT `id` FROM `$table` WHERE `is_sync`=0));
my $ref_values=DB_executeQuery("R",$dbh,"SELECT * FROM \`$table\` WHERE \`id\` IN(".join(",",@arr_ids).")");
@arr_values=undef2null($ref_values);
print FOUT "REPLACE INTO \`$table\` (`".join("`, `",@arr_col_names)."`) VALUES ".(join ", ",@arr_values).";\n";
and as a result I get next string:
REPLACE INTO `pko_plat` (`id`, `main_id`, `debet`, `kurs`, `summ`, `is_sync`) VALUES ('70003020040132', '70003020038511', '', '0', '1798', '0')
in DB it was 0.0000 - become 0, was 1798.00, become 1798
Perl's DBI documentation says it gets data 'as is' into strings, no translations made. But, then, who rounded values?
The rounding you see is happening because of the way you create the columns.
`kurs` double(20,4) NOT NULL DEFAULT '0.0000'
`summ` double(20,2) NOT NULL DEFAULT '0.00'
If you look at the mysql floating point type documentation you will see that you are using the non-standard syntax double(m, d), where the two parameters define how the float is being output.
So in your case the values stored in summ will be displayed with 2 digits behind the point. This means that when perl gets a value out of the table which is 1.0001 in the database, the value that perl gets delivered by the database is rounded to the set number of digits (in this case .00).
Perl in turn interprets this value ("1.00") as a float, and when printed, will not show any trailing zeroes. If you want these you should accommodate for this in your output.
For example: print sprintf("%.2f\n", $summ);
The way I see it you have two ways you can go (if you want to avoid this loss of precision):
Only add numbers with the correct precision to the database (so for 'summ' only two trailing digits, and four for 'kurs'.)
Alter your table creation to the standard syntax for floats and determine the output formatting in Perl (which you will be doing either way):
`kurs` double NOT NULL DEFAULT '0'
Related
Using PowerShell 4.0 and SQL-Server, I want to merge records from one database to another. I use export-csv and import-csv. I export from one database to a csv file, and import from the csv file to a temporary table in another database, for subsequent MERGE.
TableA is
([ID] int not null,
[Name] varchar(25) not null,
[StartDate] datetime null,
[Department] varchar(25) not null)
Values are ID=1, Name=Joe, StartDate=NULL, Department=Sales
exportTable.ps1 (Ignoring database config)
Invoke-Sqlcmd ("SELECT * FROM TableA WHERE ID=1") | Export-Csv -Path a.csv -NoTypeInformation -Append
This results in a.csv
"ID","Name","StartDate","Department"
"1","Joe","","Sales"
import Table.ps1
CREATE TABLE TableATemporary
([ID] int not null,
[Name] varchar(25) not null,
[StartDate] datetime null)
Import-Csv a.csv | ForEach-Object {
$allValues = "'"+($_.Psobject.Properties.Value -join "','") + "'"
Invoke-Sqlcmd ("INSERT INTO TableATemporary VALUES $allValues")
}
This gives a table of
Values are ID=1, Name=Joe, StartDate=1900-01-01 00:00:00:000, Department=Sales
Rather than a null entry, the datetime field is a default value because the field in the csv file is ""
Is there any way for the Export-Csv cmdlet to write nothing to the csv file for the empty fields in the database, instead of "" ?
Import-Csv always returns blank strings, but there are plenty of ways to set those values to $null if they're empty. For example, here I check for blank values before joining them:
Import-Csv a.csv | ForEach-Object {
# convert empty strings to null
$allValues = '"' + (($_.Psobject.Properties.Value | ForEach-Object {
if($_){"'$_'"} else{''} }) -join ',') + '"'
Invoke-Sqlcmd ("INSERT INTO TableATemporary VALUES $allValues")
}
Now it no longer sets empty strings in $allvalues:
"'1','Joe',,'Sales'"
I recommend using Write-SqlTableData for importing rather than running sqlcmd for each row, but it's just an efficiency thing.
Cpt.Whale's helpful answer shows how to unconditionally represent empty-string field values without embedded quoting ('') [update: should be NULL] in the argument list constructed for the SQL statement.
If you want explicit control over which fields should act this way, you can try the following (simplified example):
# Per-field control over NULL emission: each property runs through a
# switch on its name; matched empty values are emitted as unquoted NULL,
# everything else keeps the default single-quote wrapping.
[pscustomobject] @{ ID = '1'; Name = 'Joe'; StartDate = ''; Department = '' } |
ForEach-Object {
  $_.psobject.Properties.ForEach({
    $quoteChar = "'"
    $name, $value = $_.Name, $_.Value
    # Determine whether to use (unquoted) NULL if the value is the empty string.
    switch ($name) {
      'StartDate' { if ($value -eq '') { $quoteChar = ''; $value = 'NULL' } }
      # Handle other fields here, as needed.
    }
    '{0}{1}{0}' -f $quoteChar, $value
  }) -join ','
}
Output (note how the empty-string for StartDate resulted in unquoted NULL, whereas the one for Department is the quoted empty string):
'1','Joe',NULL,''
Note:
Ideally, whether to quote or not should be guided by the data types of the values, but CSV data is by definition all string-typed.
You can implement a data-type mapping of sorts by extending the switch statement above, so that, say, numeric fields are always used unquoted (e.g., as a switch branch for a numeric Age field: 'Age' { $quoteChar = '' }).
I am trying to use FullCalendar v4 and cannot post data to MySQL. I have narrowed the problem down to $start and $end having UTC on the end and MySQL won't take it even though my datatype is TIMESTAMP. If I manually assign standard datetime data (without UTC) to $start and $end it will post to table. I have the statements commented out in the event.php that work, by overriding the data in the $POST.
My thought is I have something askew in MySQL that is causing the TIMESTAMP datatype to actually be a DATETIME datatype. I have deleted and created the table with SQL Statement shown below.
Running -> MySQL 8.01.5, Windows Server 2016, PHP 7.2.7, using jQuery
...
CREATE TABLE calendarsystem.events (
id int(11) NOT NULL AUTO_INCREMENT,
title VARCHAR(45) NOT NULL ,
start TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
end TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
resourceId VARCHAR(45) NOT NULL ,
PRIMARY KEY (id)
);
...
The code to add_event.php:
<?php
// Receives a FullCalendar event via POST and inserts it into `events`.
// NOTE(review): values are passed straight from $_POST; the prepared
// statement protects against SQL injection, but MySQL will still reject
// ISO-8601 values like '2019-03-25T14:00:00-05:00' for TIMESTAMP columns
// unless the 'T' and the trailing offset are stripped first.
$title = $_POST['title'];
$start = $_POST['start'];
$end = $_POST['end'];
$resourceId = $_POST['resourceId'];
//$title = 'wtf';
//$start = '2019-03-25 16:00:00';
//$end = '2019-03-25T17:00:00';
//$resourceId = 'b';
try {
require "db_config.php";
} catch(Exception $e) {
exit('Unable to connect to database.');
}
$sql = "INSERT INTO events (title, start, end, resourceId) VALUES (:title, :start, :end, :resourceId )";
$q = $bdd->prepare($sql);
// Bug fix: the original read "q->execute(...)" — the missing '$' sigil
// is a fatal error in PHP, so the INSERT never ran.
$q->execute(array(':title'=>$title,':start'=>$start,':end'=>$end,':resourceId'=>$resourceId));
?>
...
If I open MySQL Workbench and try to add the data with the UTC copied from the output window of chrome I get the following error when applying:
Operation failed: There was an error while applying the SQL script to the database.
Executing:
INSERT INTO calendarsystem.events (start, end, title, resourceId) VALUES ('2019-03-25T14:00:00-05:00', '2019-03-25T15:00:00-05:00', 'xxx', 'b');
ERROR 1292: 1292: Incorrect datetime value: '2019-03-25T14:00:00-05:00' for column 'start' at row 1
SQL Statement:
INSERT INTO calendarsystem.events (start, end, title, resourceId) VALUES ('2019-03-25T14:00:00-05:00', '2019-03-25T15:00:00-05:00', 'xxx', 'b')
Sorry the post formatting is crappy
I think MySQL isn't recognizing the 'T' character or the trailing time offset in the string value.
'2019-03-25T15:00:00-05:00'
^ ^^^^^^
One way to fix the problem would be to remove that T character and the offset
'2019-03-25 15:00:00'
^
We expect MySQL to recognize a string in that format.
Alternatively, we could use STR_TO_DATE function with appropriate format model so MySQL can interpret datetime/timestamp values from strings in different formats.
The relevant topic in the MySQL Referenced Manual is here:
https://dev.mysql.com/doc/refman/8.0/en/date-and-time-literals.html
Consider the following table:
CREATE TABLE t1 (f1 VARCHAR(255));
Then, be it ruby:
#!/usr/bin/env ruby
# Demonstrates inserting a JSON-decoded string that contains a
# supplementary-plane character (the two \u escapes form a surrogate
# pair for U+1F34E RED APPLE) into a MySQL column via Sequel/mysql2.
require 'json'
require 'sequel'
require 'mysql2'
# NOTE(review): :encoding => 'utf8' selects MySQL's 3-byte "utf8"
# charset, which cannot represent characters outside the BMP — the
# apple is silently truncated; 'utf8mb4' would be needed for this.
DB = Sequel.connect(
:adapter => 'mysql2',
:database => 'd1',
:user => '<user>',
:password => '<password>',
:encoding => 'utf8')
v1 = '{"a":"b\ud83c\udf4ec"}'
v2 = JSON.parse(v1)
# Prints the decoded value (surrogate pair already combined by JSON.parse).
p v2['a']
DB[:t1].truncate
DB[:t1].insert(f1: v2['a']);
# Reads the row back to show what actually got stored.
p DB[:t1].first[:f1]
or php:
#!/usr/bin/env php
<?php
// Same experiment as the Ruby version: insert a JSON-decoded string
// containing U+1F34E (a non-BMP character encoded as a surrogate pair
// in the JSON source) into MySQL over a connection forced to the
// 3-byte 'utf8' charset via SET NAMES.
$dbh = new PDO('mysql:dbname=d1', '<user>', '<password>', [
PDO::MYSQL_ATTR_INIT_COMMAND => 'SET NAMES utf8',
PDO::ATTR_ERRMODE => PDO::ERRMODE_EXCEPTION,
]);
$dbh->exec('TRUNCATE TABLE t1');
$v1 = '{"a":"b\ud83c\udf4ec"}';
$v2 = json_decode($v1);
// Shows the decoded value before it hits the database.
var_dump($v2->a);
$sth = $dbh->prepare("INSERT INTO t1 VALUES (?)");
$sth->execute([$v2->a]);
// Reads the row back to show what was actually stored (truncated to "b").
$sth = $dbh->query("SELECT * FROM t1");
var_dump($sth->fetch()['f1']);
what gets in the database is b. I'm running mysql-5.1 and the documentation says:
MySQL 5.1 supports two character sets for storing Unicode data:
ucs2, the UCS-2 encoding of the Unicode character set using 16 bits per character.
utf8, a UTF-8 encoding of the Unicode character set using one to three bytes per character.
These two character sets support the characters from the Basic Multilingual Plane (BMP) of Unicode Version 3.0. BMP characters have these characteristics:
Their code values are between 0 and 65535 (or U+0000 .. U+FFFF).
What am I doing wrong?
UPD
$ mysql -BNe 'SHOW CREATE TABLE t1' d1
t1 CREATE TABLE `t1` (\n `f1` varchar(255) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8
It appears those two escape sequences represent only one character: RED APPLE (U+1F34E). The first one being a surrogate. And surrogates are:
The UCS uses surrogates to address characters outside the initial Basic Multilingual Plane without resorting to more than 16 bit byte representations.
So that must be it, the resulting character is outside the BMP. And is not supported by mysql's utf8 character set as such.
in my MySQL 5.1 (from debian) doing
CREATE TABLE t1 (f1 VARCHAR(255));
is effectively creating a LATIN1 table :
mysql> show CREATE TABLE t1 ;
+-------+---------------------------------------------------------------------------------------------+
| Table | Create Table |
+-------+---------------------------------------------------------------------------------------------+
| t1 | CREATE TABLE `t1` (
`f1` varchar(255) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1 |
+-------+---------------------------------------------------------------------------------------------+
So please check first that your MySQL really defaults to UTF-8.
Then, MySQL is known to NOT be able to store every character from BMP table. I don't find references about that, but saw it earlier.
So much so that MySQL 5.5.3 introduced a new utf8mb4 character set with full Unicode support, as stated here: https://dev.mysql.com/doc/refman/5.5/en/charset-unicode-upgrading.html
Finally, even if BMP is saying they are between 0 and 0xFFFF it doesn't mean they are using all of this space as stated here : https://en.wikipedia.org/wiki/Plane_%28Unicode%29#Basic_Multilingual_Plane
original MySQl Tbl_driver
delimiter $$
CREATE TABLE `tbl_driver` (
`_id` int(11) NOT NULL AUTO_INCREMENT,
`Driver_Code` varchar(45) NOT NULL,
`Driver_Name` varchar(45) NOT NULL,
`AddBy_ID` int(11) NOT NULL,
PRIMARY KEY (`_id`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1$$
mysql2sqlite.sh
#!/bin/sh
# Converts a mysqldump file into a Sqlite 3 compatible file. It also extracts the MySQL `KEY xxxxx` from the
# CREATE block and create them in separate commands _after_ all the INSERTs.
# Awk is chosen because it's fast and portable. You can use gawk, original awk or even the lightning fast mawk.
# The mysqldump file is traversed only once.
# Usage: $ ./mysql2sqlite mysqldump-opts db-name | sqlite3 database.sqlite
# Example: $ ./mysql2sqlite --no-data -u root -pMySecretPassWord myDbase | sqlite3 database.sqlite
# Thanks to @artemyk and @gkuenning for their nice tweaks.
# Bug fix: "$@" (all script arguments, individually quoted) — the scraped
# copy had the mangled "$#" (argument count), which passed a number to
# mysqldump instead of the options/db-name.
mysqldump --compatible=ansi --skip-extended-insert --compact "$@" | \
awk '
BEGIN {
	FS=",$"
	print "PRAGMA synchronous = OFF;"
	print "PRAGMA journal_mode = MEMORY;"
	print "BEGIN TRANSACTION;"
}
# CREATE TRIGGER statements have funny commenting. Remember we are in trigger.
/^\/\*.*CREATE.*TRIGGER/ {
	gsub( /^.*TRIGGER/, "CREATE TRIGGER" )
	print
	inTrigger = 1
	next
}
# The end of CREATE TRIGGER has a stray comment terminator
/END \*\/;;/ { gsub( /\*\//, "" ); print; inTrigger = 0; next }
# The rest of triggers just get passed through
inTrigger != 0 { print; next }
# Skip other comments
/^\/\*/ { next }
# Print all `INSERT` lines. The single quotes are protected by another single quote.
/INSERT/ {
	gsub( /\\\047/, "\047\047" )
	gsub(/\\n/, "\n")
	gsub(/\\r/, "\r")
	gsub(/\\"/, "\"")
	gsub(/\\\\/, "\\")
	gsub(/\\\032/, "\032")
	print
	next
}
# Print the `CREATE` line as is and capture the table name.
/^CREATE/ {
	print
	if ( match( $0, /\"[^\"]+/ ) ) tableName = substr( $0, RSTART+1, RLENGTH-1 )
}
# Replace `FULLTEXT KEY` or any other `XXXXX KEY` except PRIMARY by `KEY`
/^  [^"]+KEY/ && !/^  PRIMARY KEY/ { gsub( /.+KEY/, "  KEY" ) }
# Get rid of field lengths in KEY lines
/ KEY/ { gsub(/\([0-9]+\)/, "") }
# Print all fields definition lines except the `KEY` lines.
/^  / && !/^(  KEY|\);)/ {
	gsub( /AUTO_INCREMENT|auto_increment/, "" )
	gsub( /(CHARACTER SET|character set) [^ ]+ /, "" )
	gsub( /DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP|default current_timestamp on update current_timestamp/, "" )
	gsub( /(COLLATE|collate) [^ ]+ /, "" )
	gsub(/(ENUM|enum)[^)]+\)/, "text ")
	gsub(/(SET|set)\([^)]+\)/, "text ")
	gsub(/UNSIGNED|unsigned/, "")
	if (prev) print prev ","
	prev = $1
}
# `KEY` lines are extracted from the `CREATE` block and stored in array for later print
# in a separate `CREATE KEY` command. The index name is prefixed by the table name to
# avoid a sqlite error for duplicate index name.
/^(  KEY|\);)/ {
	if (prev) print prev
	prev=""
	if ($0 == ");"){
		print
	} else {
		if ( match( $0, /\"[^"]+/ ) ) indexName = substr( $0, RSTART+1, RLENGTH-1 )
		if ( match( $0, /\([^()]+/ ) ) indexKey = substr( $0, RSTART+1, RLENGTH-1 )
		key[tableName]=key[tableName] "CREATE INDEX \"" tableName "_" indexName "\" ON \"" tableName "\" (" indexKey ");\n"
	}
}
# Print all `KEY` creation lines.
END {
	for (table in key) printf key[table]
	print "END TRANSACTION;"
}
'
exit 0
when execute this script, my sqlite database become like this
Sqlite Tbl_Driver
CREATE TABLE "tbl_driver" (
"_id" int(11) NOT NULL ,
"Driver_Code" varchar(45) NOT NULL,
"Driver_Name" varchar(45) NOT NULL,
"AddBy_ID" int(11) NOT NULL,
PRIMARY KEY ("_id")
)
i want to change "_id" int(11) NOT NULL ,
become like this "_id" int(11) NOT NULL PRIMARY KEY AUTO_INCREMENT,
or
become like this "_id" int(11) NOT NULL AUTO_INCREMENT,
with out primary key also can
any idea to modify this script?
The AUTO_INCREMENT keyword is specific to MySQL.
SQLite has a keyword AUTOINCREMENT (without the underscore) which means the column auto-generates monotonically increasing values that have never been used before in the table.
If you leave out the AUTOINCREMENT keyword (as the script you show does currently), SQLite assigns the ROWID to a new row, which means it will be a value 1 greater than the current greatest ROWID in the table. This could re-use values if you delete rows from the high end of the table and then insert new rows.
See http://www.sqlite.org/autoinc.html for more details.
If you want to modify this script to add the AUTOINCREMENT keyword, it looks like you could modify this line:
gsub( /AUTO_INCREMENT|auto_increment/, "" )
To this:
gsub( /AUTO_INCREMENT|auto_increment/, "AUTOINCREMENT" )
Re your comments:
Okay I tried it on a dummy table using sqlite3.
sqlite> create table foo (
i int autoincrement,
primary key (i)
);
Error: near "autoincrement": syntax error
Apparently SQLite requires that autoincrement follow a column-level primary key constraint. It's not happy with the MySQL convention of putting the pk constraint at the end, as a table-level constraint. That's supported by the syntax diagrams in the SQLite documentation for CREATE TABLE.
Let's try putting primary key before autoincrement.
sqlite> create table foo (
i int primary key autoincrement
);
Error: AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY
And apparently SQLite doesn't like "INT", it prefers "INTEGER":
sqlite> create table foo (
i integer primary key autoincrement
);
sqlite>
Success!
So your awk script is not able to translate MySQL table DDL into SQLite as easily as you thought it would.
Re your comments:
You're trying to duplicate the work of a Perl module called SQL::Translator, which is a lot of work. I'm not going to write a full working script for you.
To really solve this, and make a script that can automate all syntax changes to make the DDL compatible with SQLite, you would need to implement a full parser for SQL DDL. This is not practical to do in awk.
I recommend that you use your script for some of the cases of keyword substitution, and then if further changes are necessary, fix them by hand in a text editor.
Also consider making compromises. If it's too difficult to reformat the DDL to use the AUTOINCREMENT feature in SQLite, consider if the default ROWID functionality is close enough. Read the link I posted above to understand the differences.
I found a weird solution but it works with PHP Doctrine.
Create a Mysql database.
Create Doctrine 2 Entities From database, make up all consistences.
Doctrine 2 has a feature that compare the Entities to database and fix database to validate to entities.
Exporting the database by mysql2sqlite.sh does exactly what you describe.
so then you configure the doctrine driver to use the sqlite db and:
by composer:
vendor/bin/doctrine-module orm:schema-tool:update --force
It fix up the auto increment without need to do in hand.
I have a php code which parse XML files and store the parsed information in my database(MySQL) on my Linux server.
all my tables collation is 'utf8_unicode_ci' even my database collation is 'utf8_unicode_ci'
I have a lot of languages in my XML files (Turkish, Swedish, french, ...) and I want the special characters in these languages to be stored in their original form but my problem is that an XML pattern like this :
<match date="24.08.2012" time="17:00" status="FT" venue="Stadiumi Loro Boriçi (Shkodër)" venue_id="1557" static_id="1305963" id="1254357">
the venue value will be stored in my database like this:
Stadiumi Loro Boriçi (Shkodër)
can anybody help me to store the value in my database as it is ???
Try to use "SET NAMES utf8" command before the query. As the manual says:
SET NAMES indicates what character set the client will use to send SQL statements to the server.
I tried to imitate your case. Created a table test1:
DROP TABLE IF EXISTS `test1`;
CREATE TABLE `test1` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`value` varchar(20) CHARACTER SET utf8 COLLATE utf8_unicode_ci NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8;
And tried to insert a string with French special characters and Chinese. For me it worked ok even without SET NAMES.
<?php
// Test case: insert a string mixing French accented characters and
// Chinese (all BMP characters) into a utf8 column, then read it back.
// SET NAMES utf8 makes the client<->server connection use UTF-8.
$str = "çëÙùÛûÜüÔô汉语漢語";
try {
$conn = new PDO('mysql:host=localhost;dbname=test', 'user', '');
$conn->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);
$conn->exec("SET NAMES utf8");
$stmt = $conn->prepare('INSERT INTO test1 SET value = :value');
$stmt->execute(array('value' => $str));
//select the inserted row
$stmt = $conn->prepare('SELECT * FROM test1 WHERE id = :id');
$stmt->execute(array('id' => 1));
while($row = $stmt->fetch(PDO::FETCH_ASSOC)) {
print_r($row);
}
}
catch(PDOException $e) {
echo 'ERROR: ' . $e->getMessage();
}
It worked correctly, printing this:
Array
(
[id] => 1
[value] => çëÙùÛûÜüÔô汉语漢語
)
Also when you are testing don't read directly from console, redirect the output into a file and open it with a text editor:
php test.php > 1.txt