GROUP_CONCAT with ORDER BY, but results are not ordered - MySQL

SELECT
    *,
    (SELECT GROUP_CONCAT(url SEPARATOR '$$')
     FROM project_photos
     WHERE project_id = projects.id
     ORDER BY priority) AS images
FROM projects
WHERE catID = 2
LIMIT 0, 5
The above query runs fine, but the images column is not ordered by priority, and I am unable to understand why this is happening.
// Structure for table projects is
CREATE TABLE `projects` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`catID` int(11) NOT NULL,
`title` varchar(255) NOT NULL,
`description` varchar(400) NOT NULL,
`url` varchar(255) DEFAULT NULL,
`tags` varchar(255) DEFAULT NULL,
`featured` varchar(3) NOT NULL DEFAULT 'No',
`featured_url` varchar(255) DEFAULT NULL,
`order` int(11) DEFAULT NULL,
`created_at` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
KEY `catID` (`catID`),
CONSTRAINT `FK_catID` FOREIGN KEY (`catID`) REFERENCES `category` (`catID`) ON DELETE CASCADE ON UPDATE CASCADE
) ENGINE=InnoDB AUTO_INCREMENT=48 DEFAULT CHARSET=latin1;
// Structure for table project_photos is
CREATE TABLE `project_photos` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`url` varchar(250) DEFAULT NULL,
`project_id` int(11) DEFAULT NULL,
`priority` int(11) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=347 DEFAULT CHARSET=utf8;

Ordering with ORDER BY happens after aggregate functions like GROUP_CONCAT() have been applied. To sort the result of GROUP_CONCAT(), put the ORDER BY inside its argument list:
SELECT
GROUP_CONCAT(url ORDER BY priority SEPARATOR '$$')
FROM project_photos
WHERE project_id = projects.id

Try using ORDER BY inside the GROUP_CONCAT:
SELECT
    *,
    (SELECT GROUP_CONCAT(url ORDER BY priority SEPARATOR '$$')
     FROM project_photos
     WHERE project_id = projects.id
    ) AS images
FROM projects
WHERE catID = 2
LIMIT 0, 5

You should put the ORDER BY clause inside the GROUP_CONCAT:
GROUP_CONCAT(url SEPARATOR '$$' ORDER BY priority)
The ORDER BY outside orders the result rows. Since the subquery returns only one row, it does nothing there. Inside GROUP_CONCAT, it orders the elements of the group created by the GROUP BY clause before they are concatenated.
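One more thing worth checking once the ordering is fixed: GROUP_CONCAT() silently truncates its result at group_concat_max_len, which defaults to 1024 bytes, and a concatenated list of URLs can easily exceed that. A minimal sketch (the new limit here is just an illustrative value):
-- raise the limit for this session before running the query
SET SESSION group_concat_max_len = 100000;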

Related

Getting Group by with left join to work in MySQL 5.7

I'm trying to get a query to only return a single row for each unique ID.
Given the following:
CREATE TABLE `groups` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `name` varchar(45) DEFAULT NULL,
  `type` int(1) DEFAULT NULL,
  `date` datetime DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=latin1;
CREATE TABLE `groupmembers` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `username` varchar(255) DEFAULT NULL,
  `readmessage` int(1) DEFAULT NULL,
  `status` int(1) DEFAULT NULL,
  `group_id` int(20) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=latin1;
CREATE TABLE `messages` (
  `id` bigint(10) NOT NULL AUTO_INCREMENT,
  `send_to` varchar(50) NOT NULL DEFAULT '',
  `name` varchar(50) NOT NULL DEFAULT '',
  `email` varchar(50) NOT NULL DEFAULT '',
  `subject` varchar(100) NOT NULL DEFAULT 'No Subject',
  `message` longtext NOT NULL,
  `timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
  `message_read` enum('Yes','No') NOT NULL DEFAULT 'No',
  `send_time` datetime NOT NULL DEFAULT '1970-01-01 00:00:00',
  `hide_message` enum('0','1') NOT NULL,
  `grouper` int(20) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1;
insert into groups (type,date) values ('0',now());
insert into groupmembers (username,group_id) values ('user1',5),('user2',5);
insert into messages (send_to,name,send_time,grouper,message) values ('user1','user2','2019-10-09 19:18:12',5,'hello');
insert into messages (send_to,name,send_time,grouper,message) values ('','user2','2019-10-10 09:18:39',5,'hello');
In MySQL 5.6 this query works as anticipated:
select groups.id, send_to, messages.name, max(send_time)
from groups
inner join messages on groups.id = messages.grouper
left join groupmembers on groups.id = groupmembers.group_id
where groupmembers.username = 'user1'
group by groups.id
ORDER BY MAX(send_time)
In MySQL 5.7 this returns an error:
Error Code: 1055. Expression #2 of SELECT list is not in GROUP BY clause and contains nonaggregated column 'messages.send_to' which is not functionally dependent on columns in GROUP BY clause; this is incompatible with sql_mode=only_full_group_by
What do I need to do to get this to work in MySQL 5.7 without adjusting the SQL Mode?
See SQL Fiddle here: http://sqlfiddle.com/#!9/9061da/4
It works as expected on the SQL fiddle as that is running 5.6.
Just a guess...
SELECT DISTINCT g.id
, m.send_to
, m.name
, m.send_time
FROM groups g
JOIN messages m
ON m.grouper = g.id
JOIN
( SELECT send_to
, name
, MAX(send_time) send_time
FROM messages m
GROUP
BY send_to
, name
) x
ON x.send_to = m.send_to
AND x.name = m.name
AND x.send_time = m.send_time
JOIN groupmembers gm
ON gm.group_id = g.id
WHERE gm.username = 'user1'
ORDER
BY send_time;
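Another option that works on MySQL 5.7 without touching the SQL mode is ANY_VALUE(), which tells the server you accept an arbitrary value from each group. A sketch based on the original query; note that ANY_VALUE() does not guarantee the value comes from the row holding MAX(send_time):
SELECT groups.id,
       ANY_VALUE(send_to)       AS send_to,
       ANY_VALUE(messages.name) AS name,
       MAX(send_time)           AS send_time
FROM groups
INNER JOIN messages ON groups.id = messages.grouper
LEFT JOIN groupmembers ON groups.id = groupmembers.group_id
WHERE groupmembers.username = 'user1'
GROUP BY groups.id
ORDER BY MAX(send_time);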

MySQL Select Query running very slow

I have a MySQL query that works but is very slow. I am guessing this is due to the number of joins.
SELECT
order_header.order_head_id,
order_header.order_date,
order_header.status,
suppliers.supplier,
categories.category,
order_header.user,
order_header.sage_ref,
SUM(order_lines.total_price) AS price
FROM
order_header
LEFT JOIN
order_lines ON order_header.order_head_id = order_lines.order_head_id
LEFT JOIN
suppliers ON order_header.supplier_id = suppliers.supp_id
LEFT JOIN
categories ON order_header.category = categories.cat_id
WHERE
order_header.status LIKE '%'
AND order_header.order_head_id LIKE '%'
AND order_header.user LIKE '%'
GROUP BY order_header.order_head_id
ORDER BY order_head_id DESC
LIMIT 50;
Results of the EXPLAIN query
SHOW CREATE TABLE results
CREATE TABLE `categories` (
`cat_id` int(11) NOT NULL AUTO_INCREMENT,
`category` varchar(45) DEFAULT NULL,
`status` varchar(45) DEFAULT NULL,
PRIMARY KEY (`cat_id`)
) ENGINE=InnoDB AUTO_INCREMENT=63 DEFAULT CHARSET=latin1
CREATE TABLE `order_header` (
`order_head_id` int(11) NOT NULL AUTO_INCREMENT,
`status` varchar(45) DEFAULT NULL,
`category` varchar(45) NOT NULL,
`order_date` date DEFAULT NULL,
`supplier_id` varchar(45) NOT NULL,
`user` varchar(45) DEFAULT NULL,
`sage_ref` varchar(45) DEFAULT NULL,
`query_notes` varchar(500) DEFAULT NULL,
PRIMARY KEY (`order_head_id`)
) ENGINE=InnoDB AUTO_INCREMENT=2249 DEFAULT CHARSET=latin1
CREATE TABLE `order_lines` (
`order_lines_id` int(11) NOT NULL AUTO_INCREMENT,
`order_head_id` int(11) DEFAULT NULL,
`qty` int(11) DEFAULT NULL,
`description` varchar(255) DEFAULT NULL,
`unit_price` decimal(65,2) DEFAULT NULL,
`total_price` decimal(65,2) DEFAULT NULL,
PRIMARY KEY (`order_lines_id`)
) ENGINE=InnoDB AUTO_INCREMENT=3981 DEFAULT CHARSET=latin1
CREATE TABLE `suppliers` (
`supp_id` int(11) NOT NULL AUTO_INCREMENT,
`supplier` varchar(255) DEFAULT NULL,
`status` varchar(225) DEFAULT NULL,
PRIMARY KEY (`supp_id`)
) ENGINE=InnoDB AUTO_INCREMENT=161 DEFAULT CHARSET=latin1
SQL Version 5.6.30
I am not great at MySQL and was wondering if anyone can see a way to improve the query so that it runs quicker.
Your help would be gratefully appreciated.
Many thanks,
John
It can make sense to wrap the first (left) join in a GROUP BY subquery. The GROUP BY and LIMIT will limit the number of rows that feed into the following two joins:
SELECT
x.order_head_id,
x.order_date,
x.status,
suppliers.supplier,
categories.category,
x.user,
x.sage_ref,
x.price
FROM (
SELECT
order_header.supplier_id,
order_header.category,
order_header.order_head_id,
order_header.order_date,
order_header.status,
order_header.user,
order_header.sage_ref,
SUM(order_lines.total_price) AS price
FROM order_header
LEFT JOIN order_lines ON order_header.order_head_id = order_lines.order_head_id
WHERE order_header.status LIKE '%'
AND order_header.order_head_id LIKE '%'
AND order_header.user LIKE '%'
GROUP BY order_header.order_head_id
ORDER BY order_head_id DESC
LIMIT 50
) x
LEFT JOIN suppliers ON x.supplier_id = suppliers.supp_id
LEFT JOIN categories ON x.category = categories.cat_id
ORDER BY order_head_id DESC
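Independently of the rewrite, the SHOW CREATE TABLE output shows that order_lines has only its primary key, so the LEFT JOIN has to scan order_lines for every header row. Assuming you can alter the table, an index on the join column should help:
-- order_lines has no index on order_head_id; this covers the join
ALTER TABLE order_lines ADD INDEX idx_order_head_id (order_head_id);
Also note that the LIKE '%' conditions only exclude NULLs (and force a string comparison on the integer order_head_id column); if they are placeholders for optional filters built dynamically, it is better to omit them entirely when no filter is set.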

Optimize MySQL query used to find matches between two tables

On a project I'm working on, I have two tables:
consumption: Contains historical orders from customers with fields specifying the features of the product they have bought (one product per row)
product: Contains current product stock
The database engine is InnoDB.
Goals:
The application must show matches from both sides, I mean:
When I list current products stock, I want to show a column that displays how many historical orders match with a particular product
When I list the historical orders, I want to see how many products match with a particular historical order
Database structure for consumption and product tables plus other related tables:
CREATE TABLE `consumption` (
`id` INT(11) NOT NULL AUTO_INCREMENT,
`created_by_id` INT(11) NULL DEFAULT NULL,
`client_id` INT(11) NOT NULL,
`data_import_id` INT(11) NULL DEFAULT NULL,
`tmp_consumption_id` INT(11) NULL DEFAULT NULL,
`material_id` INT(11) NULL DEFAULT NULL,
`quality_id` INT(11) NULL DEFAULT NULL,
`thick` DECIMAL(10,3) NULL DEFAULT NULL,
`thick_max` DECIMAL(10,3) NULL DEFAULT NULL,
`width` DECIMAL(10,2) NULL DEFAULT NULL,
`width_max` DECIMAL(10,2) NULL DEFAULT NULL,
`long` INT(11) NULL DEFAULT NULL,
`long_max` INT(11) NULL DEFAULT NULL,
`purchase_price` DECIMAL(10,2) NULL DEFAULT NULL,
`sale_price` DECIMAL(10,2) NULL DEFAULT NULL,
`comments` VARCHAR(255) NULL DEFAULT NULL,
`annual_consumption` DECIMAL(10,3) NULL DEFAULT NULL,
`type` ENUM('consumption','request') NULL DEFAULT 'consumption',
`date_add` DATETIME NOT NULL,
`date_upd` DATETIME NOT NULL,
`covering_grammage` VARCHAR(64) NULL DEFAULT NULL,
`asp_sup_acab` VARCHAR(64) NULL DEFAULT NULL,
PRIMARY KEY (`id`),
INDEX `fk_consumption_client1` (`client_id`),
INDEX `created_by_id` (`created_by_id`),
INDEX `material_id` (`material_id`),
INDEX `quality_id` (`quality_id`),
CONSTRAINT `consumption_ibfk_1` FOREIGN KEY (`material_id`) REFERENCES `material` (`id`) ON UPDATE NO ACTION ON DELETE NO ACTION,
CONSTRAINT `consumption_ibfk_2` FOREIGN KEY (`quality_id`) REFERENCES `quality` (`id`) ON UPDATE NO ACTION ON DELETE NO ACTION,
CONSTRAINT `fk_consumption_client1` FOREIGN KEY (`client_id`) REFERENCES `client` (`id`) ON UPDATE NO ACTION ON DELETE NO ACTION
)
COLLATE='utf8_general_ci'
ENGINE=InnoDB
AUTO_INCREMENT=30673
;
CREATE TABLE `product` (
`id` INT(11) NOT NULL AUTO_INCREMENT,
`warehouse_id` INT(11) NULL DEFAULT NULL,
`created_by_id` INT(11) NULL DEFAULT NULL,
`data_import_id` INT(11) NULL DEFAULT NULL,
`tmp_product_id` INT(11) NULL DEFAULT NULL,
`code` VARCHAR(32) NOT NULL,
`material_id` INT(11) NULL DEFAULT NULL,
`quality_id` INT(11) NULL DEFAULT NULL,
`covering_id` INT(11) NULL DEFAULT NULL,
`finish_id` INT(11) NULL DEFAULT NULL,
`source` VARCHAR(128) NULL DEFAULT NULL,
`thickness` DECIMAL(10,3) NULL DEFAULT NULL,
`width` INT(11) NULL DEFAULT NULL,
`tons` DECIMAL(10,3) NULL DEFAULT NULL,
`re` INT(11) NULL DEFAULT NULL,
`rm` INT(11) NULL DEFAULT NULL,
`a_percent` INT(11) NULL DEFAULT NULL,
`comments` VARCHAR(255) NULL DEFAULT NULL,
`price` DECIMAL(10,2) NULL DEFAULT NULL,
`deleted` TINYINT(1) NOT NULL DEFAULT '0',
`date_add` DATETIME NOT NULL,
`date_upd` DATETIME NOT NULL,
PRIMARY KEY (`id`),
INDEX `warehouse_id` (`warehouse_id`),
INDEX `material_id` (`material_id`),
INDEX `quality_id` (`quality_id`),
INDEX `covering_id` (`covering_id`),
INDEX `finish_id` (`finish_id`),
CONSTRAINT `product_ibfk_1` FOREIGN KEY (`material_id`) REFERENCES `material` (`id`) ON UPDATE NO ACTION ON DELETE NO ACTION,
CONSTRAINT `product_ibfk_2` FOREIGN KEY (`quality_id`) REFERENCES `quality` (`id`) ON UPDATE NO ACTION ON DELETE NO ACTION,
CONSTRAINT `product_ibfk_3` FOREIGN KEY (`covering_id`) REFERENCES `covering` (`id`) ON UPDATE NO ACTION ON DELETE NO ACTION,
CONSTRAINT `product_ibfk_4` FOREIGN KEY (`finish_id`) REFERENCES `finish` (`id`) ON UPDATE NO ACTION ON DELETE NO ACTION,
CONSTRAINT `product_ibfk_5` FOREIGN KEY (`warehouse_id`) REFERENCES `warehouse` (`id`) ON UPDATE NO ACTION ON DELETE NO ACTION
)
COLLATE='utf8_general_ci'
ENGINE=InnoDB
AUTO_INCREMENT=740
;
CREATE TABLE `client` (
`id` INT(11) NOT NULL AUTO_INCREMENT,
`zone_id` INT(11) NULL DEFAULT NULL,
`zone2_id` INT(11) NULL DEFAULT NULL,
`code` VARCHAR(64) NOT NULL,
`business_name` VARCHAR(255) NULL DEFAULT NULL,
`fiscal_name` VARCHAR(255) NULL DEFAULT NULL,
`nif` VARCHAR(15) NULL DEFAULT NULL,
`contact_short_name` VARCHAR(128) NULL DEFAULT NULL,
`contact_full_name` VARCHAR(128) NULL DEFAULT NULL,
`email` VARCHAR(255) NULL DEFAULT NULL,
`group` VARCHAR(255) NULL DEFAULT NULL,
`status` TINYINT(1) NOT NULL DEFAULT '1',
`date_add` DATETIME NOT NULL,
`date_upd` DATETIME NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `code_UNIQUE` (`code`),
INDEX `zone_id` (`zone_id`),
INDEX `zone2_id` (`zone2_id`),
CONSTRAINT `client_ibfk_1` FOREIGN KEY (`zone_id`) REFERENCES `zone` (`id`) ON UPDATE NO ACTION ON DELETE NO ACTION
)
COLLATE='utf8_general_ci'
ENGINE=InnoDB
AUTO_INCREMENT=443
;
CREATE TABLE `client_group` (
`id` INT(11) NOT NULL AUTO_INCREMENT,
`code` VARCHAR(15) NOT NULL,
`date_add` DATETIME NOT NULL,
`date_upd` DATETIME NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `code` (`code`)
)
COLLATE='utf8_general_ci'
ENGINE=InnoDB
AUTO_INCREMENT=49
;
CREATE TABLE `client_has_group` (
`client_id` INT(11) NOT NULL,
`group_id` INT(11) NOT NULL,
INDEX `client_id` (`client_id`),
INDEX `group_id` (`group_id`),
CONSTRAINT `client_has_group_ibfk_1` FOREIGN KEY (`client_id`) REFERENCES `client` (`id`) ON UPDATE NO ACTION ON DELETE NO ACTION,
CONSTRAINT `client_has_group_ibfk_2` FOREIGN KEY (`group_id`) REFERENCES `client_group` (`id`) ON UPDATE NO ACTION ON DELETE NO ACTION
)
COLLATE='utf8_general_ci'
ENGINE=InnoDB
;
CREATE TABLE `covering` (
`id` INT(11) NOT NULL AUTO_INCREMENT,
`code` VARCHAR(128) NOT NULL,
`group` VARCHAR(128) NULL DEFAULT NULL,
`equivalence` VARCHAR(128) NULL DEFAULT NULL,
`date_add` DATETIME NOT NULL,
`date_upd` DATETIME NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `code_UNIQUE` (`code`)
)
COLLATE='utf8_general_ci'
ENGINE=InnoDB
AUTO_INCREMENT=55
;
CREATE TABLE `finish` (
`id` INT(11) NOT NULL AUTO_INCREMENT,
`code` VARCHAR(128) NOT NULL,
`group` VARCHAR(128) NULL DEFAULT NULL,
`equivalence` VARCHAR(128) NULL DEFAULT NULL,
`date_add` DATETIME NOT NULL,
`date_upd` DATETIME NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `code_UNIQUE` (`code`)
)
COLLATE='utf8_general_ci'
ENGINE=InnoDB
AUTO_INCREMENT=42
;
CREATE TABLE `material` (
`id` INT(11) NOT NULL AUTO_INCREMENT,
`code` VARCHAR(128) NOT NULL,
`group` VARCHAR(128) NULL DEFAULT NULL,
`equivalence` VARCHAR(128) NULL DEFAULT NULL,
`date_add` DATETIME NOT NULL,
`date_upd` DATETIME NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `code_UNIQUE` (`code`),
INDEX `group` (`group`),
INDEX `equivalence` (`equivalence`)
)
COLLATE='utf8_general_ci'
ENGINE=InnoDB
AUTO_INCREMENT=46
;
CREATE TABLE `quality` (
`id` INT(11) NOT NULL AUTO_INCREMENT,
`code` VARCHAR(128) NOT NULL,
`group` VARCHAR(128) NULL DEFAULT NULL,
`equivalence` VARCHAR(128) NULL DEFAULT NULL,
`date_add` DATETIME NOT NULL,
`date_upd` DATETIME NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `code_UNIQUE` (`code`),
INDEX `group` (`group`),
INDEX `equivalence` (`equivalence`)
)
COLLATE='utf8_general_ci'
ENGINE=InnoDB
AUTO_INCREMENT=980
;
CREATE TABLE `user_filter` (
`id` INT(11) NOT NULL AUTO_INCREMENT,
`user_id` INT(11) NOT NULL,
`filter_type` ENUM('consumption','product') NOT NULL DEFAULT 'consumption',
`name` VARCHAR(255) NOT NULL,
`is_default` TINYINT(1) NOT NULL DEFAULT '0',
`client_status` TINYINT(1) NULL DEFAULT NULL,
`client_group` VARCHAR(45) NULL DEFAULT NULL,
`material` VARCHAR(15) NULL DEFAULT NULL,
`quality` VARCHAR(64) NULL DEFAULT NULL,
`thickness` VARCHAR(45) NULL DEFAULT NULL,
`width` VARCHAR(45) NULL DEFAULT NULL,
`tons` VARCHAR(45) NULL DEFAULT NULL,
`covering` VARCHAR(45) NULL DEFAULT NULL,
`finish` VARCHAR(45) NULL DEFAULT NULL,
`re` VARCHAR(45) NULL DEFAULT NULL,
`rm` VARCHAR(45) NULL DEFAULT NULL,
`a_percent` VARCHAR(45) NULL DEFAULT NULL,
`comments` VARCHAR(255) NULL DEFAULT NULL,
`price` VARCHAR(45) NULL DEFAULT NULL,
`warehouse` VARCHAR(45) NULL DEFAULT NULL,
`date` VARCHAR(45) NULL DEFAULT NULL,
`type` ENUM('consumption','request') NULL DEFAULT NULL,
`date_add` DATETIME NOT NULL,
`date_upd` DATETIME NOT NULL,
PRIMARY KEY (`id`),
INDEX `fk_user_filter_user1` (`user_id`),
INDEX `filter_type` (`filter_type`),
CONSTRAINT `fk_user_filter_user1` FOREIGN KEY (`user_id`) REFERENCES `user` (`id`) ON UPDATE NO ACTION ON DELETE NO ACTION
)
COLLATE='utf8_general_ci'
ENGINE=InnoDB
AUTO_INCREMENT=5
;
CREATE TABLE `warehouse` (
`id` INT(11) NOT NULL AUTO_INCREMENT,
`name` VARCHAR(128) NOT NULL,
`zone_id` INT(11) NULL DEFAULT NULL,
`zone2_id` INT(11) NULL DEFAULT NULL,
`date_add` DATETIME NOT NULL,
`date_upd` DATETIME NOT NULL,
PRIMARY KEY (`id`),
INDEX `zone_id` (`zone_id`),
INDEX `zone2_id` (`zone2_id`),
CONSTRAINT `warehouse_ibfk_1` FOREIGN KEY (`zone_id`) REFERENCES `zone` (`id`) ON UPDATE NO ACTION ON DELETE NO ACTION
)
COLLATE='utf8_general_ci'
ENGINE=InnoDB
AUTO_INCREMENT=37
;
CREATE TABLE `zone` (
`id` INT(11) NOT NULL AUTO_INCREMENT,
`zone2_id` INT(11) NULL DEFAULT NULL,
`name` VARCHAR(128) NOT NULL,
`date_add` DATETIME NOT NULL,
`date_upd` DATETIME NOT NULL,
PRIMARY KEY (`id`),
INDEX `zone2_id` (`zone2_id`)
)
COLLATE='utf8_general_ci'
ENGINE=InnoDB
AUTO_INCREMENT=49
;
What I have done to be able to find matches between the two tables:
I have created a LEFT JOIN query between the consumption and product tables (which also joins additional tables where required).
It looks something like this:
SELECT cons.`id` as `consumption_id`,
       cons.`client_id` as `consumption_client_id`,
       cons.`material_id` as `consumption_material_id`,
       cons.`quality_id` as `consumption_quality_id`,
       cons.`thick` as `consumption_thick`,
       cons.`thick_max` as `consumption_thick_max`,
       cons.`width` as `consumption_width`,
       cons.`width_max` as `consumption_width_max`,
       cons.`long` as `consumption_long`,
       cons.`long_max` as `consumption_long_max`,
       cons.`type` as `consumption_type`,
       cons.`date_add` as `consumption_date_add`,
       prod.`id` as `product_id`,
       prod.`warehouse_id` as `product_warehouse_id`,
       prod.`code` as `product_code`,
       prod.`material_id` as `product_material_id`,
       prod.`quality_id` as `product_quality_id`,
       prod.`covering_id` as `product_covering_id`,
       prod.`finish_id` as `product_finish_id`,
       prod.`thickness` as `product_thickness`,
       prod.`width` as `product_width`,
       prod.`tons` as `product_tons`
FROM consumption cons
INNER JOIN client cli
ON cli.id=cons.client_id
LEFT JOIN client_has_group cli_gr
ON cli_gr.client_id=cons.client_id
LEFT JOIN product prod
ON
(
(cons.material_id=prod.material_id)
OR
prod.material_id IN (
SELECT id FROM material WHERE `equivalence`=(
SELECT `equivalence` FROM material WHERE id=cons.material_id
)
AND `group`=(
SELECT `group` FROM material WHERE id=cons.material_id
)
)
)
AND
(
(cons.quality_id=prod.quality_id)
OR
prod.quality_id IN (
SELECT id FROM quality WHERE `equivalence`=(
SELECT `equivalence` FROM quality WHERE id=cons.quality_id
)
AND `group`=(
SELECT `group` FROM quality WHERE id=cons.quality_id
)
)
)
AND (prod.thickness >= (cons.thick - 0.1) AND prod.thickness <= (cons.thick_max + 0.1))
AND (prod.width >= (cons.width - 1000) AND prod.width <= (cons.width_max + 1000))
WHERE 1 > 0 AND prod.deleted=0 AND cli.status=1 AND cons.date_add >= '2017-10-08 00:00:00'
GROUP BY cons.id, prod.id
When I want to list products and show the consumption matches for every product, I have a main query that simply lists the products; then I join that query with the previous query from above and count the matches, grouping by product id.
SELECT t.*,
count(f.consumption_id) AS matchesCount
FROM `product` t
LEFT JOIN (...previous query here...) f ON f.product_id=t.id
GROUP BY t.id
Other notes/considerations:
The application uses a couple of fields that have the same name in both tables, in order to find matches using ON in the JOIN
The application also uses more complex business logic, for example, product material can be equal or can be inside of an equivalence table or group
The user can save personal filters; that is what the user_filter table is for, so as a user I can have multiple "searches" saved and quickly switch from one to the other
The matches have to be displayed LIVE, I mean calculated on the fly, not by any cronjob, because user filters change all the time
The amount of data the application will be working with right now is about 35k rows in the consumption table and about 1.5k rows in the product table
The server where the application is hosted is a dedicated server (64GB RAM) running MySQL
I had good performance with 3k rows of consumptions and 100 products; now, with 10k+ consumptions and 600 products, I am starting to get gateway timeouts from nginx. I guess the queries take too long.
I already know that if the ON clause has a lot of conditions it will run faster because the result sets are smaller, but if the condition is very wide it will time out; I guess the resulting rows are too many. Maybe the join produces millions of rows.
What I'd like to ask is:
Am I on the right path for doing "live matches" of data between both tables? Is using the JOIN a good solution? I cannot think of another way to do it.
Apart from trying to optimize queries and indexes, is there any server tweaking I could do to take full advantage of the server hardware?
Any other tips or techniques from someone who has done something similar in another project?
Update 1: Adding the full query for listing products with consumption matches:
SELECT t.*,
count(f.consumption_id) AS matchesCount
FROM `product` t
LEFT JOIN (
SELECT cons.`id` as `consumption_id`,
       cons.`client_id` as `consumption_client_id`,
       cons.`material_id` as `consumption_material_id`,
       cons.`quality_id` as `consumption_quality_id`,
       cons.`thick` as `consumption_thick`,
       cons.`thick_max` as `consumption_thick_max`,
       cons.`width` as `consumption_width`,
       cons.`width_max` as `consumption_width_max`,
       cons.`long` as `consumption_long`,
       cons.`long_max` as `consumption_long_max`,
       cons.`type` as `consumption_type`,
       cons.`date_add` as `consumption_date_add`,
       prod.`id` as `product_id`,
       prod.`warehouse_id` as `product_warehouse_id`,
       prod.`code` as `product_code`,
       prod.`material_id` as `product_material_id`,
       prod.`quality_id` as `product_quality_id`,
       prod.`covering_id` as `product_covering_id`,
       prod.`finish_id` as `product_finish_id`,
       prod.`thickness` as `product_thickness`,
       prod.`width` as `product_width`,
       prod.`tons` as `product_tons`
FROM consumption cons
INNER JOIN client cli
ON cli.id=cons.client_id
LEFT JOIN client_has_group cli_gr
ON cli_gr.client_id=cons.client_id
LEFT JOIN product prod
ON
(
(cons.material_id=prod.material_id)
OR
prod.material_id IN (
SELECT id FROM material WHERE `equivalence`=(
SELECT `equivalence` FROM material WHERE id=cons.material_id
)
AND `group`=(
SELECT `group` FROM material WHERE id=cons.material_id
)
)
)
WHERE 1 > 0 AND prod.deleted=0 AND cli.status=1 AND cons.date_add >= '2017-10-08 00:00:00'
GROUP BY cons.id, prod.id
) f ON f.product_id=t.id
GROUP BY t.id
Query time: 00:02:41 (+ 0,078 sec. network).
Note: the subquery JOIN, run separately, produces 600k rows. I'm thinking of grouping it somehow to make it smaller.
Update 2: Major improvement achieved by doing the count inside the subquery, which reduces the result set used for the JOIN
Instead of returning 600k+ rows, the subquery now returns only as many rows as there are products or consumptions, depending on what you are looking for. For that, matchesCount has been moved inside the subquery instead of outside, and the GROUP BY has been changed depending on which list you want to display.
This is how the final queries look right now:
List consumption and count products that match each consumption:
SELECT SQL_NO_CACHE `t`.*,
IFNULL(f.matchesCount, 0) AS matchesCount
FROM `consumption` `t`
LEFT JOIN
(SELECT cons.`id` AS `consumption_id`,
cons.`client_id` AS `consumption_client_id`,
cons.`material_id` AS `consumption_material_id`,
cons.`quality_id` AS `consumption_quality_id`,
cons.`thick` AS `consumption_thick`,
cons.`thick_max` AS `consumption_thick_max`,
cons.`width` AS `consumption_width`,
cons.`width_max` AS `consumption_width_max`,
cons.`long` AS `consumption_long`,
cons.`long_max` AS `consumption_long_max`,
cons.`type` AS `consumption_type`,
cons.`date_add` AS `consumption_date_add`,
prod.`id` AS `product_id`,
prod.`warehouse_id` AS `product_warehouse_id`,
prod.`code` AS `product_code`,
prod.`material_id` AS `product_material_id`,
prod.`quality_id` AS `product_quality_id`,
prod.`covering_id` AS `product_covering_id`,
prod.`finish_id` AS `product_finish_id`,
prod.`thickness` AS `product_thickness`,
prod.`width` AS `product_width`,
prod.`tons` AS `product_tons`,
count(prod.`id`) AS matchesCount
FROM consumption cons
INNER JOIN client cli ON cli.id=cons.client_id
LEFT JOIN product prod ON ((cons.material_id=prod.material_id)
OR prod.material_id IN
(SELECT id
FROM material
WHERE `equivalence`=
(SELECT `equivalence`
FROM material
WHERE id=cons.material_id )
AND `group`=
(SELECT `group`
FROM material
WHERE id=cons.material_id ) ))
AND ((cons.quality_id=prod.quality_id)
OR prod.quality_id IN
(SELECT id
FROM quality
WHERE `equivalence`=
(SELECT `equivalence`
FROM quality
WHERE id=cons.quality_id )
AND `group`=
(SELECT `group`
FROM quality
WHERE id=cons.quality_id ) ))
AND (prod.thickness >= (cons.thick - 0.1)
AND prod.thickness <= (cons.thick_max + 0.1))
AND (prod.width >= (cons.width - 1000)
AND prod.width <= (cons.width_max + 1000))
WHERE 1 > 0
AND prod.deleted=0
AND cli.status=1
AND cons.date_add >= '2017-10-08 00:00:00'
GROUP BY cons.id) f ON f.consumption_id=t.id
GROUP BY t.id
List products and count consumptions that match each product:
SELECT SQL_NO_CACHE t.*,
IFNULL(f.matchesCount, 0) AS matchesCount
FROM `product` `t`
LEFT JOIN
(SELECT cons.`id` AS `consumption_id`,
cons.`client_id` AS `consumption_client_id`,
cons.`material_id` AS `consumption_material_id`,
cons.`quality_id` AS `consumption_quality_id`,
cons.`thick` AS `consumption_thick`,
cons.`thick_max` AS `consumption_thick_max`,
cons.`width` AS `consumption_width`,
cons.`width_max` AS `consumption_width_max`,
cons.`long` AS `consumption_long`,
cons.`long_max` AS `consumption_long_max`,
cons.`type` AS `consumption_type`,
cons.`date_add` AS `consumption_date_add`,
prod.`id` AS `product_id`,
prod.`warehouse_id` AS `product_warehouse_id`,
prod.`code` AS `product_code`,
prod.`material_id` AS `product_material_id`,
prod.`quality_id` AS `product_quality_id`,
prod.`covering_id` AS `product_covering_id`,
prod.`finish_id` AS `product_finish_id`,
prod.`thickness` AS `product_thickness`,
prod.`width` AS `product_width`,
prod.`tons` AS `product_tons`,
count(cons.`id`) AS matchesCount
FROM consumption cons
INNER JOIN client cli ON cli.id=cons.client_id
LEFT JOIN product prod ON cons.material_id=prod.material_id
AND cons.quality_id=prod.quality_id
WHERE 1 > 0
AND prod.deleted=0
AND cli.status=1
GROUP BY prod.id) f ON f.product_id=t.id
WHERE deleted=0
GROUP BY t.id
Both queries take less than 1 second to execute (each).
Note: I still use the previous queries in my application, for example when I want a breakdown of the list of products that match a single consumption, or the other way around. In that case I add a filter on consumption id or product id, which reduces the size of the result set a lot.
If client_has_group is "many:1", that is the wrong way to do it. You don't need the extra table.
INT is always 4 bytes. Consider smaller datatypes. Eventually the size of the database may add to your problems.
Do you really need date_add and date_upd? They seem like clutter that you will never use.
Avoid IN ( SELECT ... ) where practical; switch to JOIN or EXISTS (see the sketch after these notes).
Why so many tables with code + group + equivalence? Could they be combined into a single table? Do you need all 3 columns? Do you need id since code is UNIQUE? There comes a point where a schema is "over-normalized" and performance suffers without saving much space.
OR is a performance killer in some contexts.
"Correlated subqueries" are useful in some situations, but this one is probably better done via a JOIN:
AND `group` = ( SELECT `group` FROM quality WHERE id=cons.quality_id )
Beware of aggregates (eg, COUNT) with JOIN; you may be getting an inflated value. This is because the JOIN happens first.
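To make the IN ( SELECT ... ) point concrete, here is one way the material check from the question's ON clause could be rewritten with EXISTS. This is a sketch only; the quality check would be rewritten the same way:
-- fragment of the ON clause; m1 is the consumption's material row, m2 the product's
AND ( cons.material_id = prod.material_id
      OR EXISTS ( SELECT 1
                  FROM material m1
                  JOIN material m2 ON m2.equivalence = m1.equivalence
                                  AND m2.`group` = m1.`group`
                  WHERE m1.id = cons.material_id
                    AND m2.id = prod.material_id ) )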
Why do you need
LEFT JOIN client_has_group cli_gr ON cli_gr.client_id=cons.client_id
when it is never used?
Also, why GROUP BY cons.id, prod.id if you select all fields? Maybe select only what you need.
Try this SELECT; I think it will be faster:
SELECT count(*), prod.*
FROM consumption cons
INNER JOIN client cli ON cli.id=cons.client_id
INNER JOIN material m ON m.id=cons.material_id
INNER JOIN quality q ON q.id=cons.quality_id
LEFT JOIN product prod
ON
(
(cons.material_id=prod.material_id)
OR
prod.material_id IN (
SELECT id FROM material WHERE `equivalence`=m.equivalence
AND `group`=m.group
)
)
AND
(
(cons.quality_id=prod.quality_id)
OR
prod.quality_id IN (
SELECT id FROM quality WHERE `equivalence`=q.equivalence
AND `group`=q.group
)
)
AND (prod.thickness >= (cons.thick - 0.1) AND prod.thickness <= (cons.thick_max + 0.1))
AND (prod.width >= (cons.width - 1000) AND prod.width <= (cons.width_max + 1000))
WHERE 1 > 0 AND prod.deleted=0 AND cli.status=1 AND cons.date_add >= '2017-10-08 00:00:00'
group by prod.id
Maybe it would be better to do the count calculation in the background and add this field to the product and consumption tables.
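If you do go the precomputed route, the background job could be a periodic UPDATE. Here matches_count is a hypothetical column you would have to add first, and the match condition is abbreviated; note this only suits counts that do not depend on per-user filters, which the question says is a constraint:
-- hypothetical: first run ALTER TABLE product ADD COLUMN matches_count INT NOT NULL DEFAULT 0;
UPDATE product p
SET p.matches_count = (
    SELECT COUNT(*)
    FROM consumption c
    WHERE c.material_id = p.material_id   -- full equivalence/range matching
      AND c.quality_id  = p.quality_id    -- logic abbreviated here
);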

How can I query for rows with latest date and do an inner join on a second table?

All the examples I've seen show how to do an inner join using an alias to get the rows with the latest date. I can do that with my data, but I also want to do an inner join on another table and can't figure out how to do both in the same query.
Here are the two tables:
CREATE TABLE `titles` (
`titleID` int(11) unsigned NOT NULL AUTO_INCREMENT,
`titlename` tinytext NOT NULL,
`url` varchar(255) DEFAULT '',
`category` int(2) unsigned NOT NULL,
`postdate` date NOT NULL,
PRIMARY KEY (`titleID`),
KEY `category` (`category`),
CONSTRAINT `titles_ibfk_1` FOREIGN KEY (`category`) REFERENCES `categories` (`catid`) ON DELETE CASCADE
) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=latin1;
CREATE TABLE `stats` (
`statid` int(11) unsigned NOT NULL AUTO_INCREMENT,
`score` decimal(3,2) DEFAULT NULL,
`views` int(11) unsigned DEFAULT NULL,
`favs` int(11) DEFAULT NULL,
`comments` int(11) DEFAULT NULL,
`updatedate` date NOT NULL,
`title` int(11) unsigned NOT NULL,
PRIMARY KEY (`statid`),
KEY `title` (`title`),
CONSTRAINT `stats_ibfk_1` FOREIGN KEY (`title`) REFERENCES `titles` (`titleID`)
) ENGINE=InnoDB AUTO_INCREMENT=13 DEFAULT CHARSET=latin1;
My goals:
1) I want a query that gives me all the latest stats for each title.
2) I want to see the text name of the title (from the titles table).
I can use this query to get the latest score for each title.
select t.score, t.views, t.favs, t.comments, t.updatedate, t.title
from stats t
inner join (
select title, max(updatedate) as updatedate
from stats
GROUP BY title
) tm on t.title = tm.title and t.updatedate = tm.updatedate
But the problem with this query is that it displays the title column from stats which is an int. I want the text name of the title.
I can do this to get the title name and the score, but then I'm not getting the row with the latest date.
select titlename, score, updatedate
from stats
inner join titles
on titleid = title
How can I write a query that achieves both my goals?
You need to join the titles table in this case:
select
s1.score,
s1.views,
s1.favs,
s1.comments,
s1.updatedate,
t.titlename
from titles t
join stats s1 on s1.title = t.titleID
join (
select title, max(updatedate) as updatedate
from stats
GROUP BY title
) s2 on s2.title = s1.title and s1.updatedate = s2.updatedate
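For completeness: on MySQL 8.0+ the same result can be obtained with a window function instead of the self-join against MAX(updatedate). A sketch (not applicable to 5.x):
SELECT score, views, favs, comments, updatedate, titlename
FROM (
    SELECT s.*, t.titlename,
           ROW_NUMBER() OVER (PARTITION BY s.title
                              ORDER BY s.updatedate DESC) AS rn
    FROM stats s
    JOIN titles t ON t.titleID = s.title
) ranked
WHERE rn = 1;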

What's wrong with my group_concat SQL query?

I have three tables:
CREATE TABLE `b10g_entries` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`permalink` text NOT NULL,
`title` varchar(300) NOT NULL,
`fullcontent` text NOT NULL,
`introcontent` text NOT NULL,
`dateadded` datetime NOT NULL,
`lastedited` datetime NOT NULL,
`author` varchar(40) NOT NULL,
`comments` int(11) NOT NULL DEFAULT '0',
`published` tinyint(1) unsigned NOT NULL DEFAULT '0',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=299 DEFAULT CHARSET=utf8
CREATE TABLE `b10g_tag_map` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`tag_id` bigint(20) unsigned DEFAULT NULL,
`entry_id` bigint(20) unsigned DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=8 DEFAULT CHARSET=utf8
CREATE TABLE `b10g_tags` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`name` text NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=utf8
And I'm trying to get the first 25 blog entries with their tags (that's why I use a many-to-many relationship) using this query:
SELECT b10g_entries.*, GROUP_CONCAT( b10g_tags.name SEPARATOR ', ')
AS tags FROM b10g_entries
LEFT JOIN b10g_tag_map ON b10g_entries.id = b10g_tag_map.entry_id
LEFT JOIN b10g_tags ON b10g_tag_map.tag_id = b10g_tags.id LIMIT 0, 25;
But I only get one record back. What's wrong with this query?
Add a GROUP BY clause.
Now, you're getting a list of ALL tags found anywhere in the set. Instead, you only want the ones within the group (by entry).
SELECT b10g_entries.*, GROUP_CONCAT( b10g_tags.name SEPARATOR ', ')
AS tags FROM b10g_entries
LEFT JOIN b10g_tag_map ON b10g_entries.id = b10g_tag_map.entry_id
LEFT JOIN b10g_tags ON b10g_tag_map.tag_id = b10g_tags.id
GROUP BY b10g_entries.id
You have a GROUP_CONCAT() aggregate function, but have not used a GROUP BY clause, so your result will be one row.
Note that in MySQL it is permissible to use a GROUP BY with only one column specified while many more appear in the SELECT list, but that is not portable to other RDBMSs. So instead, I have joined b10g_entries a second time to bring in all the other columns from that table, while using only the id in the GROUP BY.
SELECT
b10g_entries_all.*,
GROUP_CONCAT( b10g_tags.name SEPARATOR ', ') AS tags
FROM
/* Main table, used for GROUP BY aggregate */
b10g_entries
/* self join to pull in other columns without needing to put them in GROUP BY */
JOIN b10g_entries b10g_entries_all ON b10g_entries.id = b10g_entries_all.id
LEFT JOIN b10g_tag_map ON b10g_entries.id = b10g_tag_map.entry_id
LEFT JOIN b10g_tags ON b10g_tag_map.tag_id = b10g_tags.id
/* group on the entry id */
GROUP BY b10g_entries.id
LIMIT 0, 25;