Top 20 Group Ranking Query - Optimization - mysql

I am creating a reporting structure where I need to output the top 20 days of aggregate stats for each unique Company - Region. I have completed this task but feel that my code is overly complicated and I am requesting help optimizing it.
I have 2 tables involved in this process. The first lists all the possible Company - Region - Group - Subgroups. The second has hourly stats by the Group - Subgroup.
SQL Fiddle link: http://sqlfiddle.com/#!9/29a7b/1
NOTE: I am currently getting a SELECT command denied to user '<user>'@'<ip>' for table 'table_stats' error on my SQL Fiddle; I would appreciate help resolving this as well.
table_companies declaration and dummy data:
CREATE TABLE `table_companies` (
`pk_id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`company` varchar(45) NOT NULL,
`region` varchar(45) NOT NULL,
`group` varchar(45) NOT NULL,
`subgroup` varchar(45) NOT NULL,
PRIMARY KEY (`pk_id`),
UNIQUE KEY `pk_id_id_UNIQUE` (`pk_id`)
);
INSERT INTO table_companies
(`pk_id`, `company`, `region`, `group`, `subgroup`)
VALUES
(1, 'company1', 'region1', 'group1', 'subgroup1'),
(2, 'company1', 'region1', 'group1', 'subgroup2'),
(3, 'company1', 'region2', 'group2', 'subgroup3'),
(4, 'company1', 'region3', 'group3', 'subgroup4'),
(5, 'company2', 'region1', 'group4', 'subgroup5'),
(6, 'company2', 'region3', 'group5', 'subgroup6'),
(7, 'company2', 'region3', 'group6', 'subgroup7'),
(8, 'company2', 'region4', 'group7', 'subgroup8'),
(9, 'company2', 'region5', 'group8', 'subgroup9'),
(10, 'company3', 'region6', 'group9', 'subgroup10'),
(11, 'company3', 'region7', 'group10', 'subgroup11'),
(12, 'company3', 'region8', 'group11', 'subgroup12'),
(13, 'company4', 'region9', 'group12', 'subgroup13'),
(14, 'company4', 'region10', 'group13', 'subgroup14'),
(15, 'company5', 'region11', 'group14', 'subgroup15'),
(16, 'company5', 'region12', 'group15', 'subgroup16')
;
table_stats declaration:
Simplified to contain only a couple of the hours per day, for a single group - subgroup.
CREATE TABLE `table_stats` (
`pk_id` int(10) unsigned NOT NULL,
`date_time` datetime NOT NULL,
`group` varchar(45) NOT NULL,
`subgroup` varchar(45) NOT NULL,
`stat` int(10) unsigned NOT NULL,
PRIMARY KEY (`pk_id`),
UNIQUE KEY `pk_id_UNIQUE` (`pk_id`),
UNIQUE KEY `om_unique` (`date_time`,`group`,`subgroup`)
);
INSERT INTO table_stats
(`pk_id`, `date_time`, `group`, `subgroup`, `stat`)
VALUES
(1, '2015-12-01 06:00:00', 'group9', 'subgroup10', 14),
(2, '2015-12-01 12:00:00', 'group9', 'subgroup10', 14),
(3, '2015-12-02 06:00:00', 'group9', 'subgroup10', 2),
(4, '2015-12-02 12:00:00', 'group9', 'subgroup10', 51),
(5, '2015-12-03 06:00:00', 'group9', 'subgroup10', 30),
(6, '2015-12-03 12:00:00', 'group9', 'subgroup10', 6),
(7, '2015-12-04 06:00:00', 'group9', 'subgroup10', 9),
(8, '2015-12-04 12:00:00', 'group9', 'subgroup10', 77),
(9, '2015-12-05 06:00:00', 'group9', 'subgroup10', 70),
(10, '2015-12-05 12:00:00', 'group9', 'subgroup10', 7),
(11, '2015-12-06 06:00:00', 'group9', 'subgroup10', 38),
(12, '2015-12-06 12:00:00', 'group9', 'subgroup10', 5),
(13, '2015-12-07 06:00:00', 'group9', 'subgroup10', 86),
(14, '2015-12-07 12:00:00', 'group9', 'subgroup10', 73),
(15, '2015-12-08 06:00:00', 'group9', 'subgroup10', 45),
(16, '2015-12-08 12:00:00', 'group9', 'subgroup10', 14),
(17, '2015-12-09 06:00:00', 'group9', 'subgroup10', 66),
(18, '2015-12-09 12:00:00', 'group9', 'subgroup10', 38),
(19, '2015-12-10 06:00:00', 'group9', 'subgroup10', 12),
(20, '2015-12-10 12:00:00', 'group9', 'subgroup10', 77),
(21, '2015-12-11 06:00:00', 'group9', 'subgroup10', 21),
(22, '2015-12-11 12:00:00', 'group9', 'subgroup10', 18),
(23, '2015-12-12 06:00:00', 'group9', 'subgroup10', 28),
(24, '2015-12-12 12:00:00', 'group9', 'subgroup10', 74),
(25, '2015-12-13 06:00:00', 'group9', 'subgroup10', 20),
(26, '2015-12-13 12:00:00', 'group9', 'subgroup10', 37),
(27, '2015-12-14 06:00:00', 'group9', 'subgroup10', 66),
(28, '2015-12-14 12:00:00', 'group9', 'subgroup10', 59),
(29, '2015-12-15 06:00:00', 'group9', 'subgroup10', 26),
(30, '2015-12-15 12:00:00', 'group9', 'subgroup10', 0),
(31, '2015-12-16 06:00:00', 'group9', 'subgroup10', 77),
(32, '2015-12-16 12:00:00', 'group9', 'subgroup10', 31),
(33, '2015-12-17 06:00:00', 'group9', 'subgroup10', 59),
(34, '2015-12-17 12:00:00', 'group9', 'subgroup10', 71),
(35, '2015-12-18 06:00:00', 'group9', 'subgroup10', 7),
(36, '2015-12-18 12:00:00', 'group9', 'subgroup10', 73),
(37, '2015-12-19 06:00:00', 'group9', 'subgroup10', 72),
(38, '2015-12-19 12:00:00', 'group9', 'subgroup10', 28),
(39, '2015-12-20 06:00:00', 'group9', 'subgroup10', 50),
(40, '2015-12-20 12:00:00', 'group9', 'subgroup10', 11),
(41, '2015-12-21 06:00:00', 'group9', 'subgroup10', 71),
(42, '2015-12-21 12:00:00', 'group9', 'subgroup10', 4),
(43, '2015-12-22 06:00:00', 'group9', 'subgroup10', 78),
(44, '2015-12-22 12:00:00', 'group9', 'subgroup10', 69),
(45, '2015-12-23 06:00:00', 'group9', 'subgroup10', 83),
(46, '2015-12-23 12:00:00', 'group9', 'subgroup10', 55),
(47, '2015-12-24 06:00:00', 'group9', 'subgroup10', 71),
(48, '2015-12-24 12:00:00', 'group9', 'subgroup10', 20),
(49, '2015-12-25 06:00:00', 'group9', 'subgroup10', 90),
(50, '2015-12-25 12:00:00', 'group9', 'subgroup10', 26),
(51, '2015-12-26 06:00:00', 'group9', 'subgroup10', 1),
(52, '2015-12-26 12:00:00', 'group9', 'subgroup10', 73),
(53, '2015-12-27 06:00:00', 'group9', 'subgroup10', 4),
(54, '2015-12-27 12:00:00', 'group9', 'subgroup10', 18),
(55, '2015-12-28 06:00:00', 'group9', 'subgroup10', 4),
(56, '2015-12-28 12:00:00', 'group9', 'subgroup10', 30),
(57, '2015-12-29 06:00:00', 'group9', 'subgroup10', 56),
(58, '2015-12-29 12:00:00', 'group9', 'subgroup10', 53),
(59, '2015-12-30 06:00:00', 'group9', 'subgroup10', 33),
(60, '2015-12-31 12:00:00', 'group9', 'subgroup10', 8)
;
Query to optimize:
SELECT * FROM
(
SELECT t3.company,t3.region,t3.day, t3.day_stat,COUNT(*) as rank
FROM
(
SELECT t2.company,t2.region,DAY(t1.date_time) as day,SUM(t1.stat) as day_stat
FROM schema1.table_stats t1
INNER JOIN table_companies t2
ON t1.`group`=t2.`group` AND t1.subgroup=t2.subgroup
WHERE
MONTH(t1.date_time)=12 AND
YEAR(t1.date_time)=2015
group by t2.company,t2.region,DAY(t1.date_time)
ORDER BY t2.company,t2.region,day_stat DESC
) t3
JOIN
(
SELECT t2.company,t2.region,DAY(t1.date_time) as day,SUM(t1.stat) as day_stat
FROM schema1.table_stats t1
INNER JOIN table_companies t2
ON t1.`group`=t2.`group` AND t1.subgroup=t2.subgroup
WHERE
MONTH(t1.date_time)=12 AND
YEAR(t1.date_time)=2015
group by t2.company,t2.region,DAY(t1.date_time)
ORDER BY t2.company,t2.region,day_stat DESC
) t4
ON
t4.day_stat >= t3.day_stat AND
t4.company = t3.company AND
t4.region = t3.region
GROUP BY t3.company,t3.region,t3.day_stat
ORDER BY t3.company,t3.region,rank
) t5
WHERE t5.rank<=20
;
Summary of query: the two deepest subqueries each join both tables, group by company, region, and day, and aggregate the stat; this is also where the month and year are restricted. The result is then joined to a duplicate of itself to generate the rank. The outer select limits the results to the top 20 for each company - region.
Expected result:
Apologies for presenting it as a SQL INSERT statement.
INSERT INTO results
(`company`, `region`, `day`, `day_stat`, `rank`)
VALUES
('company3', 'region6', 7, 159, 1),
('company3', 'region6', 22, 147, 2),
('company3', 'region6', 23, 138, 3),
('company3', 'region6', 17, 130, 4),
('company3', 'region6', 14, 125, 5),
('company3', 'region6', 25, 116, 6),
('company3', 'region6', 29, 109, 7),
('company3', 'region6', 16, 108, 8),
('company3', 'region6', 9, 104, 9),
('company3', 'region6', 12, 102, 10),
('company3', 'region6', 19, 100, 11),
('company3', 'region6', 24, 91, 12),
('company3', 'region6', 10, 89, 13),
('company3', 'region6', 4, 86, 14),
('company3', 'region6', 18, 80, 15),
('company3', 'region6', 5, 77, 16),
('company3', 'region6', 21, 75, 17),
('company3', 'region6', 26, 74, 18),
('company3', 'region6', 20, 61, 19),
('company3', 'region6', 8, 59, 20)
;
tl;dr: Apologies for the long post. Asking to optimize http://sqlfiddle.com/#!9/29a7b/1.

The modifications I've made:
Completely rewrote your query
Added a composite index on table_companies (group, subgroup)
Added a composite index on table_stats (group, subgroup)
Modified Query:
SELECT
C.company,
C.region,
DAY(S.date_time) day,
SUM(S.stat) day_stat
FROM table_companies C
INNER JOIN table_stats S
ON C.`group` = S.`group` AND C.subgroup = S.subgroup
WHERE MONTH(S.date_time) = 12 AND YEAR(S.date_time) = 2015
GROUP BY C.company, C.region, DAY(S.date_time)
ORDER BY day_stat DESC
LIMIT 20;
WORKING DEMO
There's no rank column in the result set. Since the results are sorted by day_stat in descending order, you can implicitly treat the position of a row in the result set as its rank. Nevertheless, if you really need the rank column, here is a working demo of it.
Composite index(table_companies):
ALTER TABLE `table_companies` ADD INDEX `idx_table_companies_group_subgroup` (
`group`,
`subgroup`
);
Composite index(table_stats):
ALTER TABLE `table_stats` ADD INDEX `idx_table_stats_group_subgroup` (
`group`,
`subgroup`
);
Explain Result:
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE S ALL idx_table_companies_group_subgroup 60 Using where; Using temporary; Using filesort
1 SIMPLE C ref idx_table_companies_group_subgroup idx_table_companies_group_subgroup 57 schema1.S.group,schema1.S.subgroup 1 Using index condition
The good news is that MySQL can use these indexes (they appear under possible_keys). It does show ALL as the access type for table_stats (alias S), but all I can say is that this is a small data set; you cannot judge performance based on a small set of data.
More:
I guess you have primary keys on those tables. If you don't have any, create them.
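For example, a missing key could be added like this (the table and column names here are placeholders):
ALTER TABLE `your_table` ADD PRIMARY KEY (`your_pk_column`);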
EDIT:
SELECT
C.company,
C.region,
tt.day,
tt.total AS day_stat,
tt.rank
FROM table_companies C
INNER JOIN
(
SELECT
t.*,
IF(t.businessUnit = @sameBusinessUnit, @rn := @rn + 1, @rn := 1) AS rank,
@sameBusinessUnit := t.businessUnit
FROM
(
SELECT
S1.`group`,
S1.subgroup,
CONCAT(S1.`group`,S1.subgroup) AS businessUnit,
DAY(S1.date_time) AS day,
SUM(S1.stat) total
FROM table_stats S1
GROUP BY S1.`group`,S1.subgroup,DAY(S1.date_time)
ORDER BY total DESC
)AS t
CROSS JOIN (SELECT @rn := 1, @sameBusinessUnit := '') var
) AS tt
ON C.`group`=tt.`group` AND C.subgroup = tt.subgroup
WHERE tt.rank <= 20
ORDER BY tt.`group`,tt.`subgroup`,tt.rank;
WORKING DEMO(Version 2.0)
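A side note on the variable-based ranking above: it relies on user-variable evaluation order, which MySQL does not guarantee (and ORDER BY inside a derived table may be ignored on 5.7+). If you are on MySQL 8.0 or later, the same top-20-per-company-region result can be produced with a window function; a minimal sketch against the question's schema (also swapping the MONTH()/YEAR() filter for a date range, which can use an index on date_time):
SELECT company, region, `day`, day_stat, `rank`
FROM (
    SELECT
        C.company,
        C.region,
        DAY(S.date_time) AS `day`,
        SUM(S.stat) AS day_stat,
        ROW_NUMBER() OVER (PARTITION BY C.company, C.region
                           ORDER BY SUM(S.stat) DESC) AS `rank`
    FROM table_companies C
    INNER JOIN table_stats S
        ON C.`group` = S.`group` AND C.subgroup = S.subgroup
    WHERE S.date_time >= '2015-12-01' AND S.date_time < '2016-01-01'
    GROUP BY C.company, C.region, DAY(S.date_time)
) ranked
WHERE `rank` <= 20
ORDER BY company, region, `rank`;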

Just include one index on (group, subgroup) so the join becomes more efficient:
CREATE TABLE table_companies
(`pk_id` int, `company` varchar(8),
`region` varchar(8), `group` varchar(7), `subgroup` varchar(10),
PRIMARY KEY (`pk_id`),
UNIQUE KEY `pk_id_id_UNIQUE` (`pk_id`),
INDEX idx_group (`group`, `subgroup`)
)
;

Related

After left-join result, need a count result per user

I have managed to create a query, which even works.
SELECT voterID,vote,nick as player
FROM
trust LEFT JOIN players
ON trust.playerID=players.playerID
ORDER BY trust.vote DESC, trust.playerID DESC
This gives me a full list of all votes, but I want to SUM the votes per player, so I get one row per player with one total amount of trust points (can be positive or negative; +1 or -1 per vote, per voter).
The table trust:
CREATE TABLE `trust` (
`rowID` int(10) UNSIGNED NOT NULL,
`playerID` int(11) UNSIGNED NOT NULL,
`voterID` int(11) UNSIGNED NOT NULL,
`vote` int(11) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
INSERT INTO `trust` (`rowID`, `playerID`, `voterID`, `vote`) VALUES
(25, 2187, 1, 1),
(26, 23193, 1, 1),
(27, 2050, 1, 1),
(29, 3714, 1, 1),
(31, 1, 2187, 1),
(32, 30363, 29937, 1),
(33, 15837, 26102, 1),
(34, 30058, 26102, 1),
(35, 30539, 26102, -1),
(36, 28382, 26102, -1),
(37, 18692, 26102, 1),
(38, 6440, 14143, 1),
(39, 15069, 8306, 1),
(40, 2050, 2187, 1),
(41, 3233, 1, 1),
(42, 12664, 26102, 1),
(43, 30539, 2187, -1),
(44, 28382, 2187, -1),
(45, 30539, 1, -1),
(46, 10138, 1, 1);
Expected result: a list of names and the total SUM of votes per player.
You can join the player table on an aggregate query of the votes:
SELECT nick AS player, sum_votes
FROM players p
JOIN (SELECT playerid, SUM(vote) AS sum_votes
FROM trust
GROUP BY playerid) t ON p.playerid = t.playerid
ORDER BY 2 DESC
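Note that the inner join drops players who have no votes at all. If you want those rows too (with a total of 0), a LEFT JOIN variant of the same query works; a sketch:
SELECT nick AS player, COALESCE(sum_votes, 0) AS sum_votes
FROM players p
LEFT JOIN (SELECT playerID, SUM(vote) AS sum_votes
      FROM trust
      GROUP BY playerID) t ON p.playerID = t.playerID
ORDER BY 2 DESC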

Update one table column's data to another table column, with a unique/duplicate check and a suffix added on duplicates

Need to do this for lower MySQL versions, like 4.9 to 5.6.
I need to copy one table column's data to another table, but with a unique check applied: if a duplicate is found, a suffix must be added to the data and the update must continue (I don't want query execution to stop because of duplicate data).
Let me clarify things:
My first table is tbl_categories:
cat_id cat_parent_id cat_active cat_display_order cat_suggested_hourly_rate
1 0 1 1 10
2 1 1 2 10
And second table is tbl_categories_metadata:
cdata_cat_id cdata_lang_id cdata_name
1 1 A
1 2 B
1 3 C
2 1 A
2 2 B
3 1 D
3 2 E
3 3 F
So in my second table, category names are added based on language id.
Now I need to add a unique column in first table with name cat_identifier, so I did :
ALTER TABLE `tbl_categories` ADD `cat_identifier` VARCHAR(100) NOT NULL AFTER `cat_id`;
That worked fine. Now I have to make the column unique, but it cannot be done straight away because similar values are already present, so I did:
UPDATE
`tbl_categories` a
INNER JOIN `tbl_categories` b ON `a`.cat_id = `b`.cat_id
SET
`a`.cat_identifier = `b`.cat_id;
It worked fine and cat_id was copied into the cat_identifier column; now I am able to make this column unique via the query below:
ALTER TABLE `tbl_categories`
ADD UNIQUE KEY `cat_identifier` (`cat_identifier`);
That worked fine, and my table now looks like this:
cat_id cat_identifier cat_parent_id cat_active cat_display_order cat_suggested_hourly_rate
1 1 0 1 1 10
2 2 1 1 2 10
Where I am stuck:
I need to update cat_identifier with values taken from the cdata_name column based on language id 1; but in case language id 1 has the same name for two categories, I need to add -cat_id as a suffix to that name and still perform the update.
So I tried below query :
UPDATE
`tbl_categories`
INNER JOIN `tbl_categories_metadata` ON `tbl_categories`.cat_id = `tbl_categories_metadata`.cdata_cat_id
SET
`tbl_categories`.cat_identifier = `tbl_categories_metadata`.cdata_name
WHERE
`tbl_categories_metadata`.cdata_lang_id = 1;
It works, but as soon as a duplicate is found for language 1 it stops.
What I want: in case a duplicate is found, add -cat_id (the row's category id) as a suffix and do the update anyway, like clean-3, clean-4, etc.
Purpose: sometimes the admin/front-end seller does not add language-specific names for categories, and sometimes they add the same name, so we added cat_identifier, which is unique, language independent, and mandatory. This works straightforwardly for new installations of our project, but on already running systems (previous versions of our project) we have to do it in a way that keeps the system working with minimal changes.
Note: Queries to create both table along with data
CREATE TABLE `tbl_categories` (
`cat_id` int(11) UNSIGNED NOT NULL,
`cat_identifier` varchar(100) NOT NULL,
`cat_parent_id` int(11) UNSIGNED NOT NULL COMMENT '0 defaults to parent category',
`cat_active` tinyint(4) UNSIGNED NOT NULL COMMENT '0 - Inactive, 1 - Active',
`cat_display_order` decimal(4,2) NOT NULL,
`cat_suggested_hourly_rate` decimal(10,2) NOT NULL COMMENT 'This will be used as suggestion hourly rate for this category.'
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4;
INSERT INTO `tbl_categories` (`cat_id`, `cat_identifier`, `cat_parent_id`, `cat_active`, `cat_display_order`, `cat_suggested_hourly_rate`) VALUES
(1, '', 0, 1, '1.00', '20.00'),
(2, '', 1, 1, '4.00', '15.00'),
(3, '', 1, 0, '3.00', '12.00'),
(4, '', 1, 1, '1.00', '18.00'),
(5, '', 1, 1, '2.00', '15.00'),
(6, '', 1, 1, '5.00', '10.00'),
(7, '', 0, 1, '2.00', '25.00'),
(8, '', 7, 1, '1.00', '20.00'),
(9, '', 7, 1, '2.00', '20.00'),
(10, '', 7, 1, '3.00', '20.00'),
(11, '', 0, 1, '3.00', '25.00'),
(12, '', 11, 1, '1.00', '20.00'),
(13, '', 11, 1, '2.00', '25.00'),
(14, '', 0, 1, '4.00', '20.00'),
(15, '', 14, 1, '1.00', '18.00'),
(16, '', 14, 1, '2.00', '25.00'),
(17, '', 0, 1, '5.00', '30.00'),
(18, '', 17, 1, '1.00', '0.00'),
(19, '', 17, 1, '2.00', '0.00'),
(20, '', 17, 1, '3.00', '0.00'),
(21, '', 0, 0, '2.00', '20.00'),
(22, '', 0, 0, '4.00', '25.00'),
(23, '', 0, 1, '5.00', '15.00'),
(24, '', 0, 0, '8.00', '22.00'),
(25, '', 0, 0, '9.00', '28.00'),
(26, '', 0, 1, '1.00', '20.00'),
(27, '', 26, 1, '1.00', '20.00'),
(28, '', 26, 1, '2.00', '45.00'),
(29, '', 26, 1, '3.00', '40.00'),
(30, '', 0, 0, '2.00', '15.00'),
(31, '', 0, 1, '3.00', '30.00'),
(32, '', 31, 1, '1.00', '22.00'),
(33, '', 31, 1, '2.00', '0.00'),
(34, '', 0, 0, '4.00', '15.00'),
(35, '', 0, 1, '5.00', '25.00'),
(36, '', 35, 1, '1.00', '25.00'),
(37, '', 35, 1, '2.00', '10.00'),
(38, '', 0, 0, '1.00', '40.00'),
(39, '', 0, 1, '3.00', '25.00'),
(40, '', 39, 1, '1.00', '22.00'),
(41, '', 39, 1, '2.00', '25.00'),
(42, '', 0, 0, '6.00', '35.00'),
(43, '', 0, 1, '7.00', '15.00'),
(44, '', 23, 1, '1.00', '22.00'),
(45, '', 23, 1, '2.00', '20.00'),
(46, '', 7, 1, '4.00', '25.00'),
(47, '', 43, 1, '1.00', '35.00'),
(48, '', 43, 1, '2.00', '18.00'),
(49, '', 43, 1, '3.00', '20.00'),
(50, '', 43, 1, '4.00', '40.00'),
(51, '', 7, 1, '5.00', '28.00'),
(52, '', 0, 1, '1.00', '10.00'),
(53, '', 0, 1, '1.00', '10.00');
ALTER TABLE `tbl_categories`
ADD PRIMARY KEY (`cat_id`);
ALTER TABLE `tbl_categories`
MODIFY `cat_id` int(11) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=54;
CREATE TABLE `tbl_categories_metadata` (
`cdata_cat_id` int(11) UNSIGNED NOT NULL COMMENT 'ID of table tbl_categories',
`cdata_lang_id` int(11) UNSIGNED NOT NULL,
`cdata_name` varchar(255) NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4;
INSERT INTO `tbl_categories_metadata` (`cdata_cat_id`, `cdata_lang_id`, `cdata_name`) VALUES
(1, 3, 'Limpieza'),
(1, 2, 'Nettoyage'),
(1, 1, 'Cleaning'),
(2, 1, 'Bathroom Deep Cleaning'),
(2, 2, 'Nettoyage en profondeur de la salle de bain'),
(2, 3, 'Limpieza profunda de baño'),
(3, 3, 'Limpieza de alfombras'),
(3, 2, 'Nettoyage de tapis'),
(3, 1, 'Carpet Cleaning'),
(4, 3, 'Limpieza profunda en el hogar'),
(4, 2, 'Nettoyage en profondeur'),
(4, 1, 'Home Deep Cleaning'),
(5, 1, 'Kitchen Deep Cleaning'),
(5, 2, 'Nettoyage en profondeur de la cuisine'),
(5, 3, 'Limpieza profunda de cocina'),
(6, 1, 'Car Cleaning'),
(6, 2, 'Nettoyage de voiture'),
(6, 3, 'Limpieza de coches'),
(7, 3, 'Experto'),
(7, 2, 'Qualifié'),
(7, 1, 'Skilled'),
(8, 1, 'Electricians'),
(8, 2, 'Électriciens'),
(8, 3, 'Electricistas'),
(9, 1, 'Plumbers'),
(9, 2, 'Plombiers'),
(9, 3, 'Fontaneros'),
(10, 1, 'Carpenters'),
(10, 2, 'Charpentiers'),
(10, 3, 'Carpinteros'),
(11, 1, 'Fitness & Yoga'),
(11, 2, 'Fitness et yoga'),
(11, 3, 'Fitness y yoga'),
(12, 1, 'Fitness Trainer at Home'),
(12, 2, 'Fitness Trainer à domicile'),
(12, 3, 'Entrenador de fitness en casa'),
(13, 1, 'Yoga Trainer at Home'),
(13, 2, 'Formateur de yoga à domicile'),
(13, 3, 'Entrenador de yoga en casa'),
(14, 1, 'Salon at Home'),
(14, 2, 'Salon à domicile'),
(14, 3, 'Salon en casa'),
(15, 3, 'Salon en casa'),
(15, 2, 'Salon à domicile'),
(15, 1, 'Salon at home'),
(16, 1, 'Makeup and Hairstyling'),
(16, 2, 'Maquillage et Coiffure'),
(16, 3, 'Maquillaje y Peluquería'),
(17, 3, 'Servicios de fotografia'),
(17, 2, 'Services de photographie'),
(17, 1, 'Photography Services'),
(18, 1, 'Wedding Photography & Filming'),
(18, 2, 'Photographie et tournage de mariage'),
(18, 3, 'Fotografía y filmación de bodas'),
(19, 3, 'Fotografía y rodaje de cumpleaños'),
(19, 2, 'Photographie et tournage d\'anniversaire'),
(19, 1, 'Birthday Photography & Filming'),
(20, 1, 'Family Function Shoots'),
(20, 2, 'Prise de vue en famille'),
(20, 3, 'Disparos de funciones familiares'),
(21, 3, 'Pintura mural'),
(21, 2, 'Peinture murale'),
(22, 2, 'Charpenterie'),
(22, 1, 'Carpentry'),
(23, 3, 'Personal de mantenimiento'),
(23, 2, 'Bricoleur'),
(23, 1, 'Handyman'),
(24, 3, 'Actividades de jardinería'),
(24, 2, 'Activités de jardinage'),
(24, 1, 'Gardening Activities'),
(25, 2, 'Déménagement d\'une maison complète / déménagement d\'une maison'),
(25, 3, 'Remoción de casa completa / mudanza de casa'),
(25, 1, 'Full House Removal / House moving'),
(26, 1, 'Performing Arts'),
(26, 2, 'Arts performants'),
(26, 3, 'Las artes escénicas'),
(27, 1, 'Party Host'),
(27, 2, 'Hôte de fête'),
(27, 3, 'Anfitrión de la fiesta'),
(28, 1, 'DJ'),
(28, 2, 'DJ'),
(28, 3, 'DJ'),
(29, 1, 'Choreographer'),
(29, 2, 'Chorégraphe'),
(29, 3, 'Coreógrafo'),
(30, 3, 'Mesas de barman / espera'),
(30, 2, 'Tables de barman / d\'attente'),
(30, 1, 'Bartending / Waiting Tables'),
(31, 2, 'Connectivité réseau'),
(31, 1, 'Network Connectivity'),
(31, 3, 'Conectividad de red'),
(32, 1, 'Broadband Connection installation'),
(32, 2, 'Installation de connexion à large bande'),
(32, 3, 'Instalación de conexión de banda ancha'),
(33, 1, 'Leased Line Connection'),
(33, 2, 'Connexion de ligne louée'),
(33, 3, 'Conexión de línea arrendada'),
(34, 3, 'Vigilancia de los niños'),
(34, 2, 'Baby-sitting'),
(34, 1, 'Baby Sitting'),
(35, 1, 'Pet Services'),
(35, 2, 'Services pour animaux'),
(35, 3, 'Servicios para mascotas'),
(36, 1, 'Pet Bathing & Grooming'),
(36, 2, 'Bain et toilettage d\'animaux'),
(36, 3, 'Baño y aseo de mascotas'),
(37, 1, 'Walking the pet'),
(37, 2, 'Promener l\'animal'),
(37, 3, 'Paseando a la mascota'),
(38, 2, 'Antiparasitaire'),
(39, 1, 'Personal Training'),
(39, 2, 'Formation personnelle'),
(39, 3, 'Entrenamiento personal'),
(40, 1, 'Voice Modulation / Speech'),
(40, 2, 'Modulation vocale / discours'),
(40, 3, 'Modulación de voz / habla'),
(41, 1, 'Personality Trainer'),
(41, 2, 'Entraîneur de personnalité'),
(41, 3, 'Entrenador de personalidad'),
(42, 3, 'Carta de presentación / Redactor'),
(42, 2, 'Lettre d\'accompagnement / Rédacteur de CV'),
(42, 1, 'Cover Letter / Resume Writer'),
(43, 3, 'Otros'),
(43, 2, 'Autres'),
(43, 1, 'Others'),
(21, 1, 'Wall Painting'),
(44, 1, 'Gardening Activities'),
(44, 2, 'Activités de jardinage'),
(44, 3, 'Actividades de jardinería'),
(45, 1, 'House moving'),
(45, 2, 'déménagement'),
(45, 3, 'mudanza'),
(22, 3, 'Carpintería'),
(46, 1, 'Carpentry'),
(46, 2, 'Charpenterie'),
(46, 3, 'Carpintería'),
(47, 1, 'Cover letter/Resume Writer'),
(47, 2, 'Lettre de motivation / Rédacteur de CV'),
(47, 3, 'Carta de presentación / Redactor'),
(48, 1, 'Baby Sitting'),
(48, 2, 'Baby-sitting'),
(48, 3, 'Vigilancia de los niños'),
(49, 1, 'Bartending/ Waiting Tables'),
(49, 2, 'Tables de barman / d\'attente'),
(49, 3, 'Mesas de barman / espera'),
(50, 1, 'Pest Control'),
(50, 2, 'Antiparasitaire'),
(50, 3, 'Control de plagas'),
(38, 1, 'Pest Control'),
(38, 3, 'Control de plagas'),
(51, 1, 'Wall Painting'),
(51, 2, 'Peinture murale'),
(51, 3, 'Pintura mural'),
(52, 1, 'Cat1'),
(53, 1, 'Cleaning');
ALTER TABLE `tbl_categories_metadata`
ADD UNIQUE KEY `cat_id` (`cdata_cat_id`,`cdata_lang_id`);
This might work.
UPDATE tbl_categories a
INNER JOIN (
SELECT a.cat_id, MAX(b.cdata_name) cdata_name, ROW_NUMBER() OVER (PARTITION BY cdata_name ORDER BY cat_id) rn
FROM tbl_categories a
INNER JOIN tbl_categories_metadata b ON a.cat_id = b.cdata_cat_id
WHERE b.cdata_lang_id = 1
GROUP BY a.cat_id
) b ON a.cat_id = b.cat_id
SET a.cat_identifier = (CASE WHEN b.rn = 1 THEN b.cdata_name ELSE CONCAT(b.cdata_name, '-', a.cat_id) END)
https://dbfiddle.uk/?rdbms=mysql_8.0&fiddle=8f620a00e3d81012a3e1332f13914ed8
Revised version for MySQL 5.6
UPDATE tbl_categories a
INNER JOIN (
SELECT a.cat_id, MAX(b.cdata_name) cdata_name
FROM tbl_categories a
INNER JOIN tbl_categories_metadata b ON a.cat_id = b.cdata_cat_id
WHERE b.cdata_lang_id = 1
GROUP BY a.cat_id
) b ON a.cat_id = b.cat_id
LEFT JOIN (
SELECT MIN(a.cat_id) cat_id, b.cdata_name
FROM tbl_categories a
INNER JOIN tbl_categories_metadata b ON a.cat_id = b.cdata_cat_id
WHERE b.cdata_lang_id = 1
GROUP BY b.cdata_name
) c ON a.cat_id = c.cat_id AND b.cdata_name = c.cdata_name
SET a.cat_identifier = (CASE WHEN c.cat_id IS NOT NULL THEN b.cdata_name ELSE CONCAT(b.cdata_name, '-', a.cat_id) END)
;
https://dbfiddle.uk/?rdbms=mysql_5.6&fiddle=2c433ca4f20af22f7578dfe31e66db7b
Aside
cat_identifier and cat_id have the same meaning, which is confusing. A more appropriate name for the new column, given its use, would be default_name (or default_en_name). This answer will use the former.
Answer
First, set the column values to guaranteed unique values as planned, using both tbl_categories_metadata.cdata_name and tbl_categories.cat_id:
UPDATE `tbl_categories` AS tc
JOIN `tbl_categories_metadata` AS tcm
ON tc.cat_id = tcm.cdata_cat_id
SET `default_name` = CONCAT(tcm.cdata_name, '-', tc.cat_id)
WHERE
tcm.cdata_lang_id = 1;
The column could simply be left as-is. However, if you don't want the cat_id suffix on some of the fields, remove it. The expression used to remove it depends on the version of MySQL server used (and what UDFs are loaded). If using MySQL 8.0, make use of REGEXP_REPLACE:
UPDATE IGNORE `tbl_categories`
SET `default_name` = REGEXP_REPLACE(`default_name`,
CONCAT('-', cat_id, '$'),
'')
ORDER BY cat_id
Similarly, if you have a UDF that adds regex functionality, use that. If using a version before 8.0, use a combination of SUBSTRING() and CHAR_LENGTH().
...
SET `default_name` = SUBSTRING(`default_name`, 1,
CHAR_LENGTH(`default_name`) - 1 - CHAR_LENGTH(cat_id)
)
...
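Spelled out with the same UPDATE IGNORE wrapper as the 8.0 version, that is:
UPDATE IGNORE `tbl_categories`
SET `default_name` = SUBSTRING(`default_name`, 1,
        CHAR_LENGTH(`default_name`) - 1 - CHAR_LENGTH(cat_id)
        )
ORDER BY cat_id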
A simpler (though more error-prone) solution would be to use REPLACE()
...
SET `default_name` = REPLACE(`default_name`,
CONCAT('-', cat_id), '')
...
Alternate Answer
More as an exercise, it can be done in a single query in a few ways; here's a systematic approach.
Whenever rows might depend on other rows (such as with unique indices, but not only then), a single-query solution can generally be achieved with an additional join on one of the tables, usually grouped and using aggregate functions, though sometimes with non-equality join conditions (e.g. sometimes you can use something like tbl_alias_0.col < tbl_alias_1.col). To get the related rows, the join goes through tbl_categories_metadata. The table references clause would thus be:
...
`tbl_categories` AS tc
JOIN `tbl_categories_metadata` AS tcm
ON tc.cat_id = tcm.cdata_cat_id
JOIN `tbl_categories_metadata` AS tcm_groups
ON tcm.cdata_name = tcm_groups.cdata_name
AND tcm.cdata_lang_id = tcm_groups.cdata_lang_id
...
(Note that tcm is only used to join through in this example, though in some places some of the tcm_groups column references could be replaced with tcm column references.)
For this example, since each row (identified by cat_id or cdata_cat_id) will get assigned a cdata_name, these naturally form groups.
...
GROUP BY tc.cat_id, tcm_groups.cdata_name
...
The cdata_name in each group will potentially come from multiple rows in tbl_categories (via cdata_cat_id). As only one row from tbl_categories in the group won't have a suffix appended to the default name, this must be specified. One simple option is to pick the row with minimal cat_id, but other options (e.g. maximal cat_id, random) could be implemented instead. This is implemented with a CASE using a comparison with tcm_groups.cdata_cat_id to distinguish the cases.
...
CASE tc.cat_id
WHEN MIN(tcm_groups.cdata_cat_id) THEN tcm.cdata_name
ELSE CONCAT(tcm_groups.cdata_name, '-', tc.cat_id)
END
...
If this were a simple SELECT, the above components are all you'd need. (Starting with a SELECT is useful to check the work.) Combined, they are:
SELECT tc.cat_id,
CASE tc.cat_id
WHEN MIN(tcm_groups.cdata_cat_id) THEN tcm_groups.cdata_name
ELSE CONCAT(tcm_groups.cdata_name, '-', tc.cat_id)
END AS default_name
FROM `tbl_categories` AS tc
JOIN `tbl_categories_metadata` AS tcm
ON tc.cat_id = tcm.cdata_cat_id
JOIN `tbl_categories_metadata` AS tcm_groups
ON tcm.cdata_name = tcm_groups.cdata_name
AND tcm.cdata_lang_id = tcm_groups.cdata_lang_id
WHERE tcm_groups.cdata_lang_id = 1
GROUP BY tc.cat_id, tcm_groups.cdata_name
ORDER BY tc.cat_id
The one issue with this is that GROUP BY isn't allowed in UPDATE statements. To address this, the joined table, grouping and aggregate functions need to instead take place in a sub-SELECT. The groups therein should be the columns in JOIN conditions and any in the grouping clause. The aggregate functions get used in the result columns. This gives the sub-SELECT:
SELECT MIN(cdata_cat_id) AS cdata_cat_id, cdata_lang_id, cdata_name
FROM `tbl_categories_metadata`
GROUP BY cdata_name, cdata_lang_id
Rewriting the table references using that gives:
...
`tbl_categories` AS tc
JOIN `tbl_categories_metadata` AS tcm
ON tc.cat_id = tcm.cdata_cat_id
JOIN (
SELECT MIN(cdata_cat_id) AS cdata_cat_id, cdata_lang_id, cdata_name
FROM `tbl_categories_metadata`
GROUP BY cdata_name, cdata_lang_id
) AS tcm_groups
ON tcm.cdata_name = tcm_groups.cdata_name
AND tcm.cdata_lang_id = tcm_groups.cdata_lang_id
...
Aggregate functions are replaced with references to the sub-SELECT columns:
...
CASE tc.cat_id
WHEN tcm_groups.cdata_cat_id THEN tcm_groups.cdata_name
ELSE CONCAT(tcm_groups.cdata_name, '-', tc.cat_id)
END
...
These parts can be combined into an UPDATE:
UPDATE `tbl_categories` AS tc
JOIN `tbl_categories_metadata` AS tcm
ON tc.cat_id = tcm.cdata_cat_id
JOIN (
SELECT MIN(cdata_cat_id) AS cdata_cat_id, cdata_lang_id, cdata_name
FROM `tbl_categories_metadata`
GROUP BY cdata_name, cdata_lang_id
) AS tcm_groups
ON tcm.cdata_name = tcm_groups.cdata_name AND tcm.cdata_lang_id = tcm_groups.cdata_lang_id
SET default_name = CASE tc.cat_id
WHEN tcm_groups.cdata_cat_id THEN tcm.cdata_name
ELSE CONCAT(tcm.cdata_name, '-', tc.cat_id)
END
WHERE tcm.cdata_lang_id = 1
Performance
The sub-SELECT is less performant than the flat join, but can't be avoided in the UPDATE.
A WHERE tcm.cdata_lang_id = 1 could be added to the sub-SELECT. This won't affect correctness, but could result in a more efficient query if there's an index on cdata_lang_id. The most efficient query will result from an index on (cdata_lang_id, cdata_name).
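For reference, that index can be created like so (the index name is arbitrary):
ALTER TABLE `tbl_categories_metadata`
ADD INDEX `idx_lang_name` (`cdata_lang_id`, `cdata_name`);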

Selecting customers who failed the first time and were successful on the next attempts

I have a database with two tables: users and payments. There is a one to many relationship between users and payments: each user can have one or more payments and a payment belongs to a user. Also, each payment can be successful or failed.
I need to create a query that tells me how many users failed on the first attempt and then were successful at least once.
Example:
Considering the following users and payments
CREATE TABLE users
(`id` int, `name` varchar(6), `email` varchar(7), `password` varchar(10), `created_at` timestamp, `updated_at` timestamp);
INSERT INTO users
(`id`, `name`, `email`, `password`)
VALUES
(1, 'name 1', 'email 1', 'password 1'),
(2, 'name 2', 'email 2', 'password 2'),
(3, 'name 3', 'email 3', 'password 3'),
(4, 'name 4', 'email 4', 'password 4'),
(5, 'name 5', 'email 5', 'password 5');
CREATE TABLE payments
(`id` int, `date` varchar(10), `status` varchar(7), `user_id` int ,`created_at` timestamp, `updated_at` timestamp);
INSERT INTO payments
(`id`, `date`, `status`, `user_id`)
VALUES
(1, '2019-01-01', 'success', 1),
(2, '2019-01-01', 'failed', 2),
(3, '2019-01-01', 'failed', 3),
(4, '2019-01-01', 'success', 4),
(5, '2019-01-01', 'failed', 5),
(6, '2019-02-01', 'success', 1),
(7, '2019-02-01', 'success', 2),
(8, '2019-02-01', 'success', 3),
(9, '2019-02-01', 'success', 4),
(10, '2019-02-01', 'failed', 5),
(11, '2019-03-01', 'success', 1),
(12, '2019-03-01', 'failed', 2),
(13, '2019-03-01', 'success', 3),
(14, '2019-03-01', 'failed', 4),
(15, '2019-03-01', 'failed', 5),
(16, '2019-04-01', 'success', 1),
(17, '2019-04-01', 'failed', 2),
(18, '2019-04-01', 'failed', 3),
(19, '2019-04-01', 'failed', 4),
(20, '2019-04-01', 'failed', 5),
(21, '2019-05-01', 'success', 1),
(22, '2019-05-01', 'failed', 2),
(23, '2019-05-01', 'failed', 3),
(24, '2019-05-01', 'failed', 4),
(25, '2019-05-01', 'failed', 5),
(26, '2019-06-01', 'success', 1),
(27, '2019-06-01', 'success', 2),
(28, '2019-06-01', 'failed', 3),
(29, '2019-06-01', 'failed', 4),
(30, '2019-06-01', 'failed', 5);
We can see that users with id 2 and 3 failed on 2019-01-01 but then had at least one successful payment on the following dates. The query that I need should return users 2 and 3.
Is this possible to achieve with pure SQL? Or do I need another language to loop through all users, check their payments, and return only the ones that I want? I can do it "easily" with PHP, for example.
Thanks in advance
You can achieve the desired result using a window function, if you are using MySQL version 8.0 or above:
SELECT distinct user_id
FROM(SELECT user_id, status, LAG(status) OVER(PARTITION BY user_id ORDER BY `date`) prev_status
FROM payments) TEMP
WHERE status = 'success'
AND prev_status = 'failed'
Here is the Fiddle
Alternatively, without window functions you can self-join payments against earlier failed rows:
SELECT DISTINCT x.user_id
FROM payments x
JOIN payments y
ON y.user_id = x.user_id
AND y.id < x.id
AND y.status = 'failed'
WHERE x.status = 'success';
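Both of the above flag any failure that is later followed by a success. If you want the literal condition from the question (the very first payment failed, and at least one later payment succeeded), here is a sketch that pins the check to each user's first payment, assuming id reflects chronological order as it does in the sample data:
SELECT DISTINCT f.user_id
FROM payments f
JOIN (SELECT user_id, MIN(id) AS first_id
      FROM payments
      GROUP BY user_id) fp ON fp.user_id = f.user_id AND fp.first_id = f.id
JOIN payments s ON s.user_id = f.user_id
               AND s.id > f.id
               AND s.status = 'success'
WHERE f.status = 'failed';
On the sample data this also returns users 2 and 3.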

SQL query: How to validate prerequisites for courses already finished, and how to find time conflicts between courses?

I am trying to write a query that advises a student which courses to register for. The query will select the suitable courses and validate 1) the courses they have finished and what is left for them to take, 2) the prerequisite courses that must be finished, and 3) time conflicts, in order to recommend the best courses for them.
I created those tables and joined them, but the join operation is not working. What is the correct syntax? And if there is no prerequisite, how do I check for that? Some prerequisites are senior or junior level; does that need a separate table?
ERROR 1064 (42000): You have an error in your SQL syntax; check the manual that corresponds to your
MYSQL server version for the right syntax to use near 'studyplan sp on (t.std_id=sp.std_is)
left outer join prerequsit p on (p.preid = c.' at line 3
select c.*
from std t
inner join schedule22 c studyplan sp
on (t.std_id=sp.std_id)
left outer join prerequsit p
on (p.preid=c.courseid)
inner join schedule22 c
on (c.courseid=p.courseid)
where t.std=1 AND
sp.complated='No' AND
sp.passed='No' AND
p.preid=courseid;
Student
std_id username pass fname email
1 hjh 154 jdf example@live.com
Studyplan
Courseid code prerequisite std_id completed passed
(2, 'UNS 100', 'No Prerequisite', 1, 'Y', 'Y'),
(3, 'ENG 100', 'No Prerequisite', 1, 'Y', 'Y'),
(5, 'MTT 101', 'MTG 100', 1, 'Y', 'Y'),
(6, 'MTT 202', 'MTT 101', 1, 'Y', 'N'),
(7, 'STT 100', 'No Prerequisite', 1, 'N', 'N'),
(8, 'MTT 102', 'MTT 101', 1, 'N', 'N'),
(9, 'ENG 200', 'english1', 1, 'N', 'N'),
(10, 'OE1', 'NULL', 1, 'N', 'N'),
(11, 'ENG 201', 'ENG 200', 1, 'N', 'N'),
(12, 'CSC 302', 'MTT 202', 1, 'N', 'N'),
(13, 'STT 201', 'STT 100', 1, 'N', 'N'),
(15, 'CSC 201', 'MTT 101 or MTT 102', 1, 'N', 'N'),
(16, 'CSC 202', 'CSC 201', 1, 'N', 'N'),
(17, 'PSY 201', 'ENG 100 + UNS 100', 1, 'N', 'N'),
(18, 'NSC 201', 'No Prerequisite', 1, 'N', 'N'),
(19, 'CSC 307', 'CSC 201', 1, 'N', 'N'),
(20, 'CSC 301', 'CSC 202', 1, 'N', 'N'),
(21, 'ITE 390', 'Junior Level', 1, 'N', 'N'),
(22, 'CSC 305', 'Junior Level', 1, 'Y', 'Y'),
(23, 'ITE 305', 'Junior Level', 1, 'Y', 'Y'),
(24, 'ITE 414', 'junior Level', 1, 'Y', 'Y'),
(25, 'CSC 308', 'CSC 301', 1, 'N', 'N'),
(26, 'ITE 402', 'CSC 305', 1, 'N', 'N'),
(27, 'CSC 311', 'CSC 201', 1, 'N', 'N'),
(28, 'ITE 422', 'CSC 305', 1, 'N', 'N'),
(29, 'CIS 401', 'CSC 302', 1, 'N', 'N'),
(30, 'ITE 409', 'Senior Level', 1, 'N', 'N'),
(31, 'CIS 401', 'CSC 302', 1, 'N', 'N'),
(32, 'CSC 401', 'ITE 305', 1, 'N', 'N'),
(33, 'ITE 409', 'Null', 1, 'N', 'N'),
(34, 'ITE 408', 'CSC 305', 1, 'N', 'N')
Schedule
(`semester`, `courseid`, `coursecode`, `section`, `date`, `time`, ..., `sch_id`)
('fall', 9, 'ENG 100', 51, 'MoWe', '1:45PM-3:15PM', 'staff', 1),
('fall', 16, 'CSC202', 51, 'Mo-We', '1:45PM-3:15PM', 'staff', 1),
('fall', 26, 'ITE402', 51, 'Tu', '10:30-12pm', 'staff', 1),
('fall', 6, 'MTT 202', 51, 'Su-Tu', '12:00-2:00PM', 'staff', 1),
('fall', 8, 'MTT 102', 51, 'SuTu', '12:00-2:00PM', 'staff', 1),
('fall', 12, 'CSC 302', 51, 'Mo-We', '10:00-12:00PM', 'staff', 1),
('fall', 15, 'CSC 201', 52, 'Mo-We', '10:00-12:00PM', 'staff', 1),
('fall', 21, 'ITE 390', 51, 'Su-Tu', '12:00-2:00PM', 'staff', 1),
('fall', 5, 'MTT 101', 51, 'Su', '4:00PM-7:00PM', 'staff', 1),
('fall', 28, 'ITE 422', 51, 'Su-Tu', '12:00-2:00PM', 'staff', 1);
prerequsit
(`courseid`, `preid`) VALUES
(5, 1),
(6, 2),
(8, 3),
(9, 4),
(11, 5),
(12, 6),
(13, 7),
(14, 8),
(15, 9),
(16, 10),
(17, 11),
(18, 12),
(19, 13),
(20, 14),
(21, 21),
(22, 22),
(23, 23),
(24, 24),
(25, 20),
(26, 22),
(27, 25),
(28, 22),
(29, 12),
(30, 30),
(32, 23),
(34, 22),
(35, 12),
(36, 22),
(37, 3);
Your query contains schedule22 c twice in the from clause. That's an error. There may be more.
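As a starting point, here is a sketch of what the FROM clause was probably meant to look like, with one alias per table. It assumes schedule22 is matched to the study plan by courseid and uses the question's table and column names (note the question's query spells completed as complated, so use whichever spelling the real table has):
SELECT c.*
FROM std t
INNER JOIN studyplan sp
    ON t.std_id = sp.std_id
INNER JOIN schedule22 c
    ON c.courseid = sp.courseid
LEFT OUTER JOIN prerequsit p
    ON p.courseid = c.courseid
WHERE t.std_id = 1
  AND sp.completed = 'No'
  AND sp.passed = 'No';
That only fixes the syntax; the prerequisite and time-conflict checks would still need to be expressed as additional conditions once the tables are cleaned up.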

Group results by day and month (php timestamp) showing total revenue per day

Using MySQL, how can I group together by day and month, showing the total revenue?
E.g. (not based on below data)
day month revenue
1 01 10.97
2 01 3.57
3 01 0
etc.
Here's an example of my data:
CREATE TABLE IF NOT EXISTS `sales` (
`id` bigint(255) NOT NULL AUTO_INCREMENT,
`timestamp` int(12) NOT NULL,
`product` int(5) NOT NULL,
`publisher` int(5) NOT NULL,
`market` int(5) NOT NULL,
`revenue` float NOT NULL,
`Units` int(5) NOT NULL,
`Downloads` int(11) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 AUTO_INCREMENT=138 ;
--
-- Dumping data for table `sales`
--
INSERT INTO `sales` (`id`, `timestamp`, `revenue`) VALUES
(1, 1394150400, 3.65),
(2, 1394064000, 0),
(4, 1393977600, 0),
(5, 1393891200, 7.42),
(6, 1393804800, 0),
(7, 1393718400, 0),
(8, 1393632000, 0),
(9, 1393545600, 0),
(10, 1393459200, 0),
(11, 1393372800, 0),
(12, 1393286400, 3.65),
(13, 1393200000, 3.65),
(14, 1393177032, 0),
(15, 1393090632, 3.65),
(16, 1393004232, 0),
(17, 1392917832, 0),
(18, 1392831432, 0),
(19, 1392745032, 0),
(20, 1392658632, 0),
(21, 1392572232, 0),
(24, 1391881032, 0),
(23, 1392485832, 0),
(25, 1392336000, 0),
(26, 1392249600, 0),
(27, 1392163200, 0),
(28, 1392076800, 0),
(29, 1391990400, 3.81),
(30, 1391904000, 0),
(31, 1391817600, 0),
(32, 1391731200, 3.65),
(33, 1391644800, 3.58),
(34, 1391558400, 3.58),
(35, 1391472000, 0),
(36, 1391385600, 0),
(37, 1391299200, 0),
(38, 1391212800, 7.23),
(39, 1391126400, 0),
(40, 1391040000, 0),
(41, 1390953600, 3.81),
(42, 1390867200, 4.52),
(43, 1390780800, 0),
(44, 1390694400, 3.65),
(45, 1390608000, 3.81),
(46, 1390585032, 0),
(47, 1390435200, 0),
(48, 1390348800, 3.58),
(49, 1390262400, 0),
(50, 1390176000, 0),
(51, 1390089600, 0),
(52, 1390003200, 0),
(53, 1389916800, 3.58),
(54, 1389893832, 0),
(55, 1389744000, 0),
(56, 1389657600, 0),
(57, 1389571200, 0),
(58, 1389484800, 0),
(59, 1389398400, 3.65),
(60, 1389312000, 3.18),
(61, 1389225600, 0),
(62, 1389139200, 0),
(63, 1389052800, 0),
(64, 1389052800, 0),
(65, 1388966400, 3.65),
(66, 1388880000, 4.05),
(67, 1388793600, 0),
(68, 1388707200, 3.65),
(69, 1388620800, 0),
(70, 1388534400, 0),
(71, 1394236800, 0),
(72, 1394236800, 2.51),
(73, 1394236800, 0),
(74, 1394150400, 5.02),
(75, 1394150400, 2.76),
(76, 1394064000, 7.5),
(77, 1394064000, 8.28),
(78, 1393977600, 0),
(79, 1393977600, 0),
(80, 1393891200, 7.5),
(81, 1393891200, 2.36),
(82, 1393804800, 0),
(83, 1393804800, 0),
(84, 1393718400, 2.76),
(85, 1393718400, 0),
(86, 1393632000, 0),
(87, 1393545600, 0),
(88, 1393545600, 2.76),
(89, 1393459200, 2.51),
(90, 1393459200, 2.51),
(91, 1393433613, 2.51),
(92, 1393433613, 0),
(93, 1393286400, 2.54),
(94, 1393286400, 2.76),
(95, 1393200000, 2.52),
(96, 1393200000, 5.51),
(97, 1394323200, 0),
(98, 1394323200, 5.01),
(99, 1394323200, 5.52),
(100, 1394409600, 0),
(101, 1394409600, 2.05),
(102, 1394409600, 5.27),
(103, 1393113600, 5.08),
(104, 1393027200, 5.09),
(105, 1392854400, 5.32),
(106, 1392854400, 7.63),
(107, 1392940800, 0),
(108, 1392595200, 0),
(109, 1392508800, 7.64),
(110, 1392422400, 0),
(111, 1392336000, 2.58),
(112, 1392163200, 5.57),
(113, 1391990400, 0),
(114, 1391817600, 0),
(115, 1391731200, 15.99),
(116, 1391472000, 10.66),
(117, 1391385600, 2.54),
(118, 1391299200, 2.54),
(119, 1391212800, 5.34),
(120, 1391040000, 0),
(121, 1390953600, 2.55),
(122, 1390780800, 10.9),
(123, 1390608000, 12.72),
(124, 1390435200, 7.64),
(125, 1390262400, 2.55),
(126, 1390089600, 9.92),
(127, 1389916800, 2.55),
(128, 1389744000, 2.55),
(129, 1389571200, 5.1),
(130, 1389398400, 2.55),
(131, 1389225600, 5.1),
(132, 1389052800, 7.65),
(133, 1388880000, 5.1),
(134, 1388793600, 9.99),
(135, 1388620800, 0),
(136, 1394582400, 4.14),
(137, 1394582400, 2.76);
SELECT DATE_FORMAT(FROM_UNIXTIME(`timestamp`),'%d') DAY, DATE_FORMAT(FROM_UNIXTIME(`timestamp`),'%m') MONTH, SUM(`revenue`)
FROM sales
GROUP BY DAY,MONTH
ORDER BY MONTH,DAY
Check the FROM_UNIXTIME Function Here
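If you would rather have one row per calendar date than separate day and month columns, you can also group on the converted date directly:
SELECT DATE(FROM_UNIXTIME(`timestamp`)) AS sale_date, SUM(`revenue`) AS total_revenue
FROM sales
GROUP BY sale_date
ORDER BY sale_date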