I'm trying to get the rank of a user in a table that stores times.
The raw SQL query works fine, but I can't make it work as a procedure.
SET @rownum := 0;
SELECT rank, user_id, best_time
FROM (
SELECT @rownum := @rownum + 1 AS rank, id, best_time, user_id
FROM user_round WHERE round_id=1 ORDER BY best_time ASC
) AS result WHERE user_id = 1
My attempt at a procedure:
BEGIN
DECLARE variable INT DEFAULT 0;
SELECT rank,best_time, user_id
FROM (
SELECT SET variable=variable+1 AS rank, best_time, user_id
FROM database.user_round WHERE round_id=1 ORDER BY best_time ASC
) AS result WHERE user_id = 1;
END
You need to keep using a user-defined variable (manual section 9.4, User-Defined Variables), not a local variable declared with DECLARE (section 13.6.4.1):
BEGIN
-- DECLARE variable INT DEFAULT 0;
SELECT rank, best_time, user_id
FROM (
-- SELECT SET variable = variable + 1 AS rank, best_time, user_id
SELECT @variable := @variable + 1 AS rank, best_time, user_id
FROM database.user_round, (SELECT @variable := 0) init
WHERE round_id = 1
ORDER BY best_time ASC
) AS result
WHERE user_id = 1;
END
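For completeness, a minimal sketch of wrapping that corrected body in a callable procedure. The name get_user_rank, the two IN parameters, and the user_rank alias are illustrative assumptions, not from the original post (the alias is renamed to stay clear of the RANK keyword in newer MySQL versions):
DELIMITER //
CREATE PROCEDURE get_user_rank(IN p_round_id INT, IN p_user_id INT)
BEGIN
    -- User-defined variable (@variable), not a DECLAREd local variable.
    SELECT user_rank, best_time, user_id
    FROM (
        SELECT @variable := @variable + 1 AS user_rank, best_time, user_id
        FROM user_round, (SELECT @variable := 0) init
        WHERE round_id = p_round_id
        ORDER BY best_time ASC
    ) AS result
    WHERE user_id = p_user_id;
END //
DELIMITER ;

CALL get_user_rank(1, 1);  -- rank of user 1 in round 1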
For my homework assignment, the stored procedure should accept an optional integer between 1 and 15, but default to 3 if no value is passed.
DELIMITER //
CREATE OR REPLACE PROCEDURE rankVideos(rank INT)
BEGIN
IF rank IS NULL THEN
SET rank = 3;
END IF;
CREATE OR REPLACE TEMPORARY TABLE all_ranks AS (
SELECT * FROM youtube.homework7a
);
create OR REPLACE TEMPORARY TABLE t2 AS ( SELECT
category,
row_number() OVER (ORDER BY cnt DESC) v_cnt,
row_number() OVER (ORDER BY views DESC) v_views,
row_number() OVER (ORDER BY likes DESC) v_likes,
row_number() OVER (ORDER BY dislikes DESC) v_dislikes,
row_number() OVER (ORDER BY comment_count DESC) v_comment_count FROM all_ranks
);
CREATE OR REPLACE TEMPORARY TABLE t3 AS (
SELECT * FROM t2
WHERE v_cnt <= rank OR v_views <= rank OR v_likes <= rank
OR v_dislikes <= rank OR v_comment_count <= rank
);
CREATE OR replace TEMPORARY TABLE t4 AS (
SELECT category,
case when v_cnt <= rank then v_cnt ELSE null END cnt,
case when v_views <= rank then v_views ELSE null END views,
case when v_likes <= rank then v_likes ELSE null END likes,
case when v_dislikes <= rank then v_dislikes ELSE null END dislikes,
case when v_comment_count <= rank then v_comment_count ELSE null END comment_count
FROM t3
)
;
SELECT *,
ifnull(cnt,999)
+ ifnull(views,999)
+ ifnull(likes,999)
+ ifnull(dislikes,999)
+ ifnull(comment_count,999) num_non_null_cols,
ifnull(cnt,0)
+ ifnull(views,0)
+ ifnull(likes,0)
+ ifnull(dislikes,0)
+ ifnull(comment_count,0) sum_non_null_cols
FROM t4
ORDER BY num_non_null_cols, sum_non_null_cols;
END
//
DELIMITER ;
When I run the procedure and leave the integer blank, I get an error saying it has an incorrect integer value.
The syntax you show makes me think you are using MySQL or MariaDB.
These implementations don't support default values for procedure parameters. This has been requested in MySQL (https://bugs.mysql.com/bug.php?id=15975), but so far it is not supported.
You're using the best workaround I know of: set the parameter to your default value if it is NULL.
Another way of coding this is to use the COALESCE() function:
SET rank = COALESCE(rank, 3);
It's just another way to achieve the same thing that your IF/THEN code does.
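For instance, a trimmed-down sketch of how it behaves when the caller passes NULL. The procedure is named rankVideosDemo and the parameter p_rank purely for illustration, and the body is a placeholder rather than the full homework procedure:
DELIMITER //
CREATE PROCEDURE rankVideosDemo(p_rank INT)
BEGIN
    -- Fall back to 3 when the caller passes NULL.
    SET p_rank = COALESCE(p_rank, 3);
    SELECT p_rank AS effective_rank;  -- placeholder for the real body
END //
DELIMITER ;

CALL rankVideosDemo(NULL);  -- p_rank becomes 3
CALL rankVideosDemo(5);     -- p_rank stays 5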
In MSSQL (SQL Server), by contrast, you can declare the default directly on the parameter:
create proc MyProc
@rank int = 3
as
...
GO
If you pass in a value, it will use that value. If you don't pass in a value, @rank = 3.
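A fleshed-out sketch of that fragment (the SELECT body is only a placeholder), showing how the two call styles behave:
CREATE PROC MyProc
    @rank INT = 3
AS
BEGIN
    SELECT @rank AS effective_rank;  -- placeholder body
END
GO

EXEC MyProc;            -- @rank defaults to 3
EXEC MyProc @rank = 7;  -- @rank is 7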
I have a routine, but it's too slow. How can I improve the query?
My records: http://www.sqlfiddle.com/#!9/14cceb/1/0
My query:
CREATE DEFINER = 'root'@'localhost'
PROCEDURE example.ssa()
BEGIN
drop table if exists gps_table;
drop table if exists exam_datas;
CREATE TEMPORARY TABLE gps_table(ID int PRIMARY KEY AUTO_INCREMENT, timei int,
trun_date_time datetime, tadd_meter int, tin_here int null);
insert into gps_table(timei, trun_date_time, tadd_meter, tin_here)
select imei, run_date_time, add_meter, in_here from example_table;
CREATE TEMPORARY TABLE exam_datas(ID int PRIMARY KEY AUTO_INCREMENT, vimei int,
vbas_run_date_time datetime, vbit_run_date_time datetime, vdifff int);
select tin_here from gps_table limit 1 into @onceki_durum;
select count(id) from gps_table into @kayit_sayisi;
set @i = 1;
set @min_mes = 0;
set @max_mes = 0;
set @frst_id = 0;
set @imei = 0;
set @run_date_time = '0000-00-00 00:00:00';
set @run_date_time2 = '0000-00-00 00:00:00';
myloop: WHILE (@i <= @kayit_sayisi) DO
select tin_here from gps_table where id = @i into @in_here_true;
if (@in_here_true = 1) then
select id, trun_date_time, tadd_meter from gps_table where id = @i into @frst_id, @run_date_time2, @min_mes;
select id from gps_table where id > @frst_id and tin_here = 0 order by id asc limit 1 INTO @id;
SET @id = @id - 1;
select id, timei, trun_date_time, tadd_meter from gps_table
where id = @id and tin_here = 1 limit 1 into @i, @imei, @run_date_time, @max_mes;
if (@i - @frst_id > 3) then
set @i := @i + 1;
insert into exam_datas(vimei, vbas_run_date_time, vbit_run_date_time, vdifff) values (@imei, @run_date_time2, @run_date_time, @max_mes - @min_mes);
SELECT * FROM exam_datas;
SET @asd = 1;
elseif 1=1 then
set @i := @i + 1;
End if;
ELSEIF 1=1
THEN SET @i := @i + 1;
End if;
IF (@i = @kayit_sayisi)
THEN set @tamam = 1; LEAVE myloop;
END IF;
END WHILE myloop;
select DISTINCT * from exam_datas;
drop table if exists exam_datas;
drop table if exists gps_table;
END
I need: id=6 is the first true and id=11 the last true,
last_true - first_true = 304 - 290 = 14;
id=14 is the first true and id=18 the last true,
last_true - first_true = 332 - 324 = 8.
This routine is too slow.
The MySQL version is 5.7 and there are 2 million records in the table.
UPDATE:
Query is here. HERE
Thank you @LukStorms
It's possible to get such results in one query, thus avoiding a WHILE loop over the records.
This example works without window functions, just using variables inside the query to calculate a rank, which is then used to get the minimums and maximums of the groups.
select
imei,
min(run_date_time) as start_dt,
max(run_date_time) as stop_dt,
max(add_meter) - min(add_meter) as diff
from
(
select imei, id, run_date_time, add_meter, in_here,
case
when @prev_imei = imei and @prev_ih = in_here then @rnk
when @rnk := @rnk + 1 then @rnk
end as rnk,
@prev_imei := imei as prev_imei,
@prev_ih := in_here as prev_ih
from example_table t
cross join (select @rnk := 0, @prev_ih := null, @prev_imei := null) vars
order by imei, id, run_date_time
) q
where in_here = 1
group by imei, rnk
having count(*) > 4
order by imei, min(id);
In the procedure, such a query can be used to fill that final temporary table, as sketched below.
A test on db<>fiddle here
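For instance, a hedged sketch of that step, reusing the query above and the exam_datas columns declared in the question:
-- Inside the procedure, the WHILE loop is replaced by a single INSERT ... SELECT.
INSERT INTO exam_datas (vimei, vbas_run_date_time, vbit_run_date_time, vdifff)
SELECT imei,
       min(run_date_time),
       max(run_date_time),
       max(add_meter) - min(add_meter)
FROM (
    select imei, id, run_date_time, add_meter, in_here,
           case
             when @prev_imei = imei and @prev_ih = in_here then @rnk
             when @rnk := @rnk + 1 then @rnk
           end as rnk,
           @prev_imei := imei as prev_imei,
           @prev_ih := in_here as prev_ih
    from example_table t
    cross join (select @rnk := 0, @prev_ih := null, @prev_imei := null) vars
    order by imei, id, run_date_time
) q
WHERE in_here = 1
GROUP BY imei, rnk
HAVING count(*) > 4;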
I have this query:
SELECT COUNT(1), name, (@i := @i + 1) AS counter FROM mytbl, (SELECT @i := 0) tmp_tbl GROUP BY counter
For this query, the counter column increases by 2.
But if I remove COUNT(1), like this:
SELECT name, (@i := @i + 1) AS counter FROM mytbl, (SELECT @i := 0) tmp_tbl GROUP BY counter
the counter column increases by 1.
Can anyone explain this behavior?
Table would be:
create table mytbl (name VARCHAR(20));
With data:
INSERT INTO mytbl VALUES
('a1'),
('a2'),
('a3');
As mentioned in the MySQL documentation, we should not assign a value to a user variable and read the value within the same statement. We might get the expected results, but this is not guaranteed; changing the statement (for example, by adding a GROUP BY, HAVING, or ORDER BY clause) may cause MySQL to select an execution plan with a different order of evaluation.
In your query, the counter field is evaluated in the SELECT list and then used in the GROUP BY clause. It seems that when an aggregate function is added to the SELECT list, the field used in the GROUP BY clause is evaluated twice.
I've created a demo you can check. In the demo, I use this query:
SELECT Count(1),
name,
( @i := @i + 1 ) AS counter,
( @j := @j + 1 ) AS group_field
FROM (SELECT 'A' AS name
UNION
SELECT 'B' AS name
UNION
SELECT 'C' AS name) mytable,
(SELECT @i := 0) tmp_tbl,
(SELECT @j := 0) tmp_tbl1
GROUP BY group_field;
In the execution result, the counter field is only increased by 1 while group_field is increased by 2.
To make the counter field increase by only 1, you could try this:
SELECT Count(1),
name,
counter
FROM (SELECT name,
( @i := @i + 1 ) AS counter
FROM mytbl,
(SELECT @i := 0) tmp_tbl) data
GROUP BY counter;
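As a side note (not part of the original answer), on MySQL 8.0+ a window function avoids user variables and their undefined evaluation order entirely; a sketch:
SELECT COUNT(1),
       ANY_VALUE(name) AS name,            -- keeps ONLY_FULL_GROUP_BY happy
       counter
FROM (SELECT name,
             ROW_NUMBER() OVER () AS counter  -- assigned exactly once per row
      FROM mytbl) data
GROUP BY counter;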
I have the query working, just wondering if there is a better way to do this without cursors, loops, or PHP-side code. I've been a DBA for 5+ years and just came across the := assignment operator. Very cool.
Table (tblPeople) with the person ID and the number of tickets they bought.
PersonId  NumTickets
1         3
2         1
3         1
I then want to assign individual tickets to each person in a new table (tblTickets), depending on how many tickets they bought. The TicketId is an auto-increment key column.
TicketId  PersonId
100       1
101       1
102       1
103       2
104       3
Here is the code. It runs through the whole of tblPeople over and over again, incrementing a new calculated column called rowID, and then the WHERE clause filters the rows based on the number of tickets each person bought. The problem I see is that the subquery is huge: the more people I have, the bigger the subquery result gets. I'm just not sure if there is a better way to write this.
INSERT INTO tblTickets (PersonId)
SELECT PersonId
FROM (
SELECT s.PersonId, s.NumTickets,
@rowID := IF(@lastPersonId = s.PersonId and @lastNumTickets = s.NumTickets, @rowID + 1, 0) AS rowID,
@lastPersonId := s.PersonId,
@lastNumTickets := s.NumTickets
FROM tblPeople m,
(SELECT @rowID := 0, @lastPersonId := 0, @lastNumTickets := 0) t
INNER JOIN tblPeople s
) tbl
WHERE rowID < NumTickets
I'd add a utility table Numbers which contains all the numbers from 1 up to the maximal number of tickets a person may buy. Then you can do something like this:
INSERT INTO tblTickets (PersonId)
SELECT s.PersonId
FROM tblPeople s, Numbers n
WHERE n.number <= s.NumTickets
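A hedged sketch of building such a Numbers table up front; the range 1 to 100 is an assumption, so size it to the largest NumTickets you expect:
-- One-off utility table holding the integers 1..100.
CREATE TABLE Numbers (number INT PRIMARY KEY);

INSERT INTO Numbers (number)
SELECT t.n * 10 + u.n + 1
FROM (SELECT 0 AS n UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4
      UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t
CROSS JOIN
     (SELECT 0 AS n UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4
      UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) u;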
The following stored procedure will serve your purpose:
DELIMITER $$
USE <your database name> $$
DROP PROCEDURE IF EXISTS `update_ticket_value2`$$
CREATE PROCEDURE `update_ticket_value2`()
BEGIN
DECLARE index_value INT;
DECLARE loop_variable INT;
SET @KeyValue = 100;
SET @LastPersonID = 0;
SET @TicketNum = 0;
SET @PersonIDToHandle = 0;
SELECT @PersonIDToHandle := PersonID, @TicketNum := NumTickets
FROM tblPeople
WHERE PersonId > @LastPersonID
ORDER BY PersonId
LIMIT 0,1;
WHILE @PersonIDToHandle IS NOT NULL
DO
SET loop_variable = 0;
WHILE(loop_variable < @TicketNum) DO
INSERT INTO tblTickets(TicketId, PersonId) VALUES(@KeyValue + loop_variable, @PersonIDToHandle);
SET loop_variable = loop_variable + 1;
END WHILE;
SET @LastPersonID = @PersonIDToHandle;
SET @PersonIDToHandle = NULL;
SET @KeyValue = @KeyValue + @TicketNum;
SELECT @PersonIDToHandle := PersonID, @TicketNum := NumTickets
FROM tblPeople
WHERE PersonId > @LastPersonID
ORDER BY PersonId
LIMIT 0,1;
END WHILE;
END$$
DELIMITER ;
Call the procedure as:
CALL update_ticket_value2();
Hope it helps...
I have a column I want to sort by, with periodic updates to the rank (daily). I currently do this in application code:
get all rows from table order by column
rank = 1
foreach row in table
update row's rank to rank
rank++
This issues one UPDATE per row in MySQL. Is there a more efficient way to do this?
Use an update with a join:
set @rank := 0;
update tbl a join
(select id, @rank := @rank + 1 as new_rank from tbl order by col) b
on a.id = b.id set a.rank = b.new_rank;
If you expect a lot of rows, you'll get the best performance by doing the join against a table that is indexed, e.g.:
set @rank := 0;
create temporary table tmp (id int primary key, rank int)
select id, @rank := @rank + 1 as rank from tbl order by col;
update tbl join tmp on tbl.id = tmp.id set tbl.rank = tmp.rank;
Finally, you could potentially make it faster by skipping the update step entirely and swapping in a new table (not always feasible):
set @rank := 0;
create table new_tbl (id int primary key, rank int, col char(10),
col2 char(20)) select id, @rank := @rank + 1 as rank, col, col2
from tbl order by col;
drop table tbl;
rename table new_tbl to tbl;
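If you do swap tables like this, a small variation (not in the original answer) is to make the swap atomic with a single RENAME, so the table never appears missing to readers:
rename table tbl to tbl_old, new_tbl to tbl;  -- atomic swap
drop table tbl_old;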