MySQL cursor taking too long to execute

I have written a procedure that loops through the rows one by one. It is taking too long to execute.
DELIMITER $$
CREATE PROCEDURE test_port()
BEGIN
    DECLARE done BOOL DEFAULT FALSE;
    DECLARE I, J, C, P, NOB INT DEFAULT 0;
    DECLARE n, k, t INT DEFAULT 0;
    DECLARE Lid INT DEFAULT 0;
    DECLARE inTS TIMESTAMP;

    SELECT MAX(id) INTO n FROM MAIN_TBL;
    SELECT ctrValue INTO k FROM ID_CNT;
    SET k = k + 1;

    WHILE k <= n DO
        SELECT SourcePort, DestPort, LinkID, NoOfBytes, insertTime
        INTO I, J, Lid, NOB, inTS
        FROM MAIN_TBL WHERE id = k;

        SELECT COUNT(*) INTO t FROM APP_PORTMAP_MSTR WHERE Port IN (I, J);

        IF (t = 1) THEN
            SELECT Port INTO P FROM APP_PORTMAP_MSTR WHERE Port IN (I, J);
            SET C = 0;
            SELECT COUNT(*) INTO C FROM LINK_APP_TBL WHERE LinkID = Lid AND Port = P;
            INSERT INTO TRAFFIC_HIST_TBL(LinkID, Port, NoOfBytes, Time_1) VALUES (Lid, P, NOB, inTS);
            IF (C = 0) THEN
                INSERT INTO LINK_APP_TBL(Port, LinkID) VALUES (P, Lid);
            END IF;
        ELSE
            IF (I > J AND J <> 0) THEN
                SET C = 0;
                SELECT COUNT(*) INTO C FROM LINK_APP_TBL WHERE LinkID = Lid AND Port = J;
                IF (C = 0) THEN
                    INSERT INTO LINK_APP_TBL(Port, LinkID) VALUES (J, Lid);
                    INSERT INTO TRAFFIC_HIST_TBL(LinkID, Port, NoOfBytes, Time_1) VALUES (Lid, J, NOB, inTS);
                END IF;
            ELSE
                SET C = 0;
                SELECT COUNT(*) INTO C FROM LINK_APP_TBL WHERE LinkID = Lid AND Port = I;
                IF (C = 0) THEN
                    INSERT INTO LINK_APP_TBL(Port, LinkID) VALUES (I, Lid);
                    INSERT INTO TRAFFIC_HIST_TBL(LinkID, Port, NoOfBytes, Time_1) VALUES (Lid, I, NOB, inTS);
                END IF;
            END IF;
        END IF;
        SET k = k + 1;
    END WHILE;
END$$
DELIMITER ;
The likely reason for it being slow is the INSERT statements, but can we improve the performance in any way? It is processing around 10K records at a time.

10k per second is a big number... you can try deleting some indexes that you are not using, or you can move your data files to a faster disk (like an SSD); that makes a huge difference...
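If you want to see which secondary indexes every INSERT has to maintain, you can list them like this; the index name in the DROP is only a placeholder for whichever one turns out to be unused:
SHOW INDEX FROM TRAFFIC_HIST_TBL;
SHOW INDEX FROM LINK_APP_TBL;
-- drop an index only after confirming no query relies on it (hypothetical name)
ALTER TABLE TRAFFIC_HIST_TBL DROP INDEX idx_unused;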

Well, after carefully analysing the processlist, I found that the "query end" state was taking a very long time.
After searching some threads, I found that setting innodb_flush_log_at_trx_commit to either 0 or 2 speeds these operations up, but either value stops InnoDB from being fully ACID compliant.
Changing the value may result in losing up to about a second of transactions in case of an OS crash or power failure.
Ref:
UPDATE statements are in "query end state"
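For reference, a minimal sketch of checking and relaxing the setting at runtime (it can also be set in my.cnf; changing it globally needs the appropriate privilege):
-- see the current value
SHOW VARIABLES LIKE 'innodb_flush_log_at_trx_commit';
-- 1 = flush and sync the redo log on every commit (full durability, slowest)
-- 2 = write on commit, sync about once per second (can lose ~1s of transactions on OS crash/power failure)
-- 0 = write and sync about once per second (can lose ~1s even on a mysqld crash)
SET GLOBAL innodb_flush_log_at_trx_commit = 2;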

Related

Nested while loop taking more time than nested cursor

I am inserting n rows after fetching data from two SQL statements. I have used two approaches so far: the first is a cursor and the other is a while loop.
Nested Cursor:
begin
    declare userId, taskId int default 0;
    declare userCnt int default 0;
    declare target int default 0;
    declare c1 cursor for select us_id from us_uxusermaster;
    declare continue handler for not found set userCnt = 1;

    open c1;
    CheckId: loop
        fetch c1 into userId;
        if userCnt = 1 then
            leave CheckId;
        end if;

        select pl.pl_minTarget into target
        from pl_planlist pl
        inner join ap_affiliateplan ap
        inner join us_uxusermaster us
            on find_in_set(pl.pl_id, us.us_planListId)
           and ap.ap_id = us.us_taskPlanId
        where us_id = userId and pl.pl_serviceName = 2;

        begin
            declare taskId int default 0;
            declare taskCnt int default 0;
            declare t1 cursor for
                select tk.tk_id from tk_taskmaster tk
                where tk.tk_activeTime = adddate(current_date(), 1)
                  and tk_actStatus = 0 and tk_status = 1
                limit target;
            declare continue handler for not found set taskCnt = 1;

            open t1;
            CheckTask: loop
                fetch t1 into taskId;
                if taskCnt = 1 then
                    leave CheckTask;
                end if;
                insert into ut_userstask(ut_tk_id, ut_us_id, ut_edtm, ut_eby)
                values (taskId, userId, current_timestamp, 'Via-Event');
            end loop CheckTask;
            close t1;
        end;
    end loop CheckId;
    close c1;
end;
While Loop:
begin
    declare taskName, taskCode, description, url, userLevel, TaskStatus, TaskActStatus, Steps, taskId, userId varchar(50);
    declare activationTime, deActivationTime datetime;
    declare flag, flag2, counts, counts2 int default 0;

    drop temporary table if exists temptrigg;
    set @rownumber = 0;
    set @rownumber2 = 0;
    create temporary table temptrigg as (
        select * from (
            select (@rownumber := @rownumber + 1) as newrow, us_id from us_uxusermaster
        ) as xst
    );
    select count(*) into counts from temptrigg;

    while (flag < counts) do
        set flag = flag + 1;
        select us_id into userId from temptrigg where newrow = flag;

        drop temporary table if exists temptrigg2;
        create temporary table temptrigg2 as (
            select * from (
                select (@rownumber2 := @rownumber2 + 1) as newrow2, tk.tk_id
                from tk_taskmaster tk
                where tk.tk_activeTime = current_date() and tk_actStatus = 0 and tk_status = 1
            ) as xst
        );
        select count(*) into counts2 from temptrigg2;

        while (flag2 < counts2) do
            set flag2 = flag2 + 1;
            select tk_id into taskId from temptrigg2 where newrow2 = flag2;
            insert into ut_userstask(ut_tk_id, ut_us_id, ut_edtm, ut_eby)
            values (taskId, userId, current_timestamp, 'Via-Event');
        end while;
    end while;
end
Here the problem is that the while loop takes twice as long as the cursor. I am confused about how this will behave as the data grows; would it be better to stick with the cursor and replace the nested while loop?
While inserting 425 rows, the cursor takes 23.05 sec and the while loop takes 46 sec. Both timings are too long for me. Is there any other way to increase performance?
I'd be glad to know of any.
I'm not sure if I caught every check you have in there (especially the limit), but it would save a lot if you could squeeze it into a single insert..select like this:
Insert into ut_userstask(ut_tk_id,ut_us_id,ut_edtm,ut_eby)
Select
tk.tk_id,
us.us_id,
current_timestamp,
'Via-Event'
from pl_planlist pl
inner join ap_affiliateplan ap
inner join us_uxusermaster us on ap.ap_id = us.us_taskPlanId
inner join tk_taskmaster tk on tk.tk_activeTime=AddDate(Current_date(),1) and tk_actStatus=0 and tk_status=1
where
pl.pl_serviceName=2
and Find_in_set(pl.pl_id,us.us_planListId)
Other stuff to keep in mind: Make sure you have proper indexes and try to avoid functions like FIND_IN_SET. It is generally a sign that your database is not normalized enough, and it's very slow to use, since it bypasses any indexes available on the column.
Even if you can't put everything in one select, it's probably still faster to loop through a main cursor (for instance to get the users) and perform an insert..select for each of the rows of the cursor.
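For illustration, the usual way to get rid of FIND_IN_SET is to split the comma-separated us_planListId column into a junction table; the table and column names below are made up for the sketch:
-- hypothetical junction table replacing the comma-separated us_planListId column
CREATE TABLE us_userplan (
    up_us_id INT NOT NULL,  -- references us_uxusermaster.us_id
    up_pl_id INT NOT NULL,  -- references pl_planlist.pl_id
    PRIMARY KEY (up_us_id, up_pl_id)
);
-- the plan lookup then becomes an ordinary, index-friendly join
SELECT us.us_id, pl.pl_minTarget
FROM us_uxusermaster us
JOIN us_userplan up ON up.up_us_id = us.us_id
JOIN pl_planlist pl ON pl.pl_id = up.up_pl_id
WHERE pl.pl_serviceName = 2;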

MySQL stored procedure performance issue when using cursor and temporary table

I tried to replace a heavy Java method, which runs multiple requests against the DB, with a stored SQL procedure.
It's doing its work, but I expected a much bigger performance improvement.
The logic of the procedure (as well as of the Java method):
Get the list of IDs from table1 (purpose)
Iterate the list and get the average value of a field from table2 (record) for each ID
Return the list of id/average_value pairs
Are there any efficiency issues in the procedure?
DROP PROCEDURE IF EXISTS test1.getGeneralAverage;
CREATE DEFINER=root@localhost PROCEDURE getGeneralAverage()
BEGIN
DECLARE p_id BIGINT(20);
DECLARE exit_loop BOOLEAN;
DECLARE cur CURSOR FOR
SELECT purpose_id FROM purpose
WHERE purpose.type = 'GENERAL'
AND (SELECT COUNT(*) > 0 FROM record
WHERE record.purpose_id=purpose.purpose_id) is true;
DECLARE CONTINUE HANDLER FOR NOT FOUND SET exit_loop = TRUE;
CREATE TEMPORARY TABLE IF NOT EXISTS general_average
(id BIGINT(20), average DOUBLE) ENGINE=memory;
TRUNCATE TABLE general_average;
OPEN cur;
average_loop: LOOP
FETCH cur INTO p_id;
INSERT INTO test1.general_average (id, average)
VALUES (p_id, (SELECT AVG(amount) FROM record
WHERE record.purpose_id=p_id));
IF exit_loop THEN
CLOSE cur;
LEAVE average_loop;
END IF;
END LOOP average_loop;
INSERT INTO test1.general_average (id, average)
VALUES (0,
(select avg(amount) from record where purpose_type='CUSTOM'));
SELECT * FROM general_average;
END
Several patterns to change...
Try to avoid CURSORs; usually the entire operation can be done with a single SQL statement. That will be much faster.
INSERT ... VALUES (0, ( SELECT ... ) ) --> INSERT ... SELECT 0, ...
Don't use a TEMP table when you can simply deliver the results. In your case, you may need a UNION ALL to deliver the two chunks at once.
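As a sketch of that idea, using only the tables and columns shown in the question (not verified against the real schema), the whole procedure body could collapse into one statement:
SELECT p.purpose_id AS id, AVG(r.amount) AS average
FROM purpose p
JOIN record r ON r.purpose_id = p.purpose_id
WHERE p.type = 'GENERAL'
GROUP BY p.purpose_id
UNION ALL
SELECT 0 AS id, AVG(amount) AS average
FROM record
WHERE purpose_type = 'CUSTOM';
The inner join already restricts the result to purposes that have at least one record, so the COUNT(*) > 0 subquery is no longer needed.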

Want to generate unique IDs using a function in MySQL

I wrote a function to generate unique IDs. It is working, but sometimes two people get the same ID; duplicates are formed. My unique ID looks like
2016-17NLR250001, and I only deal with the last four digits, 0001. I am posting my function; please correct it and help me avoid duplicates even when users log into the same account or do it at the same time.
MY FUNCTION:
DELIMITER $$
USE `olmsap`$$
DROP FUNCTION IF EXISTS `fun_generate_uniqueid`$$
CREATE DEFINER=`root`@`%` FUNCTION `fun_generate_uniqueid`(V_DATE DATE, V_MANDALID INT) RETURNS VARCHAR(30) CHARSET latin1
DETERMINISTIC
BEGIN
    DECLARE MDLCODE VARCHAR(5);
    SET MDLCODE = ' ';
    SELECT COUNT(*) INTO @CNT FROM `st_com_mandal` WHERE MANDAL_VS_MC = V_MANDALID;
    SELECT dist_mandal_code INTO MDLCODE FROM `st_com_mandal` WHERE MANDAL_VS_MC = V_MANDALID;
    IF @CNT > 0 THEN
        SET @YR = `FUN_FISCAL_YR`(V_DATE);
        SELECT CONCAT(IF(DIST_SAN_CODE = 'GUN', 'GNT', DIST_SAN_CODE), IFNULL(`dist_mandal_code`, 'NULL')) INTO @MANDAL
        FROM `st_com_dist` SCD
        INNER JOIN `st_com_mandal` STM ON STM.`mandal_dist_id` = SCD.`DIST_VC_DC`
        WHERE MANDAL_VS_MC = V_MANDALID;
        IF MDLCODE > 0 THEN
            SELECT COUNT(Soil_Sample_ID) + 1 INTO @ID FROM `tt_mao_soil_sample_dtls`
            WHERE MANDAL_ID = V_MANDALID AND SUBSTR(UNIQUE_ID, 1, 7) = @YR;
        ELSE
            SELECT COUNT(Soil_Sample_ID) + 1 INTO @ID FROM `tt_mao_soil_sample_dtls`
            WHERE SUBSTR(UNIQUE_ID, 1, 14) = CONCAT(@YR, @MANDAL);
        END IF;
        IF LENGTH(@ID) = 1 THEN
            SET @ID = CONCAT('000', @ID);
        ELSEIF LENGTH(@ID) = 2 THEN
            SET @ID = CONCAT('00', @ID);
        ELSEIF LENGTH(@ID) = 3 THEN
            SET @ID = CONCAT('0', @ID);
        ELSE
            SET @ID = @ID;
        END IF;
        RETURN CONCAT(@YR, @MANDAL, @ID);
    ELSE
        RETURN 'Mandal Doesnt Exists';
    END IF;
END$$
DELIMITER ;
I do not think the community will be able to help you with this question. This is a complex function that requires very careful analysis of table/index access and locking.
The only thing I can recommend is not to use existing table data to calculate the next sequence number, as this is bad practice.
Besides the race conditions you are experiencing, you will also run into problems if the record with the last sequence number is deleted.
I suggest you read this to get an idea on how to write a custom sequence generator:
http://en.latindevelopers.com/ivancp/2012/custom-auto-increment-values/
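A minimal sketch of that idea, assuming a dedicated counter table (the table and column names are hypothetical): each prefix (fiscal year + district + mandal code) gets its own row, and the counter is bumped atomically so two sessions can never read the same value.
-- hypothetical counter table, one row per ID prefix
CREATE TABLE id_sequence (
    prefix   VARCHAR(14) NOT NULL PRIMARY KEY,
    next_val INT NOT NULL DEFAULT 0
);
-- claim the next number for a prefix atomically
INSERT INTO id_sequence (prefix, next_val)
VALUES ('2016-17NLR250', 1)
ON DUPLICATE KEY UPDATE next_val = LAST_INSERT_ID(next_val + 1);
-- when the row already existed, LAST_INSERT_ID() now holds the claimed value;
-- on the very first insert for a prefix the claimed value is simply 1
SELECT IF(ROW_COUNT() = 1, 1, LAST_INSERT_ID()) AS claimed;
The final ID can then be built with CONCAT(prefix, LPAD(claimed, 4, '0')) instead of the IF/ELSEIF padding chain.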

MySQL Stored Procedure Updating Status of Accounts Error Code: 1329

I have a situation in which I would like to iterate through a "schedPayments" table that stores a schedule of payments corresponding to a client in the "client" table. The client table also contains a "status" column that at the moment holds a 0 for "Past Due" and a 1 for "Current". When the balance from the client table is greater than the supposed balance from the schedPayments table AND today's date is later than the date the payment was scheduled for, the status column in the clients table should be set to 0.
I may be completely off the wall with my solution, but I keep getting Error Code: 1329. No Data - zero rows fetched, selected or processed. MySQL Workbench lacks some major debugging capabilities that I wish it had, and the documentation doesn't quite cover what I need in this situation either.
CREATE PROCEDURE `project`.`status_update` ()
BEGIN
DECLARE balance DECIMAL(20) DEFAULT 0;
DECLARE cID INT(10) DEFAULT 0;
DECLARE currentID INT(10) DEFAULT 0;
DECLARE supposedBal DECIMAL(20) DEFAULT 0;
DECLARE payDate DATE;
DECLARE cur1 CURSOR FOR SELECT ClientID,SupposedBalance,Date FROM project.schedpayments;
OPEN cur1;
status_loop: LOOP
FETCH cur1 INTO cID, supposedBal, payDate;
BLOCK2: BEGIN
DECLARE cur2 CURSOR FOR SELECT balance FROM project.client WHERE ID=cID;
OPEN cur2;
FETCH cur2 INTO balance;
IF currentID > cID THEN
SET currentID = cID;
IF (CURDATE() > payDate) AND (supposedBal < balance) THEN
UPDATE feeagree SET Status=0 WHERE ID=cID;
END IF;
CLOSE Cur2;
END IF;
END BLOCK2;
END LOOP;
CLOSE cur1;
END $$
You can see the remnants of how I had enclosed the entire procedure in a block; that only resulted in the parser thinking the first block ended with END BLOCK2;, which produced Error Code 1325: Cursor is already open.
I am definitely making this more complicated than necessary, so any help would be much appreciated. The only way I learn this stuff is trial by fire, and it is super hot today.
It seems that you don't need all those cursors and you can achieve your goal with one UPDATE statement.
It's hard to be precise without seeing your table structures and sample data, but a more succinct version of your SP might look like this
CREATE PROCEDURE status_update()
UPDATE feeagree
SET Status = 0
WHERE ID IN
(
SELECT p.ClientID
FROM schedpayments p JOIN client c
ON p.ClientID = c.ID
WHERE p.Date < CURDATE()
AND p.SupposedBalance < c.balance
GROUP BY p.ClientID
);
...
DECLARE done TINYINT DEFAULT FALSE;
DECLARE CONTINUE HANDLER FOR NOT FOUND SET DONE = true;
OPEN cur1;
status_loop: LOOP
FETCH cur1 INTO cID, supposedBal, payDate;
IF DONE = true THEN LEAVE status_loop; END IF;
...
SET DONE = false;
END LOOP;
The SET DONE = false at the end resets DONE in case anything in the inner block results in it getting set to TRUE.

Optimizing stored procedure for 250k record update to random records in 1.7m record table?

I am currently running the following stored procedure. While it is a lot more efficient than my original procedure, it is still taking an excessive amount of time. I'm not actually sure where the slowdown is, as the first 10k-30k records happened fast, but it has grown slower and slower the further it gets. I'm expecting to update about 250k rows of about 1.7 million. Once this is complete I will then be doing something similar to insert records into each "Solar System".
To give you an example of the time this is taking: it has now been running for a little over 24 hours and it is only on iteration 786 of the 1716 it has to do. The reason for the changing limit on the selects is that there are 1000 possible rows per sector in my table. I don't personally see any slowdowns, but then I don't understand the inner workings of MySQL that well.
This work is being done on my local computer; no, it is not slow, but there is always the possibility that there are changes that need to be made at the server level that would make these queries more efficient. If need be I can change the server settings, so that is a possibility also. FYI, I'm using the stock MySQL configuration on Windows 7.
DECLARE CurrentOffset int; -- Current offset limit so we only deal with one 1000-row sector at a time
DECLARE CurrentOffsetMultiplier int;
DECLARE RandRow int; -- Random Row to make a Solar System with
DECLARE CheckSystemExists int; -- Used to insure RandRow is not already a Solar System Row
DECLARE TotalSystemLoops int; -- Total number of loops so each Galaxy gets its systems.
DECLARE RandomSolarSystemCount int; -- This is the number of Solar Systems that will be in each Galaxy;
DECLARE UpdateSolarCount int;
DECLARE NumberOfOffsets int;
SET CurrentOffsetMultiplier = 0;
SET NumberOfOffsets = 1716;
SET CurrentOffset = 0;
OffsetLoop: LOOP
SET UpdateSolarCount = 0;
/*Sets the amount of Solar Systems going in a Galaxy*/
CheckRandomSolarSystemCount: LOOP
SET RandomSolarSystemCount = FLOOR(125 + RAND() * (175 - 125) + 1);
IF RandomSolarSystemCount >= 125 THEN
IF RandomSolarSystemCount <= 175 THEN
LEAVE CheckRandomSolarSystemCount;
END IF;
END IF;
END LOOP;
UpdateGalaxyWithSolarSystems: LOOP
SET UpdateSolarCount = UpdateSolarCount + 1;
IF UpdateSolarCount > RandomSolarSystemCount THEN
LEAVE UpdateGalaxyWithSolarSystems;
END IF;
/*Sets RandRow and CheckSystemExists*/
CheckExistsLoop: Loop
SET RandRow = FLOOR(0 + RAND() * (1000)+ 1);
SET CheckSystemExists = (SELECT COUNT(*)
FROM
(SELECT * FROM
(SELECT * FROM galaxies2 LIMIT CurrentOffset, 1000) AS LimitedTable
LIMIT RandRow ,1) AS RandTable
WHERE SolarSystemName IS NULL);
IF CheckSystemExists THEN
LEAVE CheckExistsLoop;
END IF;
END LOOP;
/*Updates the tables SolarSystemName column with a default system name*/
UPDATE galaxies2
SET SolarSystemName = CONCAT("Solar System ", RandRow)
WHERE galaxies2.idGalaxy =
(SELECT LimitedTable.idGalaxy AS GalaxyID FROM
(SELECT galaxies2.idGalaxy FROM galaxies2 LIMIT CurrentOffset, 1000) AS LimitedTable
LIMIT RandRow ,1)
;
END LOOP;
SET CurrentOffsetMultiplier = CurrentOffsetMultiplier + 1;
SET CurrentOffset = CurrentOffsetMultiplier * 1000;
IF CurrentOffsetMultiplier = 1717 THEN
LEAVE OffsetLoop;
END IF;
END LOOP;
It's getting slower and slower because you are "walking" through the galaxies2 table.
SELECT * FROM galaxies2 LIMIT CurrentOffset, 1000
As the CurrentOffset value increases, MySQL has to "walk" through more and more records to get to the starting point. You may actually be able to get a speed boost by specifying an ORDER BY on the primary key. You would want an ORDER BY anyway, since MySQL does not read the records in any particular order if none is specified, so you could (though it is unlikely) get the same set of records at different offsets.
It would be better to specify a range on the auto-increment field, assuming you have one. Then the first and last queries should perform about the same. It's not ideal, since there could be gaps from deleted records.
SELECT * FROM galaxies2 WHERE auto_incr_field BETWEEN CurrentOffset AND CurrentOffset+1000
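Applied to the UPDATE inside the procedure, the idea looks roughly like this (a sketch only: it assumes idGalaxy is the auto-increment key and that the 1000-row sectors line up with its values; gaps from deleted rows would still need handling):
UPDATE galaxies2
SET SolarSystemName = CONCAT('Solar System ', RandRow)
WHERE galaxies2.idGalaxy =
    (SELECT idGalaxy FROM
        (SELECT idGalaxy FROM galaxies2
         WHERE idGalaxy BETWEEN CurrentOffset + 1 AND CurrentOffset + 1000
         ORDER BY idGalaxy
         LIMIT RandRow, 1) AS LimitedTable
    );
This keeps the derived-table workaround from the original query (MySQL will not otherwise let you select from the table being updated) but lets the starting point be found through the index instead of scanning past CurrentOffset rows.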