MySQL Query Causing Deadlocks

I am dealing with a query that is causing some deadlocks, I believe because it's trying to SELECT from the same table it's updating. Regardless, this is a terribly clumsy way of doing this and I'd like to find a better way.
One obvious solution here is to pull the SELECT out into a separate query, but I'm hoping one of you SQL ninjas out there has a suggestion for a more elegant solution that could get this done in a single query with much less overhead.
SET @update_id := 0;
UPDATE msgstream
SET retryCount = retryCount + 1,
retryTime = TIMESTAMPADD(SECOND,?,NOW(3)),
messageid = (SELECT @update_id := messageid)
WHERE receiver = ?
AND isDelivered = 0
AND retryCount < 10
AND retryTime < NOW(3)
AND (
SELECT m.counter
FROM (
SELECT COUNT(messageid) AS counter
FROM msgstream
WHERE receiver = ?
AND isDelivered = 0
AND retryCount < 10
AND retryTime > NOW(3)
) AS m
) = 0
ORDER BY messageid LIMIT 1;
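For comparison, the "pull the SELECT out into a separate query" route mentioned above might look roughly like the sketch below. It is not a drop-in replacement: @pending is an extra variable introduced here just for the gating check, and the two reads plus the update would still need to run inside a transaction to keep the original semantics.
SET @update_id := NULL;
-- Gate: is any message for this receiver already waiting on a future retryTime?
SELECT COUNT(*) INTO @pending
FROM msgstream
WHERE receiver = ? AND isDelivered = 0 AND retryCount < 10 AND retryTime > NOW(3);

-- Pick the row to retry only when nothing is pending
SELECT messageid INTO @update_id
FROM msgstream
WHERE @pending = 0
AND receiver = ? AND isDelivered = 0 AND retryCount < 10 AND retryTime < NOW(3)
ORDER BY messageid
LIMIT 1;

-- Update the chosen row by its key, so the UPDATE no longer reads msgstream through a subquery
UPDATE msgstream
SET retryCount = retryCount + 1,
retryTime = TIMESTAMPADD(SECOND, ?, NOW(3))
WHERE messageid = @update_id;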

Subquery returned more than 1 value...Different query

Hello, I have this query that I am trying to execute and I keep getting the error "Subquery returned more than 1 value. This is not permitted when the subquery follows =, !=, <, <= , >, >= or when the subquery is used as an expression." Kindly help, please.
DECLARE @NUMCOUNT BIT
Select @NUMCOUNT = (SELECT
CASE WHEN
(SELECT COUNT(R5REQUISLINES.RQL_REQ)
WHERE R5REQUISLINES.RQL_STATUS IN ('A')
) IN
(SELECT COUNT(R5REQUISLINES.RQL_REQ)
WHERE R5REQUISLINES.RQL_STATUS IN ( 'A','C') ) THEN 1 else 0 END AS NUMCOUNT1
FROM R5REQUISLINES JOIN
R5REQUISITIONS ON R5REQUISLINES.RQL_REQ = R5REQUISITIONS.REQ_CODE
GROUP BY R5REQUISLINES.RQL_REQ, R5REQUISITIONS.REQ_CODE,R5REQUISLINES.RQL_STATUS
)
IF @NUMCOUNT = '1'
begin
UPDATE R5REQUISITIONS
SET R5REQUISITIONS.REQ_STATUS = 'CP'
end
OK, it sounds like what you actually want to do is update R5REQUISITIONS when there is no RQL_STATUS = 'C' in R5REQUISLINES, since you said you want to count the records where RQL_STATUS is A and where it's A or C, and then do the update if the counts are the same. You can greatly simplify this task with the following query:
UPDATE r5
SET r5.REQ_STATUS = 'CP'
FROM R5REQUISITIONS r5
WHERE NOT EXISTS (SELECT 1 FROM R5REQUISLINES r5q WHERE r5q.RQL_REQ = r5.REQ_CODE AND r5q.RQL_STATUS = 'C')
Your 'SELECT CASE' is returning more than 1 record, so it can't be assigned to @NUMCOUNT. Either fix the sub-query to return only the record you're looking for, or hack it to return only one row with a TOP 1 qualification.
I don't know what your data looks like so I can't tell you why your case subquery returns more records than you think it should.
Try running this and see what it returns; that will probably tell you all you need to know:
SELECT
CASE WHEN
(SELECT COUNT(R5REQUISLINES.RQL_REQ)
WHERE R5REQUISLINES.RQL_STATUS IN ('A')
) IN
(SELECT COUNT(R5REQUISLINES.RQL_REQ)
WHERE R5REQUISLINES.RQL_STATUS IN ( 'A','C')
)
THEN 1
ELSE 0
END AS NUMCOUNT1
FROM R5REQUISLINES JOIN
R5REQUISITIONS ON R5REQUISLINES.RQL_REQ = R5REQUISITIONS.REQ_CODE
GROUP BY R5REQUISLINES.RQL_REQ, R5REQUISITIONS.REQ_CODE,R5REQUISLINES.RQL_STATUS
If there is more than 1 row returned, that's where your problem is.
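If the flag really does need to land in a variable first, one pattern that avoids the multi-row assignment entirely is a single EXISTS test. This is a sketch only: unlike the correlated UPDATE above it checks the whole table, and it assumes the same rule of "update when no line has status 'C'".
DECLARE @NUMCOUNT BIT

-- 1 when no requisition line has status 'C', otherwise 0
SELECT @NUMCOUNT = CASE WHEN EXISTS (SELECT 1 FROM R5REQUISLINES WHERE RQL_STATUS = 'C')
THEN 0 ELSE 1 END

IF @NUMCOUNT = 1
BEGIN
UPDATE R5REQUISITIONS SET REQ_STATUS = 'CP'
END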

Performance issue with update query after adding index

I added an index to make my update query faster, but with the index the same query started taking several hours to complete, while without it the process finishes in a few minutes. That is exactly the opposite of what I intended.
Below is a sample snippet of my code.
Cursor c_updt_stg_rsn is
select distinct substr(r.state_claim_id, 1, 12), ROWID
from nemis.stg_state_resp_rsn r
WHERE r.seq_resp_plan_id = v_seq_resp_plan_id
and r.submitted_claim_id is null
and r.filler_2 is null;
BEGIN
OPEN c_updt_stg_rsn;
LOOP
FETCH c_updt_stg_rsn BULK COLLECT
INTO v_state_claim_id, v_rowid LIMIT c_BULK_SIZE;
FORALL i IN 1 .. v_state_claim_id.COUNT()
UPDATE /*+ index(STG_STATE_RESP_RSN,IDX2_STG_STATE_RESP_RSN) */ nemis.stg_state_resp_rsn
SET (submitted_claim_id, filler_2) =
(SELECT DISTINCT submitted_claim_id, sl_group_id
FROM nemis.state_sub_Resp_dtl D
WHERE
(d.state, d.type_of_claim) in (select distinct state, type_of_claim
from nemis.resp_match_state
where seq_resp_match_table_level_id in
(select seq_resp_match_table_level_id
from nemis.resp_match_table_level
where seq_resp_plan_id = v_seq_resp_plan_id))
AND resp_state_claim_id LIKE v_state_claim_id(i)||'%'
)
WHERE ROWID = v_rowid(i);
IF v_state_claim_id.COUNT() != 0 THEN
v_cnt_rsn := v_cnt_rsn + SQL%ROWCOUNT;
END IF;
COMMIT;
EXIT WHEN c_updt_stg_rsn%NOTFOUND;
END LOOP;
CLOSE c_updt_stg_rsn;
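One way to see where the time goes in the per-row lookup is to explain it on its own; below is a sketch with placeholder literals standing in for the PL/SQL bind variables (123 for v_seq_resp_plan_id, 'XXXXXXXXXXXX%' for v_state_claim_id(i) || '%').
EXPLAIN PLAN FOR
SELECT DISTINCT submitted_claim_id, sl_group_id
FROM nemis.state_sub_Resp_dtl D
WHERE (d.state, d.type_of_claim) IN
(SELECT state, type_of_claim
FROM nemis.resp_match_state
WHERE seq_resp_match_table_level_id IN
(SELECT seq_resp_match_table_level_id
FROM nemis.resp_match_table_level
WHERE seq_resp_plan_id = 123))                 -- placeholder for v_seq_resp_plan_id
AND resp_state_claim_id LIKE 'XXXXXXXXXXXX%';  -- placeholder for v_state_claim_id(i) || '%'

SELECT * FROM TABLE(DBMS_XPLAN.DISPLAY);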

MySql Rolling sum issue

I want to do a rolling sum of a column. I have created the following query:
SET @FinancialIncremental := 0;
SELECT j.ErrorCategory,
COUNT(IF(j.ErrorType = 'P',j.ErrorType,NULL)) AS Financial,
COUNT(IF(j.ErrorType = 'NP',j.ErrorType,NULL)) AS Procedural,
SUM(j.OverPaymentAmount) AS 'Financial Over Paid Amount ($)',
(
SELECT
@FinancialIncremental := @FinancialIncremental + q1.c
FROM
(
(SELECT
COUNT(IF(ErrorType = 'P',ErrorType,NULL)) AS c
FROM DemoScheme.TestTable WHERE IsDeleted = 0 AND ErrorType IN ('P','NP')
GROUP BY ErrorCategory) AS q1
)
) AS 'Financial Incremental'
FROM DemoScheme.TestTable j WHERE IsDeleted = 0 AND ErrorType IN ('P','NP')
GROUP BY ErrorCategory;
But it is giving me an error on the 'Financial Incremental' subquery:
Error Code: 1242. Subquery returns more than 1 row
Can anybody help me to solve this?
You have some repetitions in there which complicate things unnecessarily; try this query. You might also need to replace NULL with 0 when you add it.
SET @FinancialIncremental := 0;
SELECT j.ErrorCategory,
COUNT(IF(j.ErrorType = 'P',j.ErrorType,NULL)) AS Financial,
COUNT(IF(j.ErrorType = 'NP',j.ErrorType,NULL)) AS Procedural,
SUM(j.OverPaymentAmount) AS 'Financial Over Paid Amount ($)',
(@FinancialIncremental := @FinancialIncremental + COUNT(IF(j.ErrorType = 'P',j.ErrorType,0)))
AS 'Financial Incremental'
FROM DemoScheme.TestTable j WHERE IsDeleted = 0 AND ErrorType IN ('P','NP')
GROUP BY ErrorCategory;
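As an alternative to the user-variable approach: on MySQL 8.0 or later the running total can be expressed with a window function over the grouped counts. This is a sketch using the same table and columns; it replaces the variable technique rather than fixing it.
SELECT ErrorCategory,
COUNT(IF(ErrorType = 'P', ErrorType, NULL)) AS Financial,
COUNT(IF(ErrorType = 'NP', ErrorType, NULL)) AS Procedural,
SUM(OverPaymentAmount) AS 'Financial Over Paid Amount ($)',
-- running total of the per-category Financial count, in ErrorCategory order
SUM(COUNT(IF(ErrorType = 'P', ErrorType, NULL))) OVER (ORDER BY ErrorCategory) AS 'Financial Incremental'
FROM DemoScheme.TestTable
WHERE IsDeleted = 0 AND ErrorType IN ('P','NP')
GROUP BY ErrorCategory;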

Laravel Eloquent skip every Nth row

I have a large DB with 50,000+ rows, and I'm trying to get all rows but skipping every 50 rows, for example.
I tried this from the Laravel documentation (Offset & Limit):
$users = DB::table('users')->skip(10)->take(5)->get();
But this will only skip the first 10 rows and get the next 5 rows. I can't find an Eloquent solution to this problem.
Has anybody solved this before?
Solved it using a raw query:
return DB::select(DB::raw('
SELECT dateTime, row1, row2
FROM (
SELECT @row := @row + 1 AS rownum, dateTime, row1, row2
FROM (
SELECT @row := 0
) r, users
) ranked
WHERE rownum % 50 = 0'));
It's a much faster solution than @disf.asia's suggestion.
You have to use a loop.
$start = 0;
$skip = 50;
$take = 1;
$all = array();
do {
$partial = User::skip($start++ * $skip)->take($take)->get();
$all = array_merge($all, $partial->all());
} while (count($partial) > 0);
this will take 1 row for every 50 skipped rows, so the 1st, 51st, 101st, ... until the end of the table
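If the table happens to have a gap-free auto-increment key, the same thinning can be done without the ranking subquery or the loop; a sketch follows (it assumes an id column with no gaps, which deletes would break):
SELECT dateTime, row1, row2
FROM users
WHERE id % 50 = 0;   -- every 50th row by id; only correct while the ids stay contiguous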

How to make function execute faster in SQL?

I am using a function to update one column, like:
DetailedStatus = dbo.fn_GetProcessStageWiseStatus(PR.ProcessID, PR.ProcessRunID, getdate())
Here 500,000 records are continuously UPDATED by this line; it's like a loop.
Using this function for a few records executes fast, but with 500,000 records it becomes very slow...
What can I do to make this execute faster for that many records?
Are there any measures to take, or some way to split the work?
Function:
CREATE FUNCTION [dbo].[fn_GetProcessStageWiseStatus]
(
@ProcessID INT
,@ProcessRunID INT
,@SearchDate SMALLDATETIME
)
RETURNS VARCHAR(100)
AS
BEGIN
DECLARE
@iLoopCount SMALLINT
,@iRowCount SMALLINT
,@StepProgress VARCHAR(100)
,@StepCount SMALLINT
IF EXISTS(
SELECT TOP 1 1
FROM dbo.Step S WITH(NOLOCK)
JOIN dbo.vw_FileGroup FG
ON S.FileConfigGroupID = FG.FileConfigGroupID
WHERE S.ProcessID = @ProcessID
AND S.Active = 1
AND FG.FileConfigGroupActive = 1
AND FG.Direction = 'Inbound'
)
BEGIN
SET @StepProgress = 'Not Received'
END
ELSE
BEGIN
SET @StepProgress = 'Not Started'
END
DECLARE @StepRunDetailsTable TABLE
(
KeyNo INT IDENTITY(1,1)
,StepID INT
,StepStartTime SMALLDATETIME
,StepEndTime SMALLDATETIME
,SourceEnv VARCHAR(100)
,DestEnv VARCHAR(100)
)
INSERT INTO @StepRunDetailsTable
SELECT
S.StepID
,MAX(isnull(SR.StepStartTime, '06/06/2079'))
,MAX(isnull(SR.StepEndTime, '06/06/2079'))
,isnull(SENV.EnvironmentName, '')
,isnull(DENV.EnvironmentName, '')
FROM dbo.ProcessRun PR WITH(NOLOCK)
JOIN dbo.StepRun SR WITH(NOLOCK)
ON SR.ProcessRunID = PR.ProcessRunID
JOIN dbo.vw_StepHierarchy SH
ON SR.StepID = SH.StepID
AND SH.Active = 1
JOIN dbo.Step S WITH(NOLOCK)
ON SH.StepID = S.StepID
JOIN dbo.WorkFlow WF WITH(NOLOCK)
ON S.WorkFlowID = WF.WorkFlowID
AND WF.Active = 1
JOIN dbo.Environment SENV WITH(NOLOCK)
ON SENV.EnvironmentID = WF.SourceEnvironmentID
AND SENV.Active = 1
JOIN dbo.Environment DENV WITH(NOLOCK)
ON DENV.EnvironmentID = WF.DestinationEnvironmentID
AND DENV.Active = 1
WHERE PR.ProcessRunID = @ProcessRunID
GROUP BY S.StepID, SENV.EnvironmentName, DENV.EnvironmentName, SH.StepOrder
ORDER BY SH.StepOrder ASC
SELECT @StepCount = COUNT(*)
FROM dbo.ProcessRun PR WITH(NOLOCK)
JOIN dbo.Step S WITH(NOLOCK)
ON PR.ProcessID = S.ProcessID
AND PR.ProcessRunID = @ProcessRunID
AND S.Active = 1
SELECT @iRowCount = COUNT(DISTINCT StepID) FROM @StepRunDetailsTable
SET @iLoopCount = 0
WHILE (@iRowCount > @iLoopCount)
BEGIN
SET @iLoopCount = @iLoopCount + 1
SELECT
@StepProgress =
CASE
--WHEN @SearchDate BETWEEN StepStartTime AND StepEndTime
WHEN @SearchDate >= StepStartTime AND @SearchDate <= StepEndTime
THEN DestEnv + ' Load in Progress'
WHEN @SearchDate > StepEndTime AND @iLoopCount < @StepCount
THEN 'Waiting on next step - Loaded to ' + DestEnv
WHEN @SearchDate > StepEndTime AND @iLoopCount = @StepCount
THEN 'Completed'
WHEN @SearchDate < StepStartTime AND @iLoopCount = 1
THEN 'Load Not Started'
ELSE @StepProgress
END
FROM @StepRunDetailsTable
WHERE KeyNo = @iLoopCount
END
RETURN @StepProgress
END
Thanks in advance.
It seems like you get a change in execution plan when you try to update 500k rows.
You can try setting a FORCESEEK hint on the FROM clause to force seeks instead of scans.
Also, WHILE (@iRowCount > @iLoopCount) should be replaced with IF EXISTS, because you basically check for certain conditions on the results table and you need to return as early as possible.
I see that you use the NOLOCK hint everywhere to allow dirty reads; you can SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED in the calling stored procedure and remove all of those, or consider changing the database with SET READ_COMMITTED_SNAPSHOT ON to avoid locks.
By the way, scalar function calls in SQL Server are very expensive, so if you have massive updates/selects in a loop that call a function per row, you should avoid the function as much as possible.
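Rough sketches of those suggestions, with placeholder names where the real procedure and database names are not known:
-- FORCESEEK applied to one of the joins used inside the function (illustrative only)
SELECT COUNT(*)
FROM dbo.ProcessRun PR
JOIN dbo.StepRun SR WITH (FORCESEEK)
ON SR.ProcessRunID = PR.ProcessRunID
WHERE PR.ProcessRunID = 1;   -- placeholder id

-- In the calling stored procedure, instead of NOLOCK on every table:
SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;

-- Or, once per database, let readers use row versioning instead of shared locks:
ALTER DATABASE YourDb SET READ_COMMITTED_SNAPSHOT ON;   -- YourDb is a placeholder name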