I need the column alias to be named based on a scenario
-- The desired (but deliberately INVALID) syntax: a column alias cannot be a
-- CASE expression — that impossibility is the point of the question below.
declare #testing as varchar(max)
set #testing = 'choice'
select 1 as case when #testing = 'choice' then 'chose' else 'didntChoose' end
So if #testing = 'choice', the results would look like this:
chose
1
else:
didntChoose
1
Is it possible to do this without dynamic SQL?
No, you cannot change the name of the alias based on the value unless you use dynamic SQL.
When you are selecting the columns, you can only have one name/alias for each column.
If you want different column names, then you could use some like the following which uses different select statements:
-- One SELECT per scenario: each branch carries its own column alias.
IF #testing = 'choice'
    select 1 as [Chose]
ELSE
    select 1 as [didntChoose]
Or you could return two separate columns:
-- Two fixed-name indicator columns instead of a variable alias; exactly one
-- is 1 for a non-NULL #testing (both are 0 when #testing is NULL).
select
    Chose       = case when #testing = 'choice' then 1 else 0 end,
    DidNotChose = case when #testing <> 'choice' then 1 else 0 end
Here is something I wrote that kind of achieves the goal, but it is not the most elegant piece of work I have ever done.
Various customers want to display different values for attributes associated with their resources. So a generic table exists that lets each customer assign different values to each of the resources.
Let's create the structures first and populate them. Nothing too fancy:
-- One row per customer; Attr1..Attr5 hold the customer-chosen display names
-- for up to five generic resource attributes.
create table CustResource (
CustId int,
Attr1 varchar(50),
Attr2 varchar(50),
Attr3 varchar(50),
Attr4 varchar(50),
Attr5 varchar(50))
-- Customer 1 names only the first four attributes; Attr5 stays NULL.
insert into CustResource (CustId, attr1, attr2, attr3, attr4) values (1, 'Div','Dept','Machine Type','Main Usage')
/* What just happened above is that the customer assigned display values to the first 4 attributes only */
-- One row per resource; Attr1..Attr5 hold the values that the customer's
-- display names (in CustResource) will label at query time.
create table PortalResource (
ResourceId int,
custId int,
ResourceName varchar(50),
Attr1 varchar(50),
Attr2 varchar(50),
Attr3 varchar(50),
Attr4 varchar(50),
Attr5 varchar(50))
-- Sample data: attr5 is deliberately left NULL for every row.
insert into PortalResource (ResourceId, CustId, ResourceName, attr1, attr2, attr3, attr4)
values (10,1,'abcd1234','Local Government','State Emergency Services','File Server','Production')
insert into PortalResource (ResourceId, CustId, ResourceName, attr1, attr2, attr3, attr4)
values (11,1,'bcde2345','Local Government','State Emergency Services','Database Server','Production')
insert into PortalResource (ResourceId, CustId, ResourceName, attr1, attr2, attr3, attr4)
values (12,1,'bcde2346','Local Government','Department of Education','Domain Controller','Production')
/* Notice in the above that attr5 is not populated. This is deliberate! */
/* OK, now we want to accept the customer Id (I have hard-coded it here for quick reference, but you get the point) */
-- Build a SELECT whose attr columns are aliased with the display names the
-- customer configured in CustResource; attrs with no display name are omitted.
declare #SQLString varchar(1000)
, #attr1 varchar (50)
, #attr2 varchar(50)
, #attr3 varchar(50)
, #attr4 varchar(50)
, #attr5 varchar(50)
, #CustId varchar(10)
set #CustId = 1
-- Fetch the (upper-cased) display names for this customer.
select #attr1 = upper(attr1)
, #attr2 = upper(attr2)
, #attr3 = upper(attr3)
, #attr4 = upper(attr4 )
, #attr5 = UPPER(attr5)
, #CustId = convert(varchar,custId)
from CustResource where custid = #CustId
-- BUG FIX: the original built 'Select 1as CustomerID' (no space before 'as'),
-- which is a syntax error in the generated statement.
set #SQLString = 'Select ' + #CustId + ' as CustomerID'
-- NOTE(review): the double-quoted aliases rely on SET QUOTED_IDENTIFIER ON
-- (the default) — confirm for the target session.
If #attr1 is not null set #SQLString = #SQLString +
' , attr1 as ' + '"' + #attr1 + '"'
If #attr2 is not null set #SQLString = #SQLString +
' , attr2 as ' + '"' + #attr2 + '"'
If #attr3 is not null set #SQLString = #SQLString +
' , attr3 as ' + '"' + #attr3 + '"'
If #attr4 is not null set #SQLString = #SQLString +
' , attr4 as ' + '"' + #attr4 + '"'
If #attr5 is not null set #SQLString = #SQLString +
' , attr5 as ' + '"' + #attr5 + '"'
Set #SQLString = #SQLString + ' from PortalResource where CustId = ' + #CustId
print #SQLString -- inspect the generated SQL before it runs
exec (#SQLString)
This works a charm, but it is super-ugleeeeee!!!!
I'll just leave this here http://www.sommarskog.se/dynamic_sql.html#columnalias
You first get the data into a temp table, and then you use sp_rename
to rename the column along your needs. (You need to qualify sp_rename
with tempdb to have it to operate in that database.)
Do have a read of his site if you're dealing a lot with dynamic SQL, there's a lot of ways to shoot yourself in the foot if you're not careful...
Related
I am working on SQL Server 2008. I have to create json from two tables, which have a one-to-many relationship. The tables are Customer and Orders.
Each customer may have one or many orders. The json is constructed by first getting data from the customer table and then appending all the purchases they have made.
The following is my query. I have also enclosed the json output from the query. It works and creates valid jsons. The problem is that it's too slow as I am using a cursor to loop through the Customer table. I have managed to avoid cursor to get data from the Orders table by using for xml path. Since I have to handle millions of rows, I have to replace the cursor with some other mechanism.
-- Row-by-row JSON build over CUSTOMER (slow for millions of rows; see the
-- set-based rewrite further down for the faster alternative).
DECLARE #PaymentType VARCHAR(50),
#Email VARCHAR(100),
#OrderId INT
DECLARE CustomerCursor CURSOR FAST_FORWARD FOR
SELECT TOP 10
PaymentType, Email, OrderId
FROM
CUSTOMER
OPEN CustomerCursor
FETCH NEXT FROM CustomerCursor INTO #PaymentType, #Email, #OrderId
WHILE (##FETCH_STATUS = 0)
BEGIN
DECLARE #customer VARCHAR(MAX)
DECLARE #order VARCHAR(MAX)
DECLARE #customer_with_order VARCHAR(MAX)
-- construct order json: STUFF strips the leading comma, producing a JSON
-- array of the order's products.
-- NOTE(review): if the order has no ORDERS rows the subquery returns NULL
-- and the whole concatenation becomes NULL — confirm that cannot happen.
SET #order = '[' + STUFF((SELECT ',{"orderProductID":' + CAST(orderProductID AS VARCHAR) +
',"productType":"' + ProductType + '"' +
',"productName":"' + ProductName + '"' +
',"categoryName":"' + CategoryName + '"' + '}'
FROM ORDERS
WHERE orderid = #OrderId
-- BUG FIX: 'FOR XML PAT' is not valid T-SQL; the keyword is PATH.
FOR XML PATH(''), TYPE).value('.', 'VARCHAR(MAX)'), 1, 1, '') + ']'
-- construct customer json (NULLs mapped to '' / 0 to keep the JSON valid)
SET #customer = '{"email":"' + CASE WHEN #Email IS NULL THEN '' ELSE
#Email END + '"'
+ ',"eventName": "ChristmasSale", "dataFields": {'
+ '"orderId":' + CAST(CASE WHEN #OrderId IS NULL THEN 0 ELSE
#OrderId END AS VARCHAR)
+ ',"paymentType":"' + CASE WHEN #PaymentType IS NULL THEN
'' ELSE #PaymentType END + '"'
+ ',"products": '
-- combine these two and close the dataFields / root objects
SET #customer_with_order = #customer + #order + '}}'
-- insert into CUSTOMER_ORDER_DATA
INSERT INTO CUSTOMER_ORDER_DATA(email, order_id, orders)
VALUES (#Email, #OrderId, #customer_with_order)
FETCH NEXT FROM CustomerCursor INTO #PaymentType, #Email, #OrderId
END
CLOSE CustomerCursor
DEALLOCATE CustomerCursor
I can't test this, but I suspect you could rewrite the above as a set based method as below (as I have no way of testing this, there is no way I can be certain this'll work, if it doesn't, you may need to troubleshoot it a little):
-- Set-based replacement for the cursor: build every customer's JSON in one
-- INSERT..SELECT; the correlated FOR XML PATH subquery builds the order array.
INSERT INTO CUSTOMER_ORDER_DATA(email, order_id, orders)
SELECT C.Email,
       C.orderid,
       -- BUG FIX: the original referenced the cursor variables (#Email,
       -- #OrderId, #PaymentType); in a set-based query these must be the
       -- columns of the current CUSTOMER row.
       '{"email":"' + CASE WHEN C.Email IS NULL THEN '' ELSE
       C.Email END + '"'
       + ',"eventName": "ChristmasSale", "dataFields": {'
       + '"orderId":' + CAST(CASE WHEN C.OrderId IS NULL THEN 0 ELSE
       C.OrderId END AS varchar)
       + ',"paymentType":"' + CASE WHEN C.PaymentType IS NULL THEN
       '' ELSE C.PaymentType END + '"'
       + ',"products": ' +
       ('[' + STUFF((
            SELECT
                ',{"orderProductID":' + CAST(orderProductID AS varchar)
                + ',"productType":"' + ProductType + '"'
                + ',"productName":"' + ProductName + '"'
                + ',"categoryName":"' + CategoryName + '"'
                +'}'
            FROM ORDERS AS O
            WHERE O.orderid = C.orderid
            FOR XML PATH(''),TYPE).value('.', 'varchar(max)'), 1, 1, '') + ']')
       -- BUG FIX: the cursor version closes the JSON with '}}' after the
       -- products array; the set-based version omitted it.
       + '}}'
FROM CUSTOMER AS C
Considering the OP has 5 million rows, this would likely be a bit much for one batch. Separating it into batches of, say, 10,000 may be better for overall performance. Unfortunately the OP is still using 2008, so they don't have access to the OFFSET clause.
After searching a code for capitalise the first letter of each word in a string in SQL Server I found this :
-- Capitalises the first letter of each word in #InputString (words are
-- delimited by the punctuation set below) and lower-cases the rest.
-- Returns NULL for NULL input (LOWER/LEN propagate NULL).
CREATE FUNCTION [dbo].[InitCap]
(#InputString varchar(4000) )
RETURNS VARCHAR(4000)
AS
BEGIN
DECLARE #Index INT
DECLARE #Char CHAR(1)
DECLARE #PrevChar CHAR(1)
-- BUG FIX: was VARCHAR(255), silently truncating any input longer than
-- 255 characters even though the parameter allows 4000.
DECLARE #OutputString VARCHAR(4000)
SET #OutputString = LOWER(#InputString)
SET #Index = 1
WHILE #Index <= LEN(#InputString)
BEGIN
SET #Char = SUBSTRING(#InputString, #Index, 1)
-- Treat position 1 as if preceded by a space so the first word is capped.
SET #PrevChar = CASE WHEN #Index = 1 THEN ' '
ELSE SUBSTRING(#InputString, #Index - 1, 1)
END
IF #PrevChar IN (' ', ';', ':', '!', '?', ',', '.', '_', '-', '/', '&', '''', '(')
BEGIN
-- Keep possessive 's' lower-case after an apostrophe (e.g. O'Brien's).
IF #PrevChar != '''' OR UPPER(#Char) != 'S'
SET #OutputString = STUFF(#OutputString, #Index, 1, UPPER(#Char))
END
SET #Index = #Index + 1
END
RETURN #OutputString
END
GO
but I don't know how to use it in an UPDATE statement in SSMS ... something like this
-- OP's (non-working) sketch: #InputString/#OutputString are the function's
-- internal variables and cannot be assigned in an UPDATE; see the answers
-- below for working patterns.
update tabel1
set #InputString = #OutputString
You can use Cross Apply.
Cross Apply executes for each row of the outer query, so assuming tableyouwanttoupdate has the string you want to capitalize, you can pass it to the function and use the result in your update:
-- Apply the function to every row in one set-based UPDATE.
Update t1
set t1.string=b.string
from
tableyouwanttoupdate t1
cross apply
-- BUG FIX: CROSS APPLY requires a table expression; a scalar UDF cannot be
-- applied directly, so wrap the call in a one-column derived table.
(select [dbo].[InitCap](t1.string)) b(string)
What about this:
-- Set-based InitCap without a UDF: split each string on spaces via XML,
-- re-capitalise each token, then stitch the tokens back with FOR XML PATH.
DECLARE #tbl TABLE (ID INT IDENTITY,s NVARCHAR(100));
INSERT INTO #tbl(s) VALUES
('this is all lower case!')
,('Here we have a sentence. And another one!')
,('This IS mIxEd!!! CoMMpletelY MixeD!');
WITH Splitted AS
(
-- Turn 'a b c' into <x>a</x><x>b</x><x>c</x>; the inner SELECT..FOR XML
-- entity-encodes the text so XML-special characters survive the CAST.
SELECT ID
,s
,CAST(N'<x>' + REPLACE((SELECT s AS [*] FOR XML PATH('')),N' ',N'</x><x>') + N'</x>' AS XML) AS InParts
FROM #tbl
)
SELECT ID
,s
,(
-- Rebuild the sentence: upper-case each token's first letter, lower-case
-- the rest (SUBSTRING caps each token at 1001 characters), then STUFF
-- removes the leading space.
STUFF(
(
SELECT ' ' + UPPER(LEFT(x.value('.','nvarchar(max)'),1)) + LOWER(SUBSTRING(x.value('.','nvarchar(max)'),2,1000))
FROM Splitted.InParts.nodes('/x') AS A(x)
FOR XML PATH('')
),1,1,'')
) AS NewString
FROM Splitted
The result
ID s NewString
1 this is all lower case! This Is All Lower Case!
2 Here we have a sentence. And another one! Here We Have A Sentence. And Another One!
3 This IS mIxEd!!! CoMMpletelY MixeD! This Is Mixed!!! Commpletely Mixed!
UPDATE
If you want to update your column this is very easy too:
-- Same XML-split technique, but as an UPDATE: writing through the CTE
-- updates the underlying table variable #tbl.
DECLARE #tbl TABLE (ID INT IDENTITY,s NVARCHAR(100));
INSERT INTO #tbl(s) VALUES
('this is all lower case!')
,('Here we have a sentence. And another one!')
,('This IS mIxEd!!! CoMMpletelY MixeD!');
WITH Splitted AS
(
-- Split each string on spaces into an XML fragment of <x> tokens.
SELECT ID
,s
,CAST(N'<x>' + REPLACE((SELECT s AS [*] FOR XML PATH('')),N' ',N'</x><x>') + N'</x>' AS XML) AS InParts
FROM #tbl
)
UPDATE Splitted SET s=
(
-- Re-capitalise each token and re-join; STUFF drops the leading space.
STUFF(
(
SELECT ' ' + UPPER(LEFT(x.value('.','nvarchar(max)'),1)) + LOWER(SUBSTRING(x.value('.','nvarchar(max)'),2,1000))
FROM Splitted.InParts.nodes('/x') AS A(x)
FOR XML PATH('')
),1,1,'')
)
FROM Splitted;
SELECT * FROM #tbl;
I have a query that selects from multiple tables using a join. I want to execute this query from different databases via a loop.
I have accomplished that via (simplified query):
-- Loop over the lookup table and run one dynamic SELECT per company database.
DECLARE #intCounter int
SET #intCounter = 1
DECLARE #tblBedrijven TABLE (ID int identity(1,1),
CompanyName varchar(20),
DatabaseTable varchar(100))
-- NOTE(review): the <TABLE/ DATABASE> tokens are placeholders to substitute;
-- the stray ']' after <TABLE/ DATABASE2> in the original has been removed.
INSERT INTO #tblBedrijven VALUES ('001-CureCare', '<TABLE/ DATABASE1> AUS'),
('002-Cleaning', '[global_nav5_prod].[dbo].<TABLE/ DATABASE2> AUS')
DECLARE #strCompany varchar(20)
DECLARE #strTable varchar(100)
WHILE (#intCounter <= (SELECT MAX(ID) FROM #tblBedrijven))
BEGIN
SET #strTable = (SELECT DatabaseTable FROM #tblBedrijven
WHERE ID = #intCounter)
SET #strCompany = (SELECT CompanyName FROM #tblBedrijven
WHERE ID = #intCounter)
-- BUG FIX: a space is required after FROM; 'FROM' + #strTable ran the
-- keyword and the table name together in the generated SQL.
EXEC('SELECT ''' + #strCompany + ''' as Company,
AUS.[User],
AUS.[E-mail]
FROM ' + #strTable)
SET #intCounter = #intCounter + 1
END
My problem is that the result generates 2 separate tables (for every loop). I want to union the results but have no clue how.
Any suggestions?
Thanks in advance.
Can't you use something like the below code, where you append all the SQL statements with UNION and finally execute the SQL only once, without executing inside the loop? I am not an expert in SQL Server, but I have written many similar stored procedures using other RDBMSs, so please bear with any syntax errors.
-- Build ONE statement that UNIONs every company's SELECT, then execute it
-- once so the results come back as a single result set.
DECLARE #intCounter int
DECLARE #maxId int
SET #intCounter = 1
DECLARE #tblBedrijven TABLE (ID int identity(1,1),
CompanyName varchar(20),
DatabaseTable varchar(100))
-- NOTE(review): the <TABLE/ DATABASE> tokens are placeholders to substitute;
-- the stray ']' after <TABLE/ DATABASE2> in the original has been removed.
INSERT INTO #tblBedrijven VALUES ('001-CureCare', '<TABLE/ DATABASE1> AUS'),
('002-Cleaning', '[global_nav5_prod].[dbo].<TABLE/ DATABASE2> AUS')
DECLARE #strCompany varchar(20)
DECLARE #strTable varchar(100)
DECLARE #strSql varchar(5000)
-- BUG FIX: #strSql must start as an empty string; concatenating onto an
-- uninitialized (NULL) varchar yields NULL and EXEC would run nothing.
SET #strSql = ''
SET #maxId = (SELECT MAX(ID) FROM #tblBedrijven)
WHILE (#intCounter <= #maxId)
BEGIN
SET #strTable = (SELECT DatabaseTable FROM #tblBedrijven
WHERE ID = #intCounter)
SET #strCompany = (SELECT CompanyName FROM #tblBedrijven
WHERE ID = #intCounter)
-- BUG FIX: space added after FROM so the generated SQL is valid.
SET #strSql = #strSql + ('SELECT ''' + #strCompany + ''' as Company,
AUS.[User],
AUS.[E-mail]
FROM ' + #strTable)
-- BUG FIX: T-SQL IF has no THEN keyword.
-- NOTE(review): UNION dedupes; UNION ALL would be cheaper if duplicate
-- rows across companies are impossible — confirm.
IF #intCounter < #maxId
BEGIN
SET #strSql = #strSql + ' UNION '
END
SET #intCounter = #intCounter + 1
END
EXEC(#strSql)
I have a table with 500k rows where the address is in one field, delimited by Char(13)+Char(10). I have added 5 fields to the table in the hope of splitting this up.
Found online this split function that seems to perform well as I cannot use parsename due to there being 5 parts and also that the . may be in the field.
This is a table-valued function, so I would have to loop the rows and update each record. Previously I would have used a cursor, a SQL WHILE loop, or possibly even C# to do this, but I feel there must be a CTE or set-based answer.
So given some source data:
-- Source table: [Address] holds a multi-line address delimited by CR+LF;
-- Address1..Address5 are the target columns for the split parts.
CREATE TABLE dbo.Addresses
(
AddressID INT IDENTITY(1,1),
[Address] VARCHAR(255),
Address1 VARCHAR(255),
Address2 VARCHAR(255),
Address3 VARCHAR(255),
Address4 VARCHAR(255),
Address5 VARCHAR(255)
);
-- The string literals below intentionally contain embedded line breaks.
INSERT dbo.Addresses([Address])
SELECT 'foo
bar'
UNION ALL SELECT 'add1
add2
add3
add4
add5';
Let's create a function that returns the address parts in a sequence:
-- Inline TVF: splits #List on #Delimiter and returns one row per part with
-- its 1-based position (rn), keyed back to the caller via #AddressID.
CREATE FUNCTION dbo.SplitAddressOrdered
(
#AddressID INT,
#List VARCHAR(MAX),
#Delimiter VARCHAR(32)
)
RETURNS TABLE
AS
RETURN
(
SELECT
AddressID = #AddressID,
rn = ROW_NUMBER() OVER (ORDER BY Number),
AddressItem = Item
-- Classic numbers-table split: Number walks character positions; positions
-- immediately preceded by the delimiter mark the start of an item.
FROM (SELECT Number, Item = LTRIM(RTRIM(SUBSTRING(#List, Number,
CHARINDEX(#Delimiter, #List + #Delimiter, Number) - Number)))
-- NOTE(review): the numbers source is capped by the row count of
-- sys.all_objects, so extremely long strings could be under-split — confirm.
FROM (SELECT ROW_NUMBER() OVER (ORDER BY [object_id])
FROM sys.all_objects) AS n(Number)
WHERE Number <= CONVERT(INT, LEN(#List))
AND SUBSTRING(#Delimiter + #List, Number, LEN(#Delimiter)) = #Delimiter
) AS y
);
GO
Now you can do this (you will have to run the query 5 times):
-- Run 5 passes; pass i fills column Address<i> with the i-th part via an
-- updatable CTE over the CROSS APPLY of the split function.
DECLARE
#i INT = 1,
#sql NVARCHAR(MAX),
#src NVARCHAR(MAX) = N';WITH x AS
(
SELECT a.*, Original = s.AddressID, s.rn, s.AddressItem
FROM dbo.Addresses AS a
CROSS APPLY dbo.SplitAddressOrdered(a.AddressID, a.Address,
CHAR(13) + CHAR(10)) AS s WHERE rn = #i
)';
WHILE #i <= 5
BEGIN
-- The column name (Address1..Address5) is injected into the SQL text;
-- #i itself is passed as a proper sp_executesql parameter.
SET #sql = #src + N'UPDATE x SET Address' + RTRIM(#i)
+ ' = CASE WHEN AddressID = Original AND rn = '
+ RTRIM(#i) + ' THEN AddressItem END;';
EXEC sp_executesql #sql, N'#i INT', #i;
SET #i += 1;
END
Then you can drop the Address column:
-- The original multi-line column is no longer needed once the parts are populated.
ALTER TABLE dbo.Addresses DROP COLUMN [Address];
Then the table has:
AddressID Address1 Address2 Address3 Address4 Address5
--------- -------- -------- -------- -------- --------
1 foo bar NULL NULL NULL
2 add1 add2 add3 add4 add5
I'm sure someone more clever than I will show how to utilize that function without having to loop.
I could also envision a slight change to the function that would allow you to simply pull out a certain element... hold please...
EDIT
Here's a scalar function that is more expensive on its own but allows you to make one pass of the table instead of 5:
-- Scalar variant: returns only the #Index-th delimited element of #List
-- (NULL if there is no such element), so a single UPDATE can fill all five
-- address columns in one pass over the table.
CREATE FUNCTION dbo.ElementFromOrderedList
(
#List VARCHAR(MAX),
#Delimiter VARCHAR(32),
#Index SMALLINT
)
RETURNS VARCHAR(255)
AS
BEGIN
RETURN
(
SELECT Item
-- Same numbers-table split as dbo.SplitAddressOrdered, filtered to one rn.
FROM (SELECT rn = ROW_NUMBER() OVER (ORDER BY Number),
Item = LTRIM(RTRIM(SUBSTRING(#List, Number,
CHARINDEX(#Delimiter, #List + #Delimiter, Number) - Number)))
-- NOTE(review): capacity is bounded by the row count of sys.all_objects.
FROM (SELECT ROW_NUMBER() OVER (ORDER BY [object_id])
FROM sys.all_objects) AS n(Number)
WHERE Number <= CONVERT(INT, LEN(#List))
AND SUBSTRING(#Delimiter + #List, Number, LEN(#Delimiter)) = #Delimiter
) AS y WHERE rn = #Index
);
END
GO
Now the update, given the table above (prior to the update and prior to the drop), is simply:
-- Single pass over the table: each column pulls its element directly.
-- NOTE(review): this calls the scalar UDF five times per row — fine for a
-- one-off migration, but measure on the full 500k rows before running.
UPDATE dbo.Addresses
SET Address1 = dbo.ElementFromOrderedList([Address], CHAR(13) + CHAR(10), 1),
Address2 = dbo.ElementFromOrderedList([Address], CHAR(13) + CHAR(10), 2),
Address3 = dbo.ElementFromOrderedList([Address], CHAR(13) + CHAR(10), 3),
Address4 = dbo.ElementFromOrderedList([Address], CHAR(13) + CHAR(10), 4),
Address5 = dbo.ElementFromOrderedList([Address], CHAR(13) + CHAR(10), 5);
You have couple of options:
You can create a temp table and then parse the address into the temp table and then update the original table by joining it to the temp table.
or
You can write your own T-SQL functions and use those functions in your update statement function like follows:
-- Pseudocode: myGetAddress1Function etc. stand for user-written scalar UDFs,
-- one per address part.
UPDATE myTable
SET address1 = myGetAddress1Function(address),
address2 = myGetAddress2Function(address)....
I'm trying to prepare some data for deletion by a 3rd party, and unfortunately they can only process data in batches of 2000 records. I have 100k records and may need to divide-and-export this data several more times, so I'd like to automate the process somehow.
Is there a reasonably easy way to do this using SQL Server 2008? I'm not running a complex query -- it's not too far off from SELECT PKID FROM Sometable ORDER BY PKID -- and while I can probably do this using a cursor, I'd like to know if there's a better way.
SET NOCOUNT ON;
-- Audit table: records which batch (BatchNumber) each PK was assigned to.
CREATE TABLE [dbo].[SyncAudit] ( PkId INT, BatchNumber INT)
DECLARE #batchsize INT
,#rowcount INT
,#batchcount INT
,#rootdir VARCHAR(2048)
,#saveas VARCHAR(2048)
,#query VARCHAR(2048)
,#bcpquery VARCHAR(2048)
,#bcpconn VARCHAR(64)
,#bcpdelim VARCHAR(2)
SET #rootdir = '\\SERVER1\SHARE1\FOLDER\'
SET #batchsize = 2000
SET #bcpdelim = '|'
SET #bcpconn = '-T' -- Trusted
--SET #bcpconn = '-U <username> -P <password>' -- SQL authentication
-- BUG FIX: COUNT(1)/#batchsize is INTEGER division, so CEILING received an
-- already-truncated value and the final partial batch was lost; multiply by
-- 1.0 to force decimal division before rounding up.
SELECT #rowcount = COUNT(1),
#batchcount = CEILING(COUNT(1) * 1.0 / #batchsize) FROM <#TableName, string, 'SomeTable'>
SELECT [BatchSize] = #BatchSize, [BatchCount] = #Batchcount
-- NTILE spreads the keys evenly across #batchcount batches.
INSERT INTO SyncAudit
SELECT
<#TableKey, string, 'PKField'>
,groupnum = NTILE(#batchcount) OVER ( ORDER BY <#TableKey, string, 'PKField'>)
FROM
<#TableName, string, 'SomeTable'>
-- Export each batch to its own delimited file via bcp/xp_cmdshell.
WHILE (#batchcount > 0)
BEGIN
SET #saveas = #rootdir + 'batchnumber-' + cast(#batchcount as varchar) + '.txt'
SET #query = ' SELECT [<#TableName, string, 'SomeTable'>].*
FROM [' + db_name() + '].[dbo].[<#TableName, string, 'SomeTable'>]
JOIN [' + db_name() + '].[dbo].[SyncAudit]
ON [<#TableName, string, 'SomeTable'>].<#TableKey, string, 'PKField'> = [SyncAudit].PkId
AND [SyncAudit].BatchNumber = ' + cast(#batchcount as varchar) + ''
SET #bcpquery = 'bcp "' + replace(#query, char(10), '') + '" QUERYOUT "' + #saveas + '" -c -t^' + #bcpdelim + ' ' + #bcpconn + ' -S ' + ##servername
EXEC master..xp_cmdshell #bcpquery
--EXEC (#query)
SET #batchcount = #batchcount -1
END
DROP TABLE [dbo].[SyncAudit] -- or leave for reference
I think you can take advantage of using ROW_NUMBER and then using BETWEEN to specify a range of rows that you like. Alternatively you could use PKID if you knew there wasn't gaps, or didn't care about the gaps
e.g.
-- Pseudocode: number the rows once, then slice any fixed-size window with
-- BETWEEN (the '...' are placeholders for the real column list).
SELECT ...
FROM
(SELECT ...
ROW_NUMBER() OVER(ORDER BY PKID ) as RowNum
FROM Sometable e
) t
WHERE RowNum BETWEEN #startRowIndex AND (#startRowIndex + #maximumRows) - 1
This is often used for paging results. 4GuysFromRolla have a good article on it
You could work out the ranges in a while ##ROWCOUNT loop to target the rows required. It may work better than ROW_NUMBER() which would have to keep numbering from the start.
declare #startid int
declare #endid int
-- get one range, these are efficient as they go over the PKID key by range
select top(1) #startid = pkid from sometable order by pkid -- 1 key visited
select top(2000) #endid = pkid from sometable order by pkid -- 2000 keys visited
-- note: top 2000 may end up with the 514th id if that is the last one
-- ##ROWCOUNT here reflects the TOP(2000) assignment select above.
while ##ROWCOUNT > 0
begin
insert otherdb.dbo.backupcopy
select * from sometable
where pkid between #startid and #endid
select top(1) #startid = pkid from sometable
WHERE pkid > #endid -- binary locate
order by pkid
-- NOTE(review): the next statement both reads #endid in its WHERE clause and
-- reassigns #endid per row; the interaction of the filter with the in-flight
-- assignment is not guaranteed — confirm, or capture the previous #endid in
-- a separate variable before this select.
select top(2000) #endid = pkid from sometable
WHERE pkid > #endid -- binary locate, then forward range lookup, max 2000 keys
order by pkid
end
I ended up using a combination of the approaches provided by cyberkiwi and Adam. I didn't need to use ROW_NUMBER only because I used an IDENTITY column in a table data type instead.
Here's a redacted version of the code I used -- it worked like a charm. Thanks again to everyone for all the help!
-- Combined solution: walk the keys in #batchsize ranges, log each range, and
-- export each batch's IDs as a CSV via bcp.
use Testing
GO
SET NOCOUNT ON
declare
#now datetime = GETDATE(),
#batchsize int = 2000,
#bcpTargetDir varchar(500) = '\\SomeServer\Upload\',
#csvQueryServer varchar(500) = '.\SQLExpress',
#rowcount integer,
#nowstring varchar(100),
#batch_id int,
#startid int,
#endid int,
#csvQuery varchar(max),
#bcpFilename varchar(200),
#bcpQuery varchar(1000)
declare #tblBatchRanges table (
batch_id integer NOT NULL IDENTITY(1,1) PRIMARY KEY,
oid_start integer NOT NULL,
oid_end integer NOT NULL,
csvQuery varchar(max)
)
-- Create a unique timestamp-based string, which will be used to name the exported files.
select #nowstring = CONVERT(varchar, #now, 112) + '-' + REPLACE(CONVERT(varchar, #now, 114), ':', '')
-- Seed the first batch range.
select top(1) #startid = oid from Testing..MyObjectIds order by oid
select top(#batchsize) #endid = oid from Testing..MyObjectIds order by oid
select #rowcount = ##ROWCOUNT
while (#rowcount > 0) begin
-- Create a CSV of all object IDs in the batch, using the STUFF() function
select #csvQuery = 'select stuff((select distinct '','' + CAST(oid as varchar) from Testing..MyObjectIds where oid between ' + CAST(#startid as varchar) + ' and ' + CAST(#endid as varchar) + ' order by '','' + CAST(oid as varchar) for xml path('''')),1,1,'''')'
-- Log the info and get the batch ID.
-- BUG FIX: the original VALUES list supplied four values (#startid, #endid,
-- #oidCSV, #csvQuery) for three columns, which fails; the never-assigned
-- #oidCSV variable has been removed from the script.
insert into #tblBatchRanges (oid_start, oid_end, csvQuery)
values (#startid, #endid, #csvQuery)
select #batch_id = ##IDENTITY
-- Advance #startid and #endid so that they point to the next batch
select top(1) #startid = oid
from Testing..MyObjectIds
where oid > #endid
order by oid
select top(#batchsize) #endid = oid
from Testing..MyObjectIds
where oid > #endid
order by oid
select #rowcount = ##ROWCOUNT
-- Export the current batch to a file.
select #bcpFilename = 'MyExport-' + #nowstring + '-' + cast(#batch_id as varchar) + '.txt'
select #bcpQuery = 'bcp "' + #csvQuery + '" QUERYOUT "' + #bcpTargetDir + #bcpFilename + '" -S ' + #csvQueryServer + ' -T -c'
exec master..xp_cmdshell #bcpquery
end
SET NOCOUNT OFF
--Check all of the logged info.
select oid_start, oid_end, csvQuery from #tblBatchRanges