How to convert SQL table into a customised gridview - c#

I have a C# web page and I am using SQL Server for the database. I need to display the data from a table in a GridView.
SQL : I have a table as below
Exec_Date Plat Pass Fail
----------------------------------------------------------------
2017-02-19 12:32:43.570 MSSQL 10 12
2017-02-19 12:32:43.570 MSSQL 10 12
2017-02-18 12:32:43.570 Sybase 10 12
2017-02-18 12:32:43.570 Oracle 10 12
i would like to convert the table into a customised format for a presentation
Status 18/02 19/02
-------------------------
Pass 20 20
Fail 24 24
Is it possible to do the above with PIVOT? If so, can anyone give me some ideas on that, please?
I have tried the below so far and the #Names is not resolving the variable value in query
-- Question's attempt: collect per-date pass totals into #TempTable, build a
-- bracketed column list, then pivot. Variable sigils restored to T-SQL's @
-- (the page rendered them as #, which SQL Server reserves for temp tables).
create table #TempTable
(Exec_Date varchar(max),
Pass int,
Ref_Status varchar(max)
)
-- NOTE(review): grouping by the raw datetime and by Pass defeats both the
-- CAST and the SUM - this should be GROUP BY CAST(Exec_Date AS DATE).
Insert into #TempTable
select cast(Exec_Date as DATE) as Date,SUM(Pass) as Pass,'PASS' from F_Exec where cast(Exec_Date as DATE) >= '2017-02-17' and cast(Exec_Date as DATE) <= '2017-02-20' group by Exec_Date,Pass
-- Accumulate the distinct dates into "[d1],[d2],..." form for a PIVOT IN list
DECLARE @Names varchar(max)
SELECT @Names = COALESCE(@Names + '],[', '[') + Exec_Date FROM #TempTable
select @Names=@Names+']'
-- This final statement is the asker's reported failure: PIVOT's IN list must
-- be a literal column list, not a variable; the dynamic-SQL version given
-- later in the thread is the working fix.
Select * from #TempTable pivot
(SUM(Pass) for Exec_Date IN (@Names) )as PivotTable
The variable is not being treated as a variable inside the PIVOT's IN list. If I manually substitute the variable's resulting value into the query, it works.

Try this code:
-- Self-contained demo: UNPIVOT the Pass/Fail columns into (Status, Success)
-- rows, then PIVOT the dd/mm dates back out as columns. DECLAREd tables must
-- be table variables, so the sigil is @ (the page showed it as #).
declare @tbl table (Exec_Date datetime, Plat varchar(max), Pass int, Fail int)
insert into @tbl(Exec_Date , Plat , Pass , Fail ) values
('2017-02-19 12:32:43.570', 'MSSQL',10,12),
('2017-02-19 12:32:43.570', 'MSSQL',10,12),
('2017-02-18 12:32:43.570', 'Sybase',10,12),
('2017-02-18 12:32:43.570', 'Oracle',10,12)
select * from
(
select Date,Status,sum(Success) success from
(
-- CONVERT style 103 gives dd/mm/yyyy; the first five chars ("18/02") become
-- the pivot column headers below.
select left(convert(varchar(20),Exec_Date,103),5) as [Date],Pass,Fail from @tbl
) as baseData
unpivot
(
Success for Status in (Pass,Fail)
) as tblUnPivot
group by date ,status
) as baseData
pivot
(
sum(success)
for Date in ([18/02],[19/02])
) as tblPivot
order by Status

I got the solution:
I declared an nvarchar variable and concatenated the query with the dynamic column names.
-- Working fix: PIVOT cannot take a variable in its IN list, so splice the
-- pre-built @Names column list into the statement text and execute it.
DECLARE @sqlstring nvarchar(MAX)
SET @sqlstring =N'
Select * from #TempTable pivot (SUM(Pass) for Exec_Date IN (' +@Names+') )as PivotTable;';
EXEC sp_executesql @sqlstring;

Related

MSSQL split string function performance terrible

I have two separate queries/methods and functions so that I can pass two strings in my stored procedures which basically look like this:
1234,12346,12690812,1259081 => UPCList parameter
1234,12346,12690812,1259081 => EANList parameter
Now the first method looks like following:
-- Looks up rows whose UPC or EAN appears in the respective comma-separated
-- list parameter. Parameter sigils restored to @ (page showed them as #).
create procedure [dbo].[zsp_selectallupceans_list]
(
@UPCList nvarchar(4000),
@EANList nvarchar(4000),
@Type tinyint
)
as
-- NOTE(review): AND binds tighter than OR, so @Type only constrains the EAN
-- branch here; wrap the two IN predicates in parentheses if @Type is meant
-- to apply to both.
select DISTINCT dd.UPC,dd.EAN,dd.EBAYID as ItemID
from ThirdPartyData as dd
where dd.UPC in (SELECT * FROM dbo.SplitString(@UPCList)) OR
dd.EAN in (SELECT * FROM dbo.SplitString(@EANList)) and dd.Type=@Type
Combined with splitstring function created in MSSQL:
-- Iterative comma-splitter: peels one leading token per loop iteration with
-- CHARINDEX/SUBSTRING and returns one row per token. Variable sigils restored
-- to @ (the page rendered them as #, which is reserved for temp tables).
create FUNCTION [dbo].[splitstring] ( @stringToSplit VARCHAR(MAX) )
RETURNS
@returnList TABLE ([Name] [nvarchar] (500))
AS
BEGIN
DECLARE @name NVARCHAR(255)
DECLARE @pos INT
-- Keep splitting while at least one comma remains
WHILE CHARINDEX(',', @stringToSplit) > 0
BEGIN
SELECT @pos = CHARINDEX(',', @stringToSplit)
SELECT @name = SUBSTRING(@stringToSplit, 1, @pos-1)
INSERT INTO @returnList
SELECT @name
-- Drop the consumed token (and its comma) from the front of the string
SELECT @stringToSplit = SUBSTRING(@stringToSplit, @pos+1, LEN(@stringToSplit)-@pos)
END
-- The remainder after the last comma is the final token
INSERT INTO @returnList
SELECT @stringToSplit
RETURN
END
And now the second method:
-- EXISTS-based variant of the UPC/EAN lookup, using the XML splitter.
-- Parameter sigils restored to @ (the page showed them as #).
create procedure [dbo].[zsp_selectallupceans_listProduction]
(
@UPCList nvarchar(4000),
@EANList nvarchar(4000),
@Type tinyint
)
as
-- NOTE(review): as in the first procedure, AND binds tighter than OR, so
-- @Type only constrains the EAN branch; parenthesise if it should apply to both.
SELECT dd.UPC,dd.EAN,dd.EBAYID as ItemID
from ThirdPartyData as dd
WHERE EXISTS (SELECT 1 FROM dbo.SplitStringProduction(@UPCList,',') S1 WHERE dd.UPC=S1.val)
OR EXISTS (SELECT 1 FROM dbo.SplitStringProduction(@EANList,',') S2 WHERE dd.EAN=S2.val) and dd.Type=@Type
And now the second method splitstringProduction:
-- XML-based splitter: wraps each token in <r> elements and shreds the XML
-- back into rows. Sigils restored to @ (the page rendered them as #).
create FUNCTION [dbo].[SplitStringProduction]
(
@string nvarchar(max),
@delimiter nvarchar(5)
) RETURNS @t TABLE
(
val nvarchar(500)
)
AS
BEGIN
declare @xml xml
-- NOTE(review): input containing XML-special characters (&, <, >) will make
-- this implicit CAST to xml fail; escape them first if such values can occur.
set @xml = N'<root><r>' + replace(@string,@delimiter,'</r><r>') + '</r></root>'
insert into @t(val)
select
r.value('.','varchar(500)') as item
from @xml.nodes('//root/r') as records(r)
RETURN
END
And now here comes the weird part... Benchmark results of both methods come up like this:
var findEans = ctx.zsp_selectallupceans_list(formedList.UPCList, formedList.EANList, 1).ToList();
//Execution time 4.8 seconds
var findEans = ctx.zsp_selectallupceans_listProduction(formedList.UPCList, formedList.EANList, 1).ToList();
//Execution time: 15 seconds
As far as my understanding goes, the second method should perform much faster than first... But this is not the case... Not only is the performance for the 2nd method worse, it is more than 3 times execution time than the first... And I cannot understand why ...
What is going on here guys? What would be the best method to reduce the execution time to <1 second?
Can someone help me out here please?

Passing two strings into a stored procedure and splitting them via specific character to perform a query

I have basic procedure which basically looks like following:
-- Question's skeleton: the "--myUPC list" / "--myEAN list" placeholders mark
-- where the split lists should go. Parameter sigils restored to @.
create procedure zsp_selectallupceans_list
(@UPCList nvarchar(4000),
@EANList nvarchar(4000))
as
select *
from data as dd
where dd.UPC in (--myUPC list) or dd.EAN in (--myEAN list)
This is the basic idea. Now I need to somehow split this string that I passed from my C# application and it would look like following for the UPC and EAN List:
where dd.UPC in ('123','456','567') or dd.EAN in('1234','5542','412')
The UPCList parameter that is passed from C# application looks like:
'123,456,567' and eanlist: '1234,5542,412'
I have found a method which looks like this:
-- Same iterative comma-splitter the asker found, with variable sigils
-- restored to T-SQL's @ (the page rendered them as #).
CREATE FUNCTION dbo.splitstring
(@stringToSplit VARCHAR(MAX))
RETURNS
@returnList TABLE ([Name] [NVARCHAR](500))
AS
BEGIN
DECLARE @name NVARCHAR(255)
DECLARE @pos INT
-- Peel one leading token per iteration until no comma remains
WHILE CHARINDEX(',', @stringToSplit) > 0
BEGIN
SELECT @pos = CHARINDEX(',', @stringToSplit)
SELECT @name = SUBSTRING(@stringToSplit, 1, @pos-1)
INSERT INTO @returnList
SELECT @name
SELECT @stringToSplit = SUBSTRING(@stringToSplit, @pos+1, LEN(@stringToSplit)-@pos)
END
-- Remainder after the last comma is the final token
INSERT INTO @returnList
SELECT @stringToSplit
RETURN
END
And the usage of this function is like following:
SELECT * FROM dbo.splitstring('91,12,65,78,56,789')
where the output is these numbers where they are split and output as a result.
Now I just need to somehow combine all this so that I can form a proper where statement based on passed parameter UPCList and EANList
Can someone help me out with this?
Updating your stored proc as below should do the trick:
-- Answer's version: feed each list parameter through the splitter and use
-- the result as the IN list. Parameter sigils restored to @.
create procedure zsp_selectallupceans_list
(
@UPCList nvarchar(4000),
@EANList nvarchar(4000)
)
as
select *
from data as dd
where dd.UPC in (SELECT * FROM dbo.SplitString(@UPCList)) OR
dd.EAN in (SELECT * FROM dbo.SplitString(@EANList))
You pretty much have the answer:
Compile and save the splitstring function and then your where clause will look like the following:
where dd.UPC in (Select Name From splitstring(--myUpcList)) or dd.EAN in (Select Name from splitstring(--myEanList)
Here is an XML based function for string splitting, this method is much faster than the SUBSTRING method you already found. It is also recommended to use EXISTS instead of IN for performance improvement also, see here for more information on this.
-- XML-based splitter (same technique as SplitStringProduction above), with
-- variable sigils restored to @.
CREATE FUNCTION [dbo].[SplitString]
(
@string nvarchar(max),
@delimiter nvarchar(5)
) RETURNS @t TABLE
(
val nvarchar(500)
)
AS
BEGIN
declare @xml xml
-- NOTE(review): fails on input containing XML-special characters (&, <, >)
set @xml = N'<root><r>' + replace(@string,@delimiter,'</r><r>') + '</r></root>'
insert into @t(val)
select
r.value('.','varchar(500)') as item
from @xml.nodes('//root/r') as records(r)
RETURN
END
To use:
-- Usage: EXISTS probes stop at the first match, so this is preferred over IN
-- for large lists. Parameter sigils restored to @.
SELECT *
FROM data t
WHERE EXISTS (SELECT 1 FROM dbo.SplitString(@UPCList,',') S1 WHERE t.UPC=S1.val)
OR EXISTS (SELECT 1 FROM dbo.SplitString(@EANList,',') S2 WHERE t.EAN=S2.val)

Given a sorted row of n numbers in SQL server, how do i find at what point the sum of the rows reach the value k?

Assume i got the below row of number and max quantity value is 10.
Quantity BatchValue
2 0
4 0
4 0
6 1
8 2
Summation of 2+4+4 gives a value less than or equal to the max quantity 10, and so the batch value for those rows becomes 0. The pending rows are 6 and 8. They cannot be summed up to be < max quantity, so they will be separate. Can we get an SQL query or an algorithm that can do this?
Here's a nice running sum routine you can use
-- Row-by-row running sum: copy the source into #temp with a dense IDENTITY
-- rowid, then iterate, accumulating into @running. Scalar/table-variable
-- sigils restored to @; #temp stays a real temp table (CREATE TABLE).
create table #temp (rowid int identity, quantity int)
insert #temp
select quantity from yourtable order by your order -- placeholder source table and ordering
declare @holding table (quantity int, runningsum int)
declare @quantity int
declare @running int=0
declare @iterator int = 1
while @iterator<=(select max(rowid) from #temp)
begin
select @quantity=quantity from #temp where rowid=@iterator
set @running=@quantity+@running
insert @holding
select @quantity, @running
set @iterator=@iterator+1
end
Edited code from Daniel Marcus above to give the actual response requested in query.
-- Assigns batch numbers: rows accumulate into a batch until the running sum
-- exceeds @maxquantity, then the batch counter increments and the running sum
-- restarts from the current row. Variable sigils restored to @; #temp stays
-- a real temp table.
CREATE TABLE #temp(rowid int identity(1,1), quantity int)
INSERT INTO #temp
SELECT 2
UNION ALL SELECT 4
UNION ALL SELECT 4
UNION ALL SELECT 6
UNION ALL SELECT 8
declare @batchValue int = 0 ,@maxquantity int = 10
declare @holding table (quantity int, batchvalue int)
declare @quantity int
declare @running int=0
declare @iterator int = 1
while @iterator<=(select max(rowid) from #temp)
begin
select @quantity=quantity from #temp where rowid=@iterator
set @running=@quantity+@running
-- Newly added condition
if (@running > @maxquantity) BEGIN
SET @batchValue = @batchValue + 1 -- increment the batch value
insert @holding select @quantity, @batchValue
SET @running = @quantity -- reset the running value
END
ELSE
insert @holding select @quantity, @batchValue
set @iterator=@iterator+1
end
SELECT * FROM @holding
DROP TABLE #temp
Hope the snippet works for your purpose. I tested this in SQL azure and provides the result you mentioned.
--The below query will help you if you working on sql server 2012 or higher
CREATE TABLE #RUN_TOT(ID INT)
INSERT INTO #RUN_TOT VALUES(2),(4),(4),(6),(8)
;WITH CTE AS
(
SELECT ID,
ROW_NUMBER() OVER(ORDER BY ID) RNUM,
CASE
WHEN SUM(ID) OVER(ORDER BY ID ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) <=
(SELECT MAX(ID) FROM #RUN_TOT) THEN 0
ELSE
ID
END VAL
FROM #RUN_TOT
)
SELECT ID,VAL FROM CTE WHERE VAL=0
UNION ALL
SELECT ID,ROW_NUMBER() OVER(ORDER BY VAL) VAL FROM CTE WHERE VAL<>0

SQL Query results WHERE ID IN (#ids) can't convert nvarchar() to int

I am having difficulty solving my problem of selecting records using WHERE IN (). I am working with C# in back end where I am sending a comma separated string of id values. I am looking to query all records that are in that string.
Here is my sample string:
"548,549,550,551,712,713"
Here is my SQL Stored Procedure
-- Fragment of the failing procedure: @ids arrives as one comma-separated string.
@ids nvarchar(200)
-- This fails because IN treats @ids as a single scalar value, so SQL Server
-- tries to convert the whole '548,549,...' string to int - hence the error.
SELECT * FROM Users
WHERE USers.ID IN (@ids)
But I get an error:
Conversion failed when converting the nvarchar value '548, 549, 550,
551, 712, 713' to data type int.
Thank you
SQL Server 2016 and later versions
-- SQL Server 2016+: STRING_SPLIT turns the CSV parameter into a rowset
-- (sigil restored to @).
Declare @ids nvarchar(200) = '123,456';
Select *
From Users
Where ID IN ( Select Value from STRING_SPLIT ( @ids , ',' ))
For older versions SQL Server 2005 - 2014
-- Pre-2016 fallback: wrap each CSV token in <X> elements and shred the XML
-- with CROSS APPLY nodes(). Sigil restored to @.
Declare @ids nvarchar(200) = '123,456'; --<-- Comma delimited list of Client Ids
Select *
From Users
Where ID IN (
SELECT CAST(RTRIM(LTRIM(Split.a.value('.', 'VARCHAR(100)'))) AS INT) IDs
FROM (
SELECT Cast ('<X>'
+ Replace(@ids, ',', '</X><X>')
+ '</X>' AS XML) AS Data
) AS t CROSS APPLY Data.nodes ('/X') AS Split(a)
)
I have found that a table-valued function that unpacks a comma-separated string of numbers into a table to be very useful:
-- =============================================
-- Description: Generates a table of values generated from
-- a given csv list of integer values. Each value
-- in the list is separated by commas. The output
-- table of values has one integer per row. Any non-
-- numeric entries are discarded.
-- =============================================
-- =============================================
-- Description: Generates a table of values generated from
-- a given csv list of integer values. Each value
-- in the list is separated by commas. The output
-- table of values has one integer per row. Any non-
-- numeric entries are discarded.
-- (Variable sigils restored to T-SQL's @; the page rendered them as #.)
-- =============================================
CREATE FUNCTION [dbo].[fnCSVIntListToTable]
(
@CSVList nvarchar(MAX)
)
RETURNS @Result TABLE(Value int)
AS
BEGIN
-- Ensure the parameter is not null
SET @CSVList = ISNULL(@CSVList, '');
-- Create XML string from the CSV list
DECLARE @x XML
SELECT @x = CAST('<A>'+ REPLACE(@CSVList, ',' , '</A><A>')+ '</A>' AS XML);
-- Unpack the XML string into the table
INSERT INTO @Result
SELECT t.value('.', 'int') AS inVal
FROM @x.nodes('/A') AS x(t)
-- Discard any non-numeric entries
WHERE ISNUMERIC(t.value('.', 'varchar(20)'))=1
RETURN
END
Then in your procedure code you can use the function directly in a join:
-- Usage: join the table-valued function directly, like any other rowset
-- (sigil restored to @; the ":" lines stand for elided procedure body).
Declare @ids nvarchar(200) = '123,456,789';
:
:
Select *
From Users UU
JOIN dbo.fnCSVIntListToTable(@ids) IDS
ON IDS.Value = UU.ID;

Return row_number() of affected rows after an UPDATE statement

In my C# application I'm executing multiple update queries to manipulate data in the database table. E.g. replace a specific character set into a different character set, insert new characters and remove characters. When a query like this has executed I want to do two things. Get the total rowcount of the affected rows and get a row_number() result set of the affected rows. The first thing is quite simple and is working already. The second thing however is something I haven't been able to figure out yet.
Here is an example of a query that I might use when I'm manipulating data:
-- Replaces charToReplace with charReplacement inside the character range
-- [fromCharPos, toCharPos] of myColumn, for every row long enough to reach
-- that range. NOTE(review): fromCharPos/toCharPos/charToReplace/
-- charReplacement look like placeholders substituted by the C# caller, not
-- declared T-SQL variables - confirm against the calling code.
UPDATE myTable
SET myColumn = STUFF(myColumn, fromCharPos, toCharPos,
REPLACE(SUBSTRING(myColumn, fromCharPos, toCharPos), charToReplace, charReplacement))
WHERE LEN(myColumn) >= fromCharPos;
This query replaces (on all the cells of a column) a character set with another character set within a specified character range.
When this query has executed I want to get a result set of row numbers from the affected rows. Anyone know how I'm able to implement this?
Some things to consider:
It has to work on at least SQL Server version 2005 and up.
The UPDATE statements are executed within a transaction
If anything is unclear, please comment below so I'm able to improve my question.
Edit:
I noticed that it is not quite clear what I want to achieve.
Lets say we have a set of data that looks like this:
34.56.44.12.33
32.44.68
45.22.66.33.77
44.42.44
66.44.22.44.45
00.22.78
43.98.34.65.33
Now I want to replace the dots with an underscore between character position 9 to 12. That means that only these rows will be affected by the query:
34.56.44.12.33 <--
32.44.68
45.22.66.33.77 <--
44.42.44
66.44.22.44.45 <--
00.22.78
43.98.34.65.33 <--
The thing I want to achieve is to get a row number result set of the affected rows. In my example that will be a result set like this:
Row_number()
1
3
5
7
This may help you..
-- Snapshot row numbers before the UPDATE, capture the changed rows via the
-- OUTPUT clause, then join the two to report which row numbers were updated.
-- DECLAREd tables restored to table variables (@temp/@temp1); the CREATEd
-- #updatetablename stays a real temp table.
CREATE TABLE #updatetablename
(excolumn VARCHAR(100))
INSERT INTO #updatetablename
VALUES ('34.56.44.12.33'),
('32.44.68'),
('45.22.66.33.77'),
('44.42.44'),
('66.44.22.44.45'),
('00.22.78'),
('43.98.34.65.33')
DECLARE @temp TABLE
(excolumn VARCHAR(100))
DECLARE @temp1 TABLE
(row_num INT,excolumn VARCHAR(100))
-- Pre-update snapshot with row numbers
INSERT INTO @temp1
SELECT Row_number()OVER (ORDER BY excolumn),*
FROM #updatetablename
-- OUTPUT deleted.* captures each affected row's pre-update value
UPDATE #updatetablename
SET excolumn = Replace(excolumn, '.', '_')
output deleted.excolumn
INTO @temp
WHERE Len(excolumn) > 12
SELECT b.row_num AS updatedrows,
a.excolumn
FROM @temp a
JOIN @temp1 b
ON a.excolumn = b.excolumn
Updated
-- Snapshot the table with row numbers, run the UPDATE, then report the rows
-- whose value no longer matches the snapshot. Sigils restored to @
-- (including @@ROWCOUNT, which the page rendered as ##ROWCOUNT).
declare @table table(val varchar(500))
insert into @table values
('34.56.44.12.33'),
('32.44.68'),
('45.22.66.33.77'),
('44.42.44'),
('66.44.22.44.45'),
('00.22.78'),
('43.98.34.65.33')
--select * from @table
declare @temp table(rowid int,val varchar(500), createdate datetime)
insert into @temp
select ROW_NUMBER () over(order by val), val, GETDATE() from @table
declare @rowEffectedCount int = 0
--select ROW_NUMBER () over(order by val), val, GETDATE() from @table WHERE CHARINDEX( '.',val,9 ) > 0
-- NOTE(review): this REPLACE targets ',' but the data contains '.', and
-- assigning the SUBSTRING result back to val drops the leading characters -
-- confirm against the intended dot-to-underscore transformation.
UPDATE @table
SET val =
REPLACE(SUBSTRING(val, CHARINDEX( '.',val,9 ), LEN(val)), ',', '_' )
WHERE CHARINDEX( '.',val,9 ) > 0
set @rowEffectedCount = @@ROWCOUNT
select @rowEffectedCount roweffected ,* from @temp t1
where val not in (
select val from @table )
Old one
Its quite simple as my understanding.
You just add a one select query of your update query. read the comment for more understand
-- Take a snapshot of the rows the UPDATE will touch (same WHERE clause),
-- perform the UPDATE, then read the snapshot back. Sigils restored to @.
declare @rowEffectedCount int = 0
--you can use a temp table or permanet history table to take each work of portion.
--one thing to be carefull as structure is same or only save the pk , then no issue
--create table tempRowcount and insert the statement with the same where query, which filter the same data.
declare @t table(id int, createdate datetime)
-- NOTE(review): the original used SELECT ... INTO, which cannot target a
-- table variable; with @t declared above, the snapshot must be INSERT...SELECT.
insert into @t select id, getdate() from myTable WHERE LEN(myColumn) >= fromCharPos
--or only pk
--insert into @t(id) select id from myTable WHERE LEN(myColumn) >= fromCharPos
--now update the
UPDATE myTable
SET myColumn = STUFF(myColumn, fromCharPos, toCharPos,
REPLACE(SUBSTRING(myColumn, fromCharPos, toCharPos), charToReplace, charReplacement))
WHERE LEN(myColumn) >= fromCharPos;
select * from @t
--finally delete or truncate or drop the table(if you use permanent table)
--finally delete or truncate or drop the table(if you use permanent table)

Categories