I have this small piece of code, and I want to always return both the Today and Last Week counts, even when there are no matching values.
DateRange dateRangeLastWeek = DateRange.LastWeek(DateTime.Now);
var caseCounts = db.Find(x => x.FinishDate.HasValue)
    .Where(c => c.FinishDate.Value.Date == dateRangeLastWeek.Today.Date ||
                (c.FinishDate.Value.Date >= dateRangeLastWeek.Start && c.FinishDate.Value.Date <= dateRangeLastWeek.End))
    .GroupBy(c => c.FinishDate.Value.Date == dateRangeLastWeek.Today.Date ? "Today" : "Last Week")
    .Select(g => new JsonChartModel
    {
        Name = g.Key,
        Value = g.Count()
    });
In this case I want to return:
Today: 10
Last Week: 0
It's not really elegant, but you could use a Union with default values and select the max between the default and the returned value.
var defaultValues = new List<JsonChartModel> {
    new JsonChartModel { Name = "Today", Value = 0 },
    new JsonChartModel { Name = "Last Week", Value = 0 }
};
var result = <YourQuery>.ToList().Union(defaultValues)
    .GroupBy(m => m.Name)
    .Select(g => new JsonChartModel
    {
        Name = g.Key,
        Value = g.Max(x => x.Value)
    });
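For example, plugging in the caseCounts query from your question, it might look like the following (a sketch; since JsonChartModel is a reference type, Union here effectively concatenates the two lists):
var result = caseCounts.ToList()          // materialize the grouped counts from the database
    .Union(defaultValues)                 // add the zero-valued defaults for "Today" and "Last Week"
    .GroupBy(m => m.Name)
    .Select(g => new JsonChartModel
    {
        Name = g.Key,
        Value = g.Max(x => x.Value)       // the real count wins over the 0 default
    })
    .ToList();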
I want to generate a query using EF Core 3.1 equivalent to this one:
SELECT g.Date, COUNT(*) countIntervals
FROM (
    SELECT
        TODATETIMEOFFSET(DATETIME2FROMPARTS(DATEPART(year, myTimestamp), 1, 1, 0, 0, 0, 0, 0), '+00:00') Date,
        DATEPART(month, myTimestamp) - (DATEPART(month, myTimestamp) % 3) interval
    FROM DatesTable
    GROUP BY
        DATEPART(year, myTimestamp),
        DATEPART(month, myTimestamp) - (DATEPART(month, myTimestamp) % 3),
        UserId
) AS g
GROUP BY g.Date
Here is my use case: "I want to count all intervals in each year."
I tried this in C#, but I get the error System.InvalidOperationException: The LINQ expression could not be translated:
var query = _context.DatesTable
    .GroupBy(m => new
    {
        Year = m.Timestamp.Year,
        interval = m.Timestamp.Month - m.Timestamp.Month % 3,
        UserId = m.UserId
    })
    .Select(g => new
    {
        Date = new DateTimeOffset(g.Key.Year, 1, 1, 0, 0, 0, TimeSpan.Zero),
        interval = g.Key.interval
    })
    .GroupBy(x => new
    {
        Date = x.Date
    })
    .Select(g => new
    {
        Date = g.Key.Date,
        CountIntervals = g.Count()
    });
query.ToList();
I already tried loading the data into memory using AsEnumerable(). That works, but it's not efficient:
var query = _context.DatesTable
    .GroupBy(m => new
    {
        Year = m.Timestamp.Year,
        interval = m.Timestamp.Month - m.Timestamp.Month % 3,
        UserId = m.UserId
    })
    .Select(g => new
    {
        Date = new DateTimeOffset(g.Key.Year, 1, 1, 0, 0, 0, TimeSpan.Zero),
        interval = g.Key.interval
    })
    .AsEnumerable();
var result = query
    .GroupBy(x => new
    {
        Date = x.Date
    })
    .Select(g => new
    {
        Date = g.Key.Date,
        CountIntervals = g.Count()
    });
result.ToList();
Is there an efficient solution for this query?
Try the following query:
var query = _context.DatesTable
    .GroupBy(m => new
    {
        Year = m.Timestamp.Year,
        interval = m.Timestamp.Month - m.Timestamp.Month % 3,
        UserId = m.UserId
    })
    .Select(g => new
    {
        Year = g.Key.Year,
        interval = g.Key.interval
    })
    .GroupBy(x => new
    {
        Year = x.Year
    })
    .Select(g => new
    {
        Year = g.Key.Year,
        CountIntervals = g.Count()
    });
var result = query.ToList();
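If you still need the Date as a DateTimeOffset like in your SQL, you could build it on the client after the query has run, a sketch assuming the same '+00:00' offset as your original query:
// client-side projection; the DateTimeOffset constructor is not translated to SQL,
// so it runs over the already-materialized result list
var withDates = result
    .Select(x => new
    {
        Date = new DateTimeOffset(x.Year, 1, 1, 0, 0, 0, TimeSpan.Zero),
        x.CountIntervals
    })
    .ToList();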
A pretty direct translation of your query goes like this:
var query = _context.DatesTable
    .GroupBy(m => new
    {
        Year = m.Timestamp.Year,
        interval = m.Timestamp.Month - m.Timestamp.Month % 3,
        UserId = m.UserId
    })
    .GroupBy(x => x.Key.Year)
    .Select(g => new
    {
        Year = g.Key,
        CountIntervals = g.Count()
    });
var result = await query.ToListAsync();
I tried to divide the LINQ query into 3 (Total, Success, Fail), and so far the "Total" LINQ query is working fine. Please help me get the "Success" and "Fail" columns (there are multiple statuses, and we have to check the status of the last record for each transaction and destination).
Note: you need to group by ProcessTime, TransactionId, Destination and check whether the last record is Success or Fail, then apply the count (we are using Oracle as the backend).
LINQ for the Total count:
var query = (from filetrans in context.FILE_TRANSACTION
             join route in context.FILE_ROUTE on filetrans.FILE_TRANID equals route.FILE_TRANID
             where filetrans.PROCESS_STRT_TIME >= fromDateFilter && filetrans.PROCESS_STRT_TIME <= toDateFilter
             select new
             {
                 PROCESS_STRT_TIME = DbFunctions.TruncateTime((DateTime)filetrans.PROCESS_STRT_TIME),
                 filetrans.FILE_TRANID,
                 route.DESTINATION
             })
            .GroupBy(p => new { p.PROCESS_STRT_TIME, p.FILE_TRANID, p.DESTINATION });
var result = query
    .GroupBy(x => x.Key.PROCESS_STRT_TIME)
    .Select(x => new { x.Key, Count = x.Count() })
    .ToDictionary(a => a.Key, a => a.Count);
Check this solution. If it gives the wrong result, then I need more details.
var fileTransQuery =
    from filetrans in context.AFRS_FILE_TRANSACTION
    where accountIds.Contains(filetrans.ACNT_ID) &&
          filetrans.PROCESS_STRT_TIME >= fromDateFilter && filetrans.PROCESS_STRT_TIME <= toDateFilter
    select filetrans;

var routesQuery =
    from filetrans in fileTransQuery
    join route in context.AFRS_FILE_ROUTE on filetrans.FILE_TRANID equals route.FILE_TRANID
    select route;

// for each (FILE_TRANID, DESTINATION) pair, pick the route with the highest ROUTE_ID, i.e. the last record
var lastRouteQuery =
    from d in routesQuery.GroupBy(route => new { route.FILE_TRANID, route.DESTINATION })
        .Select(g => new
        {
            g.Key.FILE_TRANID,
            g.Key.DESTINATION,
            ROUTE_ID = g.Max(x => x.ROUTE_ID)
        })
    from route in routesQuery
        .Where(route => d.FILE_TRANID == route.FILE_TRANID && d.DESTINATION == route.DESTINATION && d.ROUTE_ID == route.ROUTE_ID)
    select route;

var recordsQuery =
    from filetrans in fileTransQuery
    join route in lastRouteQuery on filetrans.FILE_TRANID equals route.FILE_TRANID
    select new { filetrans.PROCESS_STRT_TIME, route.CRNT_ROUTE_FILE_STATUS_ID };

var result = recordsQuery
    .GroupBy(p => DbFunctions.TruncateTime((DateTime)p.PROCESS_STRT_TIME))
    .Select(g => new TrendData
    {
        TotalCount = g.Sum(x => x.CRNT_ROUTE_FILE_STATUS_ID != 7 && x.CRNT_ROUTE_FILE_STATUS_ID != 8 ? 1 : 0),
        SuccessCount = g.Sum(x => x.CRNT_ROUTE_FILE_STATUS_ID == 7 ? 1 : 0),
        FailCount = g.Sum(x => failureStatus.Contains(x.CRNT_ROUTE_FILE_STATUS_ID) ? 1 : 0),
        Date = g.Min(x => x.PROCESS_STRT_TIME)
    })
    .OrderBy(x => x.Date)
    .ToList();
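For reference, this is the shape the TrendData DTO might have based on the properties assigned above (an assumption, not your actual class):
// Hypothetical DTO; adjust types to match your model (PROCESS_STRT_TIME appears to be a nullable DateTime).
public class TrendData
{
    public DateTime? Date { get; set; }
    public int TotalCount { get; set; }
    public int SuccessCount { get; set; }
    public int FailCount { get; set; }
}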
I have a problem when grouping weeks from an input month; the results I get are always like this:
{name: "Pembunuhan", data: [1,4]}
but the result I want is like this:
{name: "Pembunuhan", data: [1,0,0,4]}
This is my code:
var dateNya = DateTime.Today;
var bln = int.Parse(month);
var mstrKategori = context.master_kategori.OrderBy("id ASC").ToList();
var joinnya = (from ls in context.list_dokumen
               join ktgr in context.master_kategori on ls.kategori equals ktgr.id
               where ls.polda_id != null
               select new
               {
                   tgl_laporan = ls.tgl_laporan,
                   idKategori = ktgr.id,
                   week = ls.week,
                   month = ls.month,
                   year = ls.year
               }).ToArray();
foreach (var itemktgr in mstrKategori)
{
    var tes2 = joinnya.Where(i => i.idKategori == itemktgr.id)
        .Where(a => a.month == bln)
        .Where(o => o.year == dateNya.Year)
        .GroupBy(row => new { week = row.week ?? 0 })
        .Select(g => new
        {
            week = g.Key.week,
            count = g.Count()
        })
        .ToList();
    tes2.ForEach(p => lineChartList.Add(new DataChart { name = itemktgr.nama2, data = p.count }));
}
var result = lineChartList.GroupBy(x => new { x.name })
    .Select(b => new DataChartTrending2
    {
        data = b.Select(bn => bn.data).ToList(),
        name = (b.Key.name == null) ? "Lainnya" : b.Key.name
    }).ToList();
The GroupBy clause won't create empty groups for weeks that have no matching records.
Use GroupJoin to perform an outer join on week indices, meaning that you will get a group for each week index, even indices that no record in tes2 matched:
var weekIds = Enumerable.Range(0, 4); // assuming your weeks are 0, 1, 2, 3

var tes2 = joinnya
    .Where(i => i.idKategori == itemktgr.id)
    .Where(a => a.month == bln)
    .Where(o => o.year == dateNya.Year);

var countPerWeek = weekIds.GroupJoin(
    tes2,
    weekId => weekId,
    row => row.week,
    (week, weekGroup) => weekGroup.Count()
);
For each week, it will get you the number of matching records, including zeroes for weeks that don't have a matching record.
Alternative syntax:
var countPerWeek =
    from weekId in weekIds
    join row in tes2 on weekId equals row.week into weekGroup
    select weekGroup.Count();
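You can then feed the per-week counts straight into your chart list inside the category loop, a sketch using the DataChart shape from your code:
// one DataChart entry per week, including the zero weeks
foreach (var count in countPerWeek)
{
    lineChartList.Add(new DataChart { name = itemktgr.nama2, data = count });
}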
I am working on a LINQ query which includes some pivot data as below
var q = data.GroupBy(x => new
    {
        x.Med.Name,
        x.Med.GenericName,
    })
    .ToList()
    .Select(g => new SummaryDto
    {
        Name = g.Key.Name,
        GenericName = g.Key.GenericName,
        Data2012 = g.Where(z => z.ProcessDate.Year == 2012).Count(),
        Data2013 = g.Where(z => z.ProcessDate.Year == 2013).Count(),
        Data2014 = g.Where(z => z.ProcessDate.Year == 2014).Count(),
        Data2015 = g.Where(z => z.ProcessDate.Year == 2015).Count(),
        Data2016 = g.Where(z => z.ProcessDate.Year == 2016).Count(),
        Data2017 = g.Where(z => z.ProcessDate.Year == 2017).Count(),
        TotalCount = g.Count(),
    }).AsQueryable();
return q;
The above LINQ takes too long because it enumerates each group six times, once per year count. If there are 10,000 records, that is roughly 60,000 iterations.
Is there a better way to make this faster?
Add year to the group key, then group again, and harvest per-group counts:
return data.GroupBy(x => new {
        x.Med.Name
        , x.Med.GenericName
        , x.ProcessDate.Year
    }).Select(g => new {
        g.Key.Name
        , g.Key.GenericName
        , g.Key.Year
        , Count = g.Count()
    }).GroupBy(g => new {
        g.Name
        , g.GenericName
    }).Select(g => new SummaryDto {
        Name = g.Key.Name
        , GenericName = g.Key.GenericName
        , Data2012 = g.SingleOrDefault(x => x.Year == 2012)?.Count ?? 0
        , Data2013 = g.SingleOrDefault(x => x.Year == 2013)?.Count ?? 0
        , Data2014 = g.SingleOrDefault(x => x.Year == 2014)?.Count ?? 0
        , Data2015 = g.SingleOrDefault(x => x.Year == 2015)?.Count ?? 0
        , Data2016 = g.SingleOrDefault(x => x.Year == 2016)?.Count ?? 0
        , Data2017 = g.SingleOrDefault(x => x.Year == 2017)?.Count ?? 0
        , TotalCount = g.Sum(x => x.Count)
    }).AsQueryable();
Note: This approach is problematic, because year is hard-coded in the SummaryDto class. You would be better off passing your DTO constructor an IDictionary<int,int> with counts for each year. If you make this change, the final Select(...) would look like this:
.Select(g => new SummaryDto {
    Name = g.Key.Name
    , GenericName = g.Key.GenericName
    , TotalCount = g.Sum(x => x.Count)
    , DataByYear = g.ToDictionary(i => i.Year, i => i.Count)
}).AsQueryable();
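A sketch of what the adjusted DTO might look like (the DataByYear property is an assumption, not part of your existing SummaryDto):
public class SummaryDto
{
    public string Name { get; set; }
    public string GenericName { get; set; }
    public int TotalCount { get; set; }
    // counts keyed by year, e.g. DataByYear[2012]; replaces the hard-coded Data2012..Data2017 properties
    public IDictionary<int, int> DataByYear { get; set; }
}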
I suggest grouping by year inside each group and then converting to a dictionary to access the counts. Whether it is faster to group by year first and then count in memory depends on the distribution of the initial grouping; against the database it may also depend on how efficiently it can group by year, so I would test to determine which is fastest.
In any case, grouping by year after the initial grouping is about 33% faster than your query in memory, but again this depends heavily on the distribution. As the number of initial groups increases, the group-by-year queries slow down to match the original query. Note that the original query without any year counts takes about 1/3 of the time.
Here is grouping after the database grouping:
var q = data.GroupBy(x => new {
        x.Med.Name,
        x.Med.GenericName,
    }).ToList().Select(g => {
        var gg = g.GroupBy(d => d.ProcessDate.Year).ToDictionary(d => d.Key, d => d.Count());
        return new SummaryDto {
            Name = g.Key.Name,
            GenericName = g.Key.GenericName,
            Data2012 = gg.GetValueOrDefault(2012),
            Data2013 = gg.GetValueOrDefault(2013),
            Data2014 = gg.GetValueOrDefault(2014),
            Data2015 = gg.GetValueOrDefault(2015),
            Data2016 = gg.GetValueOrDefault(2016),
            Data2017 = gg.GetValueOrDefault(2017),
            TotalCount = g.Count(),
        };
    }).AsQueryable();
I have an IList<string> which holds some string values, and there could be duplicate items in the list. What I want is to append an index number to the end of each string to eliminate the duplication.
For example, I have these values in my list: StringA, StringB, StringC, StringA, StringA, StringB, and I want the result to look like: StringA1, StringB1, StringC, StringA2, StringA3, StringB2. I need to retain the original order of the list.
Is there a way I can do this with just one lambda expression?
You are looking for something like this:
yourList.GroupBy(x => x)
    .SelectMany(g => g.Select((x, idx) => g.Count() == 1 ? x : x + (idx + 1)))
    .ToList();
Edit: If the element order matters, here is another solution:
var counts = yourList.GroupBy(x => x).ToDictionary(x => x.Key, x => x.Count());
var values = counts.ToDictionary(x => x.Key, x => 0);
var list = yourList.Select(x => counts[x] > 1 ? x + ++values[x] : x).ToList();
You can do:
List<string> list = new List<string> { "StringA", "StringB", "StringC", "StringA", "StringA", "StringB" };
var newList =
    list.Select((r, i) => new { Value = r, Index = i })
        .GroupBy(r => r.Value)
        .Select(grp => grp.Count() > 1
            ? grp.Select((subItem, i) => new
              {
                  Value = subItem.Value + (i + 1),
                  OriginalIndex = subItem.Index
              })
            : grp.Select(subItem => new
              {
                  Value = subItem.Value,
                  OriginalIndex = subItem.Index
              }))
        .SelectMany(r => r)
        .OrderBy(r => r.OriginalIndex)
        .Select(r => r.Value)
        .ToList();
and you will get:
StringA1,StringB1,StringC,StringA2,StringA3,StringB2
If you don't want to preserve order then you can do:
var newList = list.GroupBy(r => r)
    .Select(grp => grp.Count() > 1
        ? grp.Select((subItem, i) => subItem + (i + 1))
        : grp.Select(subItem => subItem))
    .SelectMany(r => r)
    .ToList();
This uses some lambda expressions and LINQ to do it while maintaining the order, but I'd suggest that a function with a foreach loop and yield return would be better; see the sketch after this snippet.
var result = list.Aggregate(
    new List<(string Key, int Value)>(),
    (cache, s) =>
    {
        // find the most recent entry with the same key; value tuples are immutable,
        // so track the index and replace the element in place
        var lastIndex = cache.FindLastIndex(p => p.Key == s);
        if (lastIndex < 0)
        {
            cache.Add((s, 0));
        }
        else
        {
            var last = cache[lastIndex];
            if (last.Value == 0)
            {
                // retroactively give the first occurrence the suffix 1
                cache[lastIndex] = (s, 1);
                last = cache[lastIndex];
            }
            cache.Add((s, last.Value + 1));
        }
        return cache;
    },
    cache => cache.Select(p => p.Value == 0
        ? p.Key
        : p.Key + p.Value.ToString()));
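As mentioned, here is a minimal sketch of the foreach/yield return alternative (the method name AppendIndexes is hypothetical):
// Hypothetical helper: pre-counts occurrences so unique strings keep their name,
// then numbers each duplicate in encounter order.
static IEnumerable<string> AppendIndexes(IEnumerable<string> source)
{
    var items = source.ToList();
    var totals = items.GroupBy(s => s).ToDictionary(g => g.Key, g => g.Count());
    var seen = new Dictionary<string, int>();

    foreach (var s in items)
    {
        seen[s] = seen.TryGetValue(s, out var n) ? n + 1 : 1;
        yield return totals[s] > 1 ? s + seen[s] : s;
    }
}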