c# how to convert bytes into gigabytes [duplicate] - c#
Just wondering if .NET provides a clean way to do this:
int64 x = 1000000;
string y = null;
if (x / 1024 == 0) {
y = x + " bytes";
}
else if (x / (1024 * 1024) == 0) {
y = string.Format("{0:n1} KB", x / 1024f);
}
etc...
Here is a fairly concise way to do this:
static readonly string[] SizeSuffixes =
    { "bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB" };

/// <summary>
/// Formats a byte count as a human-readable string using binary (1024-based) units,
/// e.g. 1024 -> "1.0 KB".
/// </summary>
/// <param name="value">Size in bytes; negative values are formatted with a leading '-'.</param>
/// <param name="decimalPlaces">Number of decimals in the scaled value (must be >= 0).</param>
static string SizeSuffix(Int64 value, int decimalPlaces = 1)
{
    if (decimalPlaces < 0) { throw new ArgumentOutOfRangeException(nameof(decimalPlaces)); }
    if (value < 0) { return "-" + SizeSuffix(-value, decimalPlaces); }
    if (value == 0) { return string.Format("{0:n" + decimalPlaces + "} bytes", 0); }

    // Order of magnitude: 0 => bytes, 1 => KB, 2 => MB, ...
    int order = (int)Math.Log(value, 1024);

    // 1L << (order * 10) == 2^(10 * order): the byte count of one unit at this order.
    decimal scaled = (decimal)value / (1L << (order * 10));

    // Without this, a value like 1023995 would render as "1,000.0 KB";
    // promote it to the next unit instead.
    if (Math.Round(scaled, decimalPlaces) >= 1000)
    {
        order += 1;
        scaled /= 1024;
    }

    return string.Format("{0:n" + decimalPlaces + "} {1}", scaled, SizeSuffixes[order]);
}
And here's the original implementation I suggested, which may be marginally slower, but a bit easier to follow:
static readonly string[] SizeSuffixes =
    { "bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB" };

/// <summary>
/// Human-readable byte count: repeatedly divides by 1024 until the rounded
/// value drops below 1000, then formats it with the matching suffix.
/// </summary>
static string SizeSuffix(Int64 value, int decimalPlaces = 1)
{
    if (value < 0) { return "-" + SizeSuffix(-value, decimalPlaces); }

    decimal scaled = value;
    int suffixIndex = 0;
    // Promote to the next unit while the displayed (rounded) number would still show 4+ digits.
    for (; Math.Round(scaled, decimalPlaces) >= 1000; suffixIndex++)
    {
        scaled /= 1024;
    }

    return string.Format("{0:n" + decimalPlaces + "} {1}", scaled, SizeSuffixes[suffixIndex]);
}
Console.WriteLine(SizeSuffix(100005000L));
One thing to bear in mind - in SI notation, "kilo" usually uses a lowercase k while all of the larger units use a capital letter. Windows uses KB, MB, GB, so I have used KB above, but you may consider kB instead.
Check out the ByteSize library. It's the System.TimeSpan for bytes!
It handles the conversion and formatting for you.
var maxFileSize = ByteSize.FromKiloBytes(10);
maxFileSize.Bytes;
maxFileSize.MegaBytes;
maxFileSize.GigaBytes;
It also does string representation and parsing.
// ToString
ByteSize.FromKiloBytes(1024).ToString(); // 1 MB
ByteSize.FromGigabytes(.5).ToString(); // 512 MB
ByteSize.FromGigabytes(1024).ToString(); // 1 TB
// Parsing
ByteSize.Parse("5b");
ByteSize.Parse("1.55B");
I would solve it using Extension methods, Math.Pow function and Enums:
public static class MyExtension
{
    /// <summary>Binary size units; the numeric value is the power of 1024 to divide by.</summary>
    public enum SizeUnits
    {
        Byte, KB, MB, GB, TB, PB, EB, ZB, YB
    }

    /// <summary>
    /// Scales a raw byte count to the requested unit and formats it with two decimals.
    /// </summary>
    /// <param name="value">Size in bytes.</param>
    /// <param name="unit">Target unit; its enum value is the power of 1024 used as divisor.</param>
    /// <returns>The scaled value formatted as "0.00" (current culture).</returns>
    public static string ToSize(this Int64 value, SizeUnits unit)
    {
        // Math.Pow already returns double; the original's extra (double) cast was redundant.
        return (value / Math.Pow(1024, (int)unit)).ToString("0.00");
    }
}
and use it like:
string h = x.ToSize(MyExtension.SizeUnits.KB);
Since everyone else is posting their methods, I figured I'd post the extension method I usually use for this:
EDIT: added int/long variants...and fixed a copypasta typo...
public static class Ext
{
    private const long OneKb = 1024;
    private const long OneMb = OneKb * 1024;
    private const long OneGb = OneMb * 1024;
    private const long OneTb = OneGb * 1024;

    /// <summary>Formats an int byte count (delegates to the long overload).</summary>
    public static string ToPrettySize(this int value, int decimalPlaces = 0)
    {
        return ((long)value).ToPrettySize(decimalPlaces);
    }

    /// <summary>
    /// Formats a byte count with the largest binary unit it reaches, e.g. 1024 -> "1Kb".
    /// </summary>
    public static string ToPrettySize(this long value, int decimalPlaces = 0)
    {
        var asTb = Math.Round((double)value / OneTb, decimalPlaces);
        var asGb = Math.Round((double)value / OneGb, decimalPlaces);
        var asMb = Math.Round((double)value / OneMb, decimalPlaces);
        var asKb = Math.Round((double)value / OneKb, decimalPlaces);

        // BUG FIX: the original compared with '>' so exact unit multiples (e.g. 1024 bytes,
        // 1 MB) fell through to the next-smaller unit ("1024B" instead of "1Kb").
        string chosenValue = asTb >= 1 ? string.Format("{0}Tb", asTb)
            : asGb >= 1 ? string.Format("{0}Gb", asGb)
            : asMb >= 1 ? string.Format("{0}Mb", asMb)
            : asKb >= 1 ? string.Format("{0}Kb", asKb)
            : string.Format("{0}B", Math.Round((double)value, decimalPlaces));
        return chosenValue;
    }
}
I know this is an old thread already, but maybe someone is still looking for a solution.
Here's what I use, and the easiest way:
/// <summary>
/// Renders a byte count such as "2.00 KB"; values below 1 KB appear as "N B".
/// </summary>
public static string FormatFileSize(long bytes)
{
    const int unit = 1024;
    if (bytes < unit)
    {
        return $"{bytes} B";
    }

    // Largest power of 1024 that fits into the value.
    var magnitude = (int)(Math.Log(bytes) / Math.Log(unit));

    // "KMGTPE" holds the unit letters for magnitudes 1..6.
    return $"{bytes / Math.Pow(unit, magnitude):F2} {("KMGTPE")[magnitude - 1]}B";
}
Get folder size (for example usage)
/// <summary>
/// Total size in bytes of all files under <paramref name="path"/> whose names end with
/// <paramref name="ext"/>; includes subdirectories when <paramref name="AllDir"/> is true.
/// </summary>
public static long GetFolderSize(string path, string ext, bool AllDir)
{
    var searchDepth = AllDir
        ? SearchOption.AllDirectories
        : SearchOption.TopDirectoryOnly;

    return new DirectoryInfo(path)
        .EnumerateFiles("*" + ext, searchDepth)
        .Sum(file => file.Length);
}
EXAMPLE USAGE:
/// <summary>Example usage: prints the combined size of all .mp4 files in a folder.</summary>
public static void TEST()
{
    // BUG FIX: the original used #"..." which is not valid C#; a verbatim string
    // literal needs the @ prefix.
    string folder = @"C:\Users\User\Videos";
    // Pass false as the last argument to measure only the top-level folder.
    var bytes = GetFolderSize(folder, "mp4", true);
    var totalFileSize = FormatFileSize(bytes);
    Console.WriteLine(totalFileSize);
}
The short version of the most voted answer has problems with TB values.
I adjusted it appropriately to handle also tb values and still without a loop and also added a little error checking for negative values. Here's my solution:
static readonly string[] SizeSuffixes = { "bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB" };

/// <summary>
/// Formats a non-negative byte count using the largest fitting binary unit (no loop).
/// </summary>
/// <exception cref="ArgumentException">Thrown when <paramref name="value"/> is negative.</exception>
static string SizeSuffix(long value, int decimalPlaces = 0)
{
    if (value < 0)
    {
        throw new ArgumentException("Bytes should not be negative", "value");
    }

    // Math.Log(0, 1024) is -Infinity; Math.Max clamps that to magnitude 0 ("bytes").
    var magnitude = (int)Math.Max(0, Math.Log(value, 1024));
    var scaled = Math.Round(value / Math.Pow(1024, magnitude), decimalPlaces);
    return String.Format("{0} {1}", scaled, SizeSuffixes[magnitude]);
}
#Servy's answer was nice and succinct. I think it can be even simpler?
private static string[] suffixes = new[] { " B", " KB", " MB", " GB", " TB", " PB" };

/// <summary>
/// Scales a byte count into the largest fitting binary unit and appends its suffix.
/// </summary>
/// <param name="number">Size in bytes.</param>
/// <param name="precision">Decimal places used when rounding the scaled value.</param>
public static string ToSize(double number, int precision = 2)
{
    // One unit step (binary kilobyte).
    const double unit = 1024;
    int i = 0;

    // BUG FIXES: the original used 'number > unit', so exact multiples stayed in the
    // smaller unit (1024 -> "1024 B"); '>=' promotes them. The index bound prevents an
    // IndexOutOfRangeException for inputs beyond the last suffix (> ~1 EB as a double).
    while (number >= unit && i < suffixes.Length - 1)
    {
        number /= unit;
        i++;
    }

    return Math.Round(number, precision) + suffixes[i];
}
Updated for C# 9.0 Relational Patterns
public const long OneKB = 1024;
public const long OneMB = OneKB * OneKB;
public const long OneGB = OneMB * OneKB;
public const long OneTB = OneGB * OneKB;

/// <summary>
/// Formats a byte count using C# 9 relational patterns.
/// Uses integer division, so e.g. 1536 renders as "1KB".
/// </summary>
public static string BytesToHumanReadable(ulong bytes)
{
    // BUG FIXES vs. the original:
    //  * the GB arm divided by OneMB instead of OneGB;
    //  * the TB arm dropped its "TB" suffix;
    //  * 'bytes' is ulong while the constants are long, which does not compile
    //    without the constant casts below.
    // Switch arms are tested top to bottom, so each '<' bound implies the previous one.
    return bytes switch
    {
        < (ulong)OneKB => $"{bytes}B",
        < (ulong)OneMB => $"{bytes / (ulong)OneKB}KB",
        < (ulong)OneGB => $"{bytes / (ulong)OneMB}MB",
        < (ulong)OneTB => $"{bytes / (ulong)OneGB}GB",
        _ => $"{bytes / (ulong)OneTB}TB"
    };
}
No. Mostly because it's of a rather niche need, and there are too many possible variations. (Is it "KB", "Kb" or "Ko"? Is a megabyte 1024 * 1024 bytes, or 1024 * 1000 bytes? -- yes, some places use that!)
Here is an option that's easier to extend than yours, but no, there is none built into the library itself.
private static List<string> suffixes = new List<string> { " B", " KB", " MB", " GB", " TB", " PB" };

/// <summary>
/// Formats <paramref name="number"/> bytes with the largest unit whose next step
/// would reduce it to zero (integer division, no decimals), e.g. 5000 -> "4 KB".
/// </summary>
public static string Foo(int number)
{
    for (int i = 0; i < suffixes.Count; i++)
    {
        // BUG FIX: the original cast Math.Pow(1024, i + 1) to int, which overflows at
        // 1024^4 and relies on an unspecified double->int conversion. long represents
        // every power used here exactly.
        long nextUnit = (long)Math.Pow(1024, i + 1);
        if (number / nextUnit == 0)
        {
            return (number / (long)Math.Pow(1024, i)) + suffixes[i];
        }
    }
    return number.ToString();
}
/// <summary>
/// Formats a byte count as Bytes/KB/MB/GB using fixed thresholds.
/// The "##.##" format trims trailing zeros (e.g. 2048 -> "2 KB").
/// </summary>
private string GetFileSize(double byteCount)
{
    const double oneKb = 1024.0;
    const double oneMb = 1048576.0;          // 1024^2
    const double oneGb = 1073741824.0;       // 1024^3

    if (byteCount >= oneGb)
    {
        return String.Format("{0:##.##}", byteCount / oneGb) + " GB";
    }
    if (byteCount >= oneMb)
    {
        return String.Format("{0:##.##}", byteCount / oneMb) + " MB";
    }
    if (byteCount >= oneKb)
    {
        return String.Format("{0:##.##}", byteCount / oneKb) + " KB";
    }
    if (byteCount > 0)
    {
        return byteCount.ToString() + " Bytes";
    }
    // Zero (or negative) falls through to the original default.
    return "0 Bytes";
}
/// <summary>
/// Lets the user pick a file and shows its name and human-readable size in label1.
/// </summary>
private void btnBrowse_Click(object sender, EventArgs e)
{
    if (openFile1.ShowDialog() == DialogResult.OK)
    {
        FileInfo thisFile = new FileInfo(openFile1.FileName);
        string info = "";
        info += "File: " + Path.GetFileName(openFile1.FileName);
        info += Environment.NewLine;
        // BUG FIX: the original passed (int)thisFile.Length, which overflows for files
        // larger than 2 GB; FileInfo.Length is a long and widens to double implicitly.
        info += "File Size: " + GetFileSize(thisFile.Length);
        label1.Text = info;
    }
}
This is one way to do it as well (the number 1073741824.0 comes from 1024 * 1024 * 1024, i.e. one GB).
Based on NeverHopeless's elegant solution:
private static readonly KeyValuePair<long, string>[] Thresholds =
{
    // No 0-byte entry: zero is special-cased below, so we never divide by zero.
    new KeyValuePair<long, string>(1, " Byte"),
    new KeyValuePair<long, string>(2, " Bytes"),
    new KeyValuePair<long, string>(1024, " KB"),                 // 1024^1
    new KeyValuePair<long, string>(1048576, " MB"),              // 1024^2
    new KeyValuePair<long, string>(1073741824, " GB"),           // 1024^3
    new KeyValuePair<long, string>(1099511627776, " TB"),        // 1024^4
    // BUG FIX: the original PB/EB entries were rounded approximations
    // (…842620 and …50000) rather than the exact powers of 1024.
    new KeyValuePair<long, string>(1125899906842624, " PB"),     // 1024^5
    new KeyValuePair<long, string>(1152921504606846976, " EB"),  // 1024^6
    // ZB and YB do not fit into Int64.
};

/// <summary>
/// Returns "x Bytes", "x.xx KB", "x.xx MB", ... scaled to the largest threshold reached.
/// Negative values are formatted as the magnitude with a leading '-'.
/// </summary>
public static string ToByteSize(this long value)
{
    if (value == 0) return "0 Bytes"; // zero is plural

    // BUG FIX: the original loop used 't > 0', which never checked the " Byte" entry,
    // so ToByteSize(1) fell into the negative branch and recursed forever.
    for (int t = Thresholds.Length - 1; t >= 0; t--)
    {
        if (value >= Thresholds[t].Key)
        {
            // BUG FIX: the two byte entries (keys 1 and 2) are labels, not divisors;
            // the original divided 3 bytes by 2 and printed "1.50 Bytes".
            long divisor = t <= 1 ? 1 : Thresholds[t].Key;
            return ((double)value / divisor).ToString("0.00") + Thresholds[t].Value;
        }
    }

    return "-" + ToByteSize(-value); // negative values: format the magnitude, prefix the sign
}
Maybe there are excessive comments, but I tend to leave them to prevent myself from making the same mistakes over on future visits...
No.
But you can implement like this;
/// <summary>Converts a byte count to binary megabytes (1 MB = 1024 * 1024 bytes).</summary>
static double ConvertBytesToMegabytes(long bytes)
{
    // Use double arithmetic throughout: the original's 1024f floats only carry
    // ~7 significant digits, which degrades large byte counts in a double-returning API.
    return (bytes / 1024d) / 1024d;
}
/// <summary>Converts kilobytes to binary megabytes (1 MB = 1024 KB).</summary>
static double ConvertKilobytesToMegabytes(long kilobytes)
{
    // double division for full precision (the original divided by a float).
    return kilobytes / 1024d;
}
Also check out How to correctly convert filesize in bytes into mega or gigabytes?
I have combined some of the answers here into two methods that work great. The second method below will convert from a bytes string (like 1.51 GB) back to bytes (like 1621350140) as a long type value. I hope this is useful to others looking for a solution to convert bytes to a string and back into bytes.
/// <summary>
/// Formats a byte count as "x.xx B/KB/MB/GB/TB".
/// </summary>
public static string BytesAsString(float bytes)
{
    string[] suffix = { "B", "KB", "MB", "GB", "TB" };
    int i;
    // BUG FIX: the original initialised doubleBytes to 0, so any value below 1 KB
    // (where the loop never runs) printed as "0.00 B". Start from the input instead.
    double doubleBytes = bytes;
    for (i = 0; (int)(bytes / 1024) > 0; i++, bytes /= 1024)
    {
        doubleBytes = bytes / 1024.0;
    }
    return string.Format("{0:0.00} {1}", doubleBytes, suffix[i]);
}
/// <summary>
/// Parses strings like "240.2 KB" or "13.8MB" back into a byte count.
/// Unknown or missing suffixes are treated as plain bytes.
/// </summary>
public static long StringAsBytes(string bytesString)
{
    if (string.IsNullOrEmpty(bytesString))
    {
        return 0;
    }

    const long OneKb = 1024;
    const long OneMb = OneKb * 1024;
    const long OneGb = OneMb * 1024;
    const long OneTb = OneGb * 1024;

    double returnValue;
    string suffix;

    int separator = bytesString.IndexOf(" ");
    if (separator > 0)
    {
        // "123.4 KB" — number and suffix split by the first space.
        returnValue = float.Parse(bytesString.Substring(0, separator));
        suffix = bytesString.Substring(separator + 1).ToUpperInvariant();
    }
    else
    {
        // "123.4KB" — assume a two-character suffix glued onto the number.
        returnValue = float.Parse(bytesString.Substring(0, bytesString.Length - 2));
        suffix = bytesString.ToUpperInvariant().Substring(bytesString.Length - 2);
    }

    long multiplier = suffix switch
    {
        "KB" => OneKb,
        "MB" => OneMb,
        "GB" => OneGb,
        "TB" => OneTb,
        _ => 1,
    };

    return Convert.ToInt64(returnValue * multiplier);
}
I went for JerKimballs solution, and thumbs up to that.
However, I would like to add / point out that this is indeed a matter of controversy as a whole. In my research (for other reasons) I have come up with the following pieces of information.
When normal people (I have heard they exist) speak of gigabytes they refer to the metric system wherein 1000 to the power of 3 from the original number of bytes == the number of gigabytes.
However, of course there is the IEC / JEDEC standards which is nicely summed up in wikipedia, which instead of 1000 to the power of x they have 1024.
Which for physical storage devices (and I guess logical such as amazon and others) means an ever increasing difference between metric vs IEC.
So for instance 1 TB == 1 terabyte metric is 1000 to the power of 4, but IEC officially terms the similar number as 1 TiB, tebibyte as 1024 to the power of 4.
But, alas, in non-technical applications (I would go by audience) the norm is metric, and in my own app for internal use currently I explain the difference in documentation. But for display purposes I do not even offer anything but metric. Internally even though it's not relevant in my app I only store bytes and do the calculation for display.
As a side note I find it somewhat lackluster that the .Net framework AFAIK (and I am frequently wrong thank the powers that be) even in it's 4.5 incarnation does not contain anything about this in any libraries internally. One would expect an open source library of some kind to be NuGettable at some point, but I admit this is a small peeve. On the other hand System.IO.DriveInfo and others also only have bytes (as long) which is rather clear.
How about some recursion:
/// <summary>
/// Recursively divides by 1024, stepping through KB..TB (PB is terminal),
/// then formats as "x.xx&lt;label&gt;"; unlabeled results read "x.xxBytes".
/// </summary>
private static string ReturnSize(double size, string sizeLabel)
{
    if (size <= 1024)
    {
        // Base case: small enough to print at the current label.
        return string.Concat(size.ToString("0.00"), sizeLabel.Length > 0 ? sizeLabel : "Bytes");
    }

    // Advance to the next-larger unit; anything past GB keeps dividing under "PB".
    string nextLabel = sizeLabel switch
    {
        "" => "KB",
        "KB" => "MB",
        "MB" => "GB",
        "GB" => "TB",
        _ => "PB",
    };
    return ReturnSize(size / 1024, nextLabel);
}
Then you can call it:
ReturnSize(size, string.Empty);
I recently needed this and required to convert the in bytes to a number in long.
Usage: Byte.Kb.ToLong(1) should give 1024.
/// <summary>Binary size units usable as "Byte.Kb.ToLong(1)".</summary>
public enum Byte
{
    Kb,
    Mb,
    Gb,
    Tb
}

public static class ByteSize
{
    private const long OneKb = 1024;
    private const long OneMb = OneKb * 1024;
    private const long OneGb = OneMb * 1024;
    private const long OneTb = OneGb * 1024;

    /// <summary>
    /// Converts <paramref name="value"/> units of <paramref name="size"/> into bytes,
    /// e.g. Byte.Kb.ToLong(1) == 1024.
    /// </summary>
    public static long ToLong(this Byte size, int value)
    {
        long bytesPerUnit = size switch
        {
            Byte.Kb => OneKb,
            Byte.Mb => OneMb,
            Byte.Gb => OneGb,
            Byte.Tb => OneTb,
            _ => throw new NotImplementedException("This should never be hit.")
        };
        return value * bytesPerUnit;
    }
}
Tests using xunit:
[Theory]
[InlineData(Byte.Kb, 1, 1024)]
[InlineData(Byte.Kb, 2, 2048)]
[InlineData(Byte.Mb, 1, 1048576)]
[InlineData(Byte.Mb, 2, 2097152)]
[InlineData(Byte.Gb, 1, 1073741824)]
[InlineData(Byte.Gb, 2, 2147483648)]
[InlineData(Byte.Tb, 1, 1099511627776)]
[InlineData(Byte.Tb, 2, 2199023255552)]
public void ToLong_WhenConverting_ShouldMatchExpected(Byte size, int value, long expected)
{
    var result = size.ToLong(value);

    // CONSISTENCY FIX: the surrounding text says "Tests using xunit", but the original
    // asserted with result.Should().Be(expected), which requires the separate
    // FluentAssertions package. Plain xunit Assert keeps the stated dependency honest.
    Assert.Equal(expected, result);
}
How about:
/// <summary>
/// Prints a kilobyte count as megabytes, e.g. printMB(123456) -> "Size is 120.56MB"
/// (decimal separator follows the current culture).
/// </summary>
public void printMB(uint sizekB)
{
    // Dividing by 1024.0 forces floating-point division (sizekB is an integer type).
    double sizeMB = sizekB / 1024.0;
    Console.WriteLine("Size is " + sizeMB.ToString("0.00") + "MB");
}
E.g. call like
printMB(123456);
Will result in output
"Size is 120,56 MB"
public static class MyExtension
{
    /// <summary>Formats a float byte count as "x.xx byte/kb/mb/gb".</summary>
    public static string ToPrettySize(this float Size)
    {
        return ConvertToPrettySize(Size, 0);
    }

    /// <summary>Formats an int byte count as "x.xx byte/kb/mb/gb".</summary>
    public static string ToPrettySize(this int Size)
    {
        return ConvertToPrettySize(Size, 0);
    }

    // R is the recursion depth / unit index: 0 = byte, 1 = kb, 2 = mb, 3 = gb.
    private static string ConvertToPrettySize(float Size, int R)
    {
        float F = Size / 1024f;

        // BUG FIX: the original switch had no case for R > 3, so inputs of 1 TB or more
        // recursed forever (stack overflow). "gb" is now the terminal unit.
        if (F >= 1 && R < 3)
        {
            return ConvertToPrettySize(F, R + 1);
        }

        switch (R)
        {
            case 0:
                return string.Format("{0:0.00} byte", Size);
            case 1:
                return string.Format("{0:0.00} kb", Size);
            case 2:
                return string.Format("{0:0.00} mb", Size);
            default:
                return string.Format("{0:0.00} gb", Size);
        }
    }
}
As posted above, recursion is a convenient approach, with the help of logarithms.
The following function takes 3 arguments: the input value, the digit-count constraint on the output, and a pointer that receives the scaled output value.
int ByteReDim(unsigned long ival, int constraint, unsigned long *oval)
{
int base = 1 + (int) log10(ival);
(*oval) = ival;
if (base > constraint) {
(*oval) = (*oval) >> 10;
return(1 + ByteReDim((*oval), constraint, oval));
} else
return(0);
}
Now let's convert 12GB of RAM in several units:
/* Demo: renders 12 GiB of RAM at four different digit-count constraints. */
int main(void)
{
    unsigned long scaled;
    int unit;                                      /* index into symbol[] */
    char symbol[5] = {'B', 'K', 'M', 'G', 'T'};
    int constraints[4] = {12, 9, 6, 3};
    int i;

    /* Expected output: 12884901888B, 12582912K, 12288M, 12G */
    for (i = 0; i < 4; i++) {
        unit = ByteReDim(12884901888UL, constraints[i], &scaled);
        printf("%lu%c\n", scaled, symbol[unit]);
    }
    return 0;
}
I use this for Windows (binary prefixes):
// Binary unit labels; extend with the commented entries if larger sizes are needed.
static readonly string[] BinaryPrefix = { "bytes", "KB", "MB", "GB", "TB" }; // , "PB", "EB", "ZB", "YB"

/// <summary>
/// Formats a byte count with one decimal, e.g. 2048 -> "2.0 KB".
/// </summary>
string GetMemoryString(double bytes)
{
    int prefixIndex = 0;
    double remaining = bytes;
    string formatted;

    // The text is built BEFORE dividing, so the loop exits one step after the value
    // drops below 1 and returns the previous (correct) rendering.
    do
    {
        formatted = remaining.ToString("0.0") + " " + BinaryPrefix[prefixIndex];
        remaining /= 1024;
        prefixIndex++;
    }
    while (Math.Floor(remaining) > 0 && prefixIndex < BinaryPrefix.Length);

    return formatted;
}
I have incorporated this (with little to no modification) into a UWP DataBinding Converter for my project and thought it might also be useful to others.
The code is:
using System;
using System.Text;
using Windows.UI.Xaml.Data;
namespace MyApp.Converters
{
/// <summary>
/// UWP data-binding converter that renders a numeric byte count as a human-readable
/// size string (e.g. 1536 -> "1.5 KB"). One-way only: ConvertBack is not implemented.
/// </summary>
public class ByteSizeConverter : IValueConverter
{
static readonly string[] sSizeSuffixes = { "bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB" };
// The number of decimal places the formatter should include in the scaled output - default 1dp
public int DecimalPlaces { get; set; } = 1;
// IValueConverter entry point: the bound value is coerced to Int64 before formatting.
public object Convert(object value, Type targetType, object parameter, string language)
{
Int64 intVal = System.Convert.ToInt64(value);
return SizeSuffix(intVal);
}
// Not supported: this converter is intended for one-way (display) bindings only.
public object ConvertBack(object value, Type targetType, object parameter, string language)
{
// TODO: Parse string into number and suffix
// Scale number by suffix multiplier to get bytes
throw new NotImplementedException();
}
// Formats the byte count using binary (1024-based) units and DecimalPlaces decimals.
// Negative values recurse on the magnitude and prefix '-'; zero is special-cased
// so Math.Log is never called with 0.
string SizeSuffix(Int64 value)
{
if (this.DecimalPlaces < 0) { throw new ArgumentOutOfRangeException(String.Format("DecimalPlaces = {0}", this.DecimalPlaces)); }
if (value < 0) { return "-" + SizeSuffix(-value); }
if (value == 0) { return string.Format("{0:n" + this.DecimalPlaces + "} bytes", 0); }
// magnitude is 0 for bytes, 1 for KB, 2, for MB, etc.
int magnitude = (int)Math.Log(value, 1024);
// clip magnitude - only 8 values currently supported, this prevents out-of-bounds exception
magnitude = Math.Min(magnitude, 8);
// 1L << (magnitude * 10) == 2 ^ (10 * magnitude) [i.e. the number of bytes in the unit corresponding to magnitude]
decimal adjustedSize = (decimal)value / (1L << (magnitude * 10));
// make adjustment when the value is large enough that it would round up to 1000 or more
if (Math.Round(adjustedSize, this.DecimalPlaces) >= 1000)
{
magnitude += 1;
adjustedSize /= 1024;
}
return String.Format("{0:n" + this.DecimalPlaces + "} {1}", adjustedSize, sSizeSuffixes[magnitude]);
}
}
}
To use it, add a local resource to your UserControl or Page XAML:
<UserControl.Resources>
<converters:ByteSizeConverter x:Key="ByteFormat" DecimalPlaces="3" />
</UserControl.Resources>
Reference it in a data binding template or data binding instance:
<TextBlock HorizontalAlignment="Left" VerticalAlignment="Center"
Text="{x:Bind MyItem.FileSize_bytes, Mode=OneWay, Converter={StaticResource ByteFormat}}" />
And hey presto. The magic happens.
https://github.com/logary/logary/blob/master/src/Logary/DataModel.fs#L832-L837
// Returns a (scale factor, unit label) pair for a byte count: multiplying 'value' by
// the returned factor yields the number in the returned unit (presumably done by the
// caller — TODO confirm against Units.formatValue).
// NOTE(review): 'index' is not clamped, so values >= 1024^6 would index past 'prefixes',
// and values < 1 give a negative log2 — verify callers never pass such inputs.
let scaleBytes (value : float) : float * string =
let log2 x = log x / log 2.
let prefixes = [| ""; "Ki"; "Mi"; "Gi"; "Ti"; "Pi" |] // note the capital K and the 'i'
let index = int (log2 value) / 10
1. / 2.**(float index * 10.),
sprintf "%s%s" prefixes.[index] (Units.symbol Bytes)
(DISCLAIMER: I wrote this code, even the code in the link!)
Here's my spin on #drzaus's answer. I modified it to use rounding errors to our advantage and correctly manage issues around unit boundaries. It also handles negative values.
Drop this C# Program into LinqPad:
// Kudos: https://stackoverflow.com/a/48467634/117797
// LinqPad entry point: .Dump() is LinqPad's output extension, not part of .NET.
// Exercises ToFriendly across the supported unit range; expected output is noted inline.
void Main()
{
0.ToFriendly().Dump(); // 0 B
857.ToFriendly().Dump(); // 857 B
(173*1024).ToFriendly().Dump(); // 173 KB
(9541*1024).ToFriendly().Dump(); // 9.32 MB
(5261890L*1024).ToFriendly().Dump(); // 5.02 GB
1.ToFriendly().Dump(); // 1 B
1024.ToFriendly().Dump(); // 1 KB
1048576.ToFriendly().Dump(); // 1 MB
1073741824.ToFriendly().Dump(); // 1 GB
1099511627776.ToFriendly().Dump(); // 1 TB
// NOTE(review): the two literals below are rounded approximations of 1024^5 and
// 1024^6 (exact: 1125899906842624 and 1152921504606846976); ToFriendly's per-step
// rounding still displays them as 1 PB / 1 EB.
1125899906842620.ToFriendly().Dump(); // 1 PB
1152921504606850000.ToFriendly().Dump(); // 1 EB
}
public static class Extensions
{
    static string[] _byteUnits = new[] { "B", "KB", "MB", "GB", "TB", "PB", "EB" };

    /// <summary>int overload; see ToFriendly(double).</summary>
    public static string ToFriendly(this int number, int decimals = 2)
    {
        return ((double)number).ToFriendly(decimals);
    }

    /// <summary>long overload; see ToFriendly(double).</summary>
    public static string ToFriendly(this long number, int decimals = 2)
    {
        return ((double)number).ToFriendly(decimals);
    }

    /// <summary>
    /// Formats a byte count with the largest binary unit, rounding at each step so
    /// values near unit boundaries render cleanly. Negative inputs keep their sign.
    /// </summary>
    public static string ToFriendly(this double number, int decimals = 2)
    {
        const double divisor = 1024;
        int unitIndex = 0;
        var sign = number < 0 ? "-" : string.Empty;
        var value = Math.Abs(number);
        double lastValue = number;

        while (value > 1)
        {
            lastValue = value;
            // Rounding inside the loop deliberately absorbs boundary noise; the
            // accumulated error is irrelevant at display precision.
            value = Math.Round(value / divisor, decimals);
            unitIndex++;
        }

        // Overshot: the last division left less than one unit, so report the previous scale.
        // BUG FIX: guard with unitIndex > 0 — the original decremented unconditionally,
        // so fractional inputs (0 < |number| < 1) indexed _byteUnits[-1] and threw.
        if (value < 1 && number != 0 && unitIndex > 0)
        {
            value = lastValue;
            unitIndex--;
        }

        return $"{sign}{value} {_byteUnits[unitIndex]}";
    }
}
Output is:
0 B
857 B
173 KB
9.32 MB
1.34 MB
5.02 GB
1 B
1 KB
1 MB
1 GB
1 TB
1 PB
1 EB
/// <summary>
/// Formats a byte count, truncating (not rounding) the scaled value to two decimals,
/// e.g. 2048 -> "2 KB".
/// </summary>
string Convert(float bytes)
{
    string[] Group = { "Bytes", "KB", "MB", "GB", "TB" };
    float B = bytes; int G = 0;
    // BUG FIX: the original guard was G < 5, which let G reach 5 and index past the
    // end of Group (5 entries, max index 4) for inputs of 1 PB or more.
    while (B >= 1024 && G < Group.Length - 1)
    {
        B /= 1024;
        G += 1;
    }
    // Truncate to two decimal places rather than rounding.
    float truncated = (float)(Math.Truncate((double)B * 100.0) / 100.0);
    string load = (truncated + " " + Group[G]);
    return load;
}
This is how I do it.
Console.Write(FileSizeInBytes > 1048576 ? FileSizeInBytes / 1048576f + " MB" : FileSizeInBytes / 1024f + " KB"); //1048576 = 1024 * 1024
I combined zackmark15's code into an all-purpose file or directory measuring approach:
/// <summary>
/// Returns the human-readable size of a file, or the combined size of every file
/// beneath a directory, e.g. "4.20 MB".
/// </summary>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="path"/> is null or empty.</exception>
/// <exception cref="ArgumentException">Thrown when the path is neither a file nor a directory.</exception>
public static string PathSize(string path)
{
    if (String.IsNullOrEmpty(path))
        throw new ArgumentNullException(nameof(path));

    long bytes;
    if (File.Exists(path))
    {
        bytes = new FileInfo(path).Length;
    }
    else if (Directory.Exists(path))
    {
        // Recursive sum of every file beneath the directory.
        bytes = new DirectoryInfo(path)
            .EnumerateFiles("*", SearchOption.AllDirectories)
            .Sum(fileInfo => fileInfo.Length);
    }
    else
    {
        throw new ArgumentException("Path does not exist.", nameof(path));
    }

    const long UNIT = 1024L;
    if (bytes < UNIT)
        return $"{bytes} bytes";

    // Largest power of 1024 contained in the value; "KMGTPE" maps powers 1..6.
    var exp = (int)(Math.Log(bytes) / Math.Log(UNIT));
    return $"{bytes / Math.Pow(UNIT, exp):F2} {("KMGTPE")[exp - 1]}B";
}
Related
How to calculate protobuf file size?
I'm trying to figure out the final size of a file serialized with protobuf-net, so I'll can choose the best approach. I made some comparison tests with different proto configurations and a binary serialization, but still I don't understand how "varint to bytes" conversion works. Classes public class Pt2D { public Pt2D() { } public Pt2D(double x, double y) { X = x; Y = y; } public double X { get; set; } public double Y { get; set; } } public class Pt3D : Pt2D { public Pt3D() { } public Pt3D(double x, double y, double z) : base(x, y) { Z = z; } public double Z { get; set; } } public class FullPt3D { public FullPt3D() { } public FullPt3D(double x, double y, double z) { X = x; Y = y; Z = z; } public double X { get; set; } public double Y { get; set; } public double Z { get; set; } } Test case private void ProtoBufferTest() { var model = RuntimeTypeModel.Default; model.Add(typeof(Pt2D), false) .Add(1, "X") .Add(2, "Y") .AddSubType(101, typeof(Pt3D)); model[typeof(Pt3D)] .Add(1, "Z"); model.Add(typeof(FullPt3D), false) .Add(1, "X") .Add(2, "Y") .Add(3, "Z"); double x = 5.6050692524784562; double y = 0.74161805247031987; double z = 8.5883424750474937; string filename = "testPt3D.pb"; using (var file = File.Create(filename)) { Serializer.Serialize(file, new Pt3D(x, y, z)); } Console.WriteLine(filename + " length = " + new FileInfo(filename).Length + " bytes") ; filename = "testFullPt3D.pb"; using (var file = File.Create(filename)) { Serializer.Serialize(file, new FullPt3D(x, y, z)); } Console.WriteLine(filename + " length = " + new FileInfo(filename).Length + " bytes"); filename = "testBinaryWriter.bin"; using (var file = File.Create(filename)) { using (var writer = new BinaryWriter(file)) { writer.Write(x); writer.Write(y); writer.Write(z); } } Console.WriteLine(filename + " length = " + new FileInfo(filename).Length + " bytes"); } Test results 1) testPt3D.pb length = 30 bytes 2) testFullPt3D.pb length = 27 bytes 3) testBinaryWriter.bin length = 24 bytes Q1) 24 bytes are 
used to store the 3 double values and it's ok, but what values are stored in cases 1) and 2) to reach 30 and 27 bytes? (I suppose int values used in model mapping) Q2) I made some tests by changing the SubType mapping for Pt2D but I cannot understand the size changes model.Add(typeof(Pt2D), false) .Add(1, "X") .Add(2, "Y") .AddSubType(3, typeof(Pt3D)); Result: testPt3D.pb length = 29 bytes model.Add(typeof(Pt2D), false) .Add(1, "X") .Add(2, "Y") .AddSubType(21, typeof(Pt3D)); Result: testPt3D.pb length = 30 bytes model.Add(typeof(Pt2D), false) .Add(1, "X") .Add(2, "Y") .AddSubType(1111, typeof(Pt3D)); Result: testPt3D.pb length = 30 bytes I tried to use this tool to better understand, but it gives different bytes conversion results. Why do I get the same size by using 21, 101 or 1111?
1) testPt3D.pb length = 30 bytes (subclass comes first) [field 101, string] = 2 bytes, 3 bits for "string", 7 bits for "101"; varint packs in 7 bit units with a continuation bit, so: 2 bytes (total = 2) [data length "9"] = 1 byte (total = 3) [field 1, fixed 64] = 1 byte (total = 4) [payload 1] = 8 bytes (total = 12) [field 1, fixed 64] = 1 byte (total = 13) [payload 1] = 8 bytes (total = 21) [field 2, fixed 64] = 1 byte (total = 22) [payload 2] = 8 bytes (total = 30) 2) testFullPt3D.pb length = 27 bytes [field 1, fixed 64] = 1 byte (total = 1) [payload 1] = 8 bytes (total = 9) [field 2, fixed 64] = 1 byte (total = 10) [payload 2] = 8 bytes (total = 18) [field 3, fixed 64] = 1 byte (total = 19) [payload 3] = 8 bytes (total = 27) There are other options in protobuf when dealing with repeated data - "packed" and "grouped"; they only make sense when discussing more data than 3 values, though.
Convert a value in string format to long
I have different string values in the format "240.2 KB", "13.8 MB", "675 bytes" and so on. Can anyone help me figure out how to convert these string values to numeric format also taking into consideration the MBs and KBs
Do something like this: public long ConvertDataSize(string str) { string[] parts = str.Split(' '); if (parts.Length != 2) throw new Exception("Unexpected input"); var number_part = parts[0]; double number = Convert.ToDouble(number_part); var unit_part = parts[1]; var bytes_for_unit = GetNumberOfBytesForUnit(unit_part); return Convert.ToInt64(number*bytes_for_unit); } private long GetNumberOfBytesForUnit(string unit) { if (unit.Equals("kb", StringComparison.OrdinalIgnoreCase)) return 1024; if (unit.Equals("mb", StringComparison.OrdinalIgnoreCase)) return 1048576; if (unit.Equals("gb", StringComparison.OrdinalIgnoreCase)) return 1073741824; if (unit.Equals("bytes", StringComparison.OrdinalIgnoreCase)) return 1; //Add more rules here to support more units throw new Exception("Unexpected unit"); } Now, you can use it like this: long result = ConvertDataSize("240.2 KB");
Store the unit factors in a dictionary: Dictionary<string, long> units = new Dictionary<string, long>() { { "bytes", 1L }, { "KB", 1L << 10 }, // kilobytes { "MB", 1L << 20 }, // megabytes { "GB", 1L << 30 }, // gigabytes { "TB", 1L << 40 }, // terabytes { "PB", 1L << 50 }, // petabytes { "EB", 1L << 60 } // exabytes (who knows how much memory we'll get in future!) }; I am using the binary left shift operator in order to get the powers of 2. Don't forget to specify the long specifier "L". Otherwise it will assume int. You get the number of bytes with (I omitted checks for the sake of simplicity): private long ToBytes(string s) { string[] parts = s.Split(' '); decimal n = Decimal.Parse(parts[0]); return (long)(units[parts[1]] * n); }
How can i display in label the real size of a file on the hard disk? [duplicate]
This question already has answers here: How do I get a human-readable file size in bytes abbreviation using .NET? (26 answers) Closed 8 years ago. I'm using this method: public static long GetFileSizeOnDisk(string file) { FileInfo info = new FileInfo(file); uint dummy, sectorsPerCluster, bytesPerSector; int result = GetDiskFreeSpaceW(info.Directory.Root.FullName, out sectorsPerCluster, out bytesPerSector, out dummy, out dummy); if (result == 0) throw new Win32Exception(); uint clusterSize = sectorsPerCluster * bytesPerSector; uint hosize; uint losize = GetCompressedFileSizeW(file, out hosize); long size; size = (long)hosize << 32 | losize; return ((size + clusterSize - 1) / clusterSize) * clusterSize; } And use it like this: label10.Text = GetFileSizeOnDisk(previewFileName).ToString(); The result for example is: 5074944 But what i want it to dispaly is if it's low then mega byte then display as kb and if above then as mb or gigabyte i mean if 5074944 is megabyte then display it for example as: 5,074944 MB Including the MB Or how it known to display/write sizes.
The nice thing about programming is, that repetative tasks can be automated. A solution that automatically calculates the size could be: find file size repeat until size is bigger than 1024 divide size by 1024 store size position loop The code can look like this: private String sizeFormatter(Int64 filesize) { var sizes = new List<String> { "B", "KB", "MB", "GB", "TB", "PB" }; var size = 0; while (filesize > 1024) { filesize /= 1024; size++; } return String.Format("{0}{1}", filesize, sizes[size]); } And the usage: var bigFile = new FileInfo("C:\\oracle\\OracleXE112_Win32.zip"); var smallFile = new FileInfo("C:\\oracle\\ScriptCreateUser.sql"); var verySmallFile = new FileInfo("C:\\ScriptCreateTable.sql"); Console.WriteLine(sizeFormatter(bigFile.Length)); Console.WriteLine(sizeFormatter(smallFile.Length)); Console.WriteLine(sizeFormatter(verySmallFile.Length)); The output is: 312MB 12KB 363B This method could be optimized regarding accuracy, but for the general usage it should be alright.
You should just use some if statements: long size = GetFileSizeOnDisk(previewFileName); if(size > 1024 * 1024 * 1024) { label10.Text = (size / 1024 * 1024 * 1024F).ToString() + " Gb"; } else if(size > 1024 * 1024) { label10.Text = (size / 1024 * 1024F).ToString() + " Mb"; } else if(size > 1024) { label10.Text = (size / 1024F).ToString() + " Kb"; } else { label10.Text = size.ToString(); }
Converting between NTP and C# DateTime
I use the following code to convert between NTP and a C# DateTime. I think the forward corversion is correct, but backwards is wrong. See the following code to convert 8 bytes into a DatTime: Convert NTP to DateTime public static ulong GetMilliSeconds(byte[] ntpTime) { ulong intpart = 0, fractpart = 0; for (var i = 0; i <= 3; i++) intpart = 256 * intpart + ntpTime[i]; for (var i = 4; i <= 7; i++) fractpart = 256 * fractpart + ntpTime[i]; var milliseconds = intpart * 1000 + ((fractpart * 1000) / 0x100000000L); Debug.WriteLine("intpart: " + intpart); Debug.WriteLine("fractpart: " + fractpart); Debug.WriteLine("milliseconds: " + milliseconds); return milliseconds; } public static DateTime ConvertToDateTime(byte[] ntpTime) { var span = TimeSpan.FromMilliseconds(GetMilliSeconds(ntpTime)); var time = new DateTime(1900, 1, 1, 0, 0, 0, DateTimeKind.Utc); time += span; return time; } Convert from DateTime to NTP public static byte[] ConvertToNtp(ulong milliseconds) { ulong intpart = 0, fractpart = 0; var ntpData = new byte[8]; intpart = milliseconds / 1000; fractpart = ((milliseconds % 1000) * 0x100000000L) / 1000; Debug.WriteLine("intpart: " + intpart); Debug.WriteLine("fractpart: " + fractpart); Debug.WriteLine("milliseconds: " + milliseconds); var temp = intpart; for (var i = 3; i >= 0; i--) { ntpData[i] = (byte)(temp % 256); temp = temp / 256; } temp = fractpart; for (var i = 7; i >= 4; i--) { ntpData[i] = (byte)(temp % 256); temp = temp / 256; } return ntpData; } The following input produces the output: bytes = { 131, 170, 126, 128, 46, 197, 205, 234 } var ms = GetMilliSeconds(bytes ); var ntp = ConvertToNtp(ms) //GetMilliSeconds output milliseconds: 2208988800182 intpart: 2208988800 fractpart: 784715242 //ConvertToNtp output milliseconds: 2208988800182 intpart: 2208988800 fractpart: 781684047 Notice that the conversion from milliseconds to fractional part is wrong. Why? Update: As Jonathan S. points out - it's loss of fraction. 
So instead of converting back and forth, I want to manipulate with the NTP timestamp directly. More specific, add milliseconds to it. I would assume the following function would do just that, but I'm having a hard time validating it. I am very unsure about the fraction-part. public static void AddMilliSeconds(ref byte[] ntpTime, ulong millis) { ulong intpart = 0, fractpart = 0; for (var i = 0; i < 4; i++) intpart = 256 * intpart + ntpTime[i]; for (var i = 4; i <= 7; i++) fractpart = 256 * fractpart + ntpTime[i]; intpart += millis / 1000; fractpart += millis % 1000; var newIntpart = BitConverter.GetBytes(SwapEndianness(intpart)); var newFractpart = BitConverter.GetBytes(SwapEndianness(fractpart)); for (var i = 0; i < 8; i++) { if (i < 4) ntpTime[i] = newIntpart[i]; if (i >= 4) ntpTime[i] = newFractpart[i - 4]; } }
What you're running into here is loss of precision in the conversion from NTP timestamp to milliseconds. When you convert from NTP to milliseconds, you're dropping part of the fraction. When you then take that value and try to convert back, you get a value that's slightly different. You can see this more clearly if you change your ulong values to decimal values, as in this test: public static decimal GetMilliSeconds(byte[] ntpTime) { decimal intpart = 0, fractpart = 0; for (var i = 0; i <= 3; i++) intpart = 256 * intpart + ntpTime[i]; for (var i = 4; i <= 7; i++) fractpart = 256 * fractpart + ntpTime[i]; var milliseconds = intpart * 1000 + ((fractpart * 1000) / 0x100000000L); Console.WriteLine("milliseconds: " + milliseconds); Console.WriteLine("intpart: " + intpart); Console.WriteLine("fractpart: " + fractpart); return milliseconds; } public static byte[] ConvertToNtp(decimal milliseconds) { decimal intpart = 0, fractpart = 0; var ntpData = new byte[8]; intpart = milliseconds / 1000; fractpart = ((milliseconds % 1000) * 0x100000000L) / 1000m; Console.WriteLine("milliseconds: " + milliseconds); Console.WriteLine("intpart: " + intpart); Console.WriteLine("fractpart: " + fractpart); var temp = intpart; for (var i = 3; i >= 0; i--) { ntpData[i] = (byte)(temp % 256); temp = temp / 256; } temp = fractpart; for (var i = 7; i >= 4; i--) { ntpData[i] = (byte)(temp % 256); temp = temp / 256; } return ntpData; } public static void Main(string[] args) { byte[] bytes = { 131, 170, 126, 128, 46, 197, 205, 234 }; var ms = GetMilliSeconds(bytes); Console.WriteLine(); var ntp = ConvertToNtp(ms); } This yields the following result: milliseconds: 2208988800182.7057548798620701 intpart: 2208988800 fractpart: 784715242 milliseconds: 2208988800182.7057548798620701 intpart: 2208988800.1827057548798620701 fractpart: 784715242.0000000000703594496 It's the ~0.7 milliseconds that are screwing things up here. 
Since the NTP timestamp includes a 32-bit fractional second ("a theoretical resolution of 2^-32 seconds or 233 picoseconds"), a conversion to integer milliseconds will result in a loss of precision. Response to Update: Adding milliseconds to the NTP timestamp wouldn't be quite as simple as adding the integer parts and the fraction parts. Think of adding the decimals 1.75 and 2.75. 0.75 + 0.75 = 1.5, and you'd need to carry the one over to the integer part. Also, the fraction part in the NTP timestamp is not base-10, so you can't just add the milliseconds. Some conversion is necessary, using a proportion like ms / 1000 = ntpfrac / 0x100000000. This is entirely untested, but I'd think you'd want to replace your intpart += and fractpart += lines in AddMilliSeconds to be more like this: intpart += millis / 1000; ulong fractsum = fractpart + ((millis % 1000) * 0x100000000L) / 1000; intpart += fractsum / 0x100000000L; fractpart = fractsum % 0x100000000L;
Suggestion To Cameron's Solution: use ntpEpoch = (new DateTime(1900, 1, 1, 0, 0, 0, 0, DateTimeKind.Utc)).Ticks; to make sure that you don't calculate from your local time
Same as others but without division return (ulong)(elapsedTime.Ticks * 1e-7 * 4294967296ul) Or return (ulong)(((long)(elapsedTime.Ticks * 0.0000001) << 32) + (elapsedTime.TotalMilliseconds % 1000 * 4294967296 * 0.001)); //TicksPerPicosecond = 0.0000001m //4294967296 = uint.MaxValue + 1 //0.001 == PicosecondsPerNanosecond The full method would then be: public static System.DateTime UtcEpoch2036 = new System.DateTime(2036, 2, 7, 6, 28, 16, System.DateTimeKind.Utc); public static System.DateTime UtcEpoch1900 = new System.DateTime(1900, 1, 1, 0, 0, 0, System.DateTimeKind.Utc); public static ulong DateTimeToNptTimestamp(ref System.DateTime value/*, bool randomize = false*/) { System.DateTime baseDate = value >= UtcEpoch2036 ? UtcEpoch2036 : UtcEpoch1900; System.TimeSpan elapsedTime = value > baseDate ? value.ToUniversalTime() - baseDate.ToUniversalTime() : baseDate.ToUniversalTime() - value.ToUniversalTime(); //Media.Common.Extensions.TimeSpan.TimeSpanExtensions.MicrosecondsPerMillisecond = 1000 //TicksPerPicosecond = 0.0000001m = 1e-7 //4294967296 = uint.MaxValue + 1 //0.001 == PicosecondsPerNanosecond = 1e-3 //429496.7296 Picoseconds = 4.294967296e-7 Seconds //4.294967296e-7 * 1000 Milliseconds per second = 0.0004294967296 * 1e+9 (PicosecondsPerMilisecond) = 429.4967296 //0.4294967296 nanoseconds * 100 nanoseconds = 1 tick = 42.94967296 * 10000 ticks per millisecond = 429496.7296 / 1000 = 429.49672960000004 unchecked { //return (ulong)((long)(elapsedTime.Ticks * 0.0000001m) << 32 | (long)((decimal)elapsedTime.TotalMilliseconds % 1000 * 4294967296m * 0.001m)); //return (ulong)(((long)(elapsedTime.Ticks * 0.0000001m) << 32) + (elapsedTime.TotalMilliseconds % 1000 * 4294967296ul * 0.001)); //return (ulong)(elapsedTime.Ticks * 1e-7 * 4294967296ul); //ie-7 * 4294967296ul = 429.4967296 has random diff which complies better? 
(In order to minimize bias and help make timestamps unpredictable to an intruder, the non - significant bits should be set to an unbiased random bit string.) //return (ulong)(elapsedTime.Ticks * 429.4967296m);//decimal precision is better but we still lose precision because of the magnitude? 0.001 msec dif ((ulong)(elapsedTime.Ticks * 429.4967296000000000429m)) //429.49672960000004m has reliable 003 msec diff //Has 0 diff but causes fraction to be different from examples... //return (ulong)((elapsedTime.Ticks + 1) * 429.4967296m); //Also adding + 429ul; return (ulong)(elapsedTime.Ticks * 429.496729600000000000429m); //var ticks = (ulong)(elapsedTime.Ticks * 429.496729600000000000429m); //Has 0 diff on .137 measures otherwise 0.001 msec or 1 tick, keeps the examples the same. //if(randomize) ticks ^= (ulong)(Utility.Random.Next() & byte.MaxValue); //return ticks; } Where as the reverse would be: public static System.DateTime NptTimestampToDateTime(ref uint seconds, ref uint fractions, System.DateTime? epoch = null) { //Convert to ticks //ulong ticks = (ulong)((seconds * System.TimeSpan.TicksPerSecond) + ((fractions * System.TimeSpan.TicksPerSecond) / 0x100000000L)); //uint.MaxValue + 1 unchecked { //Convert to ticks, //'UtcEpoch1900.AddTicks(seconds * System.TimeSpan.TicksPerSecond + ((long)(fractions * 1e+12))).Millisecond' threw an exception of type 'System.ArgumentOutOfRangeException' //0.01 millisecond = 1e+7 picseconds = 10000 nanoseconds //10000 nanoseconds = 10 micros = 10000000 pioseconds //0.001 Centisecond = 10 Microsecond //1 Tick = 0.1 Microsecond //0.1 * 100 Nanos Per Tick = 100 //TenMicrosecondsPerPicosecond = 10000000 = TimeSpan.TicksPerSecond = 10000000 //System.TimeSpan.TicksPerSecond is fine here also... 
long ticks = seconds * System.TimeSpan.TicksPerSecond + ((long)(fractions * Media.Common.Extensions.TimeSpan.TimeSpanExtensions.TenMicrosecondsPerPicosecond) >> Common.Binary.BitsPerInteger); //Return the result of adding the ticks to the epoch //If the epoch was given then use that value otherwise determine the epoch based on the highest bit. return epoch.HasValue ? epoch.Value.AddTicks(ticks) : (seconds & 0x80000000L) == 0 ? UtcEpoch2036.AddTicks(ticks) : UtcEpoch1900.AddTicks(ticks); } }
DateTime ticks to NTP and back. static long ntpEpoch = (new DateTime(1900, 1, 1, 0, 0, 0, 0, DateTimeKind.Utc)).Ticks; static public long Ntp2Ticks(UInt64 a) { var b = (decimal)a * 1e7m / (1UL << 32); return (long)b + ntpEpoch; } static public UInt64 Ticks2Ntp(long a) { decimal b = a - ntpEpoch; b = (decimal)b / 1e7m * (1UL << 32); return (UInt64)b; }
Does .NET provide an easy way convert bytes to KB, MB, GB, etc.?
Just wondering if .NET provides a clean way to do this: int64 x = 1000000; string y = null; if (x / 1024 == 0) { y = x + " bytes"; } else if (x / (1024 * 1024) == 0) { y = string.Format("{0:n1} KB", x / 1024f); } etc...
Here is a fairly concise way to do this: static readonly string[] SizeSuffixes = { "bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB" }; static string SizeSuffix(Int64 value, int decimalPlaces = 1) { if (decimalPlaces < 0) { throw new ArgumentOutOfRangeException("decimalPlaces"); } if (value < 0) { return "-" + SizeSuffix(-value, decimalPlaces); } if (value == 0) { return string.Format("{0:n" + decimalPlaces + "} bytes", 0); } // mag is 0 for bytes, 1 for KB, 2, for MB, etc. int mag = (int)Math.Log(value, 1024); // 1L << (mag * 10) == 2 ^ (10 * mag) // [i.e. the number of bytes in the unit corresponding to mag] decimal adjustedSize = (decimal)value / (1L << (mag * 10)); // make adjustment when the value is large enough that // it would round up to 1000 or more if (Math.Round(adjustedSize, decimalPlaces) >= 1000) { mag += 1; adjustedSize /= 1024; } return string.Format("{0:n" + decimalPlaces + "} {1}", adjustedSize, SizeSuffixes[mag]); } And here's the original implementation I suggested, which may be marginally slower, but a bit easier to follow: static readonly string[] SizeSuffixes = { "bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB" }; static string SizeSuffix(Int64 value, int decimalPlaces = 1) { if (value < 0) { return "-" + SizeSuffix(-value, decimalPlaces); } int i = 0; decimal dValue = (decimal)value; while (Math.Round(dValue, decimalPlaces) >= 1000) { dValue /= 1024; i++; } return string.Format("{0:n" + decimalPlaces + "} {1}", dValue, SizeSuffixes[i]); } Console.WriteLine(SizeSuffix(100005000L)); One thing to bear in mind - in SI notation, "kilo" usually uses a lowercase k while all of the larger units use a capital letter. Windows uses KB, MB, GB, so I have used KB above, but you may consider kB instead.
Checkout the ByteSize library. It's the System.TimeSpan for bytes! It handles the conversion and formatting for you. var maxFileSize = ByteSize.FromKiloBytes(10); maxFileSize.Bytes; maxFileSize.MegaBytes; maxFileSize.GigaBytes; It also does string representation and parsing. // ToString ByteSize.FromKiloBytes(1024).ToString(); // 1 MB ByteSize.FromGigabytes(.5).ToString(); // 512 MB ByteSize.FromGigabytes(1024).ToString(); // 1 TB // Parsing ByteSize.Parse("5b"); ByteSize.Parse("1.55B");
I would solve it using Extension methods, Math.Pow function and Enums: public static class MyExtension { public enum SizeUnits { Byte, KB, MB, GB, TB, PB, EB, ZB, YB } public static string ToSize(this Int64 value, SizeUnits unit) { return (value / (double)Math.Pow(1024, (Int64)unit)).ToString("0.00"); } } and use it like: string h = x.ToSize(MyExtension.SizeUnits.KB);
Since everyone else is posting their methods, I figured I'd post the extension method I usually use for this: EDIT: added int/long variants...and fixed a copypasta typo... public static class Ext { private const long OneKb = 1024; private const long OneMb = OneKb * 1024; private const long OneGb = OneMb * 1024; private const long OneTb = OneGb * 1024; public static string ToPrettySize(this int value, int decimalPlaces = 0) { return ((long)value).ToPrettySize(decimalPlaces); } public static string ToPrettySize(this long value, int decimalPlaces = 0) { var asTb = Math.Round((double)value / OneTb, decimalPlaces); var asGb = Math.Round((double)value / OneGb, decimalPlaces); var asMb = Math.Round((double)value / OneMb, decimalPlaces); var asKb = Math.Round((double)value / OneKb, decimalPlaces); string chosenValue = asTb > 1 ? string.Format("{0}Tb",asTb) : asGb > 1 ? string.Format("{0}Gb",asGb) : asMb > 1 ? string.Format("{0}Mb",asMb) : asKb > 1 ? string.Format("{0}Kb",asKb) : string.Format("{0}B", Math.Round((double)value, decimalPlaces)); return chosenValue; } }
I know this is an old thread already, but maybe someone will look for a solution. And here's what I use and the easiest way public static string FormatFileSize(long bytes) { var unit = 1024; if (bytes < unit) { return $"{bytes} B"; } var exp = (int)(Math.Log(bytes) / Math.Log(unit)); return $"{bytes / Math.Pow(unit, exp):F2} {("KMGTPE")[exp - 1]}B"; } Get folder size (for example usage) public static long GetFolderSize(string path, string ext, bool AllDir) { var option = AllDir ? SearchOption.AllDirectories : SearchOption.TopDirectoryOnly; return new DirectoryInfo(path).EnumerateFiles("*" + ext, option).Sum(file => file.Length); } EXAMPLE USAGE: public static void TEST() { string folder = @"C:\Users\User\Videos"; var bytes = GetFolderSize(folder, "mp4", true); //or GetFolderSize(folder, "mp4", false) to get all single folder only var totalFileSize = FormatFileSize(bytes); Console.WriteLine(totalFileSize); }
The short version of the most voted answer has problems with TB values. I adjusted it appropriately to handle also tb values and still without a loop and also added a little error checking for negative values. Here's my solution: static readonly string[] SizeSuffixes = { "bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB" }; static string SizeSuffix(long value, int decimalPlaces = 0) { if (value < 0) { throw new ArgumentException("Bytes should not be negative", "value"); } var mag = (int)Math.Max(0, Math.Log(value, 1024)); var adjustedSize = Math.Round(value / Math.Pow(1024, mag), decimalPlaces); return String.Format("{0} {1}", adjustedSize, SizeSuffixes[mag]); }
#Servy's answer was nice and succinct. I think it can be even simpler? private static string[] suffixes = new [] { " B", " KB", " MB", " GB", " TB", " PB" }; public static string ToSize(double number, int precision = 2) { // unit's number of bytes const double unit = 1024; // suffix counter int i = 0; // as long as we're bigger than a unit, keep going while(number > unit) { number /= unit; i++; } // apply precision and current suffix return Math.Round(number, precision) + suffixes[i]; }
Updated for C# 9.0 Relational Patterns public const long OneKB = 1024; public const long OneMB = OneKB * OneKB; public const long OneGB = OneMB * OneKB; public const long OneTB = OneGB * OneKB; public static string BytesToHumanReadable(ulong bytes) { return bytes switch { (< OneKB) => $"{bytes}B", (>= OneKB) and (< OneMB) => $"{bytes / OneKB}KB", (>= OneMB) and (< OneGB) => $"{bytes / OneMB}MB", (>= OneGB) and (< OneTB) => $"{bytes / OneGB}GB", (>= OneTB) => $"{bytes / OneTB}" //... }; }
No. Mostly because it's of a rather niche need, and there are too many possible variations. (Is it "KB", "Kb" or "Ko"? Is a megabyte 1024 * 1024 bytes, or 1024 * 1000 bytes? -- yes, some places use that!)
Here is an option that's easier to extend than yours, but no, there is none built into the library itself. private static List<string> suffixes = new List<string> { " B", " KB", " MB", " GB", " TB", " PB" }; public static string Foo(int number) { for (int i = 0; i < suffixes.Count; i++) { int temp = number / (int)Math.Pow(1024, i + 1); if (temp == 0) return (number / (int)Math.Pow(1024, i)) + suffixes[i]; } return number.ToString(); }
private string GetFileSize(double byteCount) { string size = "0 Bytes"; if (byteCount >= 1073741824.0) size = String.Format("{0:##.##}", byteCount / 1073741824.0) + " GB"; else if (byteCount >= 1048576.0) size = String.Format("{0:##.##}", byteCount / 1048576.0) + " MB"; else if (byteCount >= 1024.0) size = String.Format("{0:##.##}", byteCount / 1024.0) + " KB"; else if (byteCount > 0 && byteCount < 1024.0) size = byteCount.ToString() + " Bytes"; return size; } private void btnBrowse_Click(object sender, EventArgs e) { if (openFile1.ShowDialog() == DialogResult.OK) { FileInfo thisFile = new FileInfo(openFile1.FileName); string info = ""; info += "File: " + Path.GetFileName(openFile1.FileName); info += Environment.NewLine; info += "File Size: " + GetFileSize((int)thisFile.Length); label1.Text = info; } } This is one way to do it as well (The number 1073741824.0 is from 1024*1024*1024 aka GB)
Based on NeverHopeless's elegant solution: private static readonly KeyValuePair<long, string>[] Thresholds = { // new KeyValuePair<long, string>(0, " Bytes"), // Don't devide by Zero! new KeyValuePair<long, string>(1, " Byte"), new KeyValuePair<long, string>(2, " Bytes"), new KeyValuePair<long, string>(1024, " KB"), new KeyValuePair<long, string>(1048576, " MB"), // Note: 1024 ^ 2 = 1026 (xor operator) new KeyValuePair<long, string>(1073741824, " GB"), new KeyValuePair<long, string>(1099511627776, " TB"), new KeyValuePair<long, string>(1125899906842620, " PB"), new KeyValuePair<long, string>(1152921504606850000, " EB"), // These don't fit into a int64 // new KeyValuePair<long, string>(1180591620717410000000, " ZB"), // new KeyValuePair<long, string>(1208925819614630000000000, " YB") }; /// <summary> /// Returns x Bytes, kB, Mb, etc... /// </summary> public static string ToByteSize(this long value) { if (value == 0) return "0 Bytes"; // zero is plural for (int t = Thresholds.Length - 1; t > 0; t--) if (value >= Thresholds[t].Key) return ((double)value / Thresholds[t].Key).ToString("0.00") + Thresholds[t].Value; return "-" + ToByteSize(-value); // negative bytes (common case optimised to the end of this routine) } Maybe there are excessive comments, but I tend to leave them to prevent myself from making the same mistakes over on future visits...
No. But you can implement like this; static double ConvertBytesToMegabytes(long bytes) { return (bytes / 1024f) / 1024f; } static double ConvertKilobytesToMegabytes(long kilobytes) { return kilobytes / 1024f; } Also check out How to correctly convert filesize in bytes into mega or gigabytes?
I have combined some of the answers here into two methods that work great. The second method below will convert from a bytes string (like 1.5.1 GB) back to bytes (like 1621350140) as a long type value. I hope this is useful to others looking for a solution to convert bytes to a string and back into bytes. public static string BytesAsString(float bytes) { string[] suffix = { "B", "KB", "MB", "GB", "TB" }; int i; double doubleBytes = 0; for (i = 0; (int)(bytes / 1024) > 0; i++, bytes /= 1024) { doubleBytes = bytes / 1024.0; } return string.Format("{0:0.00} {1}", doubleBytes, suffix[i]); } public static long StringAsBytes(string bytesString) { if (string.IsNullOrEmpty(bytesString)) { return 0; } const long OneKb = 1024; const long OneMb = OneKb * 1024; const long OneGb = OneMb * 1024; const long OneTb = OneGb * 1024; double returnValue; string suffix = string.Empty; if (bytesString.IndexOf(" ") > 0) { returnValue = float.Parse(bytesString.Substring(0, bytesString.IndexOf(" "))); suffix = bytesString.Substring(bytesString.IndexOf(" ") + 1).ToUpperInvariant(); } else { returnValue = float.Parse(bytesString.Substring(0, bytesString.Length - 2)); suffix = bytesString.ToUpperInvariant().Substring(bytesString.Length - 2); } switch (suffix) { case "KB": { returnValue *= OneKb; break; } case "MB": { returnValue *= OneMb; break; } case "GB": { returnValue *= OneGb; break; } case "TB": { returnValue *= OneTb; break; } default: { break; } } return Convert.ToInt64(returnValue); }
I went for JerKimball's solution, and thumbs up to that. However, I would like to add / point out that this is indeed a matter of controversy as a whole. In my research (for other reasons) I have come up with the following pieces of information. When normal people (I have heard they exist) speak of gigabytes they refer to the metric system wherein 1000 to the power of 3 from the original number of bytes == the number of gigabytes. However, of course there are the IEC / JEDEC standards which are nicely summed up in wikipedia, which instead of 1000 to the power of x they have 1024. Which for physical storage devices (and I guess logical such as amazon and others) means an ever increasing difference between metric vs IEC. So for instance 1 TB == 1 terabyte metric is 1000 to the power of 4, but IEC officially terms the similar number as 1 TiB, tebibyte as 1024 to the power of 4. But, alas, in non-technical applications (I would go by audience) the norm is metric, and in my own app for internal use currently I explain the difference in documentation. But for display purposes I do not even offer anything but metric. Internally even though it's not relevant in my app I only store bytes and do the calculation for display. As a side note I find it somewhat lackluster that the .Net framework AFAIK (and I am frequently wrong thank the powers that be) even in its 4.5 incarnation does not contain anything about this in any libraries internally. One would expect an open source library of some kind to be NuGettable at some point, but I admit this is a small peeve. On the other hand System.IO.DriveInfo and others also only have bytes (as long) which is rather clear.
How about some recursion: private static string ReturnSize(double size, string sizeLabel) { if (size > 1024) { if (sizeLabel.Length == 0) return ReturnSize(size / 1024, "KB"); else if (sizeLabel == "KB") return ReturnSize(size / 1024, "MB"); else if (sizeLabel == "MB") return ReturnSize(size / 1024, "GB"); else if (sizeLabel == "GB") return ReturnSize(size / 1024, "TB"); else return ReturnSize(size / 1024, "PB"); } else { if (sizeLabel.Length > 0) return string.Concat(size.ToString("0.00"), sizeLabel); else return string.Concat(size.ToString("0.00"), "Bytes"); } } Then you can call it: ReturnSize(size, string.Empty);
I recently needed this and required to convert the in bytes to a number in long. Usage: Byte.Kb.ToLong(1) should give 1024. public enum Byte { Kb, Mb, Gb, Tb } public static class ByteSize { private const long OneKb = 1024; private const long OneMb = OneKb * 1024; private const long OneGb = OneMb * 1024; private const long OneTb = OneGb * 1024; public static long ToLong(this Byte size, int value) { return size switch { Byte.Kb => value * OneKb, Byte.Mb => value * OneMb, Byte.Gb => value * OneGb, Byte.Tb => value * OneTb, _ => throw new NotImplementedException("This should never be hit.") }; } } Tests using xunit: [Theory] [InlineData(Byte.Kb, 1, 1024)] [InlineData(Byte.Kb, 2, 2048)] [InlineData(Byte.Mb, 1, 1048576)] [InlineData(Byte.Mb, 2, 2097152)] [InlineData(Byte.Gb, 1, 1073741824)] [InlineData(Byte.Gb, 2, 2147483648)] [InlineData(Byte.Tb, 1, 1099511627776)] [InlineData(Byte.Tb, 2, 2199023255552)] public void ToLong_WhenConverting_ShouldMatchExpected(Byte size, int value, long expected) { var result = size.ToLong(value); result.Should().Be(expected); }
How about: public void printMB(uint sizekB) { double sizeMB = (double) sizekB / 1024; Console.WriteLine("Size is " + sizeMB.ToString("0.00") + "MB"); } E.g. call like printMB(123456); Will result in output "Size is 120,56 MB"
public static class MyExtension { public static string ToPrettySize(this float Size) { return ConvertToPrettySize(Size, 0); } public static string ToPrettySize(this int Size) { return ConvertToPrettySize(Size, 0); } private static string ConvertToPrettySize(float Size, int R) { float F = Size / 1024f; if (F < 1) { switch (R) { case 0: return string.Format("{0:0.00} byte", Size); case 1: return string.Format("{0:0.00} kb", Size); case 2: return string.Format("{0:0.00} mb", Size); case 3: return string.Format("{0:0.00} gb", Size); } } return ConvertToPrettySize(F, ++R); } }
As posted above, the recursion is the favorite way, with the help of logarithm. The following function has 3 arguments : the input, the dimension constraint of the output, that is the third argument. int ByteReDim(unsigned long ival, int constraint, unsigned long *oval) { int base = 1 + (int) log10(ival); (*oval) = ival; if (base > constraint) { (*oval) = (*oval) >> 10; return(1 + ByteReDim((*oval), constraint, oval)); } else return(0); } Now let's convert 12GB of RAM in several units: int main(void) { unsigned long RAM; int unit; // index of below symbols array char symbol[5] = {'B', 'K', 'M', 'G', 'T'}; unit = ByteReDim(12884901888, 12, &RAM); printf("%lu%c\n", RAM, symbol[unit]); // output is 12884901888B unit = ByteReDim(12884901888, 9, &RAM); printf("%lu%c\n", RAM, symbol[unit]); // output is 12582912K unit = ByteReDim(12884901888, 6, &RAM); printf("%lu%c\n", RAM, symbol[unit]); // output is 12288M unit = ByteReDim(12884901888, 3, &RAM); printf("%lu%c\n", RAM, symbol[unit]); // output is 12G }
I use this for Windows (binary prefixes): static readonly string[] BinaryPrefix = { "bytes", "KB", "MB", "GB", "TB" }; // , "PB", "EB", "ZB", "YB" string GetMemoryString(double bytes) { int counter = 0; double value = bytes; string text = ""; do { text = value.ToString("0.0") + " " + BinaryPrefix[counter]; value /= 1024; counter++; } while (Math.Floor(value) > 0 && counter < BinaryPrefix.Length); return text; }
I have incorporated this (with little to no modification) into a UWP DataBinding Converter for my project and thought it might also be useful to others. The code is: using System; using System.Text; using Windows.UI.Xaml.Data; namespace MyApp.Converters { public class ByteSizeConverter : IValueConverter { static readonly string[] sSizeSuffixes = { "bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB" }; // The number of decimal places the formatter should include in the scaled output - default 1dp public int DecimalPlaces { get; set; } = 1; public object Convert(object value, Type targetType, object parameter, string language) { Int64 intVal = System.Convert.ToInt64(value); return SizeSuffix(intVal); } public object ConvertBack(object value, Type targetType, object parameter, string language) { // TODO: Parse string into number and suffix // Scale number by suffix multiplier to get bytes throw new NotImplementedException(); } string SizeSuffix(Int64 value) { if (this.DecimalPlaces < 0) { throw new ArgumentOutOfRangeException(String.Format("DecimalPlaces = {0}", this.DecimalPlaces)); } if (value < 0) { return "-" + SizeSuffix(-value); } if (value == 0) { return string.Format("{0:n" + this.DecimalPlaces + "} bytes", 0); } // magnitude is 0 for bytes, 1 for KB, 2, for MB, etc. int magnitude = (int)Math.Log(value, 1024); // clip magnitude - only 8 values currently supported, this prevents out-of-bounds exception magnitude = Math.Min(magnitude, 8); // 1L << (magnitude * 10) == 2 ^ (10 * magnitude) [i.e. 
the number of bytes in the unit corresponding to magnitude] decimal adjustedSize = (decimal)value / (1L << (magnitude * 10)); // make adjustment when the value is large enough that it would round up to 1000 or more if (Math.Round(adjustedSize, this.DecimalPlaces) >= 1000) { magnitude += 1; adjustedSize /= 1024; } return String.Format("{0:n" + this.DecimalPlaces + "} {1}", adjustedSize, sSizeSuffixes[magnitude]); } } } To use it, add a local resource to your UserControl or Page XAML: <UserControl.Resources> <converters:ByteSizeConverter x:Key="ByteFormat" DecimalPlaces="3" /> </UserControl.Resources> Reference it in a data binding template or data binding instance: <TextBlock HorizontalAlignment="Left" VerticalAlignment="Center" Text="{x:Bind MyItem.FileSize_bytes, Mode=OneWay, Converter={StaticResource ByteFormat}}" /> And hey presto. The magic happens.
https://github.com/logary/logary/blob/master/src/Logary/DataModel.fs#L832-L837 let scaleBytes (value : float) : float * string = let log2 x = log x / log 2. let prefixes = [| ""; "Ki"; "Mi"; "Gi"; "Ti"; "Pi" |] // note the capital K and the 'i' let index = int (log2 value) / 10 1. / 2.**(float index * 10.), sprintf "%s%s" prefixes.[index] (Units.symbol Bytes) (DISCLAIMER: I wrote this code, even the code in the link!)
Here's my spin on #drzaus's answer. I modified it to use rounding errors to our advantage and correctly manage issues around unit boundaries. It also handles negative values. Drop this C# Program into LinqPad: // Kudos: https://stackoverflow.com/a/48467634/117797 void Main() { 0.ToFriendly().Dump(); // 0 B 857.ToFriendly().Dump(); // 857 B (173*1024).ToFriendly().Dump(); // 173 KB (9541*1024).ToFriendly().Dump(); // 9.32 MB (5261890L*1024).ToFriendly().Dump(); // 5.02 GB 1.ToFriendly().Dump(); // 1 B 1024.ToFriendly().Dump(); // 1 KB 1048576.ToFriendly().Dump(); // 1 MB 1073741824.ToFriendly().Dump(); // 1 GB 1099511627776.ToFriendly().Dump(); // 1 TB 1125899906842620.ToFriendly().Dump(); // 1 PB 1152921504606850000.ToFriendly().Dump(); // 1 EB } public static class Extensions { static string[] _byteUnits = new[] { "B", "KB", "MB", "GB", "TB", "PB", "EB" }; public static string ToFriendly(this int number, int decimals = 2) { return ((double)number).ToFriendly(decimals); } public static string ToFriendly(this long number, int decimals = 2) { return ((double)number).ToFriendly(decimals); } public static string ToFriendly(this double number, int decimals = 2) { const double divisor = 1024; int unitIndex = 0; var sign = number < 0 ? "-" : string.Empty; var value = Math.Abs(number); double lastValue = number; while (value > 1) { lastValue = value; // NOTE // The following introduces ever increasing rounding errors, but at these scales we don't care. // It also means we don't have to deal with problematic rounding errors due to dividing doubles. value = Math.Round(value / divisor, decimals); unitIndex++; } if (value < 1 && number != 0) { value = lastValue; unitIndex--; } return $"{sign}{value} {_byteUnits[unitIndex]}"; } } Output is: 0 B 857 B 173 KB 9.32 MB 1.34 MB 5.02 GB 1 B 1 KB 1 MB 1 GB 1 TB 1 PB 1 EB
string Convert(float bytes) { string[] Group = { "Bytes", "KB", "MB", "GB", "TB"}; float B = bytes; int G = 0; while (B >= 1024 && G < 4) { B /= 1024; G += 1; } float truncated = (float)(Math.Truncate((double)B * 100.0) / 100.0); string load = (truncated + " " + Group[G]); return load; }
This is how I do it. Console.Write(FileSizeInBytes > 1048576 ? FileSizeInBytes / 1048576f + " MB" : FileSizeInBytes / 1024f + " KB"); //1048576 = 1024 * 1024
I combined zackmark15's code into an all-purpose file or directory measuring approach: public static string PathSize(string path) { if (String.IsNullOrEmpty(path)) throw new ArgumentNullException(nameof(path)); long bytes; if (File.Exists(path)) bytes = new FileInfo(path).Length; else if (Directory.Exists(path)) bytes = new DirectoryInfo(path).EnumerateFiles("*", SearchOption.AllDirectories).Sum(fileInfo => fileInfo.Length); else throw new ArgumentException("Path does not exist.", nameof(path)); const long UNIT = 1024L; if (bytes < UNIT) return $"{bytes} bytes"; var exp = (int)(Math.Log(bytes) / Math.Log(UNIT)); return $"{bytes / Math.Pow(UNIT, exp):F2} {("KMGTPE")[exp - 1]}B"; }