Cut out Image from Screen and Save [duplicate] - c#

I'm looking for .NET code that does the same as the Snipping Tool - capturing a screen area.
I believe it uses hooks. It would be interesting to know how it highlights the selected fragment.
Update:
Found http://www.codeproject.com/KB/vb/Screen_Shot.aspx , though people say it's missing some important files needed to compile it properly.

The snipping tool effect isn't difficult to implement in Windows Forms. Add a new form to your project and name it "SnippingTool". Make the code look like this:
using System;
using System.Drawing;
using System.Drawing.Drawing2D;
using System.Windows.Forms;
namespace WindowsFormsApplication1 {
public partial class SnippingTool : Form {
public static Image Snip() {
var rc = Screen.PrimaryScreen.Bounds;
using (Bitmap bmp = new Bitmap(rc.Width, rc.Height, System.Drawing.Imaging.PixelFormat.Format32bppPArgb)) {
using (Graphics gr = Graphics.FromImage(bmp))
gr.CopyFromScreen(0, 0, 0, 0, bmp.Size);
using (var snipper = new SnippingTool(bmp)) {
if (snipper.ShowDialog() == DialogResult.OK) {
return snipper.Image;
}
}
return null;
}
}
public SnippingTool(Image screenShot) {
InitializeComponent();
this.BackgroundImage = screenShot;
this.ShowInTaskbar = false;
this.FormBorderStyle = FormBorderStyle.None;
this.WindowState = FormWindowState.Maximized;
this.DoubleBuffered = true;
}
public Image Image { get; set; }
private Rectangle rcSelect = new Rectangle();
private Point pntStart;
protected override void OnMouseDown(MouseEventArgs e) {
// Start the snip on mouse down
if (e.Button != MouseButtons.Left) return;
pntStart = e.Location;
rcSelect = new Rectangle(e.Location, new Size(0, 0));
this.Invalidate();
}
protected override void OnMouseMove(MouseEventArgs e) {
// Modify the selection on mouse move
if (e.Button != MouseButtons.Left) return;
int x1 = Math.Min(e.X, pntStart.X);
int y1 = Math.Min(e.Y, pntStart.Y);
int x2 = Math.Max(e.X, pntStart.X);
int y2 = Math.Max(e.Y, pntStart.Y);
rcSelect = new Rectangle(x1, y1, x2 - x1, y2 - y1);
this.Invalidate();
}
protected override void OnMouseUp(MouseEventArgs e) {
// Complete the snip on mouse-up
if (rcSelect.Width <= 0 || rcSelect.Height <= 0) return;
Image = new Bitmap(rcSelect.Width, rcSelect.Height);
using (Graphics gr = Graphics.FromImage(Image)) {
gr.DrawImage(this.BackgroundImage, new Rectangle(0, 0, Image.Width, Image.Height),
rcSelect, GraphicsUnit.Pixel);
}
DialogResult = DialogResult.OK;
}
protected override void OnPaint(PaintEventArgs e) {
// Draw the current selection
using (Brush br = new SolidBrush(Color.FromArgb(120, Color.White))) {
int x1 = rcSelect.X; int x2 = rcSelect.X + rcSelect.Width;
int y1 = rcSelect.Y; int y2 = rcSelect.Y + rcSelect.Height;
e.Graphics.FillRectangle(br, new Rectangle(0, 0, x1, this.Height));
e.Graphics.FillRectangle(br, new Rectangle(x2, 0, this.Width - x2, this.Height));
e.Graphics.FillRectangle(br, new Rectangle(x1, 0, x2 - x1, y1));
e.Graphics.FillRectangle(br, new Rectangle(x1, y2, x2 - x1, this.Height - y2));
}
using (Pen pen = new Pen(Color.Red, 3)) {
e.Graphics.DrawRectangle(pen, rcSelect);
}
}
protected override bool ProcessCmdKey(ref Message msg, Keys keyData) {
// Allow canceling the snip with the Escape key
if (keyData == Keys.Escape) this.DialogResult = DialogResult.Cancel;
return base.ProcessCmdKey(ref msg, keyData);
}
}
}
Usage:
var bmp = SnippingTool.Snip();
if (bmp != null) {
// Do something with the bitmap
//...
}
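To make the "do something" step concrete, here is a hedged follow-up sketch; the file name is just an example, and Clipboard.SetImage assumes the usual STA WinForms context:
var bmp = SnippingTool.Snip();
if (bmp != null) {
    // Example only: persist the snip as a PNG and put a copy on the clipboard.
    bmp.Save("snip.png", System.Drawing.Imaging.ImageFormat.Png);
    Clipboard.SetImage(bmp);
}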

This is a modified version of Hans's answer that is compatible with multiple monitors and works well with DPI scaling (tested on Windows 7 and Windows 10).
public sealed partial class SnippingTool : Form
{
public static event EventHandler Cancel;
public static event EventHandler AreaSelected;
public static Image Image { get; set; }
private static SnippingTool[] _forms;
private Rectangle _rectSelection;
private Point _pointStart;
public SnippingTool(Image screenShot, int x, int y, int width, int height)
{
InitializeComponent();
BackgroundImage = screenShot;
BackgroundImageLayout = ImageLayout.Stretch;
ShowInTaskbar = false;
FormBorderStyle = FormBorderStyle.None;
StartPosition = FormStartPosition.Manual;
SetBounds(x, y, width, height);
WindowState = FormWindowState.Maximized;
DoubleBuffered = true;
Cursor = Cursors.Cross;
TopMost = true;
}
private void OnCancel(EventArgs e)
{
Cancel?.Invoke(this, e);
}
private void OnAreaSelected(EventArgs e)
{
AreaSelected?.Invoke(this, e);
}
private void CloseForms()
{
for (int i = 0; i < _forms.Length; i++)
{
_forms[i].Dispose();
}
}
public static void Snip()
{
var screens = ScreenHelper.GetMonitorsInfo();
_forms = new SnippingTool[screens.Count];
for (int i = 0; i < screens.Count; i++)
{
int hRes = screens[i].HorizontalResolution;
int vRes = screens[i].VerticalResolution;
int top = screens[i].MonitorArea.Top;
int left = screens[i].MonitorArea.Left;
var bmp = new Bitmap(hRes, vRes, PixelFormat.Format32bppPArgb);
using (var g = Graphics.FromImage(bmp))
{
g.CopyFromScreen(left, top, 0, 0, bmp.Size);
}
_forms[i] = new SnippingTool(bmp, left, top, hRes, vRes);
_forms[i].Show();
}
}
#region Overrides
protected override void OnMouseDown(MouseEventArgs e)
{
// Start the snip on mouse down
if (e.Button != MouseButtons.Left)
{
return;
}
_pointStart = e.Location;
_rectSelection = new Rectangle(e.Location, new Size(0, 0));
Invalidate();
}
protected override void OnMouseMove(MouseEventArgs e)
{
// Modify the selection on mouse move
if (e.Button != MouseButtons.Left)
{
return;
}
int x1 = Math.Min(e.X, _pointStart.X);
int y1 = Math.Min(e.Y, _pointStart.Y);
int x2 = Math.Max(e.X, _pointStart.X);
int y2 = Math.Max(e.Y, _pointStart.Y);
_rectSelection = new Rectangle(x1, y1, x2 - x1, y2 - y1);
Invalidate();
}
protected override void OnMouseUp(MouseEventArgs e)
{
// Complete the snip on mouse-up
if (_rectSelection.Width <= 0 || _rectSelection.Height <= 0)
{
CloseForms();
OnCancel(new EventArgs());
return;
}
Image = new Bitmap(_rectSelection.Width, _rectSelection.Height);
var hScale = BackgroundImage.Width / (double)Width;
var vScale = BackgroundImage.Height / (double)Height;
using (Graphics gr = Graphics.FromImage(Image))
{
gr.DrawImage(BackgroundImage,
new Rectangle(0, 0, Image.Width, Image.Height),
new Rectangle((int)(_rectSelection.X * hScale), (int)(_rectSelection.Y * vScale), (int)(_rectSelection.Width * hScale), (int)(_rectSelection.Height * vScale)),
GraphicsUnit.Pixel);
}
CloseForms();
OnAreaSelected(new EventArgs());
}
protected override void OnPaint(PaintEventArgs e)
{
// Draw the current selection
using (Brush br = new SolidBrush(Color.FromArgb(120, Color.White)))
{
int x1 = _rectSelection.X;
int x2 = _rectSelection.X + _rectSelection.Width;
int y1 = _rectSelection.Y;
int y2 = _rectSelection.Y + _rectSelection.Height;
e.Graphics.FillRectangle(br, new Rectangle(0, 0, x1, Height));
e.Graphics.FillRectangle(br, new Rectangle(x2, 0, Width - x2, Height));
e.Graphics.FillRectangle(br, new Rectangle(x1, 0, x2 - x1, y1));
e.Graphics.FillRectangle(br, new Rectangle(x1, y2, x2 - x1, Height - y2));
}
using (Pen pen = new Pen(Color.Red, 2))
{
e.Graphics.DrawRectangle(pen, _rectSelection);
}
}
protected override bool ProcessCmdKey(ref Message msg, Keys keyData)
{
// Allow canceling the snip with the Escape key
if (keyData == Keys.Escape)
{
Image = null;
CloseForms();
OnCancel(new EventArgs());
}
return base.ProcessCmdKey(ref msg, keyData);
}
#endregion
}
Usage:
SnippingTool.AreaSelected += OnAreaSelected;
SnippingTool.Snip();
private static void OnAreaSelected(object sender, EventArgs e)
{
var bmp = SnippingTool.Image;
// Do something with the bitmap
//...
}
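The class above also raises a Cancel event when the user presses Escape (or releases the mouse without a selection). A hedged sketch of wiring both events; the file name and console output are only examples:
SnippingTool.AreaSelected += (s, e) =>
{
    // Example only: save the captured area as a PNG.
    SnippingTool.Image.Save("snip.png", System.Drawing.Imaging.ImageFormat.Png);
};
SnippingTool.Cancel += (s, e) => Console.WriteLine("Snipping was cancelled.");
SnippingTool.Snip();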
Note that you need a helper class to get the actual monitor resolution and avoid problems with DPI scaling.
This is the code:
public class DeviceInfo
{
public string DeviceName { get; set; }
public int VerticalResolution { get; set; }
public int HorizontalResolution { get; set; }
public Rectangle MonitorArea { get; set; }
}
public static class ScreenHelper
{
private const int DesktopVertRes = 117;
private const int DesktopHorzRes = 118;
[StructLayout(LayoutKind.Sequential)]
internal struct Rect
{
public int left;
public int top;
public int right;
public int bottom;
}
[StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
internal struct MONITORINFOEX
{
public int Size;
public Rect Monitor;
public Rect WorkArea;
public uint Flags;
[MarshalAs(UnmanagedType.ByValTStr, SizeConst = 32)]
public string DeviceName;
}
private delegate bool MonitorEnumDelegate(IntPtr hMonitor, IntPtr hdcMonitor, ref Rect lprcMonitor, IntPtr dwData);
[DllImport("user32.dll")]
private static extern bool EnumDisplayMonitors(IntPtr hdc, IntPtr lprcClip, MonitorEnumDelegate lpfnEnum, IntPtr dwData);
[DllImport("gdi32.dll")]
private static extern IntPtr CreateDC(string lpszDriver, string lpszDevice, string lpszOutput, IntPtr lpInitData);
[DllImport("user32.dll", CharSet = CharSet.Unicode)]
private static extern bool GetMonitorInfo(IntPtr hMonitor, ref MONITORINFOEX lpmi);
[DllImport("User32.dll")]
private static extern int ReleaseDC(IntPtr hwnd, IntPtr dc);
[DllImport("gdi32.dll")]
private static extern int GetDeviceCaps(IntPtr hdc, int nIndex);
private static List<DeviceInfo> _result;
public static List<DeviceInfo> GetMonitorsInfo()
{
_result = new List<DeviceInfo>();
EnumDisplayMonitors(IntPtr.Zero, IntPtr.Zero, MonitorEnum, IntPtr.Zero);
return _result;
}
private static bool MonitorEnum(IntPtr hMonitor, IntPtr hdcMonitor, ref Rect lprcMonitor, IntPtr dwData)
{
var mi = new MONITORINFOEX();
mi.Size = Marshal.SizeOf(typeof(MONITORINFOEX));
bool success = GetMonitorInfo(hMonitor, ref mi);
if (success)
{
var dc = CreateDC(mi.DeviceName, mi.DeviceName, null, IntPtr.Zero);
var di = new DeviceInfo
{
DeviceName = mi.DeviceName,
MonitorArea = new Rectangle(mi.Monitor.left, mi.Monitor.top, mi.Monitor.right - mi.Monitor.left, mi.Monitor.bottom - mi.Monitor.top),
VerticalResolution = GetDeviceCaps(dc, DesktopVertRes),
HorizontalResolution = GetDeviceCaps(dc, DesktopHorzRes)
};
ReleaseDC(IntPtr.Zero, dc);
_result.Add(di);
}
return true;
}
}
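As a quick sanity check of the helper (a hedged sketch, not part of the original answer), you can dump what it reports for each monitor:
foreach (var monitor in ScreenHelper.GetMonitorsInfo())
{
    // DeviceInfo carries the physical resolution and the virtual-desktop area of each screen.
    Console.WriteLine("{0}: {1}x{2} pixels, area {3}",
        monitor.DeviceName, monitor.HorizontalResolution, monitor.VerticalResolution, monitor.MonitorArea);
}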
Here is the complete source code

It takes a full-screen screenshot, then (probably) copies it, applies a translucent effect and displays it. When you click and drag, it can then overlay the corresponding region from the original capture.
You can get a screenshot using CopyFromScreen() or using the GDI API.
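For the CopyFromScreen() route, here is a minimal sketch that captures the whole virtual desktop (all monitors); the output file name is just an example:
Rectangle bounds = SystemInformation.VirtualScreen;
using (var screenshot = new Bitmap(bounds.Width, bounds.Height))
{
    using (var g = Graphics.FromImage(screenshot))
    {
        // Copy everything starting at the top-left corner of the virtual screen.
        g.CopyFromScreen(bounds.Left, bounds.Top, 0, 0, screenshot.Size);
    }
    screenshot.Save("screen.png", System.Drawing.Imaging.ImageFormat.Png);
}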

Related

Paint event isn't invoked

I'm trying to implement a random walk by moving a ball across the screen. The algorithm seems to be working, but debugging shows that the Paint event isn't invoked.
First of all, I tried to do this with the MVC pattern. For clarity, I will give the code of these components.
View:
public partial class ViewBallBounce : Form, Observer
{
private ModelRandomWalk modelRandomWalk;
private ControllerRandomWalk controllerRandomWalk;
static int i = 0;
public ViewBallBounce(ModelRandomWalk _modelRandomWalk)
{
InitializeComponent();
this.modelRandomWalk = _modelRandomWalk;
controllerRandomWalk = new ControllerRandomWalk(modelRandomWalk, this, ClientSize.Width, ClientSize.Height);
modelRandomWalk.Register(this);
this.SetStyle(ControlStyles.AllPaintingInWmPaint | ControlStyles.UserPaint | ControlStyles.DoubleBuffer, true);
this.UpdateStyles();
}
public void UpdateState()
{
this.Refresh();
}
private void ViewBallBounce_Load(object sender, EventArgs e)
{
controllerRandomWalk.Bounce();
}
private void ViewBallBounce_Paint(object sender, PaintEventArgs e)
{
Graphics g = e.Graphics;
g.SmoothingMode = System.Drawing.Drawing2D.SmoothingMode.AntiAlias;
g.Clear(BackColor);
i++;
g.FillEllipse(Brushes.Blue, modelRandomWalk.X, modelRandomWalk.Y,
modelRandomWalk.Width, modelRandomWalk.Height);
g.DrawEllipse(Pens.Black, modelRandomWalk.X, modelRandomWalk.Y,
modelRandomWalk.Width, modelRandomWalk.Height);
}
}
Control:
public class ControllerRandomWalk
{
private ModelRandomWalk modelRandomWalk;
private ViewBallBounce viewBallBounce;
public int clientH, clientW;
public ControllerRandomWalk(ModelRandomWalk modelRandomWalk, ViewBallBounce viewBallBounce, int _width, int _height)
{
this.modelRandomWalk = modelRandomWalk;
this.viewBallBounce = viewBallBounce;
this.clientH = _height;
this.clientW = _width;
}
public int Bounce()
{
return modelRandomWalk.bounce(100, clientH, clientW);
}
}
Model:
public class ModelRandomWalk
{
private int x, y;
private int velocityX, velocityY;
private int width, height;
public int X { get { return x; } }
public int Y { get { return y; } }
public int VelocityX { get { return velocityX; } }
public int VelocityY { get { return velocityY; } }
public int Width { get { return width; } }
public int Height { get { return height; } }
public void updatePosition(int key)
{
if (key == 0) this.x += velocityX;
else this.y += velocityY;
}
public void updateVelocity(int key)
{
if (key == 0) this.velocityX = -this.velocityX;
else this.velocityY = -this.velocityY;
}
public ModelRandomWalk(int _x, int _y, int _width, int _height)
{
this.x = _x;
this.y = _y;
this.width = _width;
this.height = _height;
}
public int bounce(int _steps, int _h, int _w)
{
int steps = _steps;
int clientH = _h, clientW = _w;
for(int i = 0; i < steps; i++)
{
Random rnd = new Random();
int v = rnd.Next(0, 4); // 0 = +v_x, 1 = +v_y, 2 = -v_x, 3 = -v_y
switch (v)
{
case 0:
velocityX++;
break;
case 1:
velocityY++;
break;
case 2:
velocityX--;
break;
case 3:
velocityY--;
break;
}
updatePosition(0);
if (X < 0)
{
x = 6;
updateVelocity(0);
}
else if (X + Width > clientW)
{
x = clientW - 6;
updateVelocity(0);
}
updatePosition(1);
if (Y < 0)
{
y = 6;
updateVelocity(1);
}
else if (Y + Height > clientH)
{
y = clientH - 6;
updateVelocity(1);
}
UpdateObservers();
}
return 0;
}
private ArrayList listeners = new ArrayList();
public void Register(Observer o)
{
listeners.Add(o);
o.UpdateState();
}
public void Deregister(Observer o)
{
listeners.Remove(o);
}
public void UpdateObservers()
{
foreach (Observer el in listeners)
{
el.UpdateState();
}
}
}
Sorry for the long post. The main problem is in these lines: ViewBallBounce_Paint is not invoked.
public void UpdateState()
{
this.Refresh();
}
private void ViewBallBounce_Paint(object sender, PaintEventArgs e)
{
Graphics g = e.Graphics;
g.SmoothingMode = System.Drawing.Drawing2D.SmoothingMode.AntiAlias;
g.Clear(BackColor);
i++; //just checking has it invoked
g.FillEllipse(Brushes.Blue, modelRandomWalk.X, modelRandomWalk.Y,
modelRandomWalk.Width, modelRandomWalk.Height);
g.DrawEllipse(Pens.Black, modelRandomWalk.X, modelRandomWalk.Y,
modelRandomWalk.Width, modelRandomWalk.Height);
}
I have tried setting breakpoints in a lot of places and I don't understand why nothing happens after Refresh. Also, a breakpoint in ViewBallBounce_Paint shows strange values for modelRandomWalk.X and modelRandomWalk.Y, such as 224 or -78, and they don't change.
What could be wrong?
UPD: A new form is opened and the ball is drawn, but it doesn't move. I changed the number of steps in the loop to 10 and found that the ball only moved after the loop in the Model component had finished. So this.Refresh() did run, but the Paint event was only invoked after the 10 iterations. The movement did take the changes of velocity and position in the Model component into account. I also put Thread.Sleep(1000) in ViewBallBounce_Paint, but there was no change.
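For reference, one way to see each intermediate frame is to let the message loop run between steps instead of performing all the steps in one tight loop during Load. Below is a hedged sketch using a WinForms Timer; BounceOneStep is a hypothetical single-step variant of Bounce():
private System.Windows.Forms.Timer walkTimer;
private void ViewBallBounce_Shown(object sender, EventArgs e)
{
    // Drive the walk one step per tick so each Refresh() can be painted
    // while the form is visible and the message loop is running.
    walkTimer = new System.Windows.Forms.Timer { Interval = 50 };
    walkTimer.Tick += (s, args) =>
    {
        controllerRandomWalk.BounceOneStep(); // hypothetical single-step variant of Bounce()
        Refresh();
    };
    walkTimer.Start();
}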

Cursor is moving but it can't move inside games

I am programming a little tool for myself, just for fun. It is for a game where you can play a guitar, and I want to make a program that automates the guitar to play some nice little songs. The problem is: I can move the cursor without any problem on my desktop. The cursor can also move in the game's settings menu. But when I'm in the game (controlling the player/camera), moving the cursor has no effect. The player model doesn't even move its head. (It is Windows Forms.)
I have tried activating the program in the menu (to make sure it works), and it worked; then I switched to the player/camera and it just won't move.
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Runtime.InteropServices;
namespace WindowsFormsApp2
{
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
ghk = new KeyHandler(Keys.Multiply, this);
ghk.Register();
KeyPositioner();
}
bool guitarPlay = false;
private void HandleHotkey()
{
if(checkBox1.Checked)
{
guitarPlay = true;
}
if(guitarPlay)
{
for(int i = Cursor.Position.Y; i < 1000; i++)
{
Cursor.Position = new Point(i);
System.Threading.Thread.Sleep(100);
}
//Cursor.Position = new Point(Cursor.Position.X - 0, Cursor.Position.Y + 30);
//System.Threading.Thread.Sleep(100);
//Cursor.Position = new Point(Cursor.Position.X - 0, Cursor.Position.Y - 80);
//System.Threading.Thread.Sleep(100);
//Cursor.Position = new Point(Cursor.Position.X - 0, Cursor.Position.Y - 90);
//System.Threading.Thread.Sleep(100);
//Cursor.Position = new Point(Cursor.Position.X - 0, Cursor.Position.Y + 10);
//System.Threading.Thread.Sleep(100);
//Cursor.Position = new Point(Cursor.Position.X - 0, Cursor.Position.Y - 70);
}
KeyPositioner();
// Do stuff...
//this.Cursor = new Cursor(Cursor.Current.Handle);
Cursor.Position = new Point(Cursor.Position.X - 0, Cursor.Position.Y + 10);
KeyPositioner();
}
private void KeyPositioner()
{
int posx = Cursor.Position.X;
int posy = Cursor.Position.Y;
string posxx;
string posyy;
posxx = Convert.ToString(posx);
posyy = Convert.ToString(posy);
label1.Text = posxx;
label4.Text = posyy;
}
protected override void WndProc(ref Message m)
{
if (m.Msg == Constants.WM_HOTKEY_MSG_ID)
HandleHotkey();
base.WndProc(ref m);
}
private void Form1_Load(object sender, EventArgs e)
{
}
public class KeyHandler
{
[DllImport("user32.dll")]
private static extern bool RegisterHotKey(IntPtr hWnd, int id, int fsModifiers, int vk);
[DllImport("user32.dll")]
private static extern bool UnregisterHotKey(IntPtr hWnd, int id);
private int key;
private IntPtr hWnd;
private int id;
public KeyHandler(Keys key, Form form)
{
this.key = (int)key;
this.hWnd = form.Handle;
id = this.GetHashCode();
}
public override int GetHashCode()
{
return key ^ hWnd.ToInt32();
}
public bool Register()
{
return RegisterHotKey(hWnd, id, 0, key);
}
public bool Unregister()
{
return UnregisterHotKey(hWnd, id);
}
}
private KeyHandler ghk;
public static class Constants
{
//windows message id for hotkey
public const int WM_HOTKEY_MSG_ID = 0x0312;
}
private void label1_Click(object sender, EventArgs e)
{
}
private void label4_Click(object sender, EventArgs e)
{
}
private void checkBox1_CheckedChanged(object sender, EventArgs e)
{
}
}
}
I expect that the code should be able to move the camera view of the player model too.

Large, odd ratio image resize in C#

I have a particular problem that I need help with. I am working with complex proteomics data, and one of our plots involves a heatmap of the raw data. I calculate these heatmaps as a raw image that I then resize to fit my chart canvas. The image files produced that way are usually very imbalanced when it comes to width vs. height.
Usually, these images are around 10 to 100 pixels wide and 5000 to 8000 pixels high (this is the size of my raw 2D data array that I have to convert into an image). The target resolution afterwards would be something like 1300 x 600 pixels.
I usually use this function for resizing my image to a target size
public static Image Resize(Image img, int width, int height) {
Bitmap bmp = new Bitmap(width, height);
Graphics graphic = Graphics.FromImage((Image)bmp);
graphic.InterpolationMode = InterpolationMode.NearestNeighbor;
graphic.PixelOffsetMode = PixelOffsetMode.Half;
graphic.DrawImage(img, 0, 0, width, height);
graphic.Dispose();
return (Image)bmp;
}
This usually works fine for the dimensions described above. But now I have a new dataset with dimensions of 6 x 54343 pixels.
When using the same code on this image, the resized image is half blank.
Original Image:
http://files.biognosys.ch/FileSharing/20170427_StackOverflow/raw.png
(the original image does not show properly in most browsers so use "save link as...")
How it should look (using photoshop):
http://files.biognosys.ch/FileSharing/20170427_StackOverflow/photoshop_resize.png
How it looks when I use the code snippet above:
http://files.biognosys.ch/FileSharing/20170427_StackOverflow/code_resized.png
Please keep in mind that this has worked for years without problems for images of 6 x 8000, so I guess I am not doing anything fundamentally wrong here.
It is also important that I use NearestNeighbor interpolation for the resizing, so any solution that involves other interpolation modes and does not produce the "How it should look" image is ultimately not useful for me.
Oli
It looks like you've hit some legacy limitation from the 16-bit Windows era. The obvious way to work around it is to pre-split the source image into smaller chunks using just memory operations and then draw all those chunks with resizing using Graphics. This method assumes your source image is a Bitmap rather than just an Image, but that doesn't seem to be a limitation for you. Here is a sketch of the code:
[DllImport("kernel32.dll", EntryPoint = "CopyMemory", SetLastError = true)]
public static extern void CopyMemoryUnmanaged(IntPtr dest, IntPtr src, int count);
// in case you can't use P/Invoke, copy via intermediate .Net buffer
static void CopyMemoryNet(IntPtr dst, IntPtr src, int count)
{
byte[] buffer = new byte[count];
Marshal.Copy(src, buffer, 0, count);
Marshal.Copy(buffer, 0, dst, count);
}
static Image CopyImagePart(Bitmap srcImg, int startH, int endH)
{
var width = srcImg.Width;
var height = endH - startH;
var srcBitmapData = srcImg.LockBits(new Rectangle(0, startH, width, height), ImageLockMode.ReadOnly, srcImg.PixelFormat);
var dstImg = new Bitmap(width, height, srcImg.PixelFormat);
var dstBitmapData = dstImg.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.ReadWrite, srcImg.PixelFormat);
int bytesCount = Math.Abs(srcBitmapData.Stride) * height;
CopyMemoryUnmanaged(dstBitmapData.Scan0, srcBitmapData.Scan0, bytesCount);
// in case you can't use P/Invoke, copy via intermediate .Net buffer
//CopyMemoryNet(dstBitmapData.Scan0, srcBitmapData.Scan0, bytesCount);
srcImg.UnlockBits(srcBitmapData);
dstImg.UnlockBits(dstBitmapData);
return dstImg;
}
public static Image ResizeInParts(Bitmap srcBmp, int width, int height)
{
int srcStep = srcBmp.Height;
int dstStep = height;
while (srcStep > 30000)
{
srcStep /= 2;
dstStep /= 2;
}
var resBmp = new Bitmap(width, height);
using (Graphics graphic = Graphics.FromImage(resBmp))
{
graphic.InterpolationMode = InterpolationMode.NearestNeighbor;
graphic.PixelOffsetMode = PixelOffsetMode.Half;
for (int srcTop = 0, dstTop = 0; srcTop < srcBmp.Height; srcTop += srcStep, dstTop += dstStep)
{
int srcBottom = srcTop + srcStep;
int dstH = dstStep;
if (srcBottom > srcBmp.Height)
{
srcBottom = srcBmp.Height;
dstH = height - dstTop;
}
using (var imgPart = CopyImagePart(srcBmp, srcTop, srcBottom))
{
graphic.DrawImage(imgPart, 0, dstTop, width, dstH);
}
}
}
return resBmp;
}
Here is what I get for your example image:
It is not the same as your photoshop_resize.png but is quite similar to your code_resized.png
This code can be improved to better handle various "edge" cases, such as when srcBmp.Height is not even, or the seams between parts (pixels at the seams are interpolated using only half of the pixels they should be), but this is not easy to do without assuming some "good" size for both the source and the resized image, or re-implementing the interpolation logic yourself. Still, this code might already be good enough for your usage given your scaling factors.
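A hedged usage sketch of ResizeInParts, assuming the heatmap is on disk as raw.png and the target canvas is 1300 x 600:
using (var source = new Bitmap("raw.png"))
using (var resized = ResizeInParts(source, 1300, 600))
{
    // Example only: write the scaled heatmap next to the source file.
    resized.Save("resized.png", System.Drawing.Imaging.ImageFormat.Png);
}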
Here is a solution that seems to work. It's based on WIC ("Windows Imaging Component"), the native component that Windows (and WPF) uses for all imaging operations.
I have provided a small .NET interop layer for it. It doesn't expose all WIC features, but it will let you load, scale and save an image from a file or a stream. The Scale method has a scaling option similar to the GDI+ one.
It seems to work OK with your sample, although the result is not strictly equivalent to the Photoshop one. This is how you can use it:
using (var bmp = WicBitmapSource.Load("input.png"))
{
bmp.Scale(1357, 584, WicBitmapInterpolationMode.NearestNeighbor);
bmp.Save("output.png");
}
...
public enum WicBitmapInterpolationMode
{
NearestNeighbor = 0,
Linear = 1,
Cubic = 2,
Fant = 3,
HighQualityCubic = 4,
}
public sealed class WicBitmapSource : IDisposable
{
private IWICBitmapSource _source;
private WicBitmapSource(IWICBitmapSource source, Guid format)
{
_source = source;
Format = format;
Stats();
}
public Guid Format { get; }
public int Width { get; private set; }
public int Height { get; private set; }
public double DpiX { get; private set; }
public double DpiY { get; private set; }
private void Stats()
{
if (_source == null)
{
Width = 0;
Height = 0;
DpiX = 0;
DpiY = 0;
return;
}
int w, h;
_source.GetSize(out w, out h);
Width = w;
Height = h;
double dpix, dpiy;
_source.GetResolution(out dpix, out dpiy);
DpiX = dpix;
DpiY = dpiy;
}
private void CheckDisposed()
{
if (_source == null)
throw new ObjectDisposedException(null);
}
public void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
~WicBitmapSource()
{
Dispose(false);
}
private void Dispose(bool disposing)
{
if (_source != null)
{
Marshal.ReleaseComObject(_source);
_source = null;
}
}
public void Save(string filePath)
{
Save(filePath, Format, Guid.Empty);
}
public void Save(string filePath, Guid pixelFormat)
{
Save(filePath, Format, pixelFormat);
}
public void Save(string filePath, Guid encoderFormat, Guid pixelFormat)
{
if (filePath == null)
throw new ArgumentNullException(nameof(filePath));
if (encoderFormat == Guid.Empty)
{
string ext = Path.GetExtension(filePath).ToLowerInvariant();
// we support only png & jpg
if (ext == ".png")
{
encoderFormat = new Guid(0x1b7cfaf4, 0x713f, 0x473c, 0xbb, 0xcd, 0x61, 0x37, 0x42, 0x5f, 0xae, 0xaf);
}
else if (ext == ".jpeg" || ext == ".jpe" || ext == ".jpg" || ext == ".jfif" || ext == ".exif")
{
encoderFormat = new Guid(0x19e4a5aa, 0x5662, 0x4fc5, 0xa0, 0xc0, 0x17, 0x58, 0x02, 0x8e, 0x10, 0x57);
}
}
if (encoderFormat == Guid.Empty)
throw new ArgumentException();
using (var file = File.OpenWrite(filePath))
{
Save(file, encoderFormat, pixelFormat);
}
}
public void Save(Stream stream)
{
Save(stream, Format, Guid.Empty);
}
public void Save(Stream stream, Guid pixelFormat)
{
Save(stream, Format, pixelFormat);
}
public void Save(Stream stream, Guid encoderFormat, Guid pixelFormat)
{
if (stream == null)
throw new ArgumentNullException(nameof(stream));
CheckDisposed();
Save(_source, stream, encoderFormat, pixelFormat, WICBitmapEncoderCacheOption.WICBitmapEncoderNoCache, null);
}
public void Scale(int? width, int? height, WicBitmapInterpolationMode mode)
{
if (!width.HasValue && !height.HasValue)
throw new ArgumentException();
int neww;
int newh;
if (width.HasValue && height.HasValue)
{
neww = width.Value;
newh = height.Value;
}
else
{
int w = Width;
int h = Height;
if (w == 0 || h == 0)
return;
if (width.HasValue)
{
neww = width.Value;
newh = (width.Value * h) / w;
}
else
{
newh = height.Value;
neww = (height.Value * w) / h;
}
}
if (neww <= 0 || newh <= 0)
throw new ArgumentException();
CheckDisposed();
_source = Scale(_source, neww, newh, mode);
Stats();
}
// we support only 1-framed files (unlike TIF for example)
public static WicBitmapSource Load(string filePath)
{
if (filePath == null)
throw new ArgumentNullException(nameof(filePath));
return LoadBitmapSource(filePath, 0, WICDecodeOptions.WICDecodeMetadataCacheOnDemand);
}
public static WicBitmapSource Load(Stream stream)
{
if (stream == null)
throw new ArgumentNullException(nameof(stream));
return LoadBitmapSource(stream, 0, WICDecodeOptions.WICDecodeMetadataCacheOnDemand);
}
private static WicBitmapSource LoadBitmapSource(string filePath, int frameIndex, WICDecodeOptions metadataOptions)
{
var wfac = (IWICImagingFactory)new WICImagingFactory();
IWICBitmapDecoder decoder = null;
try
{
decoder = wfac.CreateDecoderFromFilename(filePath, null, GenericAccessRights.GENERIC_READ, metadataOptions);
return new WicBitmapSource(decoder.GetFrame(frameIndex), decoder.GetContainerFormat());
}
finally
{
Release(decoder);
Release(wfac);
}
}
private static WicBitmapSource LoadBitmapSource(Stream stream, int frameIndex, WICDecodeOptions metadataOptions)
{
var wfac = (IWICImagingFactory)new WICImagingFactory();
IWICBitmapDecoder decoder = null;
try
{
decoder = wfac.CreateDecoderFromStream(new ManagedIStream(stream), null, metadataOptions);
return new WicBitmapSource(decoder.GetFrame(frameIndex), decoder.GetContainerFormat());
}
finally
{
Release(decoder);
Release(wfac);
}
}
private static IWICBitmapScaler Scale(IWICBitmapSource source, int width, int height, WicBitmapInterpolationMode mode)
{
var wfac = (IWICImagingFactory)new WICImagingFactory();
IWICBitmapScaler scaler = null;
try
{
scaler = wfac.CreateBitmapScaler();
scaler.Initialize(source, width, height, mode);
Marshal.ReleaseComObject(source);
return scaler;
}
finally
{
Release(wfac);
}
}
private static void Save(IWICBitmapSource source, Stream stream, Guid containerFormat, Guid pixelFormat, WICBitmapEncoderCacheOption cacheOptions, WICRect rect)
{
var wfac = (IWICImagingFactory)new WICImagingFactory();
IWICBitmapEncoder encoder = null;
IWICBitmapFrameEncode frame = null;
try
{
encoder = wfac.CreateEncoder(containerFormat, null);
encoder.Initialize(new ManagedIStream(stream), cacheOptions);
encoder.CreateNewFrame(out frame, IntPtr.Zero);
frame.Initialize(IntPtr.Zero);
if (pixelFormat != Guid.Empty)
{
frame.SetPixelFormat(pixelFormat);
}
frame.WriteSource(source, rect);
frame.Commit();
encoder.Commit();
}
finally
{
Release(frame);
Release(encoder);
Release(wfac);
}
}
private static void Release(object obj)
{
if (obj != null)
{
Marshal.ReleaseComObject(obj);
}
}
[ComImport]
[Guid("CACAF262-9370-4615-A13B-9F5539DA4C0A")]
private class WICImagingFactory
{
}
[StructLayout(LayoutKind.Sequential)]
private class WICRect
{
public int X;
public int Y;
public int Width;
public int Height;
}
[Flags]
private enum WICDecodeOptions
{
WICDecodeMetadataCacheOnDemand = 0x0,
WICDecodeMetadataCacheOnLoad = 0x1,
}
[Flags]
private enum WICBitmapEncoderCacheOption
{
WICBitmapEncoderCacheInMemory = 0x0,
WICBitmapEncoderCacheTempFile = 0x1,
WICBitmapEncoderNoCache = 0x2,
}
[Flags]
private enum GenericAccessRights : uint
{
GENERIC_READ = 0x80000000,
GENERIC_WRITE = 0x40000000,
GENERIC_EXECUTE = 0x20000000,
GENERIC_ALL = 0x10000000,
GENERIC_READ_WRITE = GENERIC_READ | GENERIC_WRITE
}
[Guid("ec5ec8a9-c395-4314-9c77-54d7a935ff70"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
private interface IWICImagingFactory
{
IWICBitmapDecoder CreateDecoderFromFilename([MarshalAs(UnmanagedType.LPWStr)] string wzFilename, [MarshalAs(UnmanagedType.LPArray, SizeConst = 1)] Guid[] pguidVendor, GenericAccessRights dwDesiredAccess, WICDecodeOptions metadataOptions);
IWICBitmapDecoder CreateDecoderFromStream(IStream pIStream, [MarshalAs(UnmanagedType.LPArray, SizeConst = 1)] Guid[] pguidVendor, WICDecodeOptions metadataOptions);
void NotImpl2();
void NotImpl3();
void NotImpl4();
IWICBitmapEncoder CreateEncoder([MarshalAs(UnmanagedType.LPStruct)] Guid guidContainerFormat, [MarshalAs(UnmanagedType.LPArray, SizeConst = 1)] Guid[] pguidVendor);
void NotImpl6();
void NotImpl7();
IWICBitmapScaler CreateBitmapScaler();
// not fully impl...
}
[Guid("00000120-a8f2-4877-ba0a-fd2b6645fb94"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
private interface IWICBitmapSource
{
void GetSize(out int puiWidth, out int puiHeight);
Guid GetPixelFormat();
void GetResolution(out double pDpiX, out double pDpiY);
void NotImpl3();
void NotImpl4();
}
[Guid("00000302-a8f2-4877-ba0a-fd2b6645fb94"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
private interface IWICBitmapScaler : IWICBitmapSource
{
#region IWICBitmapSource
new void GetSize(out int puiWidth, out int puiHeight);
new Guid GetPixelFormat();
new void GetResolution(out double pDpiX, out double pDpiY);
new void NotImpl3();
new void NotImpl4();
#endregion IWICBitmapSource
void Initialize(IWICBitmapSource pISource, int uiWidth, int uiHeight, WicBitmapInterpolationMode mode);
}
[Guid("9EDDE9E7-8DEE-47ea-99DF-E6FAF2ED44BF"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
private interface IWICBitmapDecoder
{
void NotImpl0();
void NotImpl1();
Guid GetContainerFormat();
void NotImpl3();
void NotImpl4();
void NotImpl5();
void NotImpl6();
void NotImpl7();
void NotImpl8();
void NotImpl9();
IWICBitmapFrameDecode GetFrame(int index);
}
[Guid("3B16811B-6A43-4ec9-A813-3D930C13B940"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
private interface IWICBitmapFrameDecode : IWICBitmapSource
{
// not fully impl...
}
[Guid("00000103-a8f2-4877-ba0a-fd2b6645fb94"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
private interface IWICBitmapEncoder
{
void Initialize(IStream pIStream, WICBitmapEncoderCacheOption cacheOption);
Guid GetContainerFormat();
void NotImpl2();
void NotImpl3();
void NotImpl4();
void NotImpl5();
void NotImpl6();
void CreateNewFrame(out IWICBitmapFrameEncode ppIFrameEncode, IntPtr encoderOptions);
void Commit();
// not fully impl...
}
[Guid("00000105-a8f2-4877-ba0a-fd2b6645fb94"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
private interface IWICBitmapFrameEncode
{
void Initialize(IntPtr pIEncoderOptions);
void SetSize(int uiWidth, int uiHeight);
void SetResolution(double dpiX, double dpiY);
void SetPixelFormat([MarshalAs(UnmanagedType.LPStruct)] Guid pPixelFormat);
void NotImpl4();
void NotImpl5();
void NotImpl6();
void NotImpl7();
void WriteSource(IWICBitmapSource pIBitmapSource, WICRect prc);
void Commit();
// not fully impl...
}
private class ManagedIStream : IStream
{
private Stream _stream;
public ManagedIStream(Stream stream)
{
_stream = stream;
}
public void Read(byte[] buffer, int count, IntPtr pRead)
{
int read = _stream.Read(buffer, 0, count);
if (pRead != IntPtr.Zero)
{
Marshal.WriteInt32(pRead, read);
}
}
public void Seek(long offset, int origin, IntPtr newPosition)
{
long pos = _stream.Seek(offset, (SeekOrigin)origin);
if (newPosition != IntPtr.Zero)
{
Marshal.WriteInt64(newPosition, pos);
}
}
public void SetSize(long newSize)
{
_stream.SetLength(newSize);
}
public void Stat(out System.Runtime.InteropServices.ComTypes.STATSTG stg, int flags)
{
const int STGTY_STREAM = 2;
stg = new System.Runtime.InteropServices.ComTypes.STATSTG();
stg.type = STGTY_STREAM;
stg.cbSize = _stream.Length;
stg.grfMode = 0;
if (_stream.CanRead && _stream.CanWrite)
{
const int STGM_READWRITE = 0x00000002;
stg.grfMode |= STGM_READWRITE;
return;
}
if (_stream.CanRead)
{
const int STGM_READ = 0x00000000;
stg.grfMode |= STGM_READ;
return;
}
if (_stream.CanWrite)
{
const int STGM_WRITE = 0x00000001;
stg.grfMode |= STGM_WRITE;
return;
}
throw new IOException();
}
public void Write(byte[] buffer, int count, IntPtr written)
{
_stream.Write(buffer, 0, count);
if (written != IntPtr.Zero)
{
Marshal.WriteInt32(written, count);
}
}
public void Clone(out IStream ppstm) { throw new NotImplementedException(); }
public void Commit(int grfCommitFlags) { throw new NotImplementedException(); }
public void CopyTo(IStream pstm, long cb, IntPtr pcbRead, IntPtr pcbWritten) { throw new NotImplementedException(); }
public void LockRegion(long libOffset, long cb, int dwLockType) { throw new NotImplementedException(); }
public void Revert() { throw new NotImplementedException(); }
public void UnlockRegion(long libOffset, long cb, int dwLockType) { throw new NotImplementedException(); }
}
}

Restore maximized state on secondary monitor

I have a problem restoring the state of a window that was maximized on a secondary monitor.
I maximize the window on the non-primary screen and then close it.
When the window is reopened, it is maximized again, but on the primary screen.
What I want is for it to be maximized on the non-primary screen (the screen that displayed the window when it was closed).
Please help me if you know how.
Note: if the window state is Normal, the window is restored to the correct screen.
My code as below:
if (ShellState == WindowState.Maximized)
{
ShellState = WindowState.Normal;
LeftPosition = Screen.AllScreens[selectedScreen].WorkingArea.Left;
TopPosition = Screen.AllScreens[selectedScreen].WorkingArea.Top;
ShellHeight = Screen.AllScreens[selectedScreen].WorkingArea.Height;
ShellWidth = Screen.AllScreens[selectedScreen].WorkingArea.Width;
ShellState = WindowState.Maximized;
}
We had many problems on multi-screen systems using the standard WPF tools for storing and restoring the window state and size, as well as the screen assignment.
We ended up creating a custom behavior that uses the native WinAPI functions.
Here is the (simplified) source code of our behavior. You can use it in your application instead of the WPF tools.
You have to change the way the window placement is stored. This can be a dependency property providing a container, a static Properties.Settings reference, or something else. In the code below, a static ApplicationSettings reference is used as an example (a minimal placeholder for it is sketched after the XAML snippet at the end).
class WindowPlacementPersistenceBehavior : Behavior<Window>
{
protected override void OnAttached()
{
base.OnAttached();
this.AssociatedObject.SourceInitialized += this.AssociatedObject_SourceInitialized;
this.AssociatedObject.Closing += this.AssociatedObject_Closing;
}
protected override void OnDetaching()
{
this.AssociatedObject.SourceInitialized -= this.AssociatedObject_SourceInitialized;
this.AssociatedObject.Closing -= this.AssociatedObject_Closing;
base.OnDetaching();
}
private void AssociatedObject_Closing(object sender, CancelEventArgs e)
{
WINDOWPLACEMENT wp;
NativeMethods.GetWindowPlacement(new WindowInteropHelper(this.AssociatedObject).Handle, out wp);
// Here you can store the window placement
ApplicationSettings.WindowPlacement = wp.ToString();
}
private void AssociatedObject_SourceInitialized(object sender, EventArgs e)
{
// Here you can load the window placement
WINDOWPLACEMENT wp = WINDOWPLACEMENT.Parse(ApplicationSettings.WindowPlacement);
if (wp.ShowCmd == NativeMethods.SW_SHOWMINIMIZED)
{
// Don't start in the minimized state
wp.ShowCmd = NativeMethods.SW_SHOWNORMAL;
}
try
{
NativeMethods.SetWindowPlacement(new WindowInteropHelper(this.AssociatedObject).Handle, ref wp);
}
catch
{
}
}
[Serializable]
[StructLayout(LayoutKind.Sequential)]
private struct RECT
{
public int Left;
public int Top;
public int Right;
public int Bottom;
public static RECT Parse(string input)
{
RECT result;
string[] items = input.Split(';');
result.Left = int.Parse(items[0]);
result.Top = int.Parse(items[1]);
result.Right = int.Parse(items[2]);
result.Bottom = int.Parse(items[3]);
return result;
}
public override string ToString()
{
return this.Left + ";" + this.Top + ";" + this.Right + ";" + this.Bottom;
}
}
[Serializable]
[StructLayout(LayoutKind.Sequential)]
private struct POINT
{
public int X;
public int Y;
public static POINT Parse(string input)
{
POINT result;
string[] items = input.Split(';');
result.X = int.Parse(items[0]);
result.Y = int.Parse(items[1]);
return result;
}
public override string ToString()
{
return this.X + ";" + this.Y;
}
}
[Serializable]
[StructLayout(LayoutKind.Sequential)]
private struct WINDOWPLACEMENT
{
public int Length;
public int Flags;
public int ShowCmd;
public POINT MinPosition;
public POINT MaxPosition;
public RECT NormalPosition;
public static WINDOWPLACEMENT Parse(string input)
{
WINDOWPLACEMENT result = default(WINDOWPLACEMENT);
result.Length = Marshal.SizeOf(typeof(WINDOWPLACEMENT));
try
{
string[] items = input.Split('/');
result.Flags = int.Parse(items[0]);
result.ShowCmd = int.Parse(items[1]);
result.MinPosition = POINT.Parse(items[2]);
result.MaxPosition = POINT.Parse(items[3]);
result.NormalPosition = RECT.Parse(items[4]);
}
catch
{
}
return result;
}
public override string ToString()
{
return this.Flags + "/" + this.ShowCmd + "/" + this.MinPosition.ToString() + "/" + this.MaxPosition.ToString() + "/" + this.NormalPosition.ToString();
}
}
private static class NativeMethods
{
public const int SW_SHOWNORMAL = 1;
public const int SW_SHOWMINIMIZED = 2;
[DllImport("user32.dll")]
public static extern bool SetWindowPlacement(IntPtr hWnd, [In] ref WINDOWPLACEMENT lpwndpl);
[DllImport("user32.dll")]
public static extern bool GetWindowPlacement(IntPtr hWnd, [Out] out WINDOWPLACEMENT lpwndpl);
}
}
To use this behavior, just add it to your window in XAML:
<Window
xmlns:v="clr-namespace:YourNameSpace"
xmlns:i="clr-namespace:System.Windows.Interactivity;assembly=System.Windows.Interactivity">
<i:Interaction.Behaviors>
<v:WindowPlacementPersistenceBehavior />
</i:Interaction.Behaviors>
</Window>
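The ApplicationSettings reference used in the behavior is only a placeholder. A minimal hedged sketch backed by the standard Properties.Settings mechanism (it assumes a string user setting named WindowPlacement exists in the project settings):
internal static class ApplicationSettings
{
    // Hypothetical wrapper around a "WindowPlacement" string user setting.
    public static string WindowPlacement
    {
        get { return Properties.Settings.Default.WindowPlacement; }
        set
        {
            Properties.Settings.Default.WindowPlacement = value;
            Properties.Settings.Default.Save();
        }
    }
}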

Binding Library Mono for Android

I want to build an application with Monodroid to get a live video stream from an IP camera (in MJPEG format) onto my tablet. After digging around the internet I found an MJPEG library project written in Java from here. It has two files, MjpegView.java and MjpegInputStream.java, which I have included below:
MjpegView.java
package de.mjpegsample.MjpegView;
import java.io.IOException;
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.graphics.PorterDuff;
import android.graphics.PorterDuffXfermode;
import android.graphics.Rect;
import android.graphics.Typeface;
import android.util.AttributeSet;
import android.view.SurfaceHolder;
import android.view.SurfaceView;
public class MjpegView extends SurfaceView implements SurfaceHolder.Callback {
public final static int POSITION_UPPER_LEFT = 9;
public final static int POSITION_UPPER_RIGHT = 3;
public final static int POSITION_LOWER_LEFT = 12;
public final static int POSITION_LOWER_RIGHT = 6;
public final static int SIZE_STANDARD = 1;
public final static int SIZE_BEST_FIT = 4;
public final static int SIZE_FULLSCREEN = 8;
private MjpegViewThread thread;
private MjpegInputStream mIn = null;
private boolean showFps = false;
private boolean mRun = false;
private boolean surfaceDone = false;
private Paint overlayPaint;
private int overlayTextColor;
private int overlayBackgroundColor;
private int ovlPos;
private int dispWidth;
private int dispHeight;
private int displayMode;
public class MjpegViewThread extends Thread {
private SurfaceHolder mSurfaceHolder;
private int frameCounter = 0;
private long start;
private Bitmap ovl;
public MjpegViewThread(SurfaceHolder surfaceHolder, Context context) { mSurfaceHolder = surfaceHolder; }
private Rect destRect(int bmw, int bmh) {
int tempx;
int tempy;
if (displayMode == MjpegView.SIZE_STANDARD) {
tempx = (dispWidth / 2) - (bmw / 2);
tempy = (dispHeight / 2) - (bmh / 2);
return new Rect(tempx, tempy, bmw + tempx, bmh + tempy);
}
if (displayMode == MjpegView.SIZE_BEST_FIT) {
float bmasp = (float) bmw / (float) bmh;
bmw = dispWidth;
bmh = (int) (dispWidth / bmasp);
if (bmh > dispHeight) {
bmh = dispHeight;
bmw = (int) (dispHeight * bmasp);
}
tempx = (dispWidth / 2) - (bmw / 2);
tempy = (dispHeight / 2) - (bmh / 2);
return new Rect(tempx, tempy, bmw + tempx, bmh + tempy);
}
if (displayMode == MjpegView.SIZE_FULLSCREEN) return new Rect(0, 0, dispWidth, dispHeight);
return null;
}
public void setSurfaceSize(int width, int height) {
synchronized(mSurfaceHolder) {
dispWidth = width;
dispHeight = height;
}
}
private Bitmap makeFpsOverlay(Paint p, String text) {
Rect b = new Rect();
p.getTextBounds(text, 0, text.length(), b);
int bwidth = b.width()+2;
int bheight = b.height()+2;
Bitmap bm = Bitmap.createBitmap(bwidth, bheight, Bitmap.Config.ARGB_8888);
Canvas c = new Canvas(bm);
p.setColor(overlayBackgroundColor);
c.drawRect(0, 0, bwidth, bheight, p);
p.setColor(overlayTextColor);
c.drawText(text, -b.left+1, (bheight/2)-((p.ascent()+p.descent())/2)+1, p);
return bm;
}
public void run() {
start = System.currentTimeMillis();
PorterDuffXfermode mode = new PorterDuffXfermode(PorterDuff.Mode.DST_OVER);
Bitmap bm;
int width;
int height;
Rect destRect;
Canvas c = null;
Paint p = new Paint();
String fps = "";
while (mRun) {
if(surfaceDone) {
try {
c = mSurfaceHolder.lockCanvas();
synchronized (mSurfaceHolder) {
try {
bm = mIn.readMjpegFrame();
destRect = destRect(bm.getWidth(),bm.getHeight());
c.drawColor(Color.BLACK);
c.drawBitmap(bm, null, destRect, p);
if(showFps) {
p.setXfermode(mode);
if(ovl != null) {
height = ((ovlPos & 1) == 1) ? destRect.top : destRect.bottom-ovl.getHeight();
width = ((ovlPos & 8) == 8) ? destRect.left : destRect.right -ovl.getWidth();
c.drawBitmap(ovl, width, height, null);
}
p.setXfermode(null);
frameCounter++;
if((System.currentTimeMillis() - start) >= 1000) {
fps = String.valueOf(frameCounter)+"fps";
frameCounter = 0;
start = System.currentTimeMillis();
ovl = makeFpsOverlay(overlayPaint, fps);
}
}
} catch (IOException e) {}
}
} finally { if (c != null) mSurfaceHolder.unlockCanvasAndPost(c); }
}
}
}
}
private void init(Context context) {
SurfaceHolder holder = getHolder();
holder.addCallback(this);
thread = new MjpegViewThread(holder, context);
setFocusable(true);
overlayPaint = new Paint();
overlayPaint.setTextAlign(Paint.Align.LEFT);
overlayPaint.setTextSize(12);
overlayPaint.setTypeface(Typeface.DEFAULT);
overlayTextColor = Color.WHITE;
overlayBackgroundColor = Color.BLACK;
ovlPos = MjpegView.POSITION_LOWER_RIGHT;
displayMode = MjpegView.SIZE_STANDARD;
dispWidth = getWidth();
dispHeight = getHeight();
}
public void startPlayback() {
if(mIn != null) {
mRun = true;
thread.start();
}
}
public void stopPlayback() {
mRun = false;
boolean retry = true;
while(retry) {
try {
thread.join();
retry = false;
} catch (InterruptedException e) {}
}
}
public MjpegView(Context context, AttributeSet attrs) { super(context, attrs); init(context); }
public void surfaceChanged(SurfaceHolder holder, int f, int w, int h) { thread.setSurfaceSize(w, h); }
public void surfaceDestroyed(SurfaceHolder holder) {
surfaceDone = false;
stopPlayback();
}
public MjpegView(Context context) { super(context); init(context); }
public void surfaceCreated(SurfaceHolder holder) { surfaceDone = true; }
public void showFps(boolean b) { showFps = b; }
public void setSource(MjpegInputStream source) { mIn = source; startPlayback();}
public void setOverlayPaint(Paint p) { overlayPaint = p; }
public void setOverlayTextColor(int c) { overlayTextColor = c; }
public void setOverlayBackgroundColor(int c) { overlayBackgroundColor = c; }
public void setOverlayPosition(int p) { ovlPos = p; }
public void setDisplayMode(int s) { displayMode = s; }
}
MjpegInputStream.Java
package de.mjpegsample.MjpegView;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.util.Properties;
import org.apache.http.HttpResponse;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
public class MjpegInputStream extends DataInputStream {
private final byte[] SOI_MARKER = { (byte) 0xFF, (byte) 0xD8 };
private final byte[] EOF_MARKER = { (byte) 0xFF, (byte) 0xD9 };
private final String CONTENT_LENGTH = "Content-Length";
private final static int HEADER_MAX_LENGTH = 100;
private final static int FRAME_MAX_LENGTH = 40000 + HEADER_MAX_LENGTH;
private int mContentLength = -1;
public static MjpegInputStream read(String url) {
HttpResponse res;
DefaultHttpClient httpclient = new DefaultHttpClient();
try {
res = httpclient.execute(new HttpGet(URI.create(url)));
return new MjpegInputStream(res.getEntity().getContent());
} catch (ClientProtocolException e) {
} catch (IOException e) {}
return null;
}
public MjpegInputStream(InputStream in) { super(new BufferedInputStream(in, FRAME_MAX_LENGTH)); }
private int getEndOfSeqeunce(DataInputStream in, byte[] sequence) throws IOException {
int seqIndex = 0;
byte c;
for(int i=0; i < FRAME_MAX_LENGTH; i++) {
c = (byte) in.readUnsignedByte();
if(c == sequence[seqIndex]) {
seqIndex++;
if(seqIndex == sequence.length) return i + 1;
} else seqIndex = 0;
}
return -1;
}
private int getStartOfSequence(DataInputStream in, byte[] sequence) throws IOException {
int end = getEndOfSeqeunce(in, sequence);
return (end < 0) ? (-1) : (end - sequence.length);
}
private int parseContentLength(byte[] headerBytes) throws IOException, NumberFormatException {
ByteArrayInputStream headerIn = new ByteArrayInputStream(headerBytes);
Properties props = new Properties();
props.load(headerIn);
return Integer.parseInt(props.getProperty(CONTENT_LENGTH));
}
public Bitmap readMjpegFrame() throws IOException {
mark(FRAME_MAX_LENGTH);
int headerLen = getStartOfSequence(this, SOI_MARKER);
reset();
byte[] header = new byte[headerLen];
readFully(header);
try {
mContentLength = parseContentLength(header);
} catch (NumberFormatException nfe) {
mContentLength = getEndOfSeqeunce(this, EOF_MARKER);
}
reset();
byte[] frameData = new byte[mContentLength];
skipBytes(headerLen);
readFully(frameData);
return BitmapFactory.decodeStream(new ByteArrayInputStream(frameData));
}
}
So I converted that (actually created a C# wrapper) with a Binding Library project.
I followed the sample code tutorial of this project, as follows:
The sample itself:
public class MjpegSample extends Activity {
private MjpegView mv;
public void onCreate(Bundle icicle) {
super.onCreate(icicle);
//sample public cam
String URL = "http://webcam5.hrz.tu-darmstadt.de/axis-cgi/mjpg/video.cgi?resolution=320x240";
requestWindowFeature(Window.FEATURE_NO_TITLE);
getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN,
WindowManager.LayoutParams.FLAG_FULLSCREEN);
mv = new MjpegView(this);
setContentView(mv);
mv.setSource(MjpegInputStream.read(URL));
mv.setDisplayMode(MjpegView.SIZE_BEST_FIT);
mv.showFps(true);
}
What I have Done in Monodroid:
namespace AndroidApplication8
{
[Activity(Label = "AndroidApplication8", MainLauncher = true, Icon = "#drawable/icon")]
public class Activity1 : Activity
{
int count = 1;
protected override void OnCreate(Bundle bundle)
{
base.OnCreate(bundle);
String URL = "rtsp://192.168.1.3/Mjpeg/video.cgi";
var mv = new MjpegView(this);
SetContentView(mv);
**mv.SetSource(MjpegInputStream.Read(URL));
mv.SetDisplayMode(MjpegView.SizeBestFit);
mv.StartPlayback();
}
}
}
But it gives me an error on the line indicated with ** when it tries to execute MjpegInputStream.Read(),
and it jumps into the class converted from the native Java files, without any further information.
You should check your video type. For example, if your video stream is compressed at the source (before it gets to your Android device), you need to decode it before displaying it. You could write some code, in Java for example, that first verifies the incoming stream from the camera (don't use Android's built-in browser) and then decodes it manually.
Good luck!
