I want to add a bloom effect to my MonoGame project. I found this sample: http://xbox.create.msdn.com/en-US/education/catalog/sample/bloom and tried to add it to my project, but for some reason it told me "Could not load BloomExtract asset as a non-content file!" when I tried to run the project. I tried every solution I could find, including setting the file to Copy Always. After that I found out that MonoGame can't load .fx files, so I converted them to .mgfxo, changed the build action to Embedded Resource, and added this code:
Stream s = Assembly.GetExecutingAssembly().GetManifestResourceStream("Morior.Content.BloomExtract.mgfxo");
BinaryReader Reader = new BinaryReader(s);
bloomExtractEffect = new Effect(GraphicsDevice, Reader.ReadBytes((int)Reader.BaseStream.Length));
But now it throws "This MGFX effect was built for a different platform" when I try to run it, so I'm really out of options. How can I add simple bloom?
edit: I managed to convert it to DirectX 11 without errors, but now for some reason there's a blank black screen/red screen in my game. Sigh; it seems like I won't be able to make it work.
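For reference, the conversion step - assuming MonoGame 3.x's 2MGFX command-line tool, whose /Profile switch selects the target platform - looked something like this:
2MGFX.exe BloomExtract.fx BloomExtract.mgfxo /Profile:DirectX_11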
EDIT: I think I figured the problems out, but the bloom itself won't work. Here is the bloom draw code:
#region Using Statements
using System;
using Microsoft.Xna.Framework;
using Microsoft.Xna.Framework.Content;
using Microsoft.Xna.Framework.Graphics;
using System.IO;
#endregion
namespace BloomPostprocess
{
public class BloomComponent : DrawableGameComponent
{
#region Fields
SpriteBatch spriteBatch;
Effect bloomExtractEffect;
Effect bloomCombineEffect;
Effect gaussianBlurEffect;
RenderTarget2D renderTarget1;
RenderTarget2D renderTarget2;
// Choose what display settings the bloom should use.
public BloomSettings Settings
{
get { return settings; }
set { settings = value; }
}
BloomSettings settings = BloomSettings.PresetSettings[0];
// Optionally displays one of the intermediate buffers used
// by the bloom postprocess, so you can see exactly what is
// being drawn into each rendertarget.
public enum IntermediateBuffer
{
PreBloom,
BlurredHorizontally,
BlurredBothWays,
FinalResult,
}
public IntermediateBuffer ShowBuffer
{
get { return showBuffer; }
set { showBuffer = value; }
}
IntermediateBuffer showBuffer = IntermediateBuffer.FinalResult;
#endregion
#region Initialization
public BloomComponent(Game game)
: base(game)
{
if (game == null)
throw new ArgumentNullException("game");
}
/// <summary>
/// Load your graphics content.
/// </summary>
public void LoadContent(GraphicsDevice g, ContentManager theContentManager)
{
spriteBatch = new SpriteBatch(g);
bloomExtractEffect = theContentManager.Load<Effect>("BloomExtract");
bloomCombineEffect = theContentManager.Load<Effect>("BloomCombine");
gaussianBlurEffect = theContentManager.Load<Effect>("GaussianBlur");
// Look up the resolution and format of our main backbuffer.
PresentationParameters pp = g.PresentationParameters;
int width = pp.BackBufferWidth;
int height = pp.BackBufferHeight;
SurfaceFormat format = pp.BackBufferFormat;
// Create a texture for rendering the main scene, prior to applying bloom.
// Create two rendertargets for the bloom processing. These are half the
// size of the backbuffer, in order to minimize fillrate costs. Reducing
// the resolution in this way doesn't hurt quality, because we are going
// to be blurring the bloom images in any case.
width /= 2;
height /= 2;
renderTarget1 = new RenderTarget2D(g, width, height, false, format, DepthFormat.Depth24);
renderTarget2 = new RenderTarget2D(g, width, height, false, format, DepthFormat.Depth24);
}
/// <summary>
/// Unload your graphics content.
/// </summary>
public void UnloadContent(ContentManager theContentManager)
{
renderTarget1.Dispose();
renderTarget2.Dispose();
}
#endregion
#region Draw
/// <summary>
/// This should be called at the very start of the scene rendering. The bloom
/// component uses it to redirect drawing into its custom rendertarget, so it
/// can capture the scene image in preparation for applying the bloom filter.
/// </summary>
public void BeginDraw(RenderTarget2D renderTarget)
{
if (Visible)
{
GraphicsDevice.SetRenderTarget(renderTarget);
}
}
/// <summary>
/// This is where it all happens. Grabs a scene that has already been rendered,
/// and uses postprocess magic to add a glowing bloom effect over the top of it.
/// </summary>
public void Draw(GameTime gameTime, RenderTarget2D renderTarget)
{
// Pass 1: draw the scene into rendertarget 1, using a
// shader that extracts only the brightest parts of the image.
bloomExtractEffect.Parameters["BloomThreshold"].SetValue(
Settings.BloomThreshold);
DrawFullscreenQuad(renderTarget, renderTarget1,
bloomExtractEffect,
IntermediateBuffer.PreBloom);
// Pass 2: draw from rendertarget 1 into rendertarget 2,
// using a shader to apply a horizontal gaussian blur filter.
SetBlurEffectParameters(1.0f / (float)renderTarget1.Width, 0);
DrawFullscreenQuad(renderTarget1, renderTarget2,
gaussianBlurEffect,
IntermediateBuffer.BlurredHorizontally);
// Pass 3: draw from rendertarget 2 back into rendertarget 1,
// using a shader to apply a vertical gaussian blur filter.
SetBlurEffectParameters(0, 1.0f / (float)renderTarget1.Height);
DrawFullscreenQuad(renderTarget2, renderTarget1,
gaussianBlurEffect,
IntermediateBuffer.BlurredBothWays);
// Pass 4: draw both rendertarget 1 and the original scene
// image back into the main backbuffer, using a shader that
// combines them to produce the final bloomed result.
GraphicsDevice.SetRenderTarget(null);
EffectParameterCollection parameters = bloomCombineEffect.Parameters;
parameters["BloomIntensity"].SetValue(Settings.BloomIntensity);
parameters["BaseIntensity"].SetValue(Settings.BaseIntensity);
parameters["BloomSaturation"].SetValue(Settings.BloomSaturation);
parameters["BaseSaturation"].SetValue(Settings.BaseSaturation);
GraphicsDevice.Textures[1] = renderTarget;
Viewport viewport = GraphicsDevice.Viewport;
DrawFullscreenQuad(renderTarget1,
viewport.Width, viewport.Height,
bloomCombineEffect,
IntermediateBuffer.FinalResult);
}
/// <summary>
/// Helper for drawing a texture into a rendertarget, using
/// a custom shader to apply postprocessing effects.
/// </summary>
void DrawFullscreenQuad(Texture2D texture, RenderTarget2D renderTarget,
Effect effect, IntermediateBuffer currentBuffer)
{
GraphicsDevice.SetRenderTarget(renderTarget);
DrawFullscreenQuad(texture,
renderTarget.Width, renderTarget.Height,
effect, currentBuffer);
}
/// <summary>
/// Helper for drawing a texture into the current rendertarget,
/// using a custom shader to apply postprocessing effects.
/// </summary>
void DrawFullscreenQuad(Texture2D texture, int width, int height,
Effect effect, IntermediateBuffer currentBuffer)
{
// If the user has selected one of the show intermediate buffer options,
// we still draw the quad to make sure the image will end up on the screen,
// but might need to skip applying the custom pixel shader.
if (showBuffer < currentBuffer)
{
effect = null;
}
spriteBatch.Begin(SpriteSortMode.Immediate, BlendState.Opaque, null, null, null, effect);
spriteBatch.Draw(texture, new Rectangle(0, 0, width, height), Color.White);
spriteBatch.End();
}
/// <summary>
/// Computes sample weightings and texture coordinate offsets
/// for one pass of a separable gaussian blur filter.
/// </summary>
void SetBlurEffectParameters(float dx, float dy)
{
// Look up the sample weight and offset effect parameters.
EffectParameter weightsParameter, offsetsParameter;
weightsParameter = gaussianBlurEffect.Parameters["SampleWeights"];
offsetsParameter = gaussianBlurEffect.Parameters["SampleOffsets"];
// Look up how many samples our gaussian blur effect supports.
int sampleCount = weightsParameter.Elements.Count;
// Create temporary arrays for computing our filter settings.
float[] sampleWeights = new float[sampleCount];
Vector2[] sampleOffsets = new Vector2[sampleCount];
// The first sample always has a zero offset.
sampleWeights[0] = ComputeGaussian(0);
sampleOffsets[0] = new Vector2(0);
// Maintain a sum of all the weighting values.
float totalWeights = sampleWeights[0];
// Add pairs of additional sample taps, positioned
// along a line in both directions from the center.
for (int i = 0; i < sampleCount / 2; i++)
{
// Store weights for the positive and negative taps.
float weight = ComputeGaussian(i + 1);
sampleWeights[i * 2 + 1] = weight;
sampleWeights[i * 2 + 2] = weight;
totalWeights += weight * 2;
// To get the maximum amount of blurring from a limited number of
// pixel shader samples, we take advantage of the bilinear filtering
// hardware inside the texture fetch unit. If we position our texture
// coordinates exactly halfway between two texels, the filtering unit
// will average them for us, giving two samples for the price of one.
// This allows us to step in units of two texels per sample, rather
// than just one at a time. The 1.5 offset kicks things off by
// positioning us nicely in between two texels.
float sampleOffset = i * 2 + 1.5f;
Vector2 delta = new Vector2(dx, dy) * sampleOffset;
// Store texture coordinate offsets for the positive and negative taps.
sampleOffsets[i * 2 + 1] = delta;
sampleOffsets[i * 2 + 2] = -delta;
}
// Normalize the list of sample weightings, so they will always sum to one.
for (int i = 0; i < sampleWeights.Length; i++)
{
sampleWeights[i] /= totalWeights;
}
// Tell the effect about our new filter settings.
weightsParameter.SetValue(sampleWeights);
offsetsParameter.SetValue(sampleOffsets);
}
/// <summary>
/// Evaluates a single point on the gaussian falloff curve.
/// Used for setting up the blur filter weightings.
/// </summary>
float ComputeGaussian(float n)
{
float theta = Settings.BlurAmount;
return (float)((1.0 / Math.Sqrt(2 * Math.PI * theta)) *
Math.Exp(-(n * n) / (2 * theta * theta)));
}
#endregion
}
}
here is the main:
protected override void Draw(GameTime gameTime)
{
bloom.Draw(gameTime, renderTarget);
GraphicsDevice.Clear(Color.Black);
DrawSceneToTexture(renderTarget,gameTime);
spriteBatch.Begin(SpriteSortMode.Immediate, BlendState.AlphaBlend,
SamplerState.LinearClamp, DepthStencilState.Default,
RasterizerState.CullNone);
spriteBatch.Draw(renderTarget, rec, Color.White);
spriteBatch.End();
base.Draw(gameTime);
}
protected void DrawSceneToTexture(RenderTarget2D renderTarget,GameTime gametime)
{
GraphicsDevice.SetRenderTarget(renderTarget);
GraphicsDevice.DepthStencilState = new DepthStencilState() { DepthBufferEnable = true };
// Draw the scene
GraphicsDevice.Clear(Color.CornflowerBlue);
spriteBatch.Begin(SpriteSortMode.Deferred,
BlendState.AlphaBlend,
SamplerState.PointClamp,
null, null, null, null);
bloom.BeginDraw(renderTarget);
mLeavusSprite.Draw(this.spriteBatch);
b.Draw(this.spriteBatch);
spriteBatch.Draw(cursorTex, cursorPos, cursorSource,
Color.White, 0.0f, Vector2.Zero, 3f, SpriteEffects.None, 0);
spriteBatch.End();
// Drop the render target
GraphicsDevice.SetRenderTarget(null);
}
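For comparison, here is roughly the call order the component's own comments imply - capture the scene first, post-process last. This is only a sketch against the modified BeginDraw/Draw signatures above, not verified against the original sample:
protected override void Draw(GameTime gameTime)
{
    // Redirect all scene drawing into the bloom component's render target first
    bloom.BeginDraw(renderTarget);
    GraphicsDevice.Clear(Color.CornflowerBlue);
    // Draw the scene normally
    spriteBatch.Begin();
    // ... draw sprites here ...
    spriteBatch.End();
    // Then let the bloom component extract, blur, and recombine the captured scene
    bloom.Draw(gameTime, renderTarget);
    base.Draw(gameTime);
}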
I am trying to take a UI object's screen space position and translate that to what I am calling 'monitor space'.
As far as I can tell, screen space in Unity is relative to the application's window. That is, even if the app is not full screen and has been moved around on your monitor, (0,0) will still be the lower left of the app window.
I need to translate one of those screen space values into the actual position within the user's monitor. This is especially important when considering that the user might have multiple monitors.
I am not finding anything to get this done, though.
I am hoping to find a platform agnostic solution, but if it must be Windows-only then I can make that work as well.
Any help on this would be greatly appreciated.
Thank you
Now after TEEBQNE's answer I also wanted to give it a shot using the native solution.
As mentioned, this will be for Windows PC Standalone only and requires
Unity's new Input System (see Quick Start)
One of the solutions from Getting mouse position in c#
For example if you want to use System.Windows.Forms then copy the corresponding DLL from
C:\Windows\Microsoft.NET\Framework64\v4.x.xx
into your project under Assets/Plugins
Then in code you can use
using System.Windows.Forms;
Whether this is more efficient (or even works this way) I can't tell - I'm only on my phone here - but I hope the idea is clear ;)
So the idea is:
store the initial cursor position
set the cursor to the positions of interest using WarpCursorPosition, with Unity screen coordinates as input
read out the resulting absolute monitor coordinates using the native call
in the end, reset the cursor to the original position
This might look somewhat like
using UnityEngine;
using UnityEngine.InputSystem;
public static class MonitorUtils
{
// Store reference to main Camera (Camera.main is expensive)
private static Camera _mainCamera;
// persistent array to fetch rect corners into
// cheaper than creating and throwing away a new array every time,
// especially when fetching them every frame
private static readonly Vector3[] corners = new Vector3[4];
// For getting the UI rect corners in Monitor pixel coordinates
public static void GetMonitorRectCorners(this RectTransform rectTransform, Vector2Int[] output, bool isScreenSpaceCanvas = true, Camera camera = null)
{
// Lazy initialization of optional parameter
if (!camera) camera = GetMainCamera();
// Store initial mouse position
var originalMousePosition = Mouse.current.position.ReadValue();
// Get the four world space positions of your RectTransform's corners
// in the order bottom left, top left, top right, bottom right
// See https://docs.unity3d.com/ScriptReference/RectTransform.GetWorldCorners.html
rectTransform.GetWorldCorners(corners);
// Iterate the four corners
for (var i = 0; i < 4; i++)
{
if (!isScreenSpaceCanvas)
{
// Get the monitor position from the world position (see below)
output[i] = WorldToMonitorPoint(corners[i], camera);
}
else
{
// Get the monitor position from the screen position (see below)
output[i] = ScreenToMonitorPoint(corners[i], camera);
}
}
// Restore mouse position
Mouse.current.WarpCursorPosition(originalMousePosition);
}
// For getting a single Unity world space position in Monitor pixel coordinates
public static Vector2Int WorldToMonitorPoint(Vector3 worldPoint, Camera camera = null)
{
// Lazy initialization of optional parameter
if (!camera) camera = GetMainCamera();
var screenPos = camera.WorldToScreenPoint(worldPoint);
return ScreenToMonitorPoint(screenPos, camera);
}
// For getting a single Unity screen space position in Monitor pixel coordinates
public static Vector2Int ScreenToMonitorPoint(Vector3 screenPos, Camera camera = null)
{
// Lazy initialization of optional parameter
if (!camera) camera = GetMainCamera();
// Set the system cursor position there based on Unity screen space
Mouse.current.WarpCursorPosition(screenPos);
// Then get the actual system mouse position (see below)
return GetSystemMousePosition();
}
// Get and store the main camera
private static Camera GetMainCamera()
{
if (!_mainCamera) _mainCamera = Camera.main;
return _mainCamera;
}
// Convert the system mouse position to Vector2Int for working
// with it in Unity
private static Vector2Int GetSystemMousePosition()
{
var point = System.Windows.Forms.Cursor.Position;
return new Vector2Int(point.X, point.Y);
}
}
So you can either simply use
var monitorPosition = MonitorUtils.WorldToMonitorPoint(someUnityWorldPosition);
// or if you already have the `Camera` reference
//var monitorPosition = MonitorUtils.WorldToMonitorPoint(someUnityWorldPosition, someCamera);
or if you already have a screen space position, e.g. from a Screen Space Overlay canvas
var monitorPosition = MonitorUtils.ScreenToMonitorPoint(someScreenPosition);
// or if you already have the `Camera` reference
//var monitorPosition = MonitorUtils.ScreenToMonitorPoint(someScreenPosition, someCamera);
or you can get all four corners of a UI element at once using e.g.
var monitorCorners = new Vector2Int[4];
someRectTransform.GetMonitorRectCorners(monitorCorners, isScreenSpaceCanvas);
// or again if you already have a camera reference
//someRectTransform.GetMonitorRectCorners(monitorCorners, isScreenSpaceCanvas, someCamera);
A little example:
public class Example : MonoBehaviour
{
[Header("References")]
[SerializeField] private Camera mainCamera;
[SerializeField] private RectTransform _rectTransform;
[SerializeField] private Canvas _canvas;
[Header("Debugging")]
[SerializeField] private bool isScreenSpace;
[Header("Output")]
[SerializeField] private Vector2Int bottomLeft;
[SerializeField] private Vector2Int topLeft;
[SerializeField] private Vector2Int topRight;
[SerializeField] private Vector2Int bottomRight;
private readonly Vector2Int[] _monitorPixelCornerCoordinates = new Vector2Int[4];
private void Awake()
{
if (!mainCamera) mainCamera = Camera.main;
if (!_canvas) _canvas = GetComponentInParent<Canvas>();
isScreenSpace = _canvas.renderMode == RenderMode.ScreenSpaceOverlay;
}
private void Update()
{
if (Keyboard.current.spaceKey.isPressed)
{
_rectTransform.GetMonitorRectCorners(_monitorPixelCornerCoordinates, isScreenSpace);
bottomLeft = _monitorPixelCornerCoordinates[0];
topLeft = _monitorPixelCornerCoordinates[1];
topRight = _monitorPixelCornerCoordinates[2];
bottomRight = _monitorPixelCornerCoordinates[3];
}
}
}
You will see that moving your mouse each and every frame isn't a good idea though ^^
Now you can see the four corners being updated depending on the actual position on the screen.
Note: while Unity screen space has (0,0) at the bottom left, display pixel coordinates usually have (0,0) at the top left, so you might need to invert the Y values.
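If you need that, the inversion itself is a one-liner (displayHeight being the monitor's pixel height from the native side; the name is illustrative):
monitorPosition.y = displayHeight - monitorPosition.y; // flip bottom-left origin to top-left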
Alright, first off - sorry for the late response; I just got back and was able to type up an answer.
From what I have found, this solution does not work in the editor and produces odd results on a Mac with a retina display. In the editor, the Screen and Display spaces appear to be exactly the same. There is probably a solution to fix this but I did not look into the specifics. As for Mac, for whatever reason, the internal resolution reported is always half the actual resolution. I am not sure if this is just a retina display bug with Unity or a general Mac bug. I ran this test script on both a Windows computer and a Mac with a retina display. I have yet to test it on any mobile platform.
I do not know exactly what you would like to achieve with the values you wish to find, so I set up a demo scene that displays the values instead of using them.
Here is the demo script:
using UnityEngine;
using System.Collections.Generic;
using UnityEngine.UI;
public class TestScript : MonoBehaviour
{
[SerializeField] private RectTransform rect = null;
[SerializeField] private List<Text> text = new List<Text>();
[SerializeField] private Canvas parentCanvas = null;
[SerializeField] private Camera mainCam = null;
private void Start()
{
// determine the canvas mode of our UI object
if (parentCanvas == null)
parentCanvas = GetComponentInParent<Canvas>();
// only need a camera in the case of camera space canvas
if (parentCanvas.renderMode == RenderMode.ScreenSpaceCamera && mainCam == null)
mainCam = Camera.main;
// generate initial data points
GenerateData();
}
/// <summary>
/// Onclick of our button to test generating data when the object moves
/// </summary>
public void GenerateData()
{
// the anchored position is relative to screen space if the canvas is an overlay - if not, it will need to be converted to screen space based on our camera
Vector3 screenPos = parentCanvas.renderMode == RenderMode.ScreenSpaceCamera ? mainCam.WorldToScreenPoint(transform.position) : rect.transform.position;
// our object relative to screen position
text[0].text = "Screen Pos: " + screenPos;
// the dimensions of our screen (The current window that is rendering our game)
text[1].text = "Screen dimensions: " + Screen.width + " " + Screen.height;
// find our width / height normalized relative to the screen space dimensions
float x = Mathf.Clamp01(screenPos.x / Screen.width);
float y = Mathf.Clamp01(screenPos.y / Screen.height);
// our normalized screen positions
text[2].text = "Normalized Screen Pos: " + x + " " + y;
// grab the dimensions of the main renderer - the current monitor our game is rendered on
#if UNITY_STANDALONE_OSX
text[3].text = "Display dimensions: " + (Display.main.systemWidth * 2f) + " " + (Display.main.systemHeight * 2f);
// now find the coordinates of the UI object, transcribed from normalized screen space coordinates to our monitor / resolution coordinates
text[4].text = "Display relative pos: " + (Display.main.systemWidth * x * 2f) + " " + (Display.main.systemHeight * y * 2f);
#else
text[3].text = "Display dimensions: " + Display.main.systemWidth + " " + Display.main.systemHeight;
// now find the coordinates of the UI object, transcribed from normalized screen space coordinates to our monitor / resolution coordinates
text[4].text = "Display relative pos: " + (Display.main.systemWidth * x) + " " + (Display.main.systemHeight * y);
#endif
}
/// <summary>
/// Just for debugging - can be deleted
/// </summary>
private void Update()
{
if (Input.GetKey(KeyCode.A))
{
rect.anchoredPosition += new Vector2(-10f, 0f);
}
if (Input.GetKey(KeyCode.W))
{
rect.anchoredPosition += new Vector2(0f, 10f);
}
if (Input.GetKey(KeyCode.S))
{
rect.anchoredPosition += new Vector2(0f, -10f);
}
if (Input.GetKey(KeyCode.D))
{
rect.anchoredPosition += new Vector2(10f, 0f);
}
}
}
I accounted for the parent canvas being either Overlay or Camera mode and put in a check for an OSX build to adjust to the proper screen dimensions.
Here is a gif of the build on OSX. I set the window to be 1680x1050 and my computer's current resolution is 2880x1800. I also tested it on Windows but did not record it, as the example looks nearly identical.
Let me know if you have more questions about the implementation or if there are issues with other platforms I did not test.
Edit: Just realized you want the screen space coordinate relative to the monitor space. I will correct the snippet in a little bit - in a meeting right now.
Edit2: After a bit more looking, it will not be easy to get the exact coordinates without the window being centered or without getting the standalone window's position. I do not believe there is an easy way to get this information without a DLL, so here is an implementation for Mac and a solution for Windows.
Currently, the solution I have will only get the screen position if the standalone player is windowed and centered on your screen. If the player is centered, the center point of the window matches the center of the monitor, which sits at half the monitor's resolution. From that I can compute the bottom left corner of the window relative to the monitor, rather than treating it as (0,0). Since screen space puts the bottom left corner at (0,0), you can then convert a position to monitor space by adding the newly calculated bottom left position.
Here is the new GenerateData method:
/// <summary>
/// Onclick of our button to test generating data when the object moves
/// </summary>
public void GenerateData()
{
// the anchored position is relative to screen space if the canvas is an overlay - if not, it will need to be converted to screen space based on our camera
Vector3 screenPos = parentCanvas.renderMode == RenderMode.ScreenSpaceCamera ? mainCam.WorldToScreenPoint(transform.position) : rect.transform.position;
// grab the display dimensions
Vector2 displayDimensions;
// bug (or quirk) on Macs with retina displays where the Display.main.system dimensions are half of what they actually are
#if UNITY_STANDALONE_OSX || UNITY_EDITOR_OSX
displayDimensions = new Vector2(Display.main.systemWidth * 2f, Display.main.systemHeight * 2f);
#else
displayDimensions = new Vector2(Display.main.systemWidth, Display.main.systemHeight);
#endif
// the centerpoint of our display coordinates
Vector2 displayCenter = new Vector2(displayDimensions.x / 2f, displayDimensions.y / 2f);
// half our screen dimensions to find our screen space relative to monitor space
Vector2 screenDimensionsHalf = new Vector2(Screen.width / 2f, Screen.height / 2f);
// find the corners of our window relative to the monitor space
Vector2[] displayCorners = new Vector2[] {
new Vector2(displayCenter.x - screenDimensionsHalf.x, displayCenter.y - screenDimensionsHalf.y), // bottom left
new Vector2(displayCenter.x - screenDimensionsHalf.x, displayCenter.y + screenDimensionsHalf.y), // top left
new Vector2(displayCenter.x + screenDimensionsHalf.x, displayCenter.y + screenDimensionsHalf.y), // top right
new Vector2(displayCenter.x + screenDimensionsHalf.x, displayCenter.y - screenDimensionsHalf.y) // bottom right
};
for (int z = 0; z < 4; ++z)
{
text[z].text = displayCorners[z].ToString();
}
// outputting our screen position relative to our monitor
text[4].text = (new Vector2(screenPos.x, screenPos.y) + displayCorners[0]).ToString();
}
Once you are able to either get or set the windowed screen, you can properly re-orient the lower-left corner relative to the monitor dimensions, or you can set the window back to the center point of your monitor. The above snippet would also work for a full-screen player; you would just need to determine how far off the aspect ratio of the player window is from your monitor's, which lets you work out how large the black bars on the edges would be.
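To illustrate that last step, here is a small sketch of the aspect-ratio math (my own hypothetical helper, assuming a centered full-screen player):
using UnityEngine;

public static class LetterboxUtils
{
    // Returns the (x, y) size of the black bar on each side when content with
    // the player's aspect ratio is fitted inside the monitor.
    public static Vector2 GetBarSizes(float playerW, float playerH, float monitorW, float monitorH)
    {
        float playerAspect = playerW / playerH;
        float monitorAspect = monitorW / monitorH;
        if (playerAspect < monitorAspect)
        {
            // Pillarbox: bars on the left and right
            float contentWidth = monitorH * playerAspect;
            return new Vector2((monitorW - contentWidth) / 2f, 0f);
        }
        // Letterbox: bars on the top and bottom
        float contentHeight = monitorW / playerAspect;
        return new Vector2(0f, (monitorH - contentHeight) / 2f);
    }
}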
I assumed what you wanted was straightforward, but from what I can tell an OS-agnostic solution would be difficult. My above solution should work on any platform when the player is windowed, provided you can either get or set the standalone window position, and on any full-screen platform with the theoretical approach I mentioned.
If you want more info on how to adjust the implementation for a full-screen window, let me know.
I draw images with GUI.Label/GUI.Box, but Unity applies a kind of "blackout" (or "red filter") to the images before rendering them.
https://i.gyazo.com/255e4947a7ca95f835104a1f8ffa2e61.png
On the right of the screenshot is the original apple - bright. The apple on the left is drawn by Unity, but it is dark.
I used GUI.color = new Color(1f,1f,1f,1f) but it didn't help.
Next, I wrote an editor (an editor window) and drew this:
https://i.gyazo.com/06908720a2b00c538558c61092a98329.png
The apples were dark. Here is my ItemDrawService:
public class ItemDrawService {
private GUIStyle labelStyle = null;
public ItemDrawService(GUIStyle labelStyle){
this.labelStyle=labelStyle;
}
/// <summary>
/// Draws an item in the inventory
/// </summary>
/// <param name="item">The item to draw</param>
/// <param name="offsetX">Offset (position) of the inventory on x</param>
/// <param name="offsetY">Offset (position) of the inventory on y</param>
/// <param name="fixWebPosition">when fixWebPosition=true the item is drawn "in the grid"</param>
/// <param name="drawIcon">when drawIcon=false the item is drawn without its icon</param>
public void DrawItem(ItemSlot item, float offsetX, float offsetY, bool fixWebPosition = true, bool drawIcon = true){
Rect cellRectangle;
if (fixWebPosition) {
cellRectangle = new Rect(offsetX + CellSettings.cellPaddingX + (item.position.X - 1) * CellSettings.cellWidth,
offsetY + CellSettings.cellPaddingY + (item.position.Y - 1) * CellSettings.cellHeight,
CellSettings.cellWidth,
CellSettings.cellHeight);
} else {
cellRectangle = new Rect(offsetX,
offsetY,
CellSettings.cellWidth,
CellSettings.cellHeight);
}
if (drawIcon)
labelStyle.normal.background = item.item.resource.icon;
string description = item.item.getCount()>1? item.item.getCount().ToString()+CLang.getInstance().get(Dictionary.K_COUNT) : "";
GUI.color = new Color(1f,1f,1f,1f);
GUI.Label(cellRectangle, description, labelStyle);
}
}
How do I draw the images without this darkening?
I found this in the documentation:
Legacy GUI and Linear Authored Textures
Rendering elements of the Legacy GUI System is always done in gamma
space. This means that, for the legacy GUI system, GUI textures should
not have their gamma removed on read. This can be achieved in two
ways:
Set the texture type to GUI in the texture importer
Check the ‘Bypass sRGB Sampling’ checkbox in the advanced texture importer
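In newer Unity versions the same option is exposed as the "sRGB (Color Texture)" toggle on the importer, or TextureImporter.sRGBTexture from script. A hedged editor-script sketch - the menu name and workflow are my own invention:
using UnityEditor;

public static class GuiTextureLinearizer
{
    // Hypothetical menu item: disables sRGB sampling on the selected texture so
    // the legacy GUI system doesn't receive a darkened (gamma-removed) version.
    [MenuItem("Tools/Mark Selected Texture As Linear GUI Texture")]
    private static void MarkSelectedAsLinear()
    {
        string path = AssetDatabase.GetAssetPath(Selection.activeObject);
        var importer = AssetImporter.GetAtPath(path) as TextureImporter;
        if (importer == null) return; // selection wasn't a texture asset
        importer.sRGBTexture = false; // equivalent to bypassing sRGB sampling
        importer.SaveAndReimport();
    }
}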
I need to draw a large amount of 2D elements in WPF, such as lines and polygons. Their position also needs to be updated constantly.
I have looked at many of the answers here, which mostly suggest using DrawingVisual or overriding the OnRender function. To test these methods I implemented a simple particle system rendering 10000 ellipses, and I find that the drawing performance is still really terrible with both approaches. On my PC I can't get much above 5-10 frames a second, which is totally unacceptable considering that I can easily draw half a million particles smoothly using other technologies.
So my question is: am I running into a technical limitation of WPF here, or am I missing something? Is there something else I can use? Any suggestions welcome.
Here is the code I tried.
content of MainWindow.xaml:
<Window x:Class="WpfApplication1.MainWindow"
xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
Title="MainWindow" Height="500" Width="500" Loaded="Window_Loaded">
<Grid Name="xamlGrid">
</Grid>
</Window>
content of MainWindow.xaml.cs:
using System;
using System.Windows;
using System.Windows.Media;
using System.Windows.Threading;
namespace WpfApplication1
{
/// <summary>
/// Interaction logic for MainWindow.xaml
/// </summary>
public partial class MainWindow : Window
{
public MainWindow()
{
InitializeComponent();
}
EllipseBounce[] _particles;
DispatcherTimer _timer = new DispatcherTimer();
private void Window_Loaded(object sender, RoutedEventArgs e)
{
//particles with Ellipse Geometry
_particles = new EllipseBounce[10000];
//define area particles can bounce around in
Rect stage = new Rect(0, 0, 500, 500);
//seed particles with random velocity and position
Random rand = new Random();
//populate
for (int i = 0; i < _particles.Length; i++)
{
Point pos = new Point((float)(rand.NextDouble() * stage.Width + stage.X), (float)(rand.NextDouble() * stage.Height + stage.Y));
Point vel = new Point((float)(rand.NextDouble() * 5 - 2.5), (float)(rand.NextDouble() * 5 - 2.5));
_particles[i] = new EllipseBounce(stage, pos, vel, 2);
}
//add to particle system - this will draw particles via onrender method
ParticleSystem ps = new ParticleSystem(_particles);
//add this element to the grid (assumes we have a Grid in XAML named 'xamlGrid')
xamlGrid.Children.Add(ps);
//set up an update function for the particle positions
_timer.Tick += _timer_Tick;
_timer.Interval = new TimeSpan(0, 0, 0, 0, 1000 / 60); //update at 60 fps
_timer.Start();
}
void _timer_Tick(object sender, EventArgs e)
{
for (int i = 0; i < _particles.Length; i++)
{
_particles[i].Update();
}
}
}
/// <summary>
/// Framework element that draws particles
/// </summary>
public class ParticleSystem : FrameworkElement
{
private DrawingGroup _drawingGroup;
public ParticleSystem(EllipseBounce[] particles)
{
_drawingGroup = new DrawingGroup();
for (int i = 0; i < particles.Length; i++)
{
EllipseGeometry eg = particles[i].EllipseGeometry;
Brush col = Brushes.Black;
col.Freeze();
GeometryDrawing gd = new GeometryDrawing(col, null, eg);
_drawingGroup.Children.Add(gd);
}
}
protected override void OnRender(DrawingContext drawingContext)
{
base.OnRender(drawingContext);
drawingContext.DrawDrawing(_drawingGroup);
}
}
/// <summary>
/// simple class that implements 2D particle movement that bounces off walls
/// </summary>
public class SimpleBounce2D
{
protected Point _position;
protected Point _velocity;
protected Rect _stage;
public SimpleBounce2D(Rect stage, Point pos,Point vel)
{
_stage = stage;
_position = pos;
_velocity = vel;
}
public double X
{
get
{
return _position.X;
}
}
public double Y
{
get
{
return _position.Y;
}
}
public virtual void Update()
{
UpdatePosition();
BoundaryCheck();
}
private void UpdatePosition()
{
_position.X += _velocity.X;
_position.Y += _velocity.Y;
}
private void BoundaryCheck()
{
if (_position.X > _stage.Width + _stage.X)
{
_velocity.X = -_velocity.X;
_position.X = _stage.Width + _stage.X;
}
if (_position.X < _stage.X)
{
_velocity.X = -_velocity.X;
_position.X = _stage.X;
}
if (_position.Y > _stage.Height + _stage.Y)
{
_velocity.Y = -_velocity.Y;
_position.Y = _stage.Height + _stage.Y;
}
if (_position.Y < _stage.Y)
{
_velocity.Y = -_velocity.Y;
_position.Y = _stage.Y;
}
}
}
/// <summary>
/// extends SimpleBounce2D to add an ellipse geometry and update its position in the WPF construct
/// </summary>
public class EllipseBounce : SimpleBounce2D
{
protected EllipseGeometry _ellipse;
public EllipseBounce(Rect stage,Point pos, Point vel, float radius)
: base(stage, pos, vel)
{
_ellipse = new EllipseGeometry(pos, radius, radius);
}
public EllipseGeometry EllipseGeometry
{
get
{
return _ellipse;
}
}
public override void Update()
{
base.Update();
_ellipse.Center = _position;
}
}
}
I believe the sample code provided is pretty much as good as it gets and showcases the limits of the framework. In my measurements I profiled an average cost of 15-25 ms attributed to render overhead. In essence we are talking about just the modification of the center (dependency) property, which is quite expensive, presumably because it propagates the changes directly to the MIL core.
One important note is that the overhead cost is proportional to the number of objects whose positions change in the simulation. Rendering a large quantity of objects is not in itself an issue when the majority of objects are temporally coherent, i.e. don't change positions.
The best alternative approach for this situation is to resort to D3DImage, a Windows Presentation Foundation element for presenting content rendered with DirectX. Generally speaking, that approach should be effective performance-wise.
You could try a WriteableBitmap and produce the image using faster code on a background thread. However, the only thing you can do with it is copy bitmap data, so you either have to code your own primitive drawing routines, or (which might even work in your case) create a "stamp" image which you copy to wherever your particles go...
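A minimal sketch of that stamp idea (assuming a 500x500 WriteableBitmap shown by an Image element named image; the stamp size and the clearing strategy are illustrative):
// Create the bitmap once and hook it up as the Image source
// (WriteableBitmap lives in System.Windows.Media.Imaging).
var wb = new WriteableBitmap(500, 500, 96, 96, PixelFormats.Bgra32, null);
image.Source = wb;

// Precompute a small opaque-black BGRA "stamp".
const int stampSize = 4;
byte[] stamp = new byte[stampSize * stampSize * 4];
for (int i = 3; i < stamp.Length; i += 4) stamp[i] = 255; // set alpha, leave BGR at 0

// Each frame: clear the bitmap (e.g. by writing a background block),
// then blit the stamp at every particle position.
foreach (var p in _particles)
{
    int x = Math.Max(0, Math.Min((int)p.X, 500 - stampSize));
    int y = Math.Max(0, Math.Min((int)p.Y, 500 - stampSize));
    wb.WritePixels(new Int32Rect(x, y, stampSize, stampSize), stamp, stampSize * 4, 0);
}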
The fastest WPF drawing method I have found is to:
create a DrawingGroup "backingStore".
during OnRender(), draw my drawing group to the drawing context
anytime I want, backingStore.Open() and draw new graphics objects into it
The surprising thing about this for me, coming from Windows.Forms, is that I can update my DrawingGroup after I've added it to the DrawingContext during OnRender(). This updates the existing retained drawing commands in the WPF drawing tree and triggers an efficient repaint.
In a simple app I've coded in both Windows.Forms and WPF (SoundLevelMonitor), this method empirically feels pretty similar in performance to immediate OnPaint() GDI drawing.
I think WPF did a disservice by calling the method OnRender(); it might be better termed AccumulateDrawingObjects().
This basically looks like:
DrawingGroup backingStore = new DrawingGroup();
protected override void OnRender(DrawingContext drawingContext) {
base.OnRender(drawingContext);
Render(); // put content into our backingStore
drawingContext.DrawDrawing(backingStore);
}
// I can call this anytime, and it'll update my visual drawing
// without ever triggering layout or OnRender()
private void Render() {
var drawingContext = backingStore.Open();
Render(drawingContext);
drawingContext.Close();
}
I've also tried using RenderTargetBitmap and WriteableBitmap, both to an Image.Source, and written directly to a DrawingContext. The above method is faster.
In Windows Forms this kind of thing made me fall back to:
Set Visible=False for the highest level container (e.g. canvas of the form itself)
Draw a lot
Set Visible=True
Not sure if WPF supports this.
Here are some things you may try (I tried them with your sample and it seems to look faster, at least on my system):
Use Canvas instead of Grid (unless you have other reasons). Play with BitmapScalingMode and CachingHint:
<Canvas Name="xamlGrid" RenderOptions.BitmapScalingMode="LowQuality" RenderOptions.CachingHint="Cache" IsHitTestVisible = "False">
</Canvas>
Add a StaticResource for the Brush used in the GeometryDrawing:
<SolidColorBrush x:Key="MyBrush" Color="DarkBlue"/>
and in code use it as:
GeometryDrawing gd = new GeometryDrawing((SolidColorBrush)this.FindResource("MyBrush"), null, eg);
I hope this helps.
I'm trying to draw 2D polygons with wide, colored outlines without using a custom shader.
(if I were to write one it'd probably be slower than using the CPU since I'm not well-versed in shaders)
To do so I plan to draw the polygons like normal, and then use the resulting depth-buffer as a stencil when drawing the same, expanded geometry.
The XNA "GraphicsDevice" can draw primitives given any array of IVertexType instances:
DrawUserPrimitives<T>(PrimitiveType primitiveType, T[] vertexData, int vertexOffset, int primitiveCount, VertexDeclaration vertexDeclaration) where T : struct;
I've defined an IVertexType for 2D coordinate space:
public struct VertexPosition2DColor : IVertexType
{
public VertexPosition2DColor (Vector2 position, Color color) {
this.position = position;
this.color = color;
}
public Vector2 position;
public Color color;
public static VertexDeclaration declaration = new VertexDeclaration (
new VertexElement(0, VertexElementFormat.Vector2, VertexElementUsage.Position, 0),
new VertexElement(sizeof(float)*2, VertexElementFormat.Color, VertexElementUsage.Color, 0)
);
VertexDeclaration IVertexType.VertexDeclaration {
get {return declaration;}
}
}
I've defined an array class for storing a polygon's vertices, colors, and edge normals.
I hope to pass this class as the T[] parameter in the GraphicsDevice's DrawUserPrimitives function.
The goal is for the outline vertices to be GPU-calculated, since the GPU is apparently good at such things.
internal class VertexOutlineArray : Array
{
internal VertexOutlineArray (Vector2[] positions, Vector2[] normals, Color[] colors, Color[] outlineColors, bool outlineDrawMode) {
this.positions = positions;
this.normals = normals;
this.colors = colors;
this.outlineColors = outlineColors;
this.outlineDrawMode = outlineDrawMode;
}
internal Vector2[] positions, normals;
internal Color[] colors, outlineColors;
internal float outlineWidth;
internal bool outlineDrawMode;
internal void SetVertex(int index, Vector2 position, Vector2 normal, Color color, Color outlineColor) {
positions[index] = position;
normals[index] = normal;
colors[index] = color;
outlineColors[index] = outlineColor;
}
internal VertexPosition2DColor this[int i] {
get {return (outlineDrawMode)? new VertexPosition2DColor(positions[i] + outlineWidth*normals[i], outlineColors[i])
: new VertexPosition2DColor(positions[i], colors[i]);
}
}
}
I want to be able to render the shape and its outline like so:
(the depth buffer is used as a stencil when drawing the expanded outline geometry)
protected override void RenderLocally (GraphicsDevice device)
{
// Draw shape
mVertices.outlineDrawMode = false; //mVertices is a VertexOutlineArray instance
device.RasterizerState = RasterizerState.CullNone;
device.PresentationParameters.DepthStencilFormat = DepthFormat.Depth16;
device.Clear(ClearOptions.DepthBuffer, Color.SkyBlue, 0, 0);
device.DrawUserPrimitives<VertexPosition2DColor>(PrimitiveType.TriangleList, (VertexPosition2DColor[])mVertices, 0, mVertices.Length -2, VertexPosition2DColor.declaration);
// Draw outline
mVertices.outlineDrawMode = true;
device.DepthStencilState = new DepthStencilState {
DepthBufferWriteEnable = true,
DepthBufferFunction = CompareFunction.Greater //keeps the outline from writing over the shape
};
device.DrawUserPrimitives(PrimitiveType.TriangleList, mVertices.ToArray(), 0, mVertices.Count -2);
}
This doesn't work though, because I'm unable to pass my VertexArray class as a T[]. How can I amend this or otherwise accomplish the goal of doing outline calculations on the GPU without a custom shader?
I am wondering why you don't simply write a class that draws the outline using pairs of thin triangles as lines. You could create a generalized polyline routine that takes the 2D points and a line width as input and spits out a VertexBuffer; see the sketch below.
I realize this isn't answering your question, but I can't see what the advantage is of trying to do it your way. Is there a specific effect you want to achieve, or are you going to be manipulating the data very frequently or scaling the polygons a lot?
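A rough sketch of such a routine (my own, untested; XNA 4 types, two triangles per segment, no mitering at the joints):
// Builds a triangle list that renders a polyline with the given width.
VertexPositionColor[] BuildLineQuads(Vector2[] pts, float width, Color color)
{
    var verts = new VertexPositionColor[(pts.Length - 1) * 6];
    for (int i = 0; i < pts.Length - 1; i++)
    {
        Vector2 dir = Vector2.Normalize(pts[i + 1] - pts[i]);
        Vector2 n = new Vector2(-dir.Y, dir.X) * (width * 0.5f); // perpendicular of the segment
        Vector2 a = pts[i] + n, b = pts[i] - n, c = pts[i + 1] + n, d = pts[i + 1] - n;
        int v = i * 6; // two triangles per segment: a-b-c and c-b-d
        verts[v + 0] = new VertexPositionColor(new Vector3(a, 0), color);
        verts[v + 1] = new VertexPositionColor(new Vector3(b, 0), color);
        verts[v + 2] = new VertexPositionColor(new Vector3(c, 0), color);
        verts[v + 3] = new VertexPositionColor(new Vector3(c, 0), color);
        verts[v + 4] = new VertexPositionColor(new Vector3(b, 0), color);
        verts[v + 5] = new VertexPositionColor(new Vector3(d, 0), color);
    }
    return verts;
}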
The problem you are likely having is that XNA4 for Windows Phone 7 does not support custom shaders at all. In fact they purposefully limited it to a set of predefined shaders because of the number of permutations that would have to be tested. The ones currently supported are:
AlphaTestEffect
BasicEffect
EnvironmentMapEffect
DualTextureEffect
SkinnedEffect
You can read about them here:
http://msdn.microsoft.com/en-us/library/bb203872(v=xnagamestudio.40).aspx
I have not tested creating or utilizing an IVertexType with Vector2 position and normal, so I can't comment on whether it is supported. If I were to do this I would use just the BasicEffect and VertexPositionNormal for the main polygonal shape rendering and adjust the DiffuseColor for each polygon. For rendering the outline you use the existing VertexBuffer and scale it appropriately, calling GraphicsDevice.Viewport.Unproject() to determine the 3D coordinate distance required to produce an n-pixel 2D screen distance (your outline width).
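For the Unproject part, a sketch of how you might measure the world-space size of one screen pixel (my own illustration; assumes you have the view and projection matrices used for rendering):
// Approximates the world-space length covered by one screen pixel at a given
// projected depth by unprojecting two screen points one pixel apart.
float WorldUnitsPerPixel(GraphicsDevice device, Matrix projection, Matrix view, float depth)
{
    Viewport vp = device.Viewport;
    Vector3 a = vp.Unproject(new Vector3(0, 0, depth), projection, view, Matrix.Identity);
    Vector3 b = vp.Unproject(new Vector3(1, 0, depth), projection, view, Matrix.Identity);
    return Vector3.Distance(a, b); // multiply by n for an n-pixel outline width
}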
Remember that when you are using the BasicEffect, or any effect for that matter, you have to loop through the EffectPass array of the CurrentTechnique and call the Apply() method for each pass before you make your Draw call:
device.DepthStencilState = DepthStencilState.Default;
device.BlendState = BlendState.AlphaBlend;
device.RasterizerState = RasterizerState.CullCounterClockwise;
//Set the appropriate vertex and indice buffers
device.SetVertexBuffer(_polygonVertices);
device.Indices = _polygonIndices;
foreach (EffectPass pass in _worldEffect.CurrentTechnique.Passes)
{
pass.Apply();
PApp.Graphics.DrawIndexedPrimitives(PrimitiveType.TriangleList, 0, 0, _polygonVertices.VertexCount, 0, _polygonIndices.IndexCount / 3);
}
I have 2 sprites which when drawn together make up the correct image on the screen. Drawing them both at the same time is not an option.
Imagine this class:
class MyImage
{
Vector2 drawOffset; // this gets added before the image is drawn
Vector2 sourceRect; // this is where it is on the source texturepage
void Draw(Vector2 position)
{
position = position + drawOffset;
spriteBatch.Draw(sourceTexture, position, sourceRect, Color.White);
}
}
And this code calling into it:
MyImage a = new MyImage(); // assume they get initialised correctly
MyImage b = new MyImage(); // with different drawOffsets and sourceRects
a.Draw(position); // this composes the final
b.Draw(position); // screen image from the 2 source images
Now I'd like to add scale and rotation to the Draw() function, but am having real trouble getting the parameters to the SpriteBatch.Draw function correct. This would be the version which takes scale, rotation and an origin. I need the final composed image to scale and rotate correctly (around some arbitrary centre) but can't for the life of me work out how to manipulate the scale, rotation and origin parameters to make the 2 images appear to scale and rotate in concert. Has anyone done something like this before? Happy to mod the question based on feedback if anything's unclear. If images would help I can get them posted somewhere...
I've looked at rotation around point xna 2D but am still stumped.
Cheers,
Charlie.
Thanks so much for the answer below - using it I've managed to get the images rendering correctly. One other issue remains: I seem to need a lot of spriteBatch.Begin/End pairs (one per image render). I don't have a way to measure performance on this device yet, and the framerate's not chugging, so I guess it's not a problem.
Here's my code:
// gr is the graphic object:
// gr.position is the location of the image in the atlas
// gr.DrawOffset is the draw offset so the image is placed correctly in its virtual box
// gr.pageIndex is the index into the texture/atlas array
// hw,hh are half the width/height of the image (it always rotates around its centre, in fact)
Matrix m = Matrix.CreateTranslation(-hw, -hh, 0) *
Matrix.CreateRotationZ(rotation) * // rotation : parameter
Matrix.CreateScale(scale) * // scale : parameter
Matrix.CreateTranslation(pos.X + hw, pos.Y + hh, 0); // pos : parameter!
spriteBatch.Begin(SpriteSortMode.Deferred, null, null, null, null, null, m);
spriteBatch.Draw(page[gr.pageIndex].texture, gr.DrawOffset, gr.position, color);
spriteBatch.End();
If you are going to work with SpriteBatch.Draw to draw the textures, I would suggest that you forgo trying to manipulate the origin and scale arguments to achieve this; I simply doubt it can be done that way. But you do have an alternative: you can manipulate the SpriteBatch matrix.
Here is a quick and dirty example. Note that the texture I used here is 128x96, so I hard-coded the values for that image size. Do not look for any best practices in this code; I wrote it to try and show the concept as cleanly as possible.
using System;
using Microsoft.Xna.Framework;
using Microsoft.Xna.Framework.Content;
using Microsoft.Xna.Framework.Graphics;
using Microsoft.Xna.Framework.Input;
namespace WindowsGame1
{
/// <summary>
/// This is the main type for your game
/// </summary>
public class Game1 : Microsoft.Xna.Framework.Game
{
GraphicsDeviceManager graphics;
SpriteBatch spriteBatch;
private Texture2D _texture;
private MyImage _image1;
private MyImage _image2;
// Attributes of the composed sprite
private float _angle = 0.0f;
private Vector2 _position = new Vector2(100, 100);
private Vector2 _rotationPoint = new Vector2(96, 48);
public Game1()
{
graphics = new GraphicsDeviceManager(this);
Content.RootDirectory = "Content";
}
/// <summary>
/// Allows the game to perform any initialization it needs to before starting to run.
/// This is where it can query for any required services and load any non-graphic
/// related content. Calling base.Initialize will enumerate through any components
/// and initialize them as well.
/// </summary>
protected override void Initialize()
{
// TODO: Add your initialization logic here
base.Initialize();
}
/// <summary>
/// LoadContent will be called once per game and is the place to load
/// all of your content.
/// </summary>
protected override void LoadContent()
{
// Create a new SpriteBatch, which can be used to draw textures.
spriteBatch = new SpriteBatch(GraphicsDevice);
_texture = Content.Load<Texture2D>("Gravitar");
// Create the two MyImage instances
_image1 = new MyImage(_texture, Vector2.Zero, Vector2.Zero);
_image2 = new MyImage(_texture, new Vector2(64, 0), new Vector2(64, 0));
}
/// <summary>
/// UnloadContent will be called once per game and is the place to unload
/// all content.
/// </summary>
protected override void UnloadContent()
{
// TODO: Unload any non ContentManager content here
}
/// <summary>
/// Allows the game to run logic such as updating the world,
/// checking for collisions, gathering input, and playing audio.
/// </summary>
/// <param name="gameTime">Provides a snapshot of timing values.</param>
protected override void Update(GameTime gameTime)
{
// Allows the game to exit
if (GamePad.GetState(PlayerIndex.One).Buttons.Back == ButtonState.Pressed)
this.Exit();
float elapsedTime = (float)gameTime.ElapsedGameTime.TotalSeconds;
_angle += 0.5f * elapsedTime;
if (Mouse.GetState().LeftButton == ButtonState.Pressed)
{
_angle = 0.0f;
}
if (Keyboard.GetState().IsKeyDown(Keys.Left))
_position += new Vector2(-10, 0)*elapsedTime;
if (Keyboard.GetState().IsKeyDown(Keys.Right))
_position += new Vector2(10, 0) * elapsedTime;
base.Update(gameTime);
}
/// <summary>
/// This is called when the game should draw itself.
/// </summary>
/// <param name="gameTime">Provides a snapshot of timing values.</param>
protected override void Draw(GameTime gameTime)
{
GraphicsDevice.Clear(Color.CornflowerBlue);
// Setup the sprite batch matrix
// Notice that we first translate to the point of rotation,
// then rotate, and when we translate to the desired position we
// need to compensate for the first translation so that the texture
// appears at the correct location
Matrix m =
Matrix.CreateScale(1.5f)
* Matrix.CreateTranslation(-_rotationPoint.X, -_rotationPoint.Y, 0)
* Matrix.CreateRotationZ(_angle)
* Matrix.CreateTranslation(_position.X + _rotationPoint.X, _position.Y + _rotationPoint.Y, 0);
// Begin the SpriteBatch passing the matrix
spriteBatch.Begin(SpriteSortMode.Deferred, null, null, null, null, null, m);
_image1.Draw(spriteBatch);
_image2.Draw(spriteBatch);
spriteBatch.End();
base.Draw(gameTime);
}
class MyImage
{
Vector2 _drawOffset;
Vector2 _sourcePoint;
Texture2D _sourceTexture;
public MyImage(Texture2D sourceTexture, Vector2 sourcePoint, Vector2 drawOffset)
{
_drawOffset = drawOffset;
_sourcePoint = sourcePoint;
_sourceTexture = sourceTexture;
}
public void Draw(SpriteBatch spriteBatch)
{
spriteBatch.Draw(_sourceTexture, _drawOffset,
new Rectangle((int)_sourcePoint.X, (int)_sourcePoint.Y, 64, 96),
Color.White);
}
}
}
}