I'm trying to build a 3D object model, but my code just renders the 3D model with a single solid colour instead of the image.
How can I create a 3D object with a different image on each of its six surfaces, like a Rubik's cube?
This is my code, using the Aspose.3D library and C#:
private void Form1_Load(object sender, EventArgs e)
{
//Create a FBX file with embedded textures
Scene scene = new Scene();
scene.Open("BetterShirt.obj");
//Create an embedded texture
Texture tex = new Texture()
{
Content = CreateTextureContent(),
FileName = "face.png",
WrapModeU = Aspose.ThreeD.Shading.WrapMode.Wrap,
};
tex.SetProperty("TexProp", "value");
//create a material with custom property
//Aspose.ThreeD.Shading.
Material mat = scene.RootNode.ChildNodes[0].Material;
mat.SetTexture(Material.MapDiffuse, tex);
mat.SetProperty("MyProp", 1.0);
scene.RootNode.ChildNodes[0].Material = mat;
//save this to file
scene.Save("exported.obj", FileFormat.WavefrontOBJ);
}
private static byte[] CreateTextureContent()
{
using (var bitmap = new Bitmap(256, 256))
{
using (var g = Graphics.FromImage(bitmap))
{
g.Clear(Color.White);
LinearGradientBrush brush = new LinearGradientBrush(new Rectangle(0, 0, 128, 128),
Color.Moccasin, Color.Blue, 45);
using (var font = new Font(FontFamily.GenericSerif, 40))
{
g.DrawString("Aspose.3D", font, brush, Point.Empty);
}
}
using (var ms = new MemoryStream())
{
//serialize the bitmap to PNG; without this the method returns an empty array
bitmap.Save(ms, ImageFormat.Png);
return ms.ToArray();
}
}
}
To build a 3D object model with 6 images (one per face), we have devised the code below based on your requirements, with comments added for reference. Please try it in your environment and share your feedback with us.
private static void RubikCube()
{
Bitmap[] bitmaps = CreateRubikBitmaps();
Scene scene = new Scene();
//create a box and convert it to mesh, so we can manually specify the material per face
var box = (new Box()).ToMesh();
//create a material mapping; the box mesh generated from the Box primitive contains 6 polygons,
//so we can reference each polygon's material by index (MappingMode.Polygon + ReferenceMode.Index)
var materials = (VertexElementMaterial)box.CreateElement(VertexElementType.Material, MappingMode.Polygon, ReferenceMode.Index);
//and each polygon uses different materials, the indices of these materials are specified below
materials.SetIndices(new int[] {0, 1, 2, 3, 4, 5});
//create the node and materials(referenced above)
var boxNode = scene.RootNode.CreateChildNode(box);
for (int i = 0; i < bitmaps.Length; i++)
{
//create material with texture
var material = new LambertMaterial();
var tex = new Texture();
using (var ms = new MemoryStream())
{
bitmaps[i].Save(ms, ImageFormat.Png);
var bytes = ms.ToArray();
//Save it to Texture.Content as embedded texture, thus the scene with textures can be exported into a single FBX file.
tex.Content = bytes;
//Give it a name and save it to disk so it can be opened with .obj file
tex.FileName = string.Format("cube_{0}.png", i);
File.WriteAllBytes(tex.FileName, bytes);
//Dispose the bitmap since we no longer need it.
bitmaps[i].Dispose();
}
//the texture is used as diffuse
material.SetTexture(Material.MapDiffuse, tex);
//attach it to the node where contains the box mesh
boxNode.Materials.Add(material);
}
//save it to file
//The Windows 10 3D Viewer does not support multiple materials (you'll see the same texture on every face), but Autodesk's tools do
scene.Save("test.fbx", FileFormat.FBX7500ASCII);
//NOTE: Aspose.3D's OBJ exporter does not support multiple materials per mesh yet.
//But we can split a mesh with multiple materials into separate meshes by using PolygonModifier.SplitMesh
PolygonModifier.SplitMesh(scene, SplitMeshPolicy.CloneData);
//the following line will also generate a material library file (test.mtl) that uses the textures exported above.
scene.Save("test.obj", FileFormat.WavefrontOBJ);
}
private static Bitmap[] CreateRubikBitmaps()
{
Brush[] colors = { Brushes.White, Brushes.Red, Brushes.Blue, Brushes.Yellow, Brushes.Orange, Brushes.Green};
Bitmap[] bitmaps = new Bitmap[6];
//initialize the cell colors
int[] cells = new int[6 * 9];
for (int i = 0; i < cells.Length; i++)
{
cells[i] = i / 9;
}
//shuffle the cells with a Fisher-Yates shuffle
//(sorting with a random comparer is unreliable and can throw on some .NET versions)
Random random = new Random();
for (int i = cells.Length - 1; i > 0; i--)
{
int k = random.Next(i + 1);
int tmp = cells[i];
cells[i] = cells[k];
cells[k] = tmp;
}
//paint each face
// size of each face is 256px
const int size = 256;
// size of cell is 80x80
const int cellSize = 80;
// calculate padding size between each cell
const int paddingSize = (size - cellSize * 3) / 4;
int cellId = 0;
for (int i = 0; i < 6; i++)
{
bitmaps[i] = new Bitmap(size, size, System.Drawing.Imaging.PixelFormat.Format32bppRgb);
using (Graphics g = Graphics.FromImage(bitmaps[i]))
{
g.Clear(Color.Black);
for (int j = 0; j < 9; j++)
{
//calculate the cell's position
int row = j / 3;
int column = j % 3;
int y = row * (cellSize + paddingSize) + paddingSize;
int x = column * (cellSize + paddingSize) + paddingSize;
Brush cellBrush = colors[cells[cellId++]];
//paint cell
g.FillRectangle(cellBrush, x, y, cellSize, cellSize);
}
}
}
return bitmaps;
}
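For completeness, a minimal usage sketch, assuming the same Form1_Load hook as in your original code:
private void Form1_Load(object sender, EventArgs e)
{
    // builds the textured cube and exports test.fbx and test.obj
    // (plus the cube_0.png .. cube_5.png textures) next to the executable
    RubikCube();
}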
PS: I work with Aspose as Developer Evangelist.
I have created a program to draw square grids on a selected image. It works fine for images with small resolutions, but it doesn't work properly on large images.
Not all of the grid lines are visible when the image is saved to a file.
The image I am testing has a resolution of 3600x4320 and can be seen at the link.
How can I fix this problem?
My code:
Image drawGrid(int n, string imgPath)
{
Image img = Image.FromFile(imgPath);
Graphics grp = Graphics.FromImage(img);
grp.SmoothingMode = System.Drawing.Drawing2D.SmoothingMode.HighQuality;
float m = img.Width * 1f / n;
for (int i = 1; i < n; i++)
{
var x = new PointF(i * m, 0);
var y = new PointF(i * m, img.Height);
grp.DrawLine(Pens.Red, x, y);
}
for (int i = 1; i <= (int)(this.Height / m); i++)
{
var x = new PointF(0, i * m);
var y = new PointF(img.Width, i * m);
grp.DrawLine(new Pen(Color.Red, 5f), x, y);
}
return img;
}
void BtnExportClick(object sender, EventArgs e)
{
if(saveFileDialog1.ShowDialog() == DialogResult.OK)
{
int n = (int)numericUpDown1.Value;
drawGrid(n, txtImagePath.Text).Save(saveFileDialog1.FileName, System.Drawing.Imaging.ImageFormat.Jpeg);
MessageBox.Show("Done");
}
}
The result image is below (resolution reduced for upload).
The grid lines are not shown correctly.
The major problem is in this line:
for (int i = 1; i <= (int)(this.Height / m); i++)
▶ this.Height is clearly not what you meant to write; replace it with the image's Height.
▶ grp.DrawLine(Pens.Red, x, y); and grp.DrawLine(new Pen(Color.Red, 5f), x, y); draw lines of different widths (1 and 5 pixels). In the sample code below, the two methods accept Color and float arguments that define the Pen color and width.
▶ grp.SmoothingMode: we don't want any smoothing here. It isn't needed for straight lines, and it adds anti-aliasing that will be clearly visible, especially when the Image is saved in JPEG format (JPEG compression already mangles the colors along these lines by itself).
▶ You're not disposing of any of the Graphics objects you create. This is quite important both with frequent graphics operations and when working with large Bitmaps.
The Pen and Graphics objects need to be disposed (a minimal sketch with these fixes applied is shown just below, followed by the two full methods).
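As a quick illustration, a minimal sketch of the posted drawGrid method with those fixes applied (keeping the original method's structure):
Image DrawGridFixed(int n, string imgPath)
{
    Image img = Image.FromFile(imgPath);
    // cell size derived from the image width
    float m = img.Width * 1f / n;
    using (Graphics grp = Graphics.FromImage(img))
    using (Pen pen = new Pen(Color.Red, 5f))
    {
        // vertical lines
        for (int i = 1; i < n; i++)
            grp.DrawLine(pen, new PointF(i * m, 0), new PointF(i * m, img.Height));
        // horizontal lines, based on the image height rather than the form height
        for (int i = 1; i < (int)(img.Height / m); i++)
            grp.DrawLine(pen, new PointF(0, i * m), new PointF(img.Width, i * m));
    }
    return img;
}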
Since it's not exactly clear whether you want a grid with the same number of lines in both dimensions (hence cells whose width and height differ) or a grid of square cells (which is what the sample image, not the code, seems to show), I posted two methods that draw both grid types:
First method, same number of lines in both width and height; the cell size is computed per axis:
var gridSizeX = (float)image.Width / lines;
var gridSizeY = (float)image.Height / lines;
private Image DrawGridLines(int lines, string imgPath, Color penColor, float penSize)
{
var image = Image.FromStream(new MemoryStream(File.ReadAllBytes(imgPath)), true);
using (var g = Graphics.FromImage(image)) {
g.PixelOffsetMode = PixelOffsetMode.Half;
var gridSizeX = (float)image.Width / lines;
var gridSizeY = (float)image.Height / lines;
for (int i = 1; i < lines; i++) {
var pointX1 = new PointF(0, i * gridSizeY);
var pointX2 = new PointF(image.Width, i * gridSizeY);
var pointY1 = new PointF(i * gridSizeX, 0);
var pointY2 = new PointF(i * gridSizeX, image.Height);
using (var pen = new Pen(penColor, penSize)) {
g.DrawLine(pen, pointX1, pointX2);
g.DrawLine(pen, pointY1, pointY2);
}
}
return image;
}
}
Second method, drawing a squared grid. The integer value, gridSection, is used to define a grid Cell based on the minimum dimension of the Bitmap.
This dimension is then used to determine how many lines to draw in the other dimension.
The grid size is calculated on the minimum dimension:
var gridSize = (float)Math.Min(image.Width, image.Height) / gridSection;
And the cell counts are determined as a consequence:
var gridStepMin = Math.Min(image.Width, image.Height) / gridSize;
var gridStepMax = Math.Max(image.Width, image.Height) / gridSize;
private Image DrawSquaredGrid(int gridSection, string imgPath, Color penColor, float penSize)
{
var image = Image.FromStream(new MemoryStream(File.ReadAllBytes(imgPath)), true);
using (var g = Graphics.FromImage(image)) {
g.PixelOffsetMode = PixelOffsetMode.Half;
var gridSize = (float)Math.Min(image.Width, image.Height) / gridSection;
var gridStepMin = Math.Min(image.Width, image.Height) / gridSize;
var gridStepMax = Math.Max(image.Width, image.Height) / gridSize;
for (int i = 1; i < gridStepMin; i++) {
var pointY1 = new PointF(i * gridSize, 0);
var pointY2 = new PointF(i * gridSize, image.Height);
using (var pen = new Pen(penColor, penSize)) {
g.DrawLine(pen, pointY1, pointY2);
}
}
for (int i = 1; i < gridStepMax; i++) {
var pointX1 = new PointF(0, i * gridSize);
var pointX2 = new PointF(image.Width, i * gridSize);
using (var pen = new Pen(penColor, penSize)) {
g.DrawLine(pen, pointX1, pointX2);
}
}
return image;
}
}
The SaveFileDialog code is refactored to allow multiple image formats and to call one of the drawing methods based on a selection (in the sample code, a CheckBox, chkSquared, is used to select one of the grid types).
You can add more formats; the ImageFormatFromFileName() method selects the ImageFormat type based on the SaveFileDialog.FileName extension.
private void BtnExportClick(object sender, EventArgs e)
{
string imagePath = [Some Path];
using (var sfd = new SaveFileDialog()) {
sfd.Filter = "PNG Image (*.png)|*.png|TIFF Image (*.tif)|*.tif|JPEG Image (*.jpg)|*.jpg";
sfd.RestoreDirectory = true;
sfd.AddExtension = true;
if (sfd.ShowDialog() == DialogResult.OK) {
Image image = null;
if (chkSquared.Checked) {
image = DrawSquaredGrid((int)numericUpDown1.Value, imagePath, Color.Red, 5.0f);
}
else {
image = DrawGridLines((int)numericUpDown1.Value, imagePath, Color.Red, 5.0f);
}
image.Save(sfd.FileName, ImageFormatFromFileName(sfd.FileName));
MessageBox.Show("Done");
image.Dispose();
}
}
}
private ImageFormat ImageFormatFromFileName(string fileName)
{
string fileType = Path.GetExtension(fileName).Remove(0, 1);
if (fileType.Equals("tif")) fileType = "tiff";
if (fileType.Equals("jpg")) fileType = "jpeg";
return (ImageFormat)new ImageFormatConverter().ConvertFromString(fileType);
}
I'm generating barcodes depending on how many the user sets in the NumericUpDown control. The problem is that when generating a lot of barcodes, the other barcodes cannot be seen in the PrintPreviewDialog, because I cannot apply a new line (\n) every 4-5 images.
int x = 0, y = 10;
for (int i = 1; i <= int.Parse(txtCount.Text); i++)
{
idcount++;
connection.Close();
Zen.Barcode.Code128BarcodeDraw barcode = Zen.Barcode.BarcodeDrawFactory.Code128WithChecksum;
Random random = new Random();
string randomtext = "MLQ-";
int j;
for (j = 1; j <= 6; j++)
{
randomtext += random.Next(0, 9).ToString();
Image barcodeimg = barcode.Draw(randomtext, 50);
resultimage = new Bitmap(barcodeimg.Width, barcodeimg.Height + 20);
using (var graphics = Graphics.FromImage(resultimage))
using (var font = new Font("Arial", 11)) // Any font you want
using (var brush = new SolidBrush(Color.Black))
using (var format = new StringFormat() { Alignment = StringAlignment.Center, LineAlignment = StringAlignment.Far}) // Also, horizontally centered text, as in your example of the expected output
{
graphics.Clear(Color.White);
graphics.DrawImage(barcodeimg, 0, 0);
graphics.DrawString(randomtext, font, brush, resultimage.Width / 2, resultimage.Height, format);
}
x += 25;
}
e.Graphics.DrawImage(resultimage, x, y);
}
There are no "new lines" in rasterized graphics; there are pixels. You've got the right idea: every n images, start a new row. But since you're working with pixels, let's say every 4 images you need to add a vertical offset by modifying the y coordinate of all your graphics draw calls. This offset, combined with a row height in pixels, could look something like this:
var rowHeight = 250; // pixels
var maxColumns = 4;
var verticalOffset = (i / maxColumns) * rowHeight; // integer division: the offset grows by one row every maxColumns images
Then, wherever you supply a y coordinate, starting at or near 0, add the vertical offset to it (see the sketch below).
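For illustration, a minimal sketch of that layout inside the PrintPage handler might look like the following; the 250-pixel row height, the column width and the barcodeImages list are assumptions for the example, not taken from your code:
// Sketch: lay out pre-rendered barcode images in rows of maxColumns.
var rowHeight = 250;      // assumed row height in pixels
var columnWidth = 250;    // assumed column width in pixels
var maxColumns = 4;
for (int i = 0; i < barcodeImages.Count; i++)
{
    int column = i % maxColumns;  // position within the current row
    int row = i / maxColumns;     // row index, increases every maxColumns images
    int x = column * columnWidth;
    int y = 10 + row * rowHeight; // vertical offset added to the starting y
    e.Graphics.DrawImage(barcodeImages[i], x, y);
}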
I have another problem with XNA. I have a Texture2D containing many tiles, and a list of rectangles that are supposed to be the bounds of each 50x50-pixel tile.
Now I would like to be able to save any single tile as a small PNG file.
Here is my code:
//declare the rectangles and spritesets
Texture2D tileSheet;
List<Rectangle> tileSet;
//load every tile, also works great
tileSheet = Content.Load<Texture2D>(@"Tiles/Object");
int noOfTilesX = (int)tileSheet.Width / 50;
int noOfTilesY = (int)tileSheet.Height / 50;
tileSet = new List<Rectangle>(noOfTilesX * noOfTilesY);
for (int j = 0; j < noOfTilesY; j++)
{
for (int i = 0; i < noOfTilesX; i++)
{
bounds = new Rectangle(i * 50, j * 50, 50, 50);
tileSet.Add(bounds);
}
}
//save as pngs if they do not exist
if (!File.Exists(@"C:\Test.png"))
{
Stream stream = File.Create(@"C:\Test.png");
//works, but it is only the complete file, i'd like to save a single tile
tileSheet.SaveAsPng(stream, tileSheet.Width, tileSheet.Height);
}
The problem is that I cannot save a single tile; it always saves my whole Texture2D and stretches it to the resolution I pass in the parameters. Searching the internet didn't tell me anything; it only showed how to use the stream for a complete Texture2D.
I just answered a question where someone wanted a new texture made from a part of a larger texture, much like your tile problem.
You can use this extension method and then save the result as you would normally:
public static class TextureExtension
{
/// <summary>
/// Creates a new texture from an area of the texture.
/// </summary>
/// <param name="graphics">The current GraphicsDevice</param>
/// <param name="rect">The dimension you want to have</param>
/// <returns>The partial Texture.</returns>
public static Texture2D CreateTexture(this Texture2D src, GraphicsDevice graphics, Rectangle rect)
{
Texture2D tex = new Texture2D(graphics, rect.Width, rect.Height);
int count = rect.Width * rect.Height;
Color[] data = new Color[count];
src.GetData(0, rect, data, 0, count);
tex.SetData(data);
return tex;
}
}
Suggested usage:
using (FileStream stream = File.OpenWrite("path"))
{
tileTexture.CreateTexture(GraphicsDevice, new Rectangle(50, 50, 100, 100)).SaveAsJpeg(stream, 100, 100);
}
Try something like this. I haven't tested it so there might be some errors, but you just need to create a separate texture for each tile and then save each of them.
//declare the rectangles and spritesets
Texture2D tileSheet;
List<Rectangle> tileSet;
//load every tile, also works great
tileSheet = Content.Load<Texture2D>(@"Tiles/Object");
int noOfTilesX = (int)tileSheet.Width / 50;
int noOfTilesY = (int)tileSheet.Height / 50;
tileSet = new List<Rectangle>(noOfTilesX * noOfTilesY);
// Gets the color data of the tile sheet.
Color[] tileSheetPixels = new Color[tileSheet.Width * tileSheet.Height];
tileSheet.GetData<Color>(tileSheetPixels);
for (int j = 0; j < noOfTilesY; j++)
{
for (int i = 0; i < noOfTilesX; i++)
{
bounds = new Rectangle(i * 50, j * 50, 50, 50);
tileSet.Add(bounds);
// Creates a new texture for a single tile.
Texture2D singleTile = new Texture2D(graphics, 50, 50);
Color[] pixels = new Color[50 * 50];
// Gets the pixels that correspond to the single tile and saves them in another
// color array.
for (int k = 0; k < pixels.Length; k++)
{
int x = bounds.X + (k % 50);
int y = bounds.Y + (k / 50);
// index into the full sheet, which is tileSheet.Width pixels wide
pixels[k] = tileSheetPixels[y * tileSheet.Width + x];
}
// Sets the color data of the single tile texture to the color array
// created above.
singleTile.SetData<Color>(pixels);
//save as pngs if they do not exist
if (!File.Exists(@"C:\tile_" + i + "_" + j + ".png"))
{
using (Stream stream = File.Create(@"C:\tile_" + i + "_" + j + ".png"))
{
singleTile.SaveAsPng(stream, 50, 50);
}
}
}
}
I want to change the offset (2) of terrain texture through code.
I have added a road image as a texture on the terrain.
I've found related code online, but I am unable to figure out the role of renderer in this case.
More than code, I just want to know the first step that should be taken in order to change texture through code. (Settings basically).
And please mention the role of renderer.
In Unity, terrain textures are handled by the SplatPrototype class. See the documentation.
A SplatPrototype is just a texture that is used by the TerrainData.
So if you want to change the Terrain's texture, you have to create a new SplatPrototype and assign it to the splatPrototypes property of TerrainData.
There you can set the metallic, normalMap, smoothness, texture, tileSize and tileOffset values of your choice.
You can use the following method:
private void SetTerrainSplatMap(Terrain terrain, Texture2D[] textures)
{
var terrainData = terrain.terrainData;
// The Splat map (Textures)
SplatPrototype[] splatPrototype = new SplatPrototype[terrainData.splatPrototypes.Length];
for (int i = 0; i < terrainData.splatPrototypes.Length; i++)
{
splatPrototype[i] = new SplatPrototype();
splatPrototype[i].texture = textures[i]; //Sets the texture
splatPrototype[i].tileSize = new Vector2(terrainData.splatPrototypes[i].tileSize.x, terrainData.splatPrototypes[i].tileSize.y); //Sets the size of the texture
splatPrototype[i].tileOffset = new Vector2(terrainData.splatPrototypes[i].tileOffset.x, terrainData.splatPrototypes[i].tileOffset.y); //Sets the offset of the texture
}
terrainData.splatPrototypes = splatPrototype;
}
This worked for me:
splat[i].tileOffset = new Vector2(tar.splatPrototypes[i].tileOffset.x, tar.splatPrototypes[i].tileOffset.y+5f);
SplatPrototype is deprecated. I used TerrainLayers instead to edit the tiling size of the texture:
// read the splat map weights for a 100x100 area starting at (0,0)
float[,,] splatMapData = terrain.terrainData.GetAlphamaps(0, 0, 100, 100);
for (int i = 26; i < 100; i++)
{
for (int j = 0; j < 100; j++)
{
// make the third layer fully opaque in this region
splatMapData[i, j, 0] = 0;
splatMapData[i, j, 1] = 0;
splatMapData[i, j, 2] = 1;
}
}
// adjust the tiling size of the third terrain layer
TerrainLayer[] layers = terrain.terrainData.terrainLayers;
layers[2].tileSize = new Vector2(100, 100);
terrain.terrainData.SetAlphamaps(0, 0, splatMapData);
terrain.Flush();
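Since the original question asked about the texture offset rather than the tiling size, the same TerrainLayer approach can be applied to tileOffset; a small sketch follows, where the layer index and the 5-unit shift are example values only:
// Shift the tiling offset of one terrain layer at runtime.
TerrainLayer[] layers = terrain.terrainData.terrainLayers;
layers[2].tileOffset = new Vector2(layers[2].tileOffset.x, layers[2].tileOffset.y + 5f);
terrain.Flush();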
I am trying to use the Clipper library to modify a graphics path.
I have a list of widths that represent outlines / strokes. I want to start with the largest first and work my way down to the smallest.
For this example, we will add 2 strokes with widths of 20 and 10.
I want to take my graphics path and expand / offset it by 20 pixels into a new graphics path. I do not want to alter the original path. Then I want to fill the new graphics path with a solid color.
Next, I want to take my original graphics path, and expand / offset it by 10 pixels into a new graphics path. I want to fill this new path with a different color.
Then I want to fill my original path with a different color.
What is the proper way to do this? I have the following method that I created to try to do this, but it is not working properly.
private void createImage(Graphics g, GraphicsPath gp, List<int> strokeWidths)
{
ClipperOffset pathConverter = new ClipperOffset();
Clipper c = new Clipper();
gp.Flatten();
foreach(int strokeSize in strokeWidths)
{
g.clear();
ClipperPolygons polyList = new ClipperPolygons();
GraphicsPath gpTest = (GraphicsPath)gp.Clone();
PathToPolygon(gpTest, polyList, 100);
gpTest.Reset();
c.Execute(ClipType.ctUnion, polyList, PolyFillType.pftPositive, PolyFillType.pftEvenOdd);
pathConverter.AddPaths(polyList, JoinType.jtMiter, EndType.etClosedPolygon);
pathConverter.Execute(ref polyList, strokeSize * 100);
for (int i = 0; i < polyList.Count; i++)
{
// reverses scaling
PointF[] pts2 = PolygonToPointFArray(polyList[i], 100);
gpTest.AddPolygon(pts2);
}
g.FillPath(new SolidBrush(Color.Red), gpTest);
}
}
private void PathToPolygon(GraphicsPath path, ClipperPolygons polys, Single scale)
{
GraphicsPathIterator pathIterator = new GraphicsPathIterator(path);
pathIterator.Rewind();
polys.Clear();
PointF[] points = new PointF[pathIterator.Count];
byte[] types = new byte[pathIterator.Count];
pathIterator.Enumerate(ref points, ref types);
int i = 0;
while (i < pathIterator.Count)
{
ClipperPolygon pg = new ClipperPolygon();
polys.Add(pg);
do
{
IntPoint pt = new IntPoint((int)(points[i].X * scale), (int)(points[i].Y * scale));
pg.Add(pt);
i++;
}
while (i < pathIterator.Count && types[i] != 0);
}
}
private PointF[] PolygonToPointFArray(ClipperPolygon pg, float scale)
{
PointF[] result = new PointF[pg.Count];
for (int i = 0; i < pg.Count; ++i)
{
result[i].X = (float)pg[i].X / scale;
result[i].Y = (float)pg[i].Y / scale;
}
return result;
}
While you've made a pretty reasonable start, you seem to be getting muddled in your createImage() function. You mention wanting different colors with the different offsets, so you're missing a colors array to match your strokeWidths array. Also, it's unclear what you're doing with the clipping (union) step; it's probably unnecessary.
So in pseudo-code I suggest something like the following ....
static bool CreateImage(Graphics g, GraphicsPath gp,
List<int> offsets, List<Color> colors)
{
const scale = 100;
if (colors.Count < offsets.Count) return false;
//convert GraphicsPath path to Clipper paths ...
Clipper.Paths cpaths = GPathToCPaths(gp.Flatten(), scale);
//setup the ClipperOffset object ...
ClipperOffset co = new ClipperOffset();
co.AddPaths(cpaths, JoinType.jtMiter, EndType.etClosedPolygon);
//now loop through each offset ...
foreach(offset in offsets, color in colors)
{
Clipper.Paths csolution = new Clipper.Paths();
co.Execute(csolution, offset);
if (csolution.IsEmpty) break; //useful for negative offsets
//now convert back to floating point coordinate array ...
PointF[] solution = CPathToPointFArray(csolution, scale);
DrawMyPaths(Graphics g, solution, color);
}
}
And something to watch for: if you were to use increasingly larger offsets, each polygon drawn in the 'foreach' loop would hide the previously drawn polygons.
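To make the pseudo-code concrete, here is a hedged C# sketch that reuses the PathToPolygon and PolygonToPointFArray helpers from the question and the same Clipper calls it already uses. The clear color and the final fill color are placeholders, and the offsets are expected largest-first so the smaller fills stay visible:
private void CreateImageWithOffsets(Graphics g, GraphicsPath gp, List<int> offsets, List<Color> colors)
{
    const float scale = 100f;
    // convert a flattened copy of the original path to Clipper polygons once,
    // so the original GraphicsPath is left untouched
    GraphicsPath flattened = (GraphicsPath)gp.Clone();
    flattened.Flatten();
    ClipperPolygons polyList = new ClipperPolygons();
    PathToPolygon(flattened, polyList, scale);
    // set up the offsetter once; Execute can then be run per offset
    ClipperOffset offsetter = new ClipperOffset();
    offsetter.AddPaths(polyList, JoinType.jtMiter, EndType.etClosedPolygon);
    g.Clear(Color.White); // placeholder background color
    for (int i = 0; i < offsets.Count && i < colors.Count; i++)
    {
        ClipperPolygons solution = new ClipperPolygons();
        offsetter.Execute(ref solution, offsets[i] * scale);
        using (GraphicsPath offsetPath = new GraphicsPath())
        using (SolidBrush brush = new SolidBrush(colors[i]))
        {
            // scale the offset polygons back down and fill them
            foreach (ClipperPolygon poly in solution)
                offsetPath.AddPolygon(PolygonToPointFArray(poly, scale));
            g.FillPath(brush, offsetPath);
        }
    }
    // finally fill the original, un-offset path on top
    using (SolidBrush brush = new SolidBrush(Color.Black)) // placeholder color
        g.FillPath(brush, gp);
}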