[Please NOTE: This article is OBSOLETE. It has been re-written and completed in this newer posts: part 1 and part 2]
Putting all the info we need in the Mesh class
Very much like we did in the Custom Content Processing post, we are going to use the “Tag” property of a mesh to store what we want, which in this case is something like this:
/// <summary>
/// Per-mesh geometry bundle stored in a mesh's Tag property so the runtime
/// can access vertices, indices and per-triangle normals (e.g. for collision
/// detection) without reading the vertex buffer back from the GPU.
/// </summary>
public class MeshData
{
    public VertexPositionNormalTexture[] Vertices;
    public int[] Indices;
    public Vector3[] FaceNormals;

    /// <summary>Wraps the given arrays (no copies are made).</summary>
    /// <param name="vertices">Vertex position / normal / texture-coordinate data.</param>
    /// <param name="indices">Triangle-list indices into <paramref name="vertices"/>.</param>
    /// <param name="faceNormals">One normal per triangle (Indices.Length / 3 entries).</param>
    public MeshData(VertexPositionNormalTexture[] vertices,
        int[] indices,
        Vector3[] faceNormals)
    {
        this.Vertices = vertices;
        this.Indices = indices;
        this.FaceNormals = faceNormals;
    }
}
When Visual Studio passes every model through our ContentProcessor, it will write the model's data to an XNB file. When it finds a MeshData object, it will search for a writer that is able to serialize it, so we have to write our custom ContentTypeWriter for the MeshData class:
/// <summary>
/// Serializes a MeshData instance into the XNB file. The layout — three
/// length-prefixed arrays (vertices, indices, face normals) — must be
/// mirrored exactly by ModelVertexDataReader at runtime.
/// </summary>
[ContentTypeWriter]
public class ModelVertexDataWriter : ContentTypeWriter<MeshData>
{
    protected override void Write(
        ContentWriter output, MeshData value)
    {
        // Vertices: count, then position / normal / texcoord per vertex.
        // (Redundant "(int)" cast removed: Array.Length is already an int.)
        output.Write(value.Vertices.Length);
        for (int x = 0; x < value.Vertices.Length; x++)
        {
            output.Write(value.Vertices[x].Position);
            output.Write(value.Vertices[x].Normal);
            output.Write(value.Vertices[x].TextureCoordinate);
        }

        // Indices: count, then each index.
        output.Write(value.Indices.Length);
        for (int x = 0; x < value.Indices.Length; x++)
            output.Write(value.Indices[x]);

        // Face normals: count, then one Vector3 per triangle.
        output.Write(value.FaceNormals.Length);
        for (int x = 0; x < value.FaceNormals.Length; x++)
            output.Write(value.FaceNormals[x]);
    }

    /// <summary>Runtime type this content deserializes into.</summary>
    public override string GetRuntimeType(
        TargetPlatform targetPlatform)
    {
        return typeof(MeshData).AssemblyQualifiedName;
    }

    /// <summary>Assembly-qualified name of the runtime ContentTypeReader.</summary>
    public override string GetRuntimeReader(
        TargetPlatform targetPlatform)
    {
        return "ContentProcessors.ModelVertexDataReader, ContentProcessors, Version=1.0.0.0, Culture=neutral";
    }
}
In a similar way, when the ContentPipeline tries to read back the XNB file, it will search for a deserializer for the type MeshData, so we have to write our own ContentTypeReader:
/// <summary>
/// Deserializes a MeshData from an XNB stream, reading back the three
/// length-prefixed arrays (vertices, indices, face normals) in exactly
/// the order ModelVertexDataWriter wrote them.
/// </summary>
public class ModelVertexDataReader : ContentTypeReader<MeshData>
{
    protected override MeshData Read(
        ContentReader input, MeshData existingInstance)
    {
        // Vertex array: count, then position / normal / texcoord per vertex.
        int vertexCount = input.ReadInt32();
        VertexPositionNormalTexture[] vertices =
            new VertexPositionNormalTexture[vertexCount];
        for (int n = 0; n < vertexCount; n++)
        {
            vertices[n].Position = input.ReadVector3();
            vertices[n].Normal = input.ReadVector3();
            vertices[n].TextureCoordinate = input.ReadVector2();
        }

        // Index array.
        int indexCount = input.ReadInt32();
        int[] indices = new int[indexCount];
        for (int n = 0; n < indexCount; n++)
            indices[n] = input.ReadInt32();

        // One normal per triangle.
        int faceCount = input.ReadInt32();
        Vector3[] faceNormals = new Vector3[faceCount];
        for (int n = 0; n < faceCount; n++)
            faceNormals[n] = input.ReadVector3();

        return new MeshData(vertices, indices, faceNormals);
    }
}
Finally, here is our Custom Content Processor, which fills in a MeshData object for each mesh the model contains:
Note: some parts taken from ZiggyWare:
http://www.ziggyware.com/readarticle.php?article_id=74
/// <summary>
/// Custom model processor that, after the standard processing, stores in each
/// mesh's Tag a MeshData holding its vertices (position / normal / first
/// texture coordinate), its index buffer and one normal per triangle, for
/// later runtime use (e.g. collision detection).
/// Partly based on http://www.ziggyware.com/readarticle.php?article_id=74
/// </summary>
[ContentProcessor(DisplayName = "Custom Mesh Processor")]
public class PositionNormalTexture : ModelProcessor
{
    public override ModelContent Process(
        NodeContent input, ContentProcessorContext context)
    {
        ModelContent model = base.Process(input, context);
        foreach (ModelMeshContent mesh in model.Meshes)
        {
            VertexPositionNormalTexture[] vb = ExtractVertices(mesh);

            int[] ib = new int[mesh.IndexBuffer.Count];
            mesh.IndexBuffer.CopyTo(ib, 0);

            Vector3[] normals = ComputeFaceNormals(mesh, vb);

            mesh.Tag = new MeshData(vb, ib, normals);
        }
        return model;
    }

    /// <summary>
    /// Reads position, normal and first texture coordinate of every vertex
    /// out of the mesh's raw vertex buffer.
    /// BUGFIXES vs. the original version: the stream and reader are disposed
    /// even on error ('using'); secondary texture-coordinate sets are skipped
    /// in the stream (the original 'continue'd without consuming their bytes,
    /// misaligning every following read).
    /// </summary>
    private static VertexPositionNormalTexture[] ExtractVertices(ModelMeshContent mesh)
    {
        using (MemoryStream ms = new MemoryStream(mesh.VertexBuffer.VertexData))
        using (BinaryReader reader = new BinaryReader(ms))
        {
            // NOTE(review): assumes every part of this mesh shares the vertex
            // declaration of MeshParts[0] — confirm for your content.
            VertexElement[] elems = mesh.MeshParts[0].GetVertexDeclaration();
            int num = mesh.VertexBuffer.VertexData.Length /
                VertexDeclaration.GetVertexStrideSize(elems, 0);
            VertexPositionNormalTexture[] vb = new VertexPositionNormalTexture[num];
            for (int i = 0; i < num; i++)
            {
                foreach (VertexElement e in elems)
                {
                    switch (e.VertexElementUsage)
                    {
                        case VertexElementUsage.Position:
                            vb[i].Position.X = reader.ReadSingle();
                            vb[i].Position.Y = reader.ReadSingle();
                            vb[i].Position.Z = reader.ReadSingle();
                            break;
                        case VertexElementUsage.Normal:
                            vb[i].Normal.X = reader.ReadSingle();
                            vb[i].Normal.Y = reader.ReadSingle();
                            vb[i].Normal.Z = reader.ReadSingle();
                            break;
                        case VertexElementUsage.TextureCoordinate:
                            if (e.UsageIndex != 0)
                            {
                                // Secondary UV set: not stored, but its bytes
                                // must still be consumed to stay aligned.
                                SkipElement(reader, e.VertexElementFormat);
                                continue;
                            }
                            vb[i].TextureCoordinate.X = reader.ReadSingle();
                            vb[i].TextureCoordinate.Y = reader.ReadSingle();
                            break;
                        default:
                            // Element we do not keep (color, tangent, ...):
                            // skip its bytes to keep the reader aligned.
                            SkipElement(reader, e.VertexElementFormat);
                            break;
                    }
                }
            }
            return vb;
        }
    }

    /// <summary>Advances the reader past one vertex element of the given format.</summary>
    private static void SkipElement(BinaryReader reader, VertexElementFormat format)
    {
        int size;
        switch (format)
        {
            case VertexElementFormat.Single:
            case VertexElementFormat.Color:
            case VertexElementFormat.Short2:
                size = 4;
                break;
            case VertexElementFormat.Vector2:
            case VertexElementFormat.Short4:
                size = 8;
                break;
            case VertexElementFormat.Vector3:
                size = 12;
                break;
            case VertexElementFormat.Vector4:
                size = 16;
                break;
            default:
                // Unknown format: nothing skipped (matches original behavior).
                size = 0;
                break;
        }
        if (size > 0)
            reader.BaseStream.Seek(size, SeekOrigin.Current);
    }

    /// <summary>
    /// Computes one unit normal per triangle of the mesh's index buffer.
    /// BUGFIX: Normalize() on a zero-length cross product yields NaN; a
    /// degenerate (zero-area) triangle now gets a zero normal instead.
    /// </summary>
    private static Vector3[] ComputeFaceNormals(
        ModelMeshContent mesh, VertexPositionNormalTexture[] vb)
    {
        Vector3[] normals = new Vector3[mesh.IndexBuffer.Count / 3];
        for (int i = 0, face = 0; i < mesh.IndexBuffer.Count; i += 3, face++)
        {
            Vector3 v0 = vb[mesh.IndexBuffer[i]].Position;
            Vector3 v1 = vb[mesh.IndexBuffer[i + 1]].Position;
            Vector3 v2 = vb[mesh.IndexBuffer[i + 2]].Position;
            Vector3 normal = Vector3.Cross(v1 - v0, v2 - v0);
            if (normal.LengthSquared() > 0f)
                normal.Normalize();
            normals[face] = normal;
        }
        return normals;
    }
}
In the next chapter we will focus on the Collision Detection implementation using all this info. Cheers!