- //using UnityEngine;
- //using OpenCvSharp;
- //using System.Threading.Tasks;
-
-
- //public class CannyTest : MonoBehaviour
- //{
-
- // // Video parameters
- // public MeshRenderer CameraRenderer;
- // public MeshRenderer ProcessedImageRenderer;
- // public Camera textureCamera;
-
- // // Video size
- // private const int imWidth = 1280; //TODO: Set width and height based on agent observation size
- // private const int imHeight = 720;
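- // // NOTE: these dimensions are assumed to match the camera's target RenderTexture resolution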
- // private int imFrameRate;
-
- // // OpenCVSharp parameters
- // private Mat videoSourceImage;
- // private Mat cannyImage;
- // private Texture2D processedTexture;
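- // private Texture2D readbackTexture; // added (assumption): CPU-readable copy of the camera RenderTexture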
- // private Vec3b[] videoSourceImageData;
- // private byte[] cannyImageData;
-
- // // Frame rate parameter
- // private int updateFrameCount = 0;
- // private int textureCount = 0;
- // private int displayCount = 0;
-
- // void Start()
- // {
- // // assign the camera texture to the meshrenderer
- // CameraRenderer.material.mainTexture = textureCamera.targetTexture;
-
- // // initialize video / image with given size
- // videoSourceImage = new Mat(imHeight, imWidth, MatType.CV_8UC3);
- // videoSourceImageData = new Vec3b[imHeight * imWidth];
- // cannyImage = new Mat(imHeight, imWidth, MatType.CV_8UC1);
- // cannyImageData = new byte[imHeight * imWidth];
-
- // // create processed video texture as Texture2D object
- // processedTexture = new Texture2D(imWidth, imHeight, TextureFormat.RGBA32, true, true);
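- // // assumption: CPU-readable buffer used by TextureToMat() to read back the camera's RenderTexture
- // readbackTexture = new Texture2D(imWidth, imHeight, TextureFormat.RGBA32, false);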
-
- // // assign the processedTexture to the meshrenderer for display
- // ProcessedImageRenderer.material.mainTexture = processedTexture;
- // }
-
-
-
- // void Update()
- // {
-
- // updateFrameCount++;
-
-
-
- // // NOTE: didUpdateThisFrame is a WebCamTexture member; a camera's RenderTexture has no such flag,
- // // so this guard simply checks that the camera and its target texture exist (see "Can't find camera!" below)
- // if (textureCamera != null && textureCamera.targetTexture != null)
- // {
-
- // textureCount++;
-
- // // convert texture of original video to OpenCVSharp Mat object
- // TextureToMat();
- // // update the opencv window of source video
- // UpdateWindow(videoSourceImage);
- // // create the canny edge image out of source image
- // ProcessImage(videoSourceImage);
- // // convert the OpenCVSharp Mat of canny image to Texture2D
- // // the texture will be displayed automatically
- // MatToTexture();
-
- // }
- // else
- // {
- // Debug.Log("Can't find camera!");
- // }
-
-
- // // output frame rate information
- // if (updateFrameCount % 30 == 0)
- // {
- // Debug.Log("Frame count: " + updateFrameCount + ", Texture count: " + textureCount + ", Display count: " + displayCount);
- // }
-
-
- // }
-
-
- // // Read the camera image back from the GPU and convert it to an OpenCVSharp Mat object
- // void TextureToMat()
- //{
- // // Color32 array : r, g, b, a
- // // NOTE: the original line read from an undeclared _webcamTexture; this class renders the
- // // camera into a RenderTexture, so read it back through readbackTexture instead
- // RenderTexture.active = textureCamera.targetTexture;
- // readbackTexture.ReadPixels(new Rect(0, 0, imWidth, imHeight), 0, 0);
- // readbackTexture.Apply();
- // RenderTexture.active = null;
- // Color32[] c = readbackTexture.GetPixels32();
-
- // // Parallel for loop
- // // convert Color32 object to Vec3b object
- // // Vec3b is the representation of pixel for Mat
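- // // OpenCV stores pixels in BGR order, hence the swapped channel assignment below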
- // Parallel.For(0, imHeight, i =>
- // {
- // for (var j = 0; j < imWidth; j++)
- // {
- // var col = c[j + i * imWidth];
- // var vec3 = new Vec3b
- // {
- // Item0 = col.b,
- // Item1 = col.g,
- // Item2 = col.r
- // };
- // // set pixel to an array
- // videoSourceImageData[j + i * imWidth] = vec3;
- // }
- // });
- // // assign the Vec3b array to Mat
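- // // NOTE: the (row, col, array) overloads of SetArray/GetArray used here match older OpenCvSharp
- // // releases; OpenCvSharp4 may expect SetArray(videoSourceImageData) / GetArray(out ...) instead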
- // videoSourceImage.SetArray(0, 0, videoSourceImageData);
- //}
-
-
-
- //// Convert OpenCVSharp Mat object to Unity Texture2D object
- //void MatToTexture()
- //{
- // // cannyImageData is byte array, because canny image is grayscale
- // cannyImage.GetArray(0, 0, cannyImageData);
- // // create Color32 array that can be assigned to Texture2D directly
- // Color32[] c = new Color32[imHeight * imWidth];
-
- // // parallel for loop
- // Parallel.For(0, imHeight, i =>
- // {
- // for (var j = 0; j < imWidth; j++)
- // {
- // byte vec = cannyImageData[j + i * imWidth];
- // var color32 = new Color32
- // {
- // r = vec,
- // g = vec,
- // b = vec,
- // a = 255 // fully opaque; the original value 0 would be transparent on alpha-blended materials
- // };
- // c[j + i * imWidth] = color32;
- // }
- // });
-
- // processedTexture.SetPixels32(c);
- // // Apply() uploads the modified pixel data to the GPU so the change becomes visible
- // processedTexture.Apply();
- //}
-
-
-
- //// Simple example of Canny edge detection
- //void ProcessImage(Mat _image)
- //{
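- // // NOTE: Cv2.Flip modifies the shared Mat in place; UpdateWindow() flips the same Mat too,
- // // so the call order in Update() matters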
- // Cv2.Flip(_image, _image, FlipMode.X);
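- // // NOTE: equal thresholds disable the hysteresis step; values like (100, 200) are more typical for Canny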
- // Cv2.Canny(_image, cannyImage, 100, 100);
- //}
-
-
- //// Display the original video in an OpenCV window
- //void UpdateWindow(Mat _image)
- //{
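- // // NOTE: Cv2.ImShow opens a native HighGUI window; this generally works only in the Editor or desktop builds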
- // Cv2.Flip(_image, _image, FlipMode.X);
- // Cv2.ImShow("Copy video", _image);
- // displayCount++;
- //}
-
- //// Close any OpenCV windows when this component is destroyed
- //public void OnDestroy()
- //{
- // Cv2.DestroyAllWindows();
-
- //}
-
-
- //}