diff --git a/.gitignore b/.gitignore index 0210746..05fc76d 100644 --- a/.gitignore +++ b/.gitignore @@ -34,3 +34,4 @@ sysinfo.txt # Builds *.apk *.unitypackage +envs/ diff --git a/Assets/Brains/CozmoLearning.asset b/Assets/Brains/CozmoLearning.asset index b50a161..5b0060c 100644 --- a/Assets/Brains/CozmoLearning.asset +++ b/Assets/Brains/CozmoLearning.asset @@ -15,13 +15,14 @@ MonoBehaviour: brainParameters: vectorObservationSize: 0 numStackedVectorObservations: 1 - vectorActionSize: 01000000 + vectorActionSize: 0300000003000000 cameraResolutions: - width: 84 height: 84 blackAndWhite: 1 vectorActionDescriptions: - - + - forward + - rotate vectorActionSpaceType: 0 model: {fileID: 0} inferenceDevice: 0 diff --git a/Assets/Scenes/CozmoTraining.unity b/Assets/Scenes/CozmoTraining.unity index 17a4150..3f45c43 100644 --- a/Assets/Scenes/CozmoTraining.unity +++ b/Assets/Scenes/CozmoTraining.unity @@ -130,7 +130,7 @@ GameObject: m_Icon: {fileID: 0} m_NavMeshLayer: 0 m_StaticEditorFlags: 0 - m_IsActive: 0 + m_IsActive: 1 --- !u!65 &99578046 BoxCollider: m_ObjectHideFlags: 0 @@ -284,7 +284,7 @@ GameObject: - component: {fileID: 589453882} m_Layer: 0 m_Name: Camera - m_TagString: Untagged + m_TagString: MainCamera m_Icon: {fileID: 0} m_NavMeshLayer: 0 m_StaticEditorFlags: 0 @@ -377,7 +377,8 @@ MonoBehaviour: broadcastHub: broadcastingBrains: - {fileID: 11400000, guid: 94e62c225e4fe8148a7543c5fd1acfd4, type: 2} - _brainsToControl: [] + _brainsToControl: + - {fileID: 11400000, guid: 94e62c225e4fe8148a7543c5fd1acfd4, type: 2} maxSteps: 0 trainingConfiguration: width: 80 @@ -471,7 +472,7 @@ PrefabInstance: - target: {fileID: 7570006595835424293, guid: 0f97dac5215d69a4795763340d82925d, type: 3} propertyPath: m_LocalPosition.x - value: 0.5 + value: 0.332 objectReference: {fileID: 0} - target: {fileID: 7570006595835424293, guid: 0f97dac5215d69a4795763340d82925d, type: 3} @@ -481,7 +482,7 @@ PrefabInstance: - target: {fileID: 7570006595835424293, guid: 0f97dac5215d69a4795763340d82925d, type: 3} propertyPath: m_LocalPosition.z - value: 0.5 + value: -0.199 objectReference: {fileID: 0} - target: {fileID: 7570006595835424293, guid: 0f97dac5215d69a4795763340d82925d, type: 3} @@ -557,3 +558,46 @@ MonoBehaviour: resetOnDone: 1 onDemandDecision: 0 numberOfActionsBetweenDecisions: 1 +--- !u!65 &7570006596986120126 +BoxCollider: + m_ObjectHideFlags: 0 + m_CorrespondingSourceObject: {fileID: 0} + m_PrefabInstance: {fileID: 0} + m_PrefabAsset: {fileID: 0} + m_GameObject: {fileID: 7570006596986120123} + m_Material: {fileID: 0} + m_IsTrigger: 0 + m_Enabled: 1 + serializedVersion: 2 + m_Size: {x: 0.05, y: 0.07, z: 0.08} + m_Center: {x: 0, y: 0.035, z: 0} +--- !u!54 &7570006596986120127 +Rigidbody: + m_ObjectHideFlags: 0 + m_CorrespondingSourceObject: {fileID: 0} + m_PrefabInstance: {fileID: 0} + m_PrefabAsset: {fileID: 0} + m_GameObject: {fileID: 7570006596986120123} + serializedVersion: 2 + m_Mass: 1 + m_Drag: 0 + m_AngularDrag: 0.05 + m_UseGravity: 1 + m_IsKinematic: 0 + m_Interpolate: 0 + m_Constraints: 84 + m_CollisionDetection: 0 +--- !u!114 &7570006596986120128 +MonoBehaviour: + m_ObjectHideFlags: 0 + m_CorrespondingSourceObject: {fileID: 0} + m_PrefabInstance: {fileID: 0} + m_PrefabAsset: {fileID: 0} + m_GameObject: {fileID: 7570006596986120123} + m_Enabled: 1 + m_EditorHideFlags: 0 + m_Script: {fileID: 11500000, guid: d2056048a78a0cc4c97e7899ba1c0e31, type: 3} + m_Name: + m_EditorClassIdentifier: + m_Speed: 0.1 + m_TurnSpeed: 50 diff --git a/Assets/Scripts/Movemnet.meta b/Assets/Scripts/Movemnet.meta new file mode 
100644
index 0000000..8bb21aa
--- /dev/null
+++ b/Assets/Scripts/Movemnet.meta
@@ -0,0 +1,8 @@
+fileFormatVersion: 2
+guid: 8400d2753da4d924bbdb3537db7363ac
+folderAsset: yes
+DefaultImporter:
+  externalObjects: {}
+  userData:
+  assetBundleName:
+  assetBundleVariant:
diff --git a/Assets/Scripts/Movemnet/CozmoMovement.cs b/Assets/Scripts/Movemnet/CozmoMovement.cs
new file mode 100644
index 0000000..136c6ae
--- /dev/null
+++ b/Assets/Scripts/Movemnet/CozmoMovement.cs
@@ -0,0 +1,90 @@
+using System.Collections;
+using System.Collections.Generic;
+using UnityEngine;
+
+public class CozmoMovement : MonoBehaviour
+{
+    public float m_Speed = 12f;           // How fast the tank moves forward and back.
+    public float m_TurnSpeed = 180f;      // How fast the tank turns in degrees per second.
+
+    private string m_MovementAxisName;    // The name of the input axis for moving forward and back.
+    private string m_TurnAxisName;        // The name of the input axis for turning.
+    private Rigidbody m_Rigidbody;        // Reference used to move the tank.
+    private float m_MovementInputValue;   // The current value of the movement input.
+    private float m_TurnInputValue;       // The current value of the turn input.
+
+    private void Awake()
+    {
+        m_Rigidbody = GetComponent<Rigidbody>();
+    }
+
+    private void OnEnable()
+    {
+        // When the tank is turned on, make sure it's not kinematic.
+        m_Rigidbody.isKinematic = false;
+
+        // Also reset the input values.
+        m_MovementInputValue = 0f;
+        m_TurnInputValue = 0f;
+    }
+
+    private void OnDisable()
+    {
+        // When the tank is turned off, set it to kinematic so it stops moving.
+        m_Rigidbody.isKinematic = true;
+    }
+
+    private void Start()
+    {
+        // The axes names are based on player number.
+        m_MovementAxisName = "Vertical";
+        m_TurnAxisName = "Horizontal";
+    }
+
+    private void Update()
+    {
+        // Store the value of both input axes.
+        m_MovementInputValue = Input.GetAxis(m_MovementAxisName);
+        m_TurnInputValue = Input.GetAxis(m_TurnAxisName);
+    }
+
+    private void FixedUpdate()
+    {
+        // Adjust the rigidbody's position and orientation in FixedUpdate.
+        Move();
+        Turn();
+    }
+
+    private void Move()
+    {
+        // Create a vector in the direction the tank is facing with a magnitude based on the input, speed and the time between frames.
+        Vector3 movement = transform.forward * m_MovementInputValue * m_Speed * Time.deltaTime;
+
+        // Apply this movement to the rigidbody's position.
+        m_Rigidbody.MovePosition(m_Rigidbody.position + movement);
+    }
+
+    private void Turn()
+    {
+        // Determine the number of degrees to be turned based on the input, speed and time between frames.
+        float turn = m_TurnInputValue * m_TurnSpeed * Time.deltaTime;
+
+        // Make this into a rotation in the y axis.
+        Quaternion turnRotation = Quaternion.Euler(0f, turn, 0f);
+
+        // Apply this rotation to the rigidbody's rotation.
+ m_Rigidbody.MoveRotation(m_Rigidbody.rotation * turnRotation); + } + +} diff --git a/Assets/Scripts/Movemnet/CozmoMovement.cs.meta b/Assets/Scripts/Movemnet/CozmoMovement.cs.meta new file mode 100644 index 0000000..e32bf7f --- /dev/null +++ b/Assets/Scripts/Movemnet/CozmoMovement.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: d2056048a78a0cc4c97e7899ba1c0e31 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/Notebooks/.ipynb_checkpoints/getting-started-checkpoint.ipynb b/Notebooks/.ipynb_checkpoints/getting-started-checkpoint.ipynb new file mode 100644 index 0000000..906f552 --- /dev/null +++ b/Notebooks/.ipynb_checkpoints/getting-started-checkpoint.ipynb @@ -0,0 +1,246 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Unity ML-Agents Toolkit\n", + "## Environment Basics\n", + "This notebook contains a walkthrough of the basic functions of the Python API for the Unity ML-Agents toolkit. For instructions on building a Unity environment, see [here](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Getting-Started-with-Balance-Ball.md)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1. Set environment parameters\n", + "\n", + "Be sure to set `env_name` to the name of the Unity environment file you want to launch. Ensure that the environment build is in `../envs`." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "env_name = \"../envs/Bachelorarbeit-Cozmo\" # Name of the Unity environment binary to launch\n", + "train_mode = True # Whether to run the environment in training or inference mode" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. Load dependencies\n", + "\n", + "The following loads the necessary dependencies and checks the Python version (at runtime). ML-Agents Toolkit (v0.3 onwards) requires Python 3." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Python version:\n", + "3.6.7 |Anaconda, Inc.| (default, Oct 28 2018, 19:44:12) [MSC v.1915 64 bit (AMD64)]\n" + ] + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import sys\n", + "\n", + "from mlagents.envs import UnityEnvironment\n", + "\n", + "%matplotlib inline\n", + "\n", + "print(\"Python version:\")\n", + "print(sys.version)\n", + "\n", + "# check Python version\n", + "if (sys.version_info[0] < 3):\n", + " raise Exception(\"ERROR: ML-Agents Toolkit (v0.3 onwards) requires Python 3\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. Start the environment\n", + "`UnityEnvironment` launches and begins communication with the environment when instantiated.\n", + "\n", + "Environments contain _brains_ which are responsible for deciding the actions of their associated _agents_. Here we check for the first brain available, and set it as the default brain we will be controlling from Python." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env = UnityEnvironment(file_name=env_name)\n", + "\n", + "# Set the default brain to work with\n", + "default_brain = env.brain_names[0]\n", + "brain = env.brains[default_brain]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4. Examine the observation and state spaces\n", + "We can reset the environment to be provided with an initial set of observations and states for all the agents within the environment. In ML-Agents, _states_ refer to a vector of variables corresponding to relevant aspects of the environment for an agent. Likewise, _observations_ refer to a set of relevant pixel-wise visuals for an agent." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "ename": "NameError", + "evalue": "name 'env' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)", + "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;31m# Reset the environment\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0menv_info\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0menv\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mreset\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtrain_mode\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtrain_mode\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mdefault_brain\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 3\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[1;31m# Examine the state space for the default brain\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Agent state looks like: \\n{}\"\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0menv_info\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvector_observations\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;31mNameError\u001b[0m: name 'env' is not defined" + ] + } + ], + "source": [ + "# Reset the environment\n", + "env_info = env.reset(train_mode=train_mode)[default_brain]\n", + "\n", + "# Examine the state space for the default brain\n", + "print(\"Agent state looks like: \\n{}\".format(env_info.vector_observations[0]))\n", + "\n", + "# Examine the observation space for the default brain\n", + "for observation in env_info.visual_observations:\n", + " print(\"Agent observations look like:\")\n", + " if observation.shape[3] == 3:\n", + " plt.imshow(observation[0,:,:,:])\n", + " else:\n", + " plt.imshow(observation[0,:,:,0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 5. Take random actions in the environment\n", + "Once we restart an environment, we can step the environment forward and provide actions to all of the agents within the environment. Here we simply choose random actions based on the `action_space_type` of the default brain. 
\n", + "\n", + "Once this cell is executed, 10 messages will be printed that detail how much reward will be accumulated for the next 10 episodes. The Unity environment will then pause, waiting for further signals telling it what to do next. Thus, not seeing any animation is expected when running this cell." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "ename": "NameError", + "evalue": "name 'env' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)", + "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mepisode\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m10\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0menv_info\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0menv\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mreset\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtrain_mode\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtrain_mode\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mdefault_brain\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 3\u001b[0m \u001b[0mdone\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mFalse\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[0mepisode_rewards\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[1;32mwhile\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mdone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;31mNameError\u001b[0m: name 'env' is not defined" + ] + } + ], + "source": [ + "for episode in range(10):\n", + " env_info = env.reset(train_mode=train_mode)[default_brain]\n", + " done = False\n", + " episode_rewards = 0\n", + " while not done:\n", + " action_size = brain.vector_action_space_size\n", + " if brain.vector_action_space_type == 'continuous':\n", + " env_info = env.step(np.random.randn(len(env_info.agents), \n", + " action_size[0]))[default_brain]\n", + " else:\n", + " action = np.column_stack([np.random.randint(0, action_size[i], size=(len(env_info.agents))) for i in range(len(action_size))])\n", + " env_info = env.step(action)[default_brain]\n", + " episode_rewards += env_info.rewards[0]\n", + " done = env_info.local_done[0]\n", + " print(\"Total reward this episode: {}\".format(episode_rewards))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 6. Close the environment when finished\n", + "When we are finished using an environment, we can close it with the function below." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "ename": "NameError", + "evalue": "name 'env' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)", + "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0menv\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mclose\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[1;31mNameError\u001b[0m: name 'env' is not defined" + ] + } + ], + "source": [ + "env.close()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "anaconda-cloud": {}, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.7" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} diff --git a/Notebooks/UnitySDK.log b/Notebooks/UnitySDK.log new file mode 100644 index 0000000..2bcfe8e --- /dev/null +++ b/Notebooks/UnitySDK.log @@ -0,0 +1,2 @@ +17.04.2019 15:13:21 + diff --git a/Notebooks/getting-started.ipynb b/Notebooks/getting-started.ipynb new file mode 100644 index 0000000..983435c --- /dev/null +++ b/Notebooks/getting-started.ipynb @@ -0,0 +1,270 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Unity ML-Agents Toolkit\n", + "## Environment Basics\n", + "This notebook contains a walkthrough of the basic functions of the Python API for the Unity ML-Agents toolkit. For instructions on building a Unity environment, see [here](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Getting-Started-with-Balance-Ball.md)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1. Set environment parameters\n", + "\n", + "Be sure to set `env_name` to the name of the Unity environment file you want to launch. Ensure that the environment build is in `../envs`." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "env_name = \"../envs/Bachelorarbeit-Cozmo\" # Name of the Unity environment binary to launch\n", + "train_mode = True # Whether to run the environment in training or inference mode" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. Load dependencies\n", + "\n", + "The following loads the necessary dependencies and checks the Python version (at runtime). ML-Agents Toolkit (v0.3 onwards) requires Python 3." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Python version:\n", + "3.6.7 |Anaconda, Inc.| (default, Oct 28 2018, 19:44:12) [MSC v.1915 64 bit (AMD64)]\n" + ] + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import sys\n", + "\n", + "from mlagents.envs import UnityEnvironment\n", + "\n", + "%matplotlib inline\n", + "\n", + "print(\"Python version:\")\n", + "print(sys.version)\n", + "\n", + "# check Python version\n", + "if (sys.version_info[0] < 3):\n", + " raise Exception(\"ERROR: ML-Agents Toolkit (v0.3 onwards) requires Python 3\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. Start the environment\n", + "`UnityEnvironment` launches and begins communication with the environment when instantiated.\n", + "\n", + "Environments contain _brains_ which are responsible for deciding the actions of their associated _agents_. Here we check for the first brain available, and set it as the default brain we will be controlling from Python." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:mlagents.envs:\n", + "'Academy' started successfully!\n", + "Unity Academy name: Academy\n", + " Number of Brains: 1\n", + " Number of Training Brains : 1\n", + " Reset Parameters :\n", + "\t\t\n", + "Unity brain name: CozmoLearning\n", + " Number of Visual Observations (per agent): 1\n", + " Vector Observation space size (per agent): 0\n", + " Number of stacked Vector Observation: 1\n", + " Vector Action space type: discrete\n", + " Vector Action space size (per agent): [3, 3, 3]\n", + " Vector Action descriptions: forward, left-right, rotate\n" + ] + } + ], + "source": [ + "env = UnityEnvironment(file_name=env_name)\n", + "\n", + "# Set the default brain to work with\n", + "default_brain = env.brain_names[0]\n", + "brain = env.brains[default_brain]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4. Examine the observation and state spaces\n", + "We can reset the environment to be provided with an initial set of observations and states for all the agents within the environment. In ML-Agents, _states_ refer to a vector of variables corresponding to relevant aspects of the environment for an agent. Likewise, _observations_ refer to a set of relevant pixel-wise visuals for an agent." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Agent state looks like: \n", + "[]\n", + "Agent observations look like:\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP4AAAD8CAYAAABXXhlaAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4xLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvDW2N/gAAD9dJREFUeJzt3VuMXdV9x/Hvb2ZMCFBk7ABybCdAhQioFYZaFEpVpRAXl0bQh6QCRVVUISFVaQtNpADtQxSpD0SqEvJQRUIQiirKJQQSZCGIZaBVpcpgME0A28EQLoMNdlwcKFfPzL8Pex28be+Zs86cy8xm/T6SdWavsy9rz+F39mU266+IwMzKMrbQHTCz0XPwzQrk4JsVyME3K5CDb1YgB9+sQA6+WYH6Cr6k9ZJ2SNop6fpBdcrMhkvzfYBH0jjwS2AdMAk8AVwZEc8NrntmNgwTfSx7HrAzIl4EkHQXcDkwa/CXLxuL1av72WSlp6+qET+Z2JbnIEfdz0B9Lj8ac22n2z4Mu48fbX+ODb3+2gF+87/TXX/Z/aRwJfBqbXoS+P25Fli9eoKfPfipPjZZmelh3ukRB396iOvuZb+7mR7wr6Vb36b7DP509Ld8x0yXfjT1cyZte7Z96LTPxNxXzt1+B+Mp0bNvZ6zrdv76spfn3EZHP9f4Tb074j8nSVdL2iJpy759g/xP18zmq58j/iSwuja9Cth1+EwRcTNwM8Bv/+6x8R/vrQDyvyXrphu+pw7EOAAfRv6uzMzj6NG07X6+4Tv9HoRefodzmU7rme2oOJa+17sdNReLsTnOiRd6Hzp9WzI2NdD1vhOvZ25//p4ATpd0qqSjgCuAB/pYn5mNyLyP+BExJelvgIeBceCHEfHsXMv8+sBx3LbrwkPamo6+sx2RI7XXv60PTFdHzqmZg99hudeDMct8Tdtvmncm8zq5vmzTevrtb65+l88l9XcDIXf58Yb56svOtZ6xLr+K+rJjXbYzl/qy9U0uGa/uBk2MHbz87ZwFdNv2bOsHeHtqa1a/+rrFHhEPAg/2sw4zGz0/uWdWoP7/qN6D999bwrPb0/3AzhlK7UxFnVPR+tlL05nOzMGTJk13Xo9sq9Y5+3rUZTtq+iNEw+nyrOuJOd4/ZL/n7keTrmea81lP7hl67VfQdPVwyGoyry6yr0Jq802pob2pbw3zzbq99AuJpnXP0o/GdTVsp34/N8bjiDbGGj6AhkNzNM2XtvP+e0saOpO1WjP7uBvpEX/8XbF8S/UV99HRtOHIVz/Sqn4HreH9sak45BVg/MODM6jh/YNH4iPbANR5uqX2/sEzh1pbw9091Z+M6cw7XV9P+nlmpnmZTvtUw6NAXR5GOvR31TBvt7aZI/f70A2kw0r9zpgaDndd2qL7nbUj2ybSoXHs4LEqxmvzpfaoL9t5v77t8SPXHQ37c8hRfLzTVp+vtnynvdY2M5GWmTjYOH3UWO399Lqk1reG9dT71vmrbdPZSOe9ve/mnTr5iG9WIAffrEAjPdVfsv99Trp/RzVxoHpiKaZqTy7NlPlI76Aem2/L/yA0TN1OdNvxzGEXtcsdTaQIL6leX9j/ft4qBt4pM1v0HHyzAo30VP/0s97mwYcfGeUmzeZl/WfPW+guzG764CVxTKe//nzwQZrOu1z2Ed+sQCM94pu1xUMvP5497/rPrB1iT7o44nGPzP95aOAdMbNFz8E3K5BP9c369NArW7Lmu2TlOcPrhDrP8+bN7iO+WYF8xDcbkYdfyxsd55JPr+l95dHb+M5dj/iSfihpj6Rnam3LJG2U9Hx6PaH3nprZQsk51f9XYP1hbdcDmyLidGBTmjazluh6qh8R/ynplMOaLwc+n36+HXgMuG6A/TIr1sO7np7z/XldChxmvjf3To6I3QDp9aS+e2JmIzP0u/r1Sjp79w2zwJSZ5ZrvXf03JK2IiN2SVgB7ZpuxXkln7dlH+38ZN+vTXJcC513ybtY65nvEfwD4avr5q8BP57keM1sAOX/OuxP4b+AMSZOSrgJuBNZJeh5Yl6bNrCVy7upfOctbFw+4L2Y2In5k16xADr5ZgRx8swI5+GYFcvDNCuTgmxXIwTcrkINvViAH36xADr5ZgRx8swI5+GYFcvDNCuTgmxXIwTcrkINvViAH36xAOUNvrZb0qKRtkp6VdE1qdzUds5bKOeJPAd+IiDOB84GvSToLV9Mxa62uwY+I3RHxVPr5bWAbsJKqms7tabbbgT8fVifNbLB6usZPpbTOATaTWU3HBTXMFp/s4Es6DvgxcG1EvJW7XETcHBFrI2LticvH59NHMxuwrOBLWkIV+jsi4r7U/EaqokO3ajpmtrjk3NUXcCuwLSK+W3vL1XTMWiqndt6FwF8Cv5DUKdr1D1TVc+5JlXVeAb48nC6a2aDlVNL5L0CzvO1qOmYt5Cf3zArk4JsVyME3K5CDb1YgB9+sQA6+WYEcfLMCOfhmBXLwzQrk4JsVyME3K5CDb1YgB9+sQA6+WYEcfLMCOfhmBXLwzQqUM+be0ZIel/Q/qZLOt1P7qZI2p0o6d0s6avjdNbNByDnifwBcFBFnA2uA9ZLOB74DfC9V0nkTuGp43TSzQcqppBMR8X9pckn6F8BFwL2p3ZV0zFokd1z98TTC7h5gI/ACsD8iptIsk1RltZqWdSUds0UmK/gRMR0Ra4BVwHnAmU2zzbKsK+mYLTI93dWPiP3AY1RVc5dK6gzPvQrYNdiumdmw5NzVP1HS0vTzJ4EvUFXMfRT4UprNlXTMWiSnks4K4HZJ41RfFPdExAZJzwF3SfonYCtVmS0za4GcSjo/pyqNfXj7i1TX+2bWMn5yz6xADr5ZgRx8swI5+GYFcvDNCuTgmxXIwTcrkINvViAH36xADr5ZgRx8swI5+GYFcvDNCuTgmxXIwTcrkINvViAH36xA2cFPQ2xvlbQhTbuSjllL9XLEv4ZqkM0OV9Ixa6ncghqrgD8DbknTwpV0zFor94h/E/BNYCZNL8eVdMxaK2dc/S8CeyLiyXpzw6yupGPWEjnj6l8IXCbpUuBo4HiqM4ClkibSUd+VdMxaJKda7g0RsSoiTgGuAB6JiK/gSjpmrdXP3/GvA74uaSfVNb8r6Zi1RM6p/kci4jGqopmupGPWYn5yz6xADr5ZgRx8swI5+GYFcvDNCuTgmxXIwTcrkINvViAH36xADr5ZgRx8swI5+GYFcvDNCuTgmxXIwTcrkINvVqCsgTgkvQS8DUwDUxGxVtIy4G7g
FOAl4C8i4s3hdNPMBqmXI/4fR8SaiFibpq8HNqWCGpvStJm1QD+n+pdTFdIAF9Qwa5Xc4AfwM0lPSro6tZ0cEbsB0utJw+igmQ1e7mCbF0bELkknARslbc/dQPqiuBrgMyt7GtvTzIYk64gfEbvS6x7gfqrRdd+QtAIgve6ZZVlX0jFbZHJKaB0r6bc6PwN/AjwDPEBVSANcUMOsVXLOvU8G7q8K5DIB/HtEPCTpCeAeSVcBrwBfHl43zWyQugY/Fc44u6F9H3DxMDplZsPlJ/fMCuTgmxXIwTcrkINvViAH36xADr5ZgRx8swI5+GYFcvDNCuTgmxXIwTcrkINvViAH36xADr5ZgRx8swI5+GYFcvDNCpQVfElLJd0rabukbZIukLRM0kZJz6fXE4bdWTMbjNwj/veBhyLic1TDcG3DlXTMWitnlN3jgT8CbgWIiA8jYj+upGPWWjlH/NOAvcBtkrZKuiUNs+1KOmYtlRP8CeBc4AcRcQ7wDj2c1ku6WtIWSVv27pueZzfNbJBygj8JTEbE5jR9L9UXgSvpmLVU1+BHxOvAq5LOSE0XA8/hSjpmrZVbxfJvgTskHQW8CPwV1ZeGK+mYtVBW8CPiaWBtw1uupGPWQn5yz6xADr5ZgRx8swI5+GYFcvDNCuTgmxXIwTcrkINvViAH36xADr5ZgRx8swI5+GYFcvDNCuTgmxXIwTcrkINvViAH36xAOePqnyHp6dq/tyRd60o6Zu2VM9jmjohYExFrgN8D3gXux5V0zFqr11P9i4EXIuJlXEnHrLV6Df4VwJ3pZ1fSMWup7OCnobUvA37UywZcScds8enliP+nwFMR8UaadiUds5bqJfhXcvA0H1xJx6y1soIv6RhgHXBfrflGYJ2k59N7Nw6+e2Y2DLmVdN4Flh/Wtg9X0jFrJT+5Z1YgB9+sQA6+WYEcfLMCOfhmBXLwzQrk4JsVyME3K5CDb1YgB9+sQA6+WYEcfLMCOfhmBXLwzQrk4JsVyME3K5CDb1ag3KG3/l7Ss5KekXSnpKMlnSppc6qkc3cahdfMWiCnhNZK4O+AtRHxO8A41fj63wG+lyrpvAlcNcyOmtng5J7qTwCflDQBHAPsBi4C7k3vu5KOWYvk1M57Dfhn4BWqwP8GeBLYHxFTabZJYOWwOmlmg5Vzqn8CVZ28U4FPA8dSFdc4XMyyvCvpmC0yOaf6XwB+FRF7I+IA1dj6fwAsTaf+AKuAXU0Lu5KO2eKTE/xXgPMlHSNJVGPpPwc8CnwpzeNKOmYtknONv5nqJt5TwC/SMjcD1wFfl7STqtjGrUPsp5kNUG4lnW8B3zqs+UXgvIH3yMyGzk/umRXIwTcrkINvViAH36xAimh87mY4G5P2Au8Avx7ZRofvU3h/FquP075A3v58NiJO7LaikQYfQNKWiFg70o0Okfdn8fo47QsMdn98qm9WIAffrEALEfybF2Cbw+T9Wbw+TvsCA9yfkV/jm9nC86m+WYFGGnxJ6yXtkLRT0vWj3Ha/JK2W9KikbWn8wWtS+zJJG9PYgxvT+AWtIWlc0lZJG9J0a8dSlLRU0r2StqfP6YI2fz7DHOtyZMGXNA78C9UgHmcBV0o6a1TbH4Ap4BsRcSZwPvC11P/rgU1p7MFNabpNrgG21abbPJbi94GHIuJzwNlU+9XKz2foY11GxEj+ARcAD9embwBuGNX2h7A/PwXWATuAFaltBbBjofvWwz6sogrDRcAGQFQPiEw0fWaL+R9wPPAr0n2rWnsrPx+qoexeBZZR/V+0G4BLBvX5jPJUv7MjHa0dp0/SKcA5wGbg5IjYDZBeT1q4nvXsJuCbwEyaXk57x1I8DdgL3JYuXW6RdCwt/XxiyGNdjjL4amhr3Z8UJB0H/Bi4NiLeWuj+zJekLwJ7IuLJenPDrG35jCaAc4EfRMQ5VI+Gt+K0vkm/Y112M8rgTwKra9OzjtO3WElaQhX6OyLivtT8hqQV6f0VwJ6F6l+PLgQuk/QScBfV6f5NZI6luAhNApNRjRgF1ahR59Lez6evsS67GWXwnwBOT3clj6K6UfHACLfflzTe4K3Atoj4bu2tB6jGHIQWjT0YETdExKqIOIXqs3gkIr5CS8dSjIjXgVclnZGaOmNDtvLzYdhjXY74hsWlwC+BF4B/XOgbKD32/Q+pTqt+Djyd/l1KdV28CXg+vS5b6L7OY98+D2xIP58GPA7sBH4EfGKh+9fDfqwBtqTP6CfACW3+fIBvA9uBZ4B/Az4xqM/HT+6ZFchP7pkVyME3K5CDb1YgB9+sQA6+WYEcfLMCOfhmBXLwzQr0/9k7AYHmvAINAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
<Figure size 432x288 with 1 Axes>
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "# Reset the environment\n", + "env_info = env.reset(train_mode=train_mode)[default_brain]\n", + "\n", + "# Examine the state space for the default brain\n", + "print(\"Agent state looks like: \\n{}\".format(env_info.vector_observations[0]))\n", + "\n", + "# Examine the observation space for the default brain\n", + "for observation in env_info.visual_observations:\n", + " print(\"Agent observations look like:\")\n", + " if observation.shape[3] == 3:\n", + " plt.imshow(observation[0,:,:,:])\n", + " else:\n", + " plt.imshow(observation[0,:,:,0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 5. Take random actions in the environment\n", + "Once we restart an environment, we can step the environment forward and provide actions to all of the agents within the environment. Here we simply choose random actions based on the `action_space_type` of the default brain. \n", + "\n", + "Once this cell is executed, 10 messages will be printed that detail how much reward will be accumulated for the next 10 episodes. The Unity environment will then pause, waiting for further signals telling it what to do next. Thus, not seeing any animation is expected when running this cell." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Total reward this episode: 0.0\n", + "Total reward this episode: 0.0\n", + "Total reward this episode: 0.0\n", + "Total reward this episode: 0.0\n", + "Total reward this episode: 0.0\n", + "Total reward this episode: 0.0\n", + "Total reward this episode: 0.0\n", + "Total reward this episode: 0.0\n", + "Total reward this episode: 0.0\n", + "Total reward this episode: 0.0\n" + ] + } + ], + "source": [ + "for episode in range(10):\n", + " env_info = env.reset(train_mode=train_mode)[default_brain]\n", + " done = False\n", + " episode_rewards = 0\n", + " while not done:\n", + " action_size = brain.vector_action_space_size\n", + " if brain.vector_action_space_type == 'continuous':\n", + " env_info = env.step(np.random.randn(len(env_info.agents), \n", + " action_size[0]))[default_brain]\n", + " else:\n", + " action = np.column_stack([np.random.randint(0, action_size[i], size=(len(env_info.agents))) for i in range(len(action_size))])\n", + " env_info = env.step(action)[default_brain]\n", + " episode_rewards += env_info.rewards[0]\n", + " done = env_info.local_done[0]\n", + " print(\"Total reward this episode: {}\".format(episode_rewards))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 6. Close the environment when finished\n", + "When we are finished using an environment, we can close it with the function below." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "env.close()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "anaconda-cloud": {}, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.7" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} diff --git a/ProjectSettings/EditorBuildSettings.asset b/ProjectSettings/EditorBuildSettings.asset index ea88784..7388a8a 100644 --- a/ProjectSettings/EditorBuildSettings.asset +++ b/ProjectSettings/EditorBuildSettings.asset @@ -5,7 +5,10 @@ EditorBuildSettings: m_ObjectHideFlags: 0 serializedVersion: 2 m_Scenes: + - enabled: 0 + path: + guid: 00000000000000000000000000000000 - enabled: 1 - path: Assets/Scenes/SampleScene.unity - guid: 99c9720ab356a0642a771bea13969a05 + path: Assets/Scenes/CozmoTraining.unity + guid: 2b0b419445af9d44b84ac124a2015b65 m_configObjects: {} diff --git a/ProjectSettings/GraphicsSettings.asset b/ProjectSettings/GraphicsSettings.asset index cd1c3d6..1a6b7d1 100644 --- a/ProjectSettings/GraphicsSettings.asset +++ b/ProjectSettings/GraphicsSettings.asset @@ -36,6 +36,8 @@ GraphicsSettings: - {fileID: 10753, guid: 0000000000000000f000000000000000, type: 0} - {fileID: 10770, guid: 0000000000000000f000000000000000, type: 0} - {fileID: 10783, guid: 0000000000000000f000000000000000, type: 0} + - {fileID: 16000, guid: 0000000000000000f000000000000000, type: 0} + - {fileID: 17000, guid: 0000000000000000f000000000000000, type: 0} m_PreloadedShaders: [] m_SpritesDefaultMaterial: {fileID: 10754, guid: 0000000000000000f000000000000000, type: 0} diff --git a/ProjectSettings/ProjectSettings.asset b/ProjectSettings/ProjectSettings.asset index 8428479..7951191 100644 --- a/ProjectSettings/ProjectSettings.asset +++ b/ProjectSettings/ProjectSettings.asset @@ -13,7 +13,7 @@ PlayerSettings: useOnDemandResources: 0 accelerometerFrequency: 60 companyName: DefaultCompany - productName: Bachelorarbeit + productName: Bachelorarbeit-Cozmo defaultCursor: {fileID: 0} cursorHotspot: {x: 0, y: 0} m_SplashScreenBackgroundColor: {r: 0.13725491, g: 0.12156863, b: 0.1254902, a: 1} diff --git a/ProjectSettings/QualitySettings.asset b/ProjectSettings/QualitySettings.asset index 0621bef..bd0e66a 100644 --- a/ProjectSettings/QualitySettings.asset +++ b/ProjectSettings/QualitySettings.asset @@ -29,9 +29,16 @@ QualitySettings: vSyncCount: 0 lodBias: 0.3 maximumLODLevel: 0 + streamingMipmapsActive: 0 + streamingMipmapsAddAllCameras: 1 + streamingMipmapsMemoryBudget: 512 + streamingMipmapsRenderersPerFrame: 512 + streamingMipmapsMaxLevelReduction: 2 + streamingMipmapsMaxFileIORequests: 1024 particleRaycastBudget: 4 asyncUploadTimeSlice: 2 asyncUploadBufferSize: 16 + asyncUploadPersistentBuffer: 1 resolutionScalingFixedDPIFactor: 1 excludedTargetPlatforms: [] - serializedVersion: 2 @@ -57,9 +64,16 @@ QualitySettings: vSyncCount: 0 lodBias: 0.4 maximumLODLevel: 0 + streamingMipmapsActive: 0 + streamingMipmapsAddAllCameras: 1 + streamingMipmapsMemoryBudget: 512 + streamingMipmapsRenderersPerFrame: 512 + 
streamingMipmapsMaxLevelReduction: 2 + streamingMipmapsMaxFileIORequests: 1024 particleRaycastBudget: 16 asyncUploadTimeSlice: 2 asyncUploadBufferSize: 16 + asyncUploadPersistentBuffer: 1 resolutionScalingFixedDPIFactor: 1 excludedTargetPlatforms: [] - serializedVersion: 2 @@ -85,9 +99,16 @@ QualitySettings: vSyncCount: 1 lodBias: 0.7 maximumLODLevel: 0 + streamingMipmapsActive: 0 + streamingMipmapsAddAllCameras: 1 + streamingMipmapsMemoryBudget: 512 + streamingMipmapsRenderersPerFrame: 512 + streamingMipmapsMaxLevelReduction: 2 + streamingMipmapsMaxFileIORequests: 1024 particleRaycastBudget: 64 asyncUploadTimeSlice: 2 asyncUploadBufferSize: 16 + asyncUploadPersistentBuffer: 1 resolutionScalingFixedDPIFactor: 1 excludedTargetPlatforms: [] - serializedVersion: 2 @@ -113,15 +134,22 @@ QualitySettings: vSyncCount: 1 lodBias: 1 maximumLODLevel: 0 + streamingMipmapsActive: 0 + streamingMipmapsAddAllCameras: 1 + streamingMipmapsMemoryBudget: 512 + streamingMipmapsRenderersPerFrame: 512 + streamingMipmapsMaxLevelReduction: 2 + streamingMipmapsMaxFileIORequests: 1024 particleRaycastBudget: 256 asyncUploadTimeSlice: 2 asyncUploadBufferSize: 16 + asyncUploadPersistentBuffer: 1 resolutionScalingFixedDPIFactor: 1 excludedTargetPlatforms: [] - serializedVersion: 2 name: Very High pixelLightCount: 3 - shadows: 2 + shadows: 0 shadowResolution: 2 shadowProjection: 1 shadowCascades: 2 @@ -134,16 +162,23 @@ QualitySettings: textureQuality: 0 anisotropicTextures: 1 antiAliasing: 4 - softParticles: 1 + softParticles: 0 softVegetation: 1 - realtimeReflectionProbes: 1 + realtimeReflectionProbes: 0 billboardsFaceCameraPosition: 1 vSyncCount: 1 lodBias: 1.5 maximumLODLevel: 0 + streamingMipmapsActive: 0 + streamingMipmapsAddAllCameras: 1 + streamingMipmapsMemoryBudget: 512 + streamingMipmapsRenderersPerFrame: 512 + streamingMipmapsMaxLevelReduction: 2 + streamingMipmapsMaxFileIORequests: 1024 particleRaycastBudget: 1024 asyncUploadTimeSlice: 2 asyncUploadBufferSize: 16 + asyncUploadPersistentBuffer: 1 resolutionScalingFixedDPIFactor: 1 excludedTargetPlatforms: [] - serializedVersion: 2 @@ -169,9 +204,16 @@ QualitySettings: vSyncCount: 1 lodBias: 2 maximumLODLevel: 0 + streamingMipmapsActive: 0 + streamingMipmapsAddAllCameras: 1 + streamingMipmapsMemoryBudget: 512 + streamingMipmapsRenderersPerFrame: 512 + streamingMipmapsMaxLevelReduction: 2 + streamingMipmapsMaxFileIORequests: 1024 particleRaycastBudget: 4096 asyncUploadTimeSlice: 2 asyncUploadBufferSize: 16 + asyncUploadPersistentBuffer: 1 resolutionScalingFixedDPIFactor: 1 excludedTargetPlatforms: [] m_PerPlatformDefaultQuality: diff --git a/ProjectSettings/UnityConnectSettings.asset b/ProjectSettings/UnityConnectSettings.asset index fa0b146..c3ae9a0 100644 --- a/ProjectSettings/UnityConnectSettings.asset +++ b/ProjectSettings/UnityConnectSettings.asset @@ -4,7 +4,7 @@ UnityConnectSettings: m_ObjectHideFlags: 0 serializedVersion: 1 - m_Enabled: 0 + m_Enabled: 1 m_TestMode: 0 m_EventOldUrl: https://api.uca.cloud.unity3d.com/v1/events m_EventUrl: https://cdp.cloud.unity3d.com/v1/events
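
A note on the vectorActionSize change in Assets/Brains/CozmoLearning.asset above: Unity serializes this int array as a little-endian hex dump, so 01000000 is a single discrete branch of size 1, and 0300000003000000 is two branches of size 3 each, matching the new forward and rotate action descriptions. A minimal Python sketch of the decoding (the helper name is ours, not part of ML-Agents):

import struct

def decode_int_array(hex_field):
    """Decode a Unity YAML hex-dumped int array (little-endian int32s)."""
    raw = bytes.fromhex(hex_field)
    return list(struct.unpack("<%di" % (len(raw) // 4), raw))

print(decode_int_array("01000000"))          # [1]    -> old single branch
print(decode_int_array("0300000003000000"))  # [3, 3] -> forward, rotate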
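
Given those [3, 3] branches, the notebook's random-action cell can be specialized to drive the agent deliberately. A sketch, assuming env, default_brain, and train_mode from the notebook above; the meaning of each index (e.g. 1 = forward) is an assumption, since the diff defines the branch sizes but not their semantics:

import numpy as np

# Assumes an already constructed UnityEnvironment as in the notebook.
env_info = env.reset(train_mode=train_mode)[default_brain]

# One row per agent, one column per discrete branch: [forward, rotate].
# Index meanings are assumed: 1 in branch 0 = drive forward, 0 in
# branch 1 = no rotation.
action = np.array([[1, 0]])
env_info = env.step(action)[default_brain]
print(env_info.rewards[0], env_info.local_done[0])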
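
Similarly, the new Rigidbody's m_Constraints: 84 in CozmoTraining.unity is a RigidbodyConstraints bit mask. Decoding it with Unity's documented enum values shows the body is locked to planar motion, leaving only X/Z translation and Y rotation free:

# Unity RigidbodyConstraints bit values.
FLAGS = {
    2: "FreezePositionX", 4: "FreezePositionY", 8: "FreezePositionZ",
    16: "FreezeRotationX", 32: "FreezeRotationY", 64: "FreezeRotationZ",
}
mask = 84
print([name for bit, name in FLAGS.items() if mask & bit])
# ['FreezePositionY', 'FreezeRotationX', 'FreezeRotationZ']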