uvipen / Super-mario-bros-PPO-pytorch

Proximal Policy Optimization (PPO) algorithm for Super Mario Bros
MIT License
1.07k stars 201 forks source link

Please correct this C# code — it is for a game similar to Super Mario Bros #17

Closed ghost closed 3 years ago

ghost commented 3 years ago

using System.Collections; using System.Collections.Generic; using UnityEngine;

/// <summary>
/// Basic 2D character controller driven by the legacy Unity Input axes:
/// horizontal movement, jumping, and crouching. Attach to a GameObject
/// that has a Rigidbody2D component.
/// </summary>
public class Artemismovement : MonoBehaviour
{
    // Movement tuning; public so they serialize and show in the Inspector.
    public float moveSpeed, jumpForce;

    // True while the character is airborne; cleared on any 2D collision.
    public bool Jumping;

    public Rigidbody2D RG2D;

    // Raw axis input snapped to -1 / 0 / 1, refreshed every frame.
    public int moveX, moveY;

    // Start is called before the first frame update
    void Start()
    {
        RG2D = GetComponent<Rigidbody2D>();

        moveSpeed = 11f;
        jumpForce = 16f;

        // Start as "jumping" so the player cannot jump until the first
        // collision (i.e. landing on the ground) clears the flag.
        Jumping = true;
    }

    // Update is called once per frame
    void Update()
    {
        // BUG FIX: the original called input.getAxisraw(...), which does not
        // compile — the Unity API is Input.GetAxisRaw, and it returns a
        // float, so an explicit cast to int is required here.
        moveX = (int)Input.GetAxisRaw("Horizontal");
        moveY = (int)Input.GetAxisRaw("Vertical");

        // Horizontal movement (X axis). Velocity is only overwritten while
        // input is held, so the body keeps its momentum when input stops.
        if (moveX != 0)
        {
            RG2D.velocity = new Vector2(moveSpeed * moveX, RG2D.velocity.y);
        }

        // Jumping: only allowed while grounded (Jumping == false).
        if (moveY == 1 && !Jumping)
        {
            RG2D.velocity = new Vector2(RG2D.velocity.x, jumpForce);
            Jumping = true;
        }

        // Crouching: halve the sprite height while "down" is held,
        // restore full height otherwise.
        if (moveY == -1)
        {
            transform.localScale = new Vector2(1f, 0.5f);
        }
        else
        {
            transform.localScale = new Vector2(1f, 1f);
        }
    }

    // NOTE(review): this fires on ANY collision (walls included), so
    // touching a wall mid-air re-enables jumping — consider checking the
    // contact normal or using a dedicated ground check if that matters.
    void OnCollisionEnter2D(Collision2D col)
    {
        Jumping = false;
    }
}