RL-Coach simple behavioral cloning example
0 votes
/ 30 April 2020

I am trying to run a simple BC (behavioral cloning) experiment using RL-Coach and a custom Gym environment. Based on the idea from this, I am trying to predict the variable target from f_1, f_2 and f_3. I am using the following Gym environment:

import random
import gym
from gym import spaces
import pandas as pd
import numpy as np


class TestEnvOne(gym.Env):

    def __init__(self, max_time):
        super(TestEnvOne, self).__init__()
        self.max_time = max_time
        f_1 = np.sin(np.arange(self.max_time))
        f_2 = np.cos(np.arange(self.max_time))
        f_3 = np.tan(np.arange(self.max_time))
        target = np.roll(f_1, 1) + np.roll(f_2, 2) + np.roll(f_3, 3)
        self.df = pd.DataFrame({'target': target, 'f_1': f_1, 'f_2': f_2, 'f_3': f_3})
        self.max_target = self.df.max()['target']
        self.min_target = self.df.min()['target']
        self.max_f_1 = self.df.max()['f_1']
        self.max_f_2 = self.df.max()['f_2']
        self.max_f_3 = self.df.max()['f_3']
        self.min_f_1 = self.df.min()['f_1']
        self.min_f_2 = self.df.min()['f_2']
        self.min_f_3 = self.df.min()['f_3']
        self.start_step = 0
        self.current_step = 0

        # Actions
        self.action_space = spaces.Box(
            low=np.array([0, ]), high=np.array([1, ]), dtype=np.float32)

        # Observation
        self.observation_space = gym.spaces.dict.Dict({'measurements':
                                                           spaces.Box(low=np.array([0, 0, 0]), high=np.array([1, 1, 1]),
                                                                      dtype=np.float32),
                                                       'desired_goal': spaces.Box(low=np.array([0]), high=np.array([1]),
                                                                                  dtype=np.float32)
                                                       })

        self.reward_range = (-1, 1)

    def _next_observation(self):
        # Scale to between 0-1
        frame = {'desired_goal': self.df.loc[self.current_step, 'target'] / self.max_target,
                 'measurements': [
                     self.df.loc[self.current_step, 'f_1'] / self.max_f_1,
                     self.df.loc[self.current_step, 'f_2'] / self.max_f_2,
                     self.df.loc[self.current_step, 'f_3'] / self.max_f_3
                 ]}

        return frame

    def step(self, action):
        self.current_step += 1

        if self.current_step >= len(self.df.loc[:, 'target'].values):
            self.current_step = 0

        obs = self._next_observation()
        reward = obs['desired_goal'] - action[0]
        done = (self.current_step == self.start_step)

        return {'measurements': obs['measurements'], 'desired_goal': obs['desired_goal']}, reward, done, {}

    def reset(self):
        # Set the current step to a random point within the data frame
        self.start_step = random.randint(
            0, len(self.df.loc[:, 'target'].values) - 1)
        self.current_step = self.start_step

        return self._next_observation()

    def render(self, mode='human', close=False):
        # Render the environment to the screen

        print(f'Step: {self.current_step}')
        print(f'Target: {self.df.loc[self.current_step, "target"]}')

I am using a preset based on the Doom Basic BC preset, as follows:

from rl_coach.agents.bc_agent import BCAgentParameters
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.base_parameters import VisualizationParameters, PresetValidationParameters
from rl_coach.core_types import TrainingSteps, EnvironmentEpisodes, EnvironmentSteps
from rl_coach.environments.gym_environment import GymVectorEnvironment
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters
from rl_coach.memories.memory import MemoryGranularity


####################
# Graph Scheduling #
####################

schedule_params = ScheduleParameters()
schedule_params.improve_steps = TrainingSteps(2000)
schedule_params.steps_between_evaluation_periods = TrainingSteps(20)
schedule_params.evaluation_steps = EnvironmentEpisodes(5)
schedule_params.heatup_steps = EnvironmentSteps(10)

#########
# Agent #
#########
agent_params = BCAgentParameters()
agent_params.network_wrappers['main'].learning_rate = 0.00025
agent_params.memory.max_size = (MemoryGranularity.Transitions, 1000000)
agent_params.algorithm.discount = 0.99
agent_params.algorithm.num_consecutive_playing_steps = EnvironmentSteps(0)
#agent_params.network_wrappers['main'].batch_size = 1
agent_params.network_wrappers['main'].input_embedders_parameters = {'measurements': InputEmbedderParameters(),'desired_goal': InputEmbedderParameters()}


###############
# Environment #
###############
#envPath = 'env.TestEnvZero:TestEnvZero'
envPath = 'env.TestEnvOne:TestEnvOne'
env_params = GymVectorEnvironment(level=envPath)
env_params.additional_simulator_parameters = {'max_time': 2000}

########
# Test #
########
preset_validation_params = PresetValidationParameters()
preset_validation_params.test_using_a_trace_test = False

graph_manager = BasicRLGraphManager(agent_params=agent_params, env_params=env_params,
                                    schedule_params=schedule_params, vis_params=VisualizationParameters(),
                                    preset_validation_params=preset_validation_params)

When I run the following command:

coach -p presets/PruebaPresetBC.py

The output is:

Please enter an experiment name: Test1

Creating graph - name: BasicRLGraphManager
Creating agent - name: agent
simple_rl_graph: Starting heatup
2020-04-30-16:21:37.128831 Heatup - Name: main_level/agent Worker: 0 Episode: 1 Total reward: -998.28 Exploration: [0.1] Steps: 2000 Training iteration: 0 
Starting to improve simple_rl_graph task index 0
Traceback (most recent call last):
  File "/home/user/coach_env/bin/coach", line 8, in <module>
    sys.exit(main())
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/coach.py", line 777, in main
    launcher.launch()
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/coach.py", line 226, in launch
    self.run_graph_manager(graph_manager, args)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/coach.py", line 612, in run_graph_manager
    self.start_single_threaded(task_parameters, graph_manager, args)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/coach.py", line 674, in start_single_threaded
    start_graph(graph_manager=graph_manager, task_parameters=task_parameters)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/coach.py", line 88, in start_graph
    graph_manager.improve()
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/graph_managers/graph_manager.py", line 547, in improve
    self.train_and_act(self.steps_between_evaluation_periods)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/graph_managers/graph_manager.py", line 482, in train_and_act
    self.train()
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/graph_managers/graph_manager.py", line 408, in train
    [manager.train() for manager in self.level_managers]
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/graph_managers/graph_manager.py", line 408, in <listcomp>
    [manager.train() for manager in self.level_managers]
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/level_manager.py", line 187, in train
    [agent.train() for agent in self.agents.values()]
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/level_manager.py", line 187, in <listcomp>
    [agent.train() for agent in self.agents.values()]
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/agents/agent.py", line 741, in train
    total_loss, losses, unclipped_grads = self.learn_from_batch(batch)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/agents/bc_agent.py", line 77, in learn_from_batch
    targets)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/architectures/network_wrapper.py", line 171, in train_and_sync_networks
    importance_weights=importance_weights, no_accumulation=True)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/architectures/tensorflow_components/architecture.py", line 365, in accumulate_gradients
    result = self.sess.run(fetches, feed_dict=feed_dict)
  File "/home/user/coach_env/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 950, in run
    run_metadata_ptr)
  File "/home/user/coach_env/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1149, in _run
    str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (32,) for Tensor 'main_level/agent/main/online/network_0/desired_goal/desired_goal:0', which has shape '(?, 1)'

--------------------------------

Results stored at: ./experiments/Test1/30_4_2020-16_21_0
Total runtime: 0:00:06.481459

--------------------------------

I found that the 32 in the shape comes from

agent_params.network_wrappers['main'].batch_size

in the preset, but I don't know how to proceed or how to fix the problem. Sometimes the exception is the following instead:

ValueError: Cannot feed value of shape (32, 3) for Tensor 'main_level/agent/main/online/network_0/measurements/measurements:0', which has shape '(?, 0)'

Any help is appreciated.
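As a first diagnostic, the shapes the environment actually returns can be compared, outside of Coach, with the shapes its observation_space declares (Coach presumably builds its input placeholders from the latter). This is only a minimal sketch, assuming the environment file is importable as env.TestEnvOne, as in the preset's level string:

import numpy as np

# Hypothetical import path; it matches the preset's level string 'env.TestEnvOne:TestEnvOne'.
from env import TestEnvOne

env = TestEnvOne(max_time=2000)
obs = env.reset()

# Compare what the environment actually returns with what observation_space declares.
for key, space in env.observation_space.spaces.items():
    returned = np.asarray(obs[key])
    print(f'{key}: returned shape {returned.shape}, declared shape {space.shape}')

# With the environment above, 'desired_goal' comes back as a bare float (shape ()),
# while the declared Box has shape (1,). A batch of 32 such scalars stacks to (32,),
# which lines up with the error about feeding (32,) into the '(?, 1)' placeholder.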

UPDATE 2020-05-04:

Following @MarcusRenshaw's advice, I added print statements to the gym step and reset functions. Right before the error, the reset function is called, and the observation is:

reset: {'desired_goal': 0.00559788442127721, 'measurements': [0.6832680466354063, 0.7301735609948197, 0.00400035745607452]}

This observation is consistent with the shape (32, 3) and shape (32,) reported in the error. The last observation returned by the step function during heatup is:

step: {'measurements': [-0.6434517999514073, 0.7654916425445919, -0.00359343140212023], 'desired_goal': -0.010710469493505773}

Hope this helps.
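For what it is worth, those printed values already suggest where the (32,) comes from: if the training batch is built by stacking 32 such observations (this is only an assumption about how Coach assembles the feed), a scalar desired_goal stacks to (32,), whereas the declared Box(shape=(1,)) would require each observation to carry a length-1 array:

import numpy as np

# Sketch only: 0.0056 stands in for the desired_goal value printed by reset() above.
scalar_goals = [0.0056] * 32                 # what reset()/step() currently return
print(np.stack(scalar_goals).shape)          # (32,)  -> rejected by the (?, 1) placeholder

vector_goals = [np.array([0.0056])] * 32     # desired_goal as a length-1 array instead
print(np.stack(vector_goals).shape)          # (32, 1) -> matches the placeholder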

1 Answer

0 votes
/ 08 May 2020

I have made some progress on this topic. There were several shape errors, but I am still stuck on one. After changing the environment code to the following:

import random
import numpy as np
import pandas as pd

import gym
from gym import spaces

from sklearn.preprocessing import MinMaxScaler


class TestEnvOne(gym.Env):

    def __init__(self, max_time):
        super(TestEnvOne, self).__init__()
        self.max_time = max_time
        f_1 = np.sin(np.arange(self.max_time))
        f_2 = np.cos(np.arange(self.max_time))
        f_3 = np.tan(np.arange(self.max_time))
        target = np.roll(f_1, 1) + np.roll(f_2, 2) + np.roll(f_3, 3)
        df = pd.DataFrame({'target': target, 'f_1': f_1, 'f_2': f_2, 'f_3': f_3})
        list_col = ['target', 'f_1', 'f_2', 'f_3']
        df[list_col] = MinMaxScaler().fit_transform(df[list_col])
        self.df = df
        self.start_step = 0
        self.current_step = 0

        # Actions
        self.action_space = spaces.Box(low=np.array([0]), high=np.array([1]), dtype=np.float32)

        self.observation_space = gym.spaces.dict.Dict(
            {'measurements': spaces.Box(low=0.0, high=1.1, shape=(3,), dtype=np.float32),
             'desired_goal': spaces.Box(low=np.array([0]), high=np.array([1]), dtype=np.float32)
             })
        self.reward_range = (-1, 1)

    def _next_observation(self):
        measurements = np.array([
            self.df.loc[self.current_step, 'f_1'],
            self.df.loc[self.current_step, 'f_2'],
            self.df.loc[self.current_step, 'f_3']
        ])

        frame = {'desired_goal': self.df.loc[self.current_step, 'target'].reshape(-1, 1),
                 'measurements': measurements
                 }
        return frame

    def step(self, action):
        self.current_step += 1
        if self.current_step >= len(self.df.loc[:, 'target'].values):
            self.current_step = 0

        obs = self._next_observation()
        reward = (obs['desired_goal'] - action)[0][0]
        done = (self.current_step == self.start_step)
        returning_value = {'measurements': obs['measurements'], 'desired_goal': obs['desired_goal']}
        return returning_value, reward, done, {}

    def reset(self):
        # Set the current step to a random point within the data frame
        self.start_step = random.randint(0, len(self.df.loc[:, 'target'].values) - 1)
        self.current_step = self.start_step
        observation = self._next_observation()
        return observation

    def render(self, mode='human', close=False):
        # Render the environment to the screen
        print(f'Step: {self.current_step}')
        print(f'Target: {self.df.loc[self.current_step, "target"]}')

    def seed(self, seed=None):
        self.seed_value = seed
        return [seed]

I get this error:

Traceback (most recent call last):
  File "/home/user/coach_env/bin/coach", line 8, in <module>
    sys.exit(main())
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/coach.py", line 777, in main
    launcher.launch()
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/coach.py", line 226, in launch
    self.run_graph_manager(graph_manager, args)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/coach.py", line 612, in run_graph_manager
    self.start_single_threaded(task_parameters, graph_manager, args)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/coach.py", line 674, in start_single_threaded
    start_graph(graph_manager=graph_manager, task_parameters=task_parameters)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/coach.py", line 88, in start_graph
    graph_manager.improve()
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/graph_managers/graph_manager.py", line 547, in improve
    self.train_and_act(self.steps_between_evaluation_periods)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/graph_managers/graph_manager.py", line 481, in train_and_act
    self.act(EnvironmentSteps(1))
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/graph_managers/graph_manager.py", line 447, in act
    result = self.top_level_manager.step(None)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/level_manager.py", line 245, in step
    action_info = acting_agent.act()
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/agents/agent.py", line 851, in act
    action = self.choose_action(curr_state)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/agents/imitation_agent.py", line 43, in choose_action
    prediction = self.networks['main'].online_network.predict(self.prepare_batch_for_inference(curr_state, 'main'))
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/architectures/tensorflow_components/architecture.py", line 547, in predict
    output = self.sess.run(outputs, feed_dict)
  File "/home/user/coach_env/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 950, in run
    run_metadata_ptr)
  File "/home/user/coach_env/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1149, in _run
    str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (1, 3) for Tensor 'main_level/agent/main/online/network_0/measurements/measurements:0', which has shape '(?, 0)'

I think it is trying to feed the measurements (three values) into a network input of size zero. I don't know where this zero shape comes from. Any ideas? Thanks.
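One thing that may be worth ruling out first (not verified to be related to the '(?, 0)' shape): in the code above, desired_goal is reshaped to (1, 1) while its Box declares shape (1,), and both returned values are float64 rather than the declared float32. A sketch of _next_observation that returns exactly the declared shapes and dtypes:

    def _next_observation(self):
        # Sketch: return exactly the declared shapes/dtypes --
        # measurements as a (3,) float32 array and desired_goal as a (1,) float32 array
        # (rather than the (1, 1) array produced by reshape(-1, 1)).
        measurements = np.array([
            self.df.loc[self.current_step, 'f_1'],
            self.df.loc[self.current_step, 'f_2'],
            self.df.loc[self.current_step, 'f_3'],
        ], dtype=np.float32)
        desired_goal = np.array(
            [self.df.loc[self.current_step, 'target']], dtype=np.float32)
        return {'measurements': measurements, 'desired_goal': desired_goal}

With a (1,) desired_goal, step() would then need reward = (obs['desired_goal'] - action)[0] instead of the double indexing [0][0].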
