
    A true man can play a palo one hundred time

    Challenge description

    Question: You have a balance pole. You can move it left or right. Just play one hundred times on it.

    Description: each GET request takes two parameters:
    1. move, 0 or 1
    2. id, your token

    Each response contains an observation:
    1. the pole position x
    2. a value that depends on x
    3. the pole's deviation from the center, θ
    4. a value that depends on θ

    Why you failed: θ or x exceeded a certain threshold.
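
    For reference, a single interaction with the endpoint looks roughly like this (a minimal sketch: the url and token values below are placeholders, since the real challenge address and your personal token are not reproduced here):

    import requests
    
    url = 'http://challenge.example/palo'    # placeholder for the real challenge endpoint
    token = 'your-token-here'                # placeholder for your personal token
    
    resp = requests.get(url=url, params={'id': token, 'move': 0}).json()
    print(resp['observation'])   # [x, x_dot, theta, theta_dot]
    print(resp['status'])        # falsy once the episode has ended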

    In short, it is a pole-balancing game (more or less).
    As for why it shows up as the final challenge, please ask whoever designed the level rules.
    A problem like this really should not appear in a CTF in the first place, so I deliberately made it a bit easy; even so, CTF players never play by the rules, and the challenge still got a handful of unintended solutions.

    At its core this is a very, very simple reinforcement-learning problem; it does not even need reinforcement learning to solve.
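
    For example, a hand-written bang-bang controller that always pushes toward the side the pole is falling to is usually enough (a minimal sketch, assuming the dynamics behave like the classic CartPole and that move=1 pushes toward positive theta; flip the comparison if it turns out to be the other way around; url and token are placeholders as above):

    import requests
    
    url = 'http://challenge.example/palo'    # placeholder for the real challenge endpoint
    token = 'your-token-here'                # placeholder for your personal token
    
    for episode in range(100):
        # the first request of an episode returns the initial observation
        obs = requests.get(url=url, params={'id': token, 'move': 0}).json()['observation']
        while True:
            x, x_dot, theta, theta_dot = obs
            # push toward the side the pole is leaning / falling to
            move = 1 if theta + theta_dot > 0 else 0
            resp = requests.get(url=url, params={'id': token, 'move': move}).json()
            if not resp['status']:
                print('count:', resp['count'], 'flag:', resp.get('flag'))
                break
            obs = resp['observation']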

    DQN network definition

    import numpy as np
    import tensorflow as tf
    import requests
    import math
    
    class DeepQNetwork:
        def __init__(
                self,
                n_actions,
                n_features,
                learning_rate=0.01,
                reward_decay=0.9,
                e_greedy=0.9,
                replace_target_iter=300,
                memory_size=500,
                batch_size=32,
                e_greedy_increment=None,
                output_graph=False,
        ):
            self.n_actions = n_actions
            self.n_features = n_features
            self.lr = learning_rate
            self.gamma = reward_decay
            self.epsilon_max = e_greedy
            self.replace_target_iter = replace_target_iter
            self.memory_size = memory_size
            self.batch_size = batch_size
            self.epsilon_increment = e_greedy_increment
            self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max
    
            # total learning step
            self.learn_step_counter = 0
    
            # initialize zero memory [s, a, r, s_]
            self.memory = np.zeros((self.memory_size, n_features * 2 + 2))
    
            # consist of [target_net, evaluate_net]
            self._build_net()
            t_params = tf.get_collection('target_net_params')
            e_params = tf.get_collection('eval_net_params')
            self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
    
            self.sess = tf.Session()
    
            if output_graph:
                # $ tensorboard --logdir=logs
                # tf.train.SummaryWriter soon be deprecated, use following
                tf.summary.FileWriter("logs/", self.sess.graph)
    
            self.sess.run(tf.global_variables_initializer())
            self.cost_his = []
    
        def _build_net(self):
            # ------------------ build evaluate_net ------------------
            self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s')  # input
            self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target')  # for calculating loss
            with tf.variable_scope('eval_net'):
                # c_names(collections_names) are the collections to store variables
                c_names, n_l1, w_initializer, b_initializer = \
                    ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 10, \
                    tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1)  # config of layers
    
                # first layer. collections is used later when assign to target net
                with tf.variable_scope('l1'):
                    w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
                    b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
                    l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1)
    
                # second layer. collections is used later when assign to target net
                with tf.variable_scope('l2'):
                    w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
                    b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
                    self.q_eval = tf.matmul(l1, w2) + b2
    
            with tf.variable_scope('loss'):
                self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))
            with tf.variable_scope('train'):
                self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
    
            # ------------------ build target_net ------------------
            self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_')    # input
            with tf.variable_scope('target_net'):
                # c_names(collections_names) are the collections to store variables
                c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]
    
                # first layer. collections is used later when assign to target net
                with tf.variable_scope('l1'):
                    w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
                    b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
                    l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1)
    
                # second layer. collections is used later when assign to target net
                with tf.variable_scope('l2'):
                    w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
                    b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
                    self.q_next = tf.matmul(l1, w2) + b2
    
        def store_transition(self, s, a, r, s_):
            if not hasattr(self, 'memory_counter'):
                self.memory_counter = 0
    
            transition = np.hstack((s, [a, r], s_))
    
            # replace the old memory with new memory
            index = self.memory_counter % self.memory_size
            self.memory[index, :] = transition
    
            self.memory_counter += 1
    
        def choose_action(self, observation):
            # to have batch dimension when feed into tf placeholder
            observation = observation[np.newaxis, :]
    
            if np.random.uniform() < self.epsilon:
                # forward feed the observation and get q value for every actions
                actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
                action = np.argmax(actions_value)
            else:
                action = np.random.randint(0, self.n_actions)
            return action
    
        def learn(self):
            # check to replace target parameters
            if self.learn_step_counter % self.replace_target_iter == 0:
                self.sess.run(self.replace_target_op)
                print('\ntarget_params_replaced\n')
    
            # sample batch memory from all memory
            if self.memory_counter > self.memory_size:
                sample_index = np.random.choice(self.memory_size, size=self.batch_size)
            else:
                sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
            batch_memory = self.memory[sample_index, :]
    
            q_next, q_eval = self.sess.run(
                [self.q_next, self.q_eval],
                feed_dict={
                    self.s_: batch_memory[:, -self.n_features:],  # fixed params
                    self.s: batch_memory[:, :self.n_features],  # newest params
                })
    
            # change q_target w.r.t q_eval's action
            q_target = q_eval.copy()
    
            batch_index = np.arange(self.batch_size, dtype=np.int32)
            eval_act_index = batch_memory[:, self.n_features].astype(int)
            reward = batch_memory[:, self.n_features + 1]
    
            q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1)
    
            # train eval network
            _, self.cost = self.sess.run([self._train_op, self.loss],
                                         feed_dict={self.s: batch_memory[:, :self.n_features],
                                                    self.q_target: q_target})
            self.cost_his.append(self.cost)
    
            # increasing epsilon
            self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
            self.learn_step_counter += 1

    Training loop

    # url and token are assumed to be set to the challenge endpoint and your personal token
    x_threshold = 2.4                        # same cart-position limit as the classic CartPole
    theta_threshold_radians = 1/15*math.pi   # 12 degrees, the classic CartPole failure angle
    
    RL = DeepQNetwork(n_actions=2,
                      n_features=4,
                      learning_rate=0.01, e_greedy=0.9,
                      replace_target_iter=100, memory_size=2000,
                      e_greedy_increment=0.001,)
    
    total_steps = 0
    
    for i_episode in range(100):
        json_req = requests.get(url=url, params={'id': token, 'move': 0}).json()
        observation = json_req['observation']
        ep_r = 0
    
        while True:
            action = RL.choose_action(np.array(observation))
    
            json_req = requests.get(url=url, params={'id': token, 'move': action}).json()
            try:
                observation_ = json_req['observation']
            except KeyError:
                # the terminal response may carry no observation; fall back to the current state
                observation_ = observation
            print(observation)
            done = not json_req['status']
    
            # the smaller theta and closer to center the better
            x, x_dot, theta, theta_dot = observation_
            r1 = (x_threshold - abs(x))/x_threshold - 0.8
            r2 = (theta_threshold_radians - abs(theta))/theta_threshold_radians - 0.5
            reward = r1 + r2
    
            RL.store_transition(observation, action, reward, observation_)
    
            ep_r += reward
            if total_steps > 1000:
                RL.learn()
    
            if done:
                count = json_req['count']
                if count == 100:
                    print(json_req['flag'])
                else:
                    print('count:', json_req['count'])
                break
    
            observation = observation_
            total_steps += 1
