
Typical usages of numpy.float32

Contents
  • Example 1: draw_image
  • Example 2: generate_moving_mnist
  • Example 3: wav_format
  • Example 4: get_rois_blob
  • Example 5: generate_anchors_pre
  • Example 6: draw_heatmap
  • Example 7: maybe_cast_to_float64
  • Example 8: in_top_k
  • Example 9: ctc_path_probs
  • Example 10: rmsprop
  • Example 11: adadelta
  • Example 12: adagrad
  • Example 13: sgd
  • Example 14: sgdmomentum
  • Example 15: set_values

This article collects typical Python code examples that use numpy.float32, showing how the type appears in practice.
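
Before the examples, a minimal standalone sketch of what numpy.float32 itself does: a single-precision dtype, lossy relative to float64, with no-copy casts when the dtype already matches.

import numpy as np

x = np.array([1.5, 2.25, 3.1], dtype=np.float64)
y = x.astype(np.float32)                     # downcast to single precision
print(y.dtype)                               # float32
print(np.float32(0.1) == np.float64(0.1))    # False: 0.1 is not exactly representable
z = y.astype(np.float32, copy=False)         # no copy when the dtype already matches
print(z is y)                                # True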

Example 1: draw_image

import numpy as np
from numpy import float32

def draw_image(self, img, color=[0, 255, 0], alpha=1.0, copy=True, from_img=None):
    if copy:
      img = np.copy(img)

    orig_dtype = img.dtype
    if alpha != 1.0 and img.dtype != np.float32:
      img = img.astype(np.float32, copy=False)

    for rect in self:
      if from_img is not None:
        rect.resize(from_img, img).draw_on_image(img, color=color, alpha=alpha, copy=False)
      else:
        rect.draw_on_image(img, color=color, alpha=alpha, copy=False)

    if orig_dtype != img.dtype:
      img = img.astype(orig_dtype, copy=False)

    return img
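
The numpy.float32 pattern worth isolating here is the dtype round trip: remember the original dtype, cast to float32 for the alpha blending, then cast back. A minimal sketch of just that pattern, with a made-up blend step standing in for the rectangle drawing:

import numpy as np

img = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
alpha = 0.5

orig_dtype = img.dtype
work = img.astype(np.float32, copy=False)    # copy=False skips the copy if already float32
work = (1 - alpha) * work + alpha * 255.0    # stand-in for drawing with transparency
out = np.clip(work, 0, 255).astype(orig_dtype, copy=False)
print(out.dtype)                             # uint8, same as the input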

Example 2: generate_moving_mnist

import random

import numpy as np
from numpy import float32

def generate_moving_mnist(self, num_digits=2):
  '''
  Get random trajectories for the digits and generate a video.
  '''
  data = np.zeros((self.n_frames_total, self.image_size_, self.image_size_), dtype=np.float32)
  for n in range(num_digits):
   # Trajectory
   start_y, start_x = self.get_random_trajectory(self.n_frames_total)
   ind = random.randint(0, self.mnist.shape[0] - 1)
   digit_image = self.mnist[ind]
   for i in range(self.n_frames_total):
    top  = start_y[i]
    left  = start_x[i]
    bottom = top + self.digit_size_
    right = left + self.digit_size_
    # Draw digit
    data[i, top:bottom, left:right] = np.maximum(data[i, top:bottom, left:right], digit_image)

  data = data[..., np.newaxis]
  return data
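
A reduced sketch of the same compositing idea with made-up sizes: a float32 canvas of frames, and a patch pasted at shifting offsets with np.maximum so overlapping digits keep their brightest pixels:

import numpy as np

canvas = np.zeros((3, 64, 64), dtype=np.float32)   # (frames, H, W)
digit = np.ones((28, 28), dtype=np.float32)        # stand-in for an MNIST digit
for i, (top, left) in enumerate([(0, 0), (10, 10), (20, 20)]):
  region = canvas[i, top:top + 28, left:left + 28]
  canvas[i, top:top + 28, left:left + 28] = np.maximum(region, digit)
print(canvas.shape, canvas.dtype)   # (3, 64, 64) float32
video = canvas[..., np.newaxis]     # add a channel axis, as in the example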

Example 3: wav_format

import numpy as np
from numpy import float32

def wav_format(self, input_wave_file, output_wave_file, target_phrase):
    pop_size = 100
    elite_size = 10
    mutation_p = 0.005
    noise_stdev = 40
    noise_threshold = 1
    mu = 0.9
    alpha = 0.001
    max_iters = 3000
    num_points_estimate = 100
    delta_for_gradient = 100
    delta_for_perturbation = 1e3
    input_audio = load_wav(input_wave_file).astype(np.float32)
    pop = np.expand_dims(input_audio, axis=0)
    pop = np.tile(pop, (pop_size, 1))
    output_wave_file = output_wave_file
    target_phrase = target_phrase
    funcs = setup_graph(pop, np.array([toks.index(x) for x in target_phrase]))
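
load_wav, setup_graph and toks are project-specific helpers, so only the numpy part can be shown in isolation. A sketch of that part with synthetic audio in place of the wav file:

import numpy as np

pop_size = 100
audio = np.random.randn(16000).astype(np.float32)   # stand-in for load_wav(...)
pop = np.expand_dims(audio, axis=0)                  # shape (1, 16000)
pop = np.tile(pop, (pop_size, 1))                    # shape (100, 16000), still float32
print(pop.shape, pop.dtype)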

Example 4: get_rois_blob

import numpy as np
from numpy import float32

def get_rois_blob(im_rois, im_scale_factors):
  """Converts RoIs into network inputs.
  Arguments:
    im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
    im_scale_factors (list): scale factors as returned by _get_image_blob
  Returns:
    blob (ndarray): R x 5 matrix of RoIs in the image pyramid
  """
  rois_blob_real = []

  for i in range(len(im_scale_factors)):
    rois, levels = _project_im_rois(im_rois, np.array([im_scale_factors[i]]))
    rois_blob = np.hstack((levels, rois))
    rois_blob_real.append(rois_blob.astype(np.float32, copy=False))

  return rois_blob_real
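
A sketch with toy values showing what the hstack plus astype(np.float32, copy=False) step produces: a level-index column prepended to the scaled RoIs.

import numpy as np

im_rois = np.array([[10, 20, 50, 80], [0, 0, 30, 40]], dtype=np.float64)
scale = 2.0
rois = im_rois * scale                  # project RoIs to the scaled image
levels = np.zeros((rois.shape[0], 1))   # pyramid level index per RoI
blob = np.hstack((levels, rois)).astype(np.float32, copy=False)
print(blob.shape, blob.dtype)           # (2, 5) float32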

Example 5: generate_anchors_pre

import numpy as np
from numpy import float32

def generate_anchors_pre(height, width, feat_stride, anchor_scales=(8,16,32), anchor_ratios=(0.5,1,2)):
 """ A wrapper function to generate anchors given different scales
  Also return the number of anchors in variable 'length'
 """
 anchors = generate_anchors(ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))
 A = anchors.shape[0]
 shift_x = np.arange(0, width) * feat_stride
 shift_y = np.arange(0, height) * feat_stride
 shift_x, shift_y = np.meshgrid(shift_x, shift_y)
 shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
 K = shifts.shape[0]
 # width changes faster, so here it is H, W, C
 anchors = anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
 anchors = anchors.reshape((K * A, 4)).astype(np.float32, copy=False)
 length = np.int32(anchors.shape[0])

 return anchors, length
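
A sketch with a tiny feature map showing the broadcasting step and the resulting float32 shapes; a single hard-coded anchor stands in for generate_anchors:

import numpy as np

height, width, feat_stride = 2, 3, 16
anchors = np.array([[-8., -8., 8., 8.]])                 # one toy anchor, A = 1
A = anchors.shape[0]
shift_x, shift_y = np.meshgrid(np.arange(width) * feat_stride,
                               np.arange(height) * feat_stride)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                    shift_x.ravel(), shift_y.ravel())).transpose()
K = shifts.shape[0]                                       # K = height * width = 6
all_anchors = (anchors.reshape((1, A, 4)) +
               shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape((K * A, 4)).astype(np.float32, copy=False)
print(all_anchors.shape, all_anchors.dtype)               # (6, 4) float32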

Example 6: draw_heatmap

import numpy as np
from numpy import float32
import matplotlib.pyplot as plt
import imgaug as ia  # provides imresize_single_image used below

def draw_heatmap(img, heatmap, alpha=0.5):
  """Draw a heatmap overlay over an image."""
  assert len(heatmap.shape) == 2 or \
    (len(heatmap.shape) == 3 and heatmap.shape[2] == 1)
  assert img.dtype in [np.uint8, np.int32, np.int64]
  assert heatmap.dtype in [np.float32, np.float64]

  if img.shape[0:2] != heatmap.shape[0:2]:
    heatmap_rs = np.clip(heatmap * 255, 0, 255).astype(np.uint8)
    heatmap_rs = ia.imresize_single_image(
      heatmap_rs[..., np.newaxis],
      img.shape[0:2],
      interpolation="nearest"
    )
    heatmap = np.squeeze(heatmap_rs) / 255.0

  cmap = plt.get_cmap('jet')
  heatmap_cmapped = cmap(heatmap)
  heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
  heatmap_cmapped = heatmap_cmapped * 255
  mix = (1-alpha) * img + alpha * heatmap_cmapped
  mix = np.clip(mix, 0, 255).astype(np.uint8)
  return mix
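
A minimal self-contained sketch of the overlay arithmetic (the imgaug resize branch is skipped and the sizes are made up), assuming matplotlib is available for the colormap:

import numpy as np
import matplotlib.pyplot as plt

img = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
heatmap = np.random.rand(32, 32).astype(np.float32)   # values in [0, 1]
alpha = 0.5

cmap = plt.get_cmap('jet')
colored = cmap(heatmap)[..., :3] * 255                # drop the RGBA alpha channel
mix = np.clip((1 - alpha) * img + alpha * colored, 0, 255).astype(np.uint8)
print(mix.shape, mix.dtype)                           # (32, 32, 3) uint8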

Example 7: maybe_cast_to_float64

import logging

import numpy as np
from numpy import float32

def maybe_cast_to_float64(da):
  """Cast DataArrays to np.float64 if they are of type np.float32.

  Parameters
  ----------
  da : xr.DataArray
    Input DataArray

  Returns
  -------
  DataArray

  """
  if da.dtype == np.float32:
    logging.warning('Datapoints were stored using the np.float32 datatype. '
            'For accurate reduction operations using bottleneck, '
            'datapoints are being cast to the np.float64 datatype.'
            ' For more information see: https://github.com/pydata/'
            'xarray/issues/1346')
    return da.astype(np.float64)
  else:
    return da
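
A usage sketch, assuming xarray is installed; the function only touches .dtype and .astype, so the behavior is easy to verify on small DataArrays:

import numpy as np
import xarray as xr

da32 = xr.DataArray(np.arange(5, dtype=np.float32))
da64 = maybe_cast_to_float64(da32)
print(da32.dtype, '->', da64.dtype)           # float32 -> float64

da_int = xr.DataArray(np.arange(5))
print(maybe_cast_to_float64(da_int).dtype)    # unchanged: not float32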

Example 8: in_top_k

import numpy as np
from numpy import float32
import theano
import theano.tensor as T

def in_top_k(predictions, targets, k):
  '''Returns whether the `targets` are in the top `k` `predictions`

  # Arguments
    predictions: A tensor of shape batch_size x classes and type float32.
    targets: A tensor of shape batch_size and type int32 or int64.
    k: An int, number of top elements to consider.

  # Returns
    A tensor of shape batch_size and type int. output_i is 1 if
    targets_i is within top-k values of predictions_i
  '''
  predictions_top_k = T.argsort(predictions)[:, -k:]
  # 'any' and 'equal' are element-wise helpers defined alongside this function
  result, _ = theano.map(lambda prediction, target: any(equal(prediction, target)),
                         sequences=[predictions_top_k, targets])
  return result
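
The Theano graph version needs a running backend to evaluate. A plain numpy analogue of the same top-k membership check, with the hypothetical name in_top_k_np:

import numpy as np

def in_top_k_np(predictions, targets, k):
  # indices of the k largest predictions per row
  top_k = np.argsort(predictions, axis=1)[:, -k:]
  return np.array([t in row for row, t in zip(top_k, targets)], dtype=np.int32)

preds = np.array([[0.1, 0.7, 0.2], [0.5, 0.3, 0.2]], dtype=np.float32)
print(in_top_k_np(preds, np.array([1, 2]), k=2))   # [1 0]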

Example 9: ctc_path_probs

import numpy as np
from numpy import float32
import theano
import theano.tensor as T

def ctc_path_probs(predict, Y, alpha=1e-4):
  smoothed_predict = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0]
  L = T.log(smoothed_predict)
  zeros = T.zeros_like(L[0])
  log_first = zeros

  f_skip_idxs = ctc_create_skip_idxs(Y)
  b_skip_idxs = ctc_create_skip_idxs(Y[::-1]) # there should be a shortcut to calculating this

  def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev):
    f_active_next, log_f_next = ctc_update_log_p(f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev)
    b_active_next, log_b_next = ctc_update_log_p(b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev)
    return f_active_next, log_f_next, b_active_next, log_b_next

  [f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan(
    step, sequences=[L, L[::-1, ::-1]], outputs_info=[np.int32(1), log_first, np.int32(1), log_first])

  idxs = T.arange(L.shape[1]).dimshuffle('x', 0)
  mask = (idxs < f_active.dimshuffle(0, 'x')) & (idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1]
  log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L
  return log_probs, mask
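
Only the first line is plain numpy: label smoothing of the per-label probabilities before taking logs. A sketch of just that step with made-up shapes:

import numpy as np

n_steps, n_classes, alpha = 4, 5, 1e-4
predict = np.random.rand(n_steps, n_classes).astype(np.float32)
predict /= predict.sum(axis=1, keepdims=True)      # normalized rows, like a softmax output
Y = np.array([1, 3, 1])                            # label index sequence
smoothed = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0]
print(smoothed.shape, smoothed.dtype)              # (4, 3) float32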

Example 10: rmsprop

import numpy as np
from numpy import float32
import theano
import theano.tensor as T

floatX = theano.config.floatX  # 'float32' when Theano runs in single precision

def rmsprop(self, cost, params, lr=0.001, rho=0.9, eps=1e-6,consider_constant=None):
    """
    RMSProp.
    """
    lr = theano.shared(np.float32(lr).astype(floatX))

    gradients = self.get_gradients(cost, params,consider_constant)
    accumulators = [theano.shared(np.zeros_like(p.get_value()).astype(np.float32)) for p in params]

    updates = []

    for param, gradient, accumulator in zip(params, gradients, accumulators):
      new_accumulator = rho * accumulator + (1 - rho) * gradient ** 2
      updates.append((accumulator, new_accumulator))

      new_param = param - lr * gradient / T.sqrt(new_accumulator + eps)
      updates.append((param, new_param))

    return updates
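
The RMSProp rule itself, written as a one-step numpy sketch for a single float32 parameter (no Theano shared variables):

import numpy as np

lr, rho, eps = np.float32(0.001), 0.9, 1e-6
param = np.array([0.5, -0.3], dtype=np.float32)
grad = np.array([0.1, 0.2], dtype=np.float32)
accumulator = np.zeros_like(param)                 # float32, like the shared accumulator

accumulator = rho * accumulator + (1 - rho) * grad ** 2
param = param - lr * grad / np.sqrt(accumulator + eps)
print(param, accumulator.dtype)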

Example 11: adadelta

import numpy as np
from numpy import float32
import theano
import theano.tensor as T

floatX = theano.config.floatX  # 'float32' when Theano runs in single precision

def adadelta(self, cost, params, rho=0.95, epsilon=1e-6,consider_constant=None):
    """
    Adadelta. Based on:
    http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf
    """
    rho = theano.shared(np.float32(rho).astype(floatX))
    epsilon = theano.shared(np.float32(epsilon).astype(floatX))

    gradients = self.get_gradients(cost, params,consider_constant)
    accu_gradients = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]
    accu_deltas = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

    updates = []
    for param, gradient, accu_gradient, accu_delta in zip(params, gradients, accu_gradients, accu_deltas):
      new_accu_gradient = rho * accu_gradient + (1. - rho) * gradient ** 2.
      delta_x = - T.sqrt((accu_delta + epsilon) / (new_accu_gradient + epsilon)) * gradient
      new_accu_delta = rho * accu_delta + (1. - rho) * delta_x ** 2.
      updates.append((accu_gradient, new_accu_gradient))
      updates.append((accu_delta, new_accu_delta))
      updates.append((param, param + delta_x))
    return updates
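
The Adadelta rule as the same kind of one-step numpy sketch:

import numpy as np

rho, epsilon = np.float32(0.95), np.float32(1e-6)
param = np.array([0.5, -0.3], dtype=np.float32)
grad = np.array([0.1, 0.2], dtype=np.float32)
accu_grad = np.zeros_like(param)
accu_delta = np.zeros_like(param)

accu_grad = rho * accu_grad + (1. - rho) * grad ** 2.
delta_x = -np.sqrt((accu_delta + epsilon) / (accu_grad + epsilon)) * grad
accu_delta = rho * accu_delta + (1. - rho) * delta_x ** 2.
param = param + delta_x
print(param)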

Example 12: adagrad

import numpy as np
from numpy import float32
import theano
import theano.tensor as T

floatX = theano.config.floatX  # 'float32' when Theano runs in single precision

def adagrad(self, cost, params, lr=1.0, epsilon=1e-6,consider_constant=None):
    """
    Adagrad. Based on http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf
    """
    lr = theano.shared(np.float32(lr).astype(floatX))
    epsilon = theano.shared(np.float32(epsilon).astype(floatX))

    gradients = self.get_gradients(cost, params,consider_constant)
    gsums = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

    updates = []
    for param, gradient, gsum in zip(params, gradients, gsums):
      new_gsum = gsum + gradient ** 2.
      updates.append((gsum, new_gsum))
      updates.append((param, param - lr * gradient / (T.sqrt(gsum + epsilon))))
    return updates
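
The Adagrad rule, one step in numpy:

import numpy as np

lr, epsilon = np.float32(1.0), np.float32(1e-6)
param = np.array([0.5, -0.3], dtype=np.float32)
grad = np.array([0.1, 0.2], dtype=np.float32)
gsum = np.zeros_like(param)

gsum = gsum + grad ** 2.
param = param - lr * grad / np.sqrt(gsum + epsilon)
print(param)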

Example 13: sgd

import numpy as np
from numpy import float32
import theano
import theano.tensor as T

floatX = theano.config.floatX  # 'float32' when Theano runs in single precision

def sgd(self, cost, params,constraints={}, lr=0.01):
    """
    Stochastic gradient descent.
    """
    updates = []
   
    lr = theano.shared(np.float32(lr).astype(floatX))
    gradients = self.get_gradients(cost, params)
   
    for p, g in zip(params, gradients):
      v = -lr * g
      new_p = p + v
      # apply constraints
      if p in constraints:
        c = constraints[p]
        new_p = c(new_p)
      updates.append((p, new_p))

    return updates
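
The plain SGD step in numpy, constraints omitted:

import numpy as np

lr = np.float32(0.01)
param = np.array([0.5, -0.3], dtype=np.float32)
grad = np.array([0.1, 0.2], dtype=np.float32)
param = param + (-lr * grad)
print(param)   # approximately [0.499, -0.302]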

Example 14: sgdmomentum

import numpy as np
from numpy import float32
import theano
import theano.tensor as T

floatX = theano.config.floatX  # 'float32' when Theano runs in single precision

def sgdmomentum(self, cost, params,constraints={}, lr=0.01,consider_constant=None, momentum=0.):
    """
    Stochastic gradient descent with momentum. Momentum has to be in [0, 1).
    """
    # Check that the momentum is a correct value
    assert 0 <= momentum < 1

    lr = theano.shared(np.float32(lr).astype(floatX))
    momentum = theano.shared(np.float32(momentum).astype(floatX))

    gradients = self.get_gradients(cost, params)
    velocities = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

    updates = []
    for param, gradient, velocity in zip(params, gradients, velocities):
      new_velocity = momentum * velocity - lr * gradient
      updates.append((velocity, new_velocity))
      new_p = param + new_velocity
      # apply constraints
      if param in constraints:
        c = constraints[param]
        new_p = c(new_p)
      updates.append((param, new_p))
    return updates
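
The momentum update as a numpy sketch, run for two steps so the velocity visibly accumulates:

import numpy as np

lr, momentum = np.float32(0.01), np.float32(0.9)
param = np.array([0.5, -0.3], dtype=np.float32)
grad = np.array([0.1, 0.2], dtype=np.float32)
velocity = np.zeros_like(param)

for _ in range(2):                        # pretend the gradient stays the same
  velocity = momentum * velocity - lr * grad
  param = param + velocity
print(param)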

Example 15: set_values

import numpy as np
from numpy import float32

def set_values(name, param, pretrained):
  """
  Initialize a network parameter with pretrained values.
  We check that sizes are compatible.
  """
  param_value = param.get_value()
  if pretrained.size != param_value.size:
    raise Exception(
      "Size mismatch for parameter %s. Expected %i, found %i."
      % (name, param_value.size, pretrained.size)
    )
  param.set_value(np.reshape(
    pretrained, param_value.shape
  ).astype(np.float32))
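
A usage sketch, assuming Theano is installed and the parameter is a shared variable (anything exposing get_value/set_value works the same way):

import numpy as np
import theano

param = theano.shared(np.zeros((2, 3), dtype=np.float32), name='W')
pretrained = np.arange(6, dtype=np.float64)     # flat vector of 6 values
set_values('W', param, pretrained)
print(param.get_value().dtype, param.get_value().shape)   # float32 (2, 3)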

That concludes this article on typical usages of numpy.float32. For more on how numpy.float32 is used, search our earlier articles or browse the related articles below, and we hope you will keep supporting us!
