In [ ]:
import torch

# Select the first GPU if one is available, otherwise fall back to the CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
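Before moving anything, it can help to check what hardware is actually visible; `torch.cuda.device_count()` matters later for `nn.DataParallel`. A quick check, using only the names defined above:

In [ ]:
print(torch.cuda.is_available())   # True if at least one usable CUDA device exists
print(torch.cuda.device_count())   # Number of visible CUDA devices
print(device)                      # The device selected above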
In [ ]:
data = torch.ones(3, 3)
print(data.device)  # Newly created tensors live on the CPU by default
In [ ]:
device = torch.device("cuda:0")  # Assumes a CUDA device is present
data_gpu = data.to(device)       # .to() returns a copy on the target device
print(data_gpu.device)
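The reverse transfer works the same way: `.cpu()` (or `.to("cpu")`) copies the tensor back to host memory, which is required, for example, before converting to NumPy. A minimal sketch:

In [ ]:
data_cpu = data_gpu.cpu()  # Copy back to host memory
print(data_cpu.device)     # cpu
arr = data_cpu.numpy()     # .numpy() only works on CPU tensors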
In [ ]:
import torch.nn as nn

net = nn.Sequential(nn.Linear(3, 3))
net.to(device)  # Moves all of the module's parameters and buffers in place
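To confirm that the module actually moved, inspect the device of any of its parameters; this uses only the names defined above:

In [ ]:
print(next(net.parameters()).device)  # cuda:0 when a GPU is available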
In [ ]:
from torch import nn

class ASimpleNet(nn.Module):
    def __init__(self, layers=3):
        super(ASimpleNet, self).__init__()
        self.linears = nn.ModuleList(
            [nn.Linear(3, 3, bias=False) for _ in range(layers)]
        )

    def forward(self, x):
        # Under nn.DataParallel each replica sees only its slice of the batch
        print("forward batch size is: {}".format(x.size()[0]))
        # nn.ModuleList is not itself callable; apply each layer in turn
        for linear in self.linears:
            x = linear(x)
        x = torch.relu(x)
        return x
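A quick single-device check makes the later behavior easier to see: without `nn.DataParallel`, `forward` receives the whole batch. The name `net_cpu` below is illustrative, not from the original:

In [ ]:
net_cpu = ASimpleNet()
out = net_cpu(torch.randn(4, 3))  # Prints "forward batch size is: 4"
print(out.shape)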
In [ ]:
batch_size = 16
inputs = torch.randn(batch_size, 3)
labels = torch.randn(batch_size, 3)
inputs, labels = inputs.to(device), labels.to(device)

net = ASimpleNet()
net = nn.DataParallel(net)  # Replicates the model and splits the batch across GPUs
net.to(device)
# print("CUDA_VISIBLE_DEVICES: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
for epoch in range(1):
    outputs = net(inputs)  # Each replica's forward prints its per-GPU batch size
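The loop above only runs the forward pass. A complete training step also needs a loss and an optimizer; the sketch below is one way to fill that in (MSELoss and SGD are assumptions, not part of the original):

In [ ]:
import torch.optim as optim

criterion = nn.MSELoss()  # Assumed loss, matching the random regression targets
optimizer = optim.SGD(net.parameters(), lr=0.01)  # Assumed optimizer

for epoch in range(1):
    optimizer.zero_grad()              # Clear gradients from the previous step
    outputs = net(inputs)              # DataParallel gathers replica outputs onto device 0
    loss = criterion(outputs, labels)
    loss.backward()                    # Backprop through the gathered outputs
    optimizer.step()                   # Update the shared parameters
    print(loss.item())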