Annexure 8: End-to-End PyTorch Projects (Complete Code Pipelines).
Abstract:
Below is the complete Annexure 8: End-to-End PyTorch Projects (Complete Code Pipelines).
**ANNEXURE 8: End-to-End PyTorch Projects (Complete Code Pipelines)**
This annexure provides full, end-to-end project templates that include:
- Data loading
- Model building
- Training and evaluation
- Saving/loading
- Inference
- Deployment options
Each project is kept concise yet fully functional—easy to extend for academic or production use.
Included Projects:
- Image Classification (CNN) – CIFAR-10
- Text Sentiment Analysis (LSTM/Embedding)
- Object Detection (Faster R-CNN)
- Time Series Forecasting (LSTM)
- Reinforcement Learning with DQN (CartPole)
**PROJECT 1: Image Classification with CNN (CIFAR-10)**
1. Imports and Device Setup
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, models
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
2. Data Pipeline
# Train-time augmentation: random flip + padded random crop, then tensor conversion.
augmentations = [
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(32, padding=4),
    transforms.ToTensor(),
]
transform = transforms.Compose(augmentations)

# CIFAR-10: augmented training split, plain-tensor test split.
train_ds = datasets.CIFAR10(root="data", train=True, transform=transform, download=True)
test_ds = datasets.CIFAR10(root="data", train=False, transform=transforms.ToTensor())

# Shuffle only the training loader; 64 images per batch for both.
train_loader = DataLoader(train_ds, batch_size=64, shuffle=True)
test_loader = DataLoader(test_ds, batch_size=64)
3. Model
class SimpleCNN(nn.Module):
    """Small two-stage CNN for 32x32 RGB images (e.g. CIFAR-10).

    Two conv/pool stages halve the spatial size twice (32 -> 16 -> 8),
    then a two-layer MLP maps the flattened 64*8*8 features to 10 logits.
    """

    def __init__(self):
        super().__init__()
        # Convolutional backbone: 3 -> 32 -> 64 channels, 2x2 max-pool after each.
        conv_stages = [
            nn.Conv2d(3, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
        ]
        self.features = nn.Sequential(*conv_stages)
        # Classifier head: flattened 8x8x64 activations -> 256 -> 10 class scores.
        self.classifier = nn.Sequential(
            nn.Linear(64 * 8 * 8, 256),
            nn.ReLU(),
            nn.Linear(256, 10),
        )

    def forward(self, x):
        feats = self.features(x)
        flat = torch.flatten(feats, 1)  # same as x.view(batch, -1)
        return self.classifier(flat)
model = SimpleCNN().to(device)
4. Training & Evaluation
# Cross-entropy over the 10 class logits; Adam at the common 1e-3 default rate.
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
def train_epoch():
    """Run one optimization pass over train_loader; return the mean batch loss."""
    model.train()
    running = 0.0
    for inputs, targets in train_loader:
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        batch_loss = loss_fn(model(inputs), targets)
        batch_loss.backward()
        optimizer.step()
        running += batch_loss.item()
    return running / len(train_loader)
def evaluate():
    """Return the model's test-set accuracy as a percentage."""
    model.eval()
    hits, seen = 0, 0
    with torch.no_grad():
        for inputs, targets in test_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            predicted = model(inputs).argmax(dim=1)
            hits += (predicted == targets).sum().item()
            seen += targets.size(0)
    return 100 * hits / seen
5. Run Training
# Train for ten epochs, reporting mean loss and test accuracy after each.
for epoch in range(1, 11):
    mean_loss = train_epoch()
    test_acc = evaluate()
    print(f"Epoch {epoch}: Loss={mean_loss:.4f}, Accuracy={test_acc:.2f}%")
6. Save & Inference
# Persist the trained weights, then run single-image inference on one test sample.
torch.save(model.state_dict(), "cnn.pth")

sample, _ = test_ds[0]
model.eval()
with torch.no_grad():
    # unsqueeze(0) adds the batch dimension; argmax(1) picks the top class.
    predicted_class = model(sample.unsqueeze(0).to(device)).argmax(1)
print("Predicted:", predicted_class.item())
**PROJECT 2: Text Sentiment Analysis with LSTM**
1. Imports
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchtext.datasets import IMDB
from torchtext.data.utils import get_tokenizer
from collections import Counter
2. Prepare Vocabulary
# Tokenize the IMDB training split and count token frequencies.
tokenizer = get_tokenizer("basic_english")
train_iter = IMDB(split="train")
counter = Counter()
for _, review in train_iter:
    counter.update(tokenizer(review))

# Keep the 20k most frequent tokens; ids 0 and 1 are reserved for specials.
vocab = {word: rank + 2 for rank, (word, _) in enumerate(counter.most_common(20000))}
vocab["<unk>"] = 0
vocab["<pad>"] = 1
def encode(text):
    """Map raw text to vocabulary ids, sending unknown tokens to <unk> (0)."""
    return [vocab.get(token, 0) for token in tokenizer(text)]
3. Dataset
def collate_batch(batch):
    """Collate (label, text) pairs into a padded id tensor and a label tensor.

    Every sequence is truncated/padded to exactly 200 tokens with <pad> (1).
    NOTE(review): assumes labels arrive as "pos"/"neg" strings; newer torchtext
    versions yield integer labels — confirm against the installed dataset API.
    """
    label_list, text_list = [], []
    for label, text in batch:
        label_list.append(1 if label == "pos" else 0)
        ids = encode(text)[:200]
        text_list.append(ids + [1] * (200 - len(ids)))
    return torch.tensor(text_list), torch.tensor(label_list)
4. LSTM Model
class LSTMClassifier(nn.Module):
    """Embedding -> LSTM -> linear head for binary sentiment classification."""

    def __init__(self, vocab_size, embed_dim=128, hidden=128):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_dim)
        self.lstm = nn.LSTM(embed_dim, hidden, batch_first=True)
        self.fc = nn.Linear(hidden, 2)

    def forward(self, x):
        embedded = self.embed(x)
        _, (final_hidden, _) = self.lstm(embedded)
        # final_hidden[-1]: last layer's final hidden state, shape (batch, hidden).
        return self.fc(final_hidden[-1])
model = LSTMClassifier(len(vocab)).to(device)
5. Train
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())

# Five passes over the IMDB training split; a fresh iterator is built per epoch.
for epoch in range(5):
    loader = DataLoader(IMDB(split="train"), batch_size=32, collate_fn=collate_batch)
    for batch_x, batch_y in loader:
        batch_x, batch_y = batch_x.to(device), batch_y.to(device)
        optimizer.zero_grad()
        batch_loss = loss_fn(model(batch_x), batch_y)
        batch_loss.backward()
        optimizer.step()
    print("Epoch", epoch + 1, "done")
6. Inference
def predict_sentiment(text):
    """Return 0 (negative) or 1 (positive) for a raw review string.

    Fix: switch the model to eval mode before inference — the training loop
    leaves it in train mode, which matters if dropout/batch-norm is ever added.
    """
    model.eval()
    # Truncate to the 200-token window the model was trained on.
    ids = encode(text)[:200]
    x = torch.tensor([ids]).to(device)
    with torch.no_grad():
        return model(x).argmax(1).item()

print(predict_sentiment("This movie was excellent!"))
**PROJECT 3: Object Detection with Faster R-CNN (torchvision)**
1. Load Model
from torchvision.models.detection import fasterrcnn_resnet50_fpn

# COCO-pretrained detector. NOTE(review): `pretrained=True` is deprecated in
# newer torchvision (use the `weights=` enum) — confirm the installed version.
model = fasterrcnn_resnet50_fpn(pretrained=True).to(device)
# Detection models return predictions (not training losses) only in eval mode.
model.eval()
2. Image Transform
from torchvision import transforms
from PIL import Image

# Detection models take float tensors scaled to [0, 1]; the model does its own
# resizing/normalization internally, so ToTensor alone is sufficient here.
transform = transforms.Compose([
transforms.ToTensor()
])
3. Inference
# Fix: force 3-channel RGB — PIL may return grayscale or RGBA images, which
# would otherwise fail the detector's 3-channel convolutional stem.
img = Image.open("image.jpg").convert("RGB")
x = transform(img).to(device)
with torch.no_grad():
    # The model takes a list of 3D image tensors; [0] selects this image's result.
    output = model([x])[0]
# Each prediction dict holds per-detection boxes, class labels, and confidences.
print(output["boxes"], output["labels"], output["scores"])
**PROJECT 4: Time Series Forecasting with LSTM**
1. Prepare Sequence Data
def create_sequences(data, seq_len=20):
    """Slice a 1-D series into (window, next-value) supervised pairs.

    Fixes/generalization over the original: accepts lists, numpy arrays, AND
    1-D tensors (``torch.tensor`` on a list of tensor slices raises), and
    returns empty tensors instead of crashing when the series is too short.

    Args:
        data: 1-D numeric sequence (list, numpy array, or tensor).
        seq_len: window length fed to the model.

    Returns:
        (xs, ys): float32 tensors of shape (N, seq_len) and (N,), where
        ys[i] is the value immediately following window xs[i].
    """
    series = torch.as_tensor(data, dtype=torch.float32)
    n = len(series) - seq_len
    if n <= 0:
        # Not enough points for a single window.
        return torch.empty(0, seq_len), torch.empty(0)
    xs = torch.stack([series[i:i + seq_len] for i in range(n)])
    ys = series[seq_len:].clone()
    return xs, ys
x, y = create_sequences(series)
2. LSTM Model
class LSTMForecast(nn.Module):
    """Single-layer LSTM regressor: (batch, seq, 1) -> (batch, 1) next value."""

    def __init__(self):
        super().__init__()
        self.lstm = nn.LSTM(1, 64, batch_first=True)
        self.fc = nn.Linear(64, 1)

    def forward(self, x):
        _, (hidden, _) = self.lstm(x)
        # hidden[-1] is the final hidden state of the (only) LSTM layer.
        return self.fc(hidden[-1])
3. Train
model = LSTMForecast().to(device)
loss_fn = nn.MSELoss()
opt = torch.optim.Adam(model.parameters())

# Full-batch training: the whole (N, seq_len) dataset is one optimization step.
for epoch in range(20):
    opt.zero_grad()
    # unsqueeze(-1) adds the feature dimension the LSTM expects: (N, seq, 1).
    preds = model(x.unsqueeze(-1).to(device))
    # Bug fix: squeeze(-1) aligns preds (N, 1) with targets y (N,). Without it
    # MSELoss broadcasts the pair to an (N, N) matrix and silently computes
    # the wrong loss (PyTorch emits a target-size UserWarning for this).
    loss = loss_fn(preds.squeeze(-1), y.to(device))
    loss.backward()
    opt.step()
    print("Epoch", epoch + 1, "Loss:", loss.item())
**PROJECT 5: Reinforcement Learning — DQN for CartPole**
1. Q-Network
class DQN(nn.Module):
    """MLP mapping a 4-dim CartPole observation to Q-values for the 2 actions."""

    def __init__(self):
        super().__init__()
        hidden = 64
        self.layers = nn.Sequential(
            nn.Linear(4, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 2),
        )

    def forward(self, x):
        return self.layers(x)
2. Environment + Training Loop
import gym
import random
from collections import deque

# NOTE(review): the maintained fork of gym is `gymnasium`; the 5-tuple
# step()/reset() unpacking used below matches the newer (>=0.26) API —
# confirm which package/version is installed.
env = gym.make("CartPole-v1")
model = DQN().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
loss_fn = nn.MSELoss()
# Replay buffer of (state, action, reward, next_state, done) transitions.
memory = deque(maxlen=50000)
gamma = 0.99  # discount factor for future rewards
epsilon = 1.0  # initial exploration rate, decayed once per episode
epsilon_decay = 0.995
batch_size = 32
3. Replay + Learn Step
def replay():
    """Sample a minibatch from the replay buffer and take one TD-learning step.

    No-op until the buffer holds at least `batch_size` transitions.
    NOTE(review): the TD target is computed from the online network itself;
    classic DQN uses a separate, periodically-synced target network for
    stability — consider adding one for harder environments.
    """
    if len(memory) < batch_size:
        return
    batch = random.sample(memory, batch_size)
    states, actions, rewards, next_states, dones = zip(*batch)
    states = torch.tensor(states).float().to(device)
    next_states = torch.tensor(next_states).float().to(device)
    actions = torch.tensor(actions).long().to(device)
    rewards = torch.tensor(rewards).float().to(device)
    dones = torch.tensor(dones).float().to(device)

    # Q(s, a) for the actions actually taken.
    q_vals = model(states).gather(1, actions.unsqueeze(1)).squeeze()
    # TD target: r + gamma * max_a' Q(s', a'), zeroed on terminal transitions.
    # Fix: build the target under no_grad — it is a fixed regression label, so
    # constructing an autograd graph for it (then detaching) was wasted work.
    with torch.no_grad():
        next_q_vals = model(next_states).max(1)[0]
        target = rewards + (1 - dones) * gamma * next_q_vals

    loss = loss_fn(q_vals, target)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
4. Training Episodes
# Epsilon-greedy episodes: act, store the transition, learn from replay.
for episode in range(200):
    state = env.reset()[0]  # new gym API: reset() -> (observation, info)
    done = False
    while not done:
        if random.random() < epsilon:
            action = env.action_space.sample()  # explore
        else:
            with torch.no_grad():  # exploit the current Q-estimates
                action = model(torch.tensor(state).float().to(device)).argmax().item()
        next_state, reward, terminated, truncated, _ = env.step(action)
        # Bug fix: the episode must also end on time-limit truncation. The
        # original checked only `terminated`, so a policy that keeps the pole
        # up would loop past CartPole-v1's 500-step limit indefinitely.
        done = terminated or truncated
        memory.append((state, action, reward, next_state, done))
        state = next_state
        replay()
    # Decay exploration after each episode, floored at 1%.
    epsilon = max(0.01, epsilon * epsilon_decay)
    print("Episode", episode+1, "Epsilon:", epsilon)
Conclusion
This annexure provides complete, practical, end-to-end PyTorch project pipelines that cover:
- Computer Vision
- NLP
- Object Detection
- Time Series Forecasting
- Reinforcement Learning
These templates can be used for academic implementation, research experiments, or production-level prototyping.
Comments
Post a Comment
"Thank you for seeking advice on your career journey! Our team is dedicated to providing personalized guidance on education and success. Please share your specific questions or concerns, and we'll assist you in navigating the path to a fulfilling and successful career."