Pinterest Placement Papers 2026
Pinterest Placement Papers 2026 with Solutions
Meta Description: Prepare for Pinterest 2026 campus drive with latest placement papers, exam pattern analysis, and 20+ solved questions. Get ready to crack the visual discovery platform leader's hiring process.
Pinterest is a visual discovery engine that helps people find ideas for their interests and hobbies. Founded in 2010 by Ben Silbermann, Paul Sciarra, and Evan Sharp, Pinterest has grown to become a go-to platform for inspiration across categories like home decor, fashion, recipes, travel, and DIY projects. With over 450 million monthly active users, Pinterest helps people discover and save ideas that inspire them to create a life they love.
Pinterest's engineering culture emphasizes empathy, craftsmanship, and building products that help people discover inspiration. If you're targeting Pinterest placements in 2026, this comprehensive guide with real placement paper questions and detailed solutions is your ultimate preparation resource.
Pinterest Hiring Pattern 2026
Eligibility Criteria
| Parameter | Requirements |
|---|---|
| Degree | B.E./B.Tech/M.E./M.Tech/MCA/M.Sc (CS/IT) |
| Academic Requirement | Strong CS fundamentals; ML/Recommendation systems knowledge is a plus |
| Backlogs | No active backlogs |
| Gap Criteria | Flexible, evaluated case-by-case |
| Package | ₹18-35 LPA (India) / $120K-180K (US) |
Selection Process Overview
- Online Assessment (90 minutes)
- Technical Phone Screen (45-60 minutes)
- Virtual Onsite - Coding Round (60 minutes)
- Virtual Onsite - System Design (60 minutes)
- Virtual Onsite - Behavioral (45 minutes)
Pinterest Online Assessment Pattern 2026
| Section | Number of Questions | Time | Difficulty |
|---|---|---|---|
| Aptitude & Problem Solving | 15 | 25 minutes | Medium |
| Technical MCQs | 10 | 15 minutes | Medium-High |
| Coding Problems | 2-3 | 50 minutes | High |
| Total | 27-28 | ~90 mins | - |
Note: Pinterest emphasizes recommendation systems, search, and visual content processing.
Pinterest Placement Papers 2026 - Practice Questions
Section 1: Quantitative Aptitude
Interactive Mock Test
Test your knowledge with 15 real placement questions. Get instant feedback and detailed solutions.
Section 2: Logical Reasoning
Section 3: Technical MCQs
Section 4: Coding Problems
Question 16
Problem: Implement a simple recommendation system that suggests items based on user similarity.
Solution:
from collections import defaultdict
import math
class SimpleRecommender:
def __init__(self):
# user -> item -> rating
self.user_ratings = defaultdict(dict)
# item -> list of users who rated it
self.item_users = defaultdict(list)
def add_rating(self, user_id, item_id, rating):
"""Add a user rating for an item"""
self.user_ratings[user_id][item_id] = rating
self.item_users[item_id].append(user_id)
def cosine_similarity(self, user1, user2):
"""Calculate cosine similarity between two users"""
ratings1 = self.user_ratings[user1]
ratings2 = self.user_ratings[user2]
# Find common items
common_items = set(ratings1.keys()) & set(ratings2.keys())
if not common_items:
return 0
# Calculate dot product and magnitudes
dot_product = sum(ratings1[item] * ratings2[item] for item in common_items)
magnitude1 = math.sqrt(sum(r**2 for r in ratings1.values()))
magnitude2 = math.sqrt(sum(r**2 for r in ratings2.values()))
if magnitude1 == 0 or magnitude2 == 0:
return 0
return dot_product / (magnitude1 * magnitude2)
def get_similar_users(self, user_id, n=5):
"""Get n most similar users"""
similarities = []
for other_user in self.user_ratings:
if other_user != user_id:
similarity = self.cosine_similarity(user_id, other_user)
similarities.append((other_user, similarity))
# Sort by similarity (highest first)
similarities.sort(key=lambda x: x[1], reverse=True)
return similarities[:n]
def recommend_items(self, user_id, n=10):
"""Recommend items for a user"""
user_ratings = self.user_ratings[user_id]
similar_users = self.get_similar_users(user_id, n=5)
# Calculate weighted scores for items
item_scores = defaultdict(float)
item_similarity_sum = defaultdict(float)
for similar_user, similarity in similar_users:
if similarity <= 0:
continue
for item, rating in self.user_ratings[similar_user].items():
# Skip items user has already rated
if item in user_ratings:
continue
item_scores[item] += similarity * rating
item_similarity_sum[item] += similarity
# Calculate predicted ratings
predictions = []
for item, score in item_scores.items():
if item_similarity_sum[item] > 0:
predicted_rating = score / item_similarity_sum[item]
predictions.append((item, predicted_rating))
# Sort by predicted rating
predictions.sort(key=lambda x: x[1], reverse=True)
return predictions[:n]
# Demo: seed a small rating matrix, then print neighbours and suggestions.
recommender = SimpleRecommender()

_sample_ratings = [
    ("user1", "item1", 5),
    ("user1", "item2", 3),
    ("user1", "item3", 4),
    ("user2", "item1", 4),
    ("user2", "item2", 2),
    ("user2", "item4", 5),
    ("user3", "item1", 2),
    ("user3", "item3", 5),
    ("user3", "item4", 4),
]
for _uid, _iid, _score in _sample_ratings:
    recommender.add_rating(_uid, _iid, _score)

print("Similar users to user1:", recommender.get_similar_users("user1"))
print("Recommendations for user1:", recommender.recommend_items("user1"))
Time Complexity: O(U × I) per recommendation query (one O(I) cosine similarity against each of the other U users); generating recommendations for all U users costs O(U² × I). U = number of users, I = average items per user
Space Complexity: O(U × I)
Question 17
Problem: Design a system to track trending pins based on save velocity.
Solution:
from datetime import datetime, timedelta
from collections import defaultdict, deque
import heapq
class TrendingTracker:
    """Track trending pins by save velocity over a sliding time window.

    Every save is stored as a (timestamp, count) event per pin; events
    older than the window are pruned lazily, at most once per check
    interval.  NOTE(review): events are assumed to arrive in chronological
    order — the pruning pops from the left of each deque.
    """

    def __init__(self, window_hours=24, check_interval_hours=1):
        """
        Args:
            window_hours: Time window for trending calculation
            check_interval_hours: How often to check for trending items
        """
        self.window_hours = window_hours
        self.check_interval_hours = check_interval_hours
        # pin_id -> deque of (timestamp, saves) events inside the window
        self.pin_history = defaultdict(deque)
        # pin_id -> running total of saves inside the window
        self.pin_saves = defaultdict(int)
        # Timestamp of the most recent pruning pass
        self.last_cleanup = datetime.now()

    def record_save(self, pin_id, timestamp=None):
        """Register a single save event for *pin_id* (defaults to now)."""
        when = datetime.now() if timestamp is None else timestamp
        self.pin_history[pin_id].append((when, 1))
        self.pin_saves[pin_id] += 1
        self._cleanup_if_needed()

    def _cleanup_if_needed(self):
        """Drop events older than the window, throttled to the check interval."""
        now = datetime.now()
        # Throttle: skip unless a full check interval has elapsed.
        if (now - self.last_cleanup).total_seconds() < self.check_interval_hours * 3600:
            return
        cutoff = now - timedelta(hours=self.window_hours)
        # Snapshot the keys so entries can be deleted while we scan.
        for pin_id in list(self.pin_history):
            events = self.pin_history[pin_id]
            while events and events[0][0] < cutoff:
                _, expired_saves = events.popleft()
                self.pin_saves[pin_id] -= expired_saves
            # Forget pins with no activity left in the window.
            if not events:
                del self.pin_history[pin_id]
                del self.pin_saves[pin_id]
        self.last_cleanup = now

    def get_trending_pins(self, n=10):
        """Return the top n pins as (velocity, pin_id, total_saves) tuples,
        highest save velocity (saves/hour) first."""
        self._cleanup_if_needed()
        ranked = []
        for pin_id, events in self.pin_history.items():
            if not events:
                continue
            # Span of the recorded activity, floored at one hour so a
            # single burst does not divide by zero.
            span_secs = (events[-1][0] - events[0][0]).total_seconds()
            span_hours = max(1, span_secs / 3600)
            saves = self.pin_saves[pin_id]
            ranked.append((saves / span_hours, pin_id, saves))
        return sorted(ranked, reverse=True)[:n]

    def batch_record_saves(self, pin_save_counts):
        """Register many saves at once, all stamped with one shared timestamp."""
        stamp = datetime.now()
        for pin_id, count in pin_save_counts.items():
            self.pin_history[pin_id].append((stamp, count))
            self.pin_saves[pin_id] += count
        self._cleanup_if_needed()
# Optimized version for high volume
class OptimizedTrendingTracker:
def __init__(self, window_hours=24, granularity_minutes=5):
self.window_hours = window_hours
self.granularity_minutes = granularity_minutes
self.num_buckets = (window_hours * 60) // granularity_minutes
# pin_id -> circular buffer of bucket counts
self.pin_buckets = defaultdict(lambda: [0] * self.num_buckets)
# Current bucket index
self.current_bucket = 0
self.last_update = datetime.now()
def _get_bucket_index(self, timestamp):
"""Convert timestamp to bucket index"""
minutes_since_epoch = int(timestamp.timestamp() / 60)
return (minutes_since_epoch // self.granularity_minutes) % self.num_buckets
def record_save(self, pin_id,
Explore this topic cluster
More resources in Uncategorized
Use the category hub to browse similar questions, exam patterns, salary guides, and preparation resources related to this topic.