NestJS has first-class microservice support built in. It abstracts the transport layer, letting you switch between TCP, Redis, RabbitMQ, or Kafka with minimal code changes. RabbitMQ is a solid default - battle-tested, mature tooling, excellent reliability guarantees, and great support for complex routing patterns.
In this guide I'll walk through building a real microservices setup: an API gateway, a user service, and a notification service. The same patterns I use in production.
Project Setup
npm i -g @nestjs/cli
nest new api-gateway
nest new user-service
nest new notification-service
# In each project:
npm install @nestjs/microservices amqplib amqp-connection-manager

The API Gateway
The gateway is your public-facing service. It receives HTTP requests and routes them to the appropriate microservice over RabbitMQ.
// api-gateway/src/app.module.ts
// Registers one RabbitMQ client proxy per downstream microservice.
// Under strict TS, process.env values are `string | undefined`, but the
// RMQ `urls` option needs a string[] — supply a local-dev fallback rather
// than passing `undefined` to the broker connection.
const RABBITMQ_URL = process.env.RABBITMQ_URL ?? 'amqp://localhost:5672';

@Module({
  imports: [
    ClientsModule.register([
      {
        // Injection token used with @Inject('USER_SERVICE') in controllers.
        name: 'USER_SERVICE',
        transport: Transport.RMQ,
        options: {
          urls: [RABBITMQ_URL],
          queue: 'user_queue',
          // durable: the queue definition survives a broker restart
          queueOptions: { durable: true },
          // manual acks, so in-flight messages are not lost on a crash
          noAck: false,
          prefetchCount: 10,
        },
      },
      {
        name: 'NOTIFICATION_SERVICE',
        transport: Transport.RMQ,
        options: {
          urls: [RABBITMQ_URL],
          queue: 'notification_queue',
          queueOptions: { durable: true },
        },
      },
    ]),
  ],
})
export class AppModule {}
// api-gateway/src/users/users.controller.ts
@Controller('users')
export class UsersController {
  constructor(
    @Inject('USER_SERVICE') private userClient: ClientProxy,
    @Inject('NOTIFICATION_SERVICE') private notificationClient: ClientProxy,
  ) {}

  @Post('register')
  async register(@Body() dto: RegisterUserDto) {
    // Request-response over RMQ: wait for the user service's reply.
    const creation = this.userClient.send({ cmd: 'user.create' }, dto);
    const user = await firstValueFrom(creation);

    // Fire-and-forget event: no reply expected, so we don't block on it.
    const event = { userId: user.id, email: dto.email };
    this.notificationClient.emit('user.registered', event);

    return user;
  }

  @Get(':id')
  async findOne(@Param('id') id: string) {
    // Same request-response pattern; resolves with the single reply message.
    const lookup = this.userClient.send({ cmd: 'user.findById' }, id);
    return firstValueFrom(lookup);
  }
}
The User Microservice
// user-service/src/main.ts
// Boots the user service as a pure RMQ microservice (no HTTP listener).
async function bootstrap() {
  const app = await NestFactory.createMicroservice<MicroserviceOptions>(
    AppModule,
    {
      transport: Transport.RMQ,
      options: {
        // Fallback keeps `urls` a string[] when the env var is unset.
        urls: [process.env.RABBITMQ_URL ?? 'amqp://localhost:5672'],
        queue: 'user_queue',
        queueOptions: { durable: true },
        // Manual acks: a crash mid-processing requeues the message.
        noAck: false,
        prefetchCount: 1, // process one message at a time per consumer
      },
    },
  );
  await app.listen();
  console.log('User service is listening');
}

// Handle startup failures (bad broker URL, auth, ...) explicitly instead of
// leaving a floating promise with an unhandled rejection.
bootstrap().catch((err) => {
  console.error('User service failed to start', err);
  process.exit(1);
});
// user-service/src/users/users.controller.ts
@Controller()
export class UsersController {
  constructor(private usersService: UsersService) {}

  // Handles { cmd: 'user.create' } request-response messages from the gateway.
  @MessagePattern({ cmd: 'user.create' })
  async createUser(@Payload() dto: CreateUserDto) {
    return this.usersService.create(dto);
  }

  // Handles { cmd: 'user.findById' }; the RpcException payload travels back
  // to the calling side when no user matches.
  @MessagePattern({ cmd: 'user.findById' })
  async findUser(@Payload() id: string) {
    const found = await this.usersService.findById(id);
    if (found) {
      return found;
    }
    throw new RpcException({ statusCode: 404, message: 'User not found' });
  }
}
Message Patterns vs Event Patterns
NestJS microservices support two communication styles. Understanding which to use when is key to a clean architecture:
Message patterns (send / @MessagePattern) are request-response. The gateway sends a message and waits for a reply. Use these when you need a result - creating a user, fetching data, validating something.
Event patterns (emit / @EventPattern) are fire-and-forget. The publisher emits an event and does not wait. Use these for side effects - sending emails, logging activity, updating search indexes.
// Request-response (gateway waits for user data): send() expects a reply,
// and firstValueFrom resolves with the first message that comes back.
const user = await firstValueFrom(
this.client.send({ cmd: 'user.create' }, data),
);
// Event (gateway doesn't wait for email to be sent): emit() publishes the
// event and the gateway moves on immediately.
this.client.emit('user.registered', { userId: user.id });
// Event handler in notification service: side effects run sequentially here.
@EventPattern('user.registered')
async onUserRegistered(@Payload() data: UserRegisteredEvent) {
await this.emailService.sendWelcome(data.userId);
await this.pushService.subscribeToUpdates(data.userId);
}
Error Handling
RPC exceptions are the way to propagate errors from a microservice back to the gateway.
// Throwing in the microservice
@MessagePattern({ cmd: 'user.findById' })
async findUser(@Payload() id: string) {
const user = await this.usersService.findById(id);
if (!user) {
// The RpcException payload is serialized and delivered to the caller.
throw new RpcException({
statusCode: 404,
message: 'User not found',
});
}
return user;
}
// Catching in the gateway controller
@Get(':id')
async findOne(@Param('id') id: string) {
try {
return await firstValueFrom(
this.userClient.send({ cmd: 'user.findById' }, id),
);
} catch (err) {
// RpcException payloads arrive under err.error; transport-level errors
// arrive as-is. NOTE(review): under strict TS (`useUnknownInCatchVariables`)
// `err` is `unknown` and needs narrowing before property access.
const error = err.error ?? err;
// Map the microservice error onto an HTTP response for the client.
throw new HttpException(
error.message ?? 'Internal error',
error.statusCode ?? 500,
);
}
}
Dead Letter Queues
A dead letter queue (DLQ) captures messages that failed to process after N retries. Essential for production - without a DLQ, failed messages are silently dropped or cause infinite retry loops.
// Queue with DLQ configuration
queueOptions: {
durable: true,
arguments: {
// Route rejected or expired messages to a dead letter exchange
'x-dead-letter-exchange': 'dlx',
'x-dead-letter-routing-key': 'dead_letters',
// NOTE(review): a TTL on *this* (work) queue expires any message that
// sits unconsumed for 30s and dead-letters it — that is expiry, not
// auto-retry. For the retry-after-30s pattern, set the TTL on the DLQ
// itself and dead-letter it back to this work queue.
'x-message-ttl': 30000,
// Limit queue size to prevent memory issues
'x-max-length': 10000,
},
}

Monitor your DLQ queue depth. Messages accumulating there mean a consumer is broken or a downstream dependency (database, email service) is down.
Health Checks
Each microservice should expose a health endpoint for your orchestration layer (Kubernetes, ECS) to probe:
// user-service/src/health/health.controller.ts
@Controller('health')
export class HealthController {
  constructor(
    private health: HealthCheckService,
    @InjectConnection() private connection: Connection,
  ) {}

  @Get()
  @HealthCheck()
  check() {
    return this.health.check([
      // Custom indicator reporting the MongoDB connection state.
      async () => {
        // readyState === 1 is treated as a healthy, connected state;
        // anything else rejects and fails the probe.
        if (this.connection.readyState !== 1) {
          throw new Error('MongoDB disconnected');
        }
        return { mongodb: { status: 'up' } };
      },
    ]);
  }
}
Production Tips
- Always use durable queues - messages survive RabbitMQ restarts; without durable queues, you lose them on redeploy.
- Set noAck: false and acknowledge messages manually - this prevents message loss if your consumer crashes mid-processing.
- Set prefetchCount - prevents one slow consumer from hogging all messages. Start with 1 and tune up.
- Use separate exchanges for different domains instead of routing everything through one queue.
- Monitor queue depth - growing queues signal a consumer bottleneck or downstream outage.
- Implement a retry strategy with exponential backoff using the DLQ + TTL pattern above.