Implementing Log File Analysis for Search Robot Behavior
Server log files are the only source of truth about what search robots actually do. Google Search Console shows aggregated data with a delay; logs show exactly which URLs Googlebot crawls, how often, how long each response takes, which URLs it ignores, where it receives 404/500 errors, and where bot behavior is anomalous.
Log Structure
# Nginx access.log (combined format)
66.249.64.13 - - [15/Nov/2024:14:23:01 +0300] "GET /products/laptop-apple/ HTTP/1.1" 200 45231 "-" "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
Fields: IP, ident, auth, time, method+URL+protocol, status, bytes, referer, user-agent.
Add response time to logs:
# nginx.conf
log_format detailed '$remote_addr - $remote_user [$time_local] '
                    '"$request" $status $body_bytes_sent '
                    '"$http_referer" "$http_user_agent" '
                    '$request_time $upstream_response_time';

access_log /var/log/nginx/access.log detailed;
Identifying Search Robots
Main User-Agent patterns:
CRAWLER_PATTERNS = {
    'Googlebot': r'Googlebot(?:/\d+\.\d+)?',
    'YandexBot': r'YandexBot(?:/\d+\.\d+)?',
    'Bingbot': r'bingbot(?:/\d+\.\d+)?',
    'Baiduspider': r'Baiduspider',
}
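The analysis functions below rely on an identify_crawler helper; a minimal sketch that matches a user agent against these patterns:

import re

def identify_crawler(user_agent: str) -> str | None:
    """Return the crawler name whose pattern matches the user agent, else None."""
    for name, pattern in CRAWLER_PATTERNS.items():
        if re.search(pattern, user_agent):
            return name
    return None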
Verify Googlebot authenticity via reverse DNS (PTR record), then confirm with a forward lookup:
import socket
import re

def verify_googlebot(ip: str) -> bool:
    """Reverse-resolve the IP, check the Google domain, then forward-confirm."""
    try:
        hostname = socket.gethostbyaddr(ip)[0]
        if not re.search(r'\.googlebot\.com$|\.google\.com$', hostname):
            return False
        # Forward lookup must resolve back to the original IP
        resolved_ip = socket.gethostbyname(hostname)
        return resolved_ip == ip
    except (socket.herror, socket.gaierror):
        return False
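DNS lookups are slow, so cache verification results per IP when processing large logs; wrapping the function with functools.lru_cache is one minimal approach:

from functools import lru_cache

# Cache verdicts per IP; 65536 is an arbitrary cap, tune to your traffic
verify_googlebot = lru_cache(maxsize=65536)(verify_googlebot)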
Basic Log Parsing
import re
import gzip
from datetime import datetime
from collections import defaultdict, Counter
LOG_PATTERN = re.compile(
    r'(?P<ip>[\d.]+) .+ \[(?P<time>[^\]]+)\] '
    r'"(?P<method>\w+) (?P<url>[^\s]+) HTTP/[\d.]+" '
    r'(?P<status>\d+) (?P<bytes>\d+) '
    r'"[^"]*" "(?P<ua>[^"]*)"'
    r'(?:\s+(?P<request_time>[\d.]+))?'
)
def parse_log_file(filepath: str):
    """Yield parsed entries from a plain or gzip-compressed access log."""
    open_func = gzip.open if filepath.endswith('.gz') else open
    with open_func(filepath, 'rt', encoding='utf-8', errors='replace') as f:
        for line in f:
            m = LOG_PATTERN.match(line)
            if not m:
                continue  # skip lines that don't match the combined format
            yield {
                'ip': m.group('ip'),
                'time': m.group('time'),
                'method': m.group('method'),
                'url': m.group('url'),
                'status': int(m.group('status')),
                'bytes': int(m.group('bytes')),
                'user_agent': m.group('ua'),
                'request_time': float(m.group('request_time') or 0)
            }
def analyze_crawlers(log_files: list[str]) -> dict:
    stats = defaultdict(lambda: {
        'total_requests': 0,
        'urls': Counter(),
        'status_codes': Counter(),
        'slow_urls': [],
        'errors': []
    })
    for log_file in log_files:
        for entry in parse_log_file(log_file):
            crawler = identify_crawler(entry['user_agent'])
            if not crawler:
                continue
            s = stats[crawler]
            s['total_requests'] += 1
            s['urls'][entry['url']] += 1
            s['status_codes'][entry['status']] += 1
            if entry['request_time'] > 2.0:  # responses slower than 2 s
                s['slow_urls'].append(entry)
            if entry['status'] >= 400:
                s['errors'].append(entry)
    return dict(stats)
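Typical usage over a set of rotated logs (the paths are illustrative):

stats = analyze_crawlers([
    '/var/log/nginx/access.log',
    '/var/log/nginx/access.log.1.gz',
])
print(stats['Googlebot']['status_codes'])
print(stats['Googlebot']['urls'].most_common(20))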
Key Metrics
Crawl rate: for an average site, 100–5000 Googlebot requests per day is normal. A sharp drop signals a problem: a robots.txt block, server errors, or a drop in crawl priority.
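A quick way to chart crawl rate from the parsed logs: count requests per day for one crawler. A sketch reusing parse_log_file and identify_crawler; the date slice assumes the combined-format timestamp shown above:

def daily_crawl_rate(log_files: list[str], crawler_name: str) -> Counter:
    """Requests per day for one crawler; keys look like '15/Nov/2024'."""
    per_day = Counter()
    for log_file in log_files:
        for entry in parse_log_file(log_file):
            if identify_crawler(entry['user_agent']) == crawler_name:
                # '15/Nov/2024:14:23:01 +0300' -> '15/Nov/2024'
                per_day[entry['time'].split(':', 1)[0]] += 1
    return per_day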
Most/least crawled sections:
def crawl_distribution(urls: Counter) -> dict:
    """Aggregate crawl counts by top-level URL section, e.g. '/products/'."""
    sections = defaultdict(int)
    for url, count in urls.items():
        parts = url.split('/')
        section = f'/{parts[1]}/' if len(parts) > 1 and parts[1] else '/'
        sections[section] += count
    return dict(sorted(sections.items(), key=lambda x: x[1], reverse=True))
URLs that crawlers request but that return errors: 404s should be restored or 301-redirected to a relevant page; 500s point to server problems that need fixing.
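Pulling the worst offenders out of the analyze_crawlers output shown above:

for crawler, s in stats.items():
    # Most frequently crawled error URLs first
    error_urls = Counter(e['url'] for e in s['errors'])
    print(crawler, error_urls.most_common(20))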
Monitoring via ClickHouse + Grafana
For continuous monitoring, stream logs to ClickHouse:
CREATE TABLE crawler_logs (
    timestamp DateTime,
    ip IPv4,
    method LowCardinality(String),
    url String,
    status UInt16,
    bytes UInt32,
    user_agent String,
    request_ms Float32,
    crawler LowCardinality(String)
) ENGINE = MergeTree()
PARTITION BY toYYYYMM(timestamp)
ORDER BY (crawler, timestamp)
TTL timestamp + INTERVAL 6 MONTH;
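A typical Grafana panel query against this table: daily requests per crawler, which makes crawl-rate drops visible at a glance (a sketch; adjust the window and filters to your setup):

SELECT
    toDate(timestamp) AS day,
    crawler,
    count() AS requests
FROM crawler_logs
WHERE timestamp >= now() - INTERVAL 30 DAY
GROUP BY day, crawler
ORDER BY day;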
Finding Parasitic Bots
Not all bots are useful. Aggregate requests by user agent to surface unknown high-volume crawlers:
def find_suspicious_crawlers(log_files: list[str]) -> list:
    known_good = {'curl', 'wget'}
    ua_counter = Counter()
    for log_file in log_files:
        for entry in parse_log_file(log_file):
            ua = entry['user_agent']
            if identify_crawler(ua):
                continue  # known search robots
            if any(tool in ua.lower() for tool in known_good):
                continue  # common CLI tools
            ua_counter[ua] += 1
    suspicious = []
    for ua, count in ua_counter.most_common(50):
        if count > 1000:  # high-volume unknown agents
            suspicious.append({'user_agent': ua, 'requests': count})
    return suspicious
Block in nginx:
map $http_user_agent $bad_bot {
    default 0;
    ~*SemrushBot 0;  # Allow
    ~*AhrefsBot 0;
    ~*MJ12bot 1;     # Block
}

server {
    if ($bad_bot) {
        return 403;
    }
}
Timeline
A one-time analysis of one month of logs (up to 5 GB), delivered with a report, takes 2–3 days. Setting up an automated pipeline (parsing → ClickHouse → Grafana dashboard) with alerts takes 4–7 days.