This guide provides comprehensive examples for extracting and processing different types of data from the Brand.dev API response. Each recipe includes ready-to-use code for Zapier’s “Code by Zapier” action.

Understanding the API Response

The Brand.dev API returns a structured JSON response with the following main sections:
{
  "status": "<string>",
  "brand": {
    "domain": "<string>",
    "title": "<string>",
    "description": "<string>",
    "slogan": "<string>",
    "colors": [...],
    "logos": [...],
    "backdrops": [...],
    "socials": [...],
    "address": {...},
    "stock": {...},
    "is_nsfw": true,
    "email": "<string>",
    "phone": "<string>",
    "industries": {...},
    "links": {...}
  },
  "code": 123
}
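
Every recipe below follows the same pattern: the raw response body is mapped into a Code by Zapier input field named inputData, parsed with json.loads, and the brand object is read from the result. A minimal sketch of that shared setup (the upstream step that fetches the response and the inputData field name are assumptions you can adapt):

import json

# Assumes the raw Brand.dev response JSON was mapped into an input field
# named "inputData" in the Code by Zapier step configuration.
parsed = json.loads(input_data.get('inputData', '{}'))

output = {
    'status': parsed.get('status', ''),
    'response_code': parsed.get('code', 0),
    'has_brand_data': bool(parsed.get('brand'))
}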

Basic Data Extraction

Extract Basic Company Information

import json

# Parse the input JSON string
input_data = json.loads(input_data.get('inputData', '{}'))

# Extract basic company information
brand = input_data.get('brand', {})
output = {
    'company_name': brand.get('title', ''),
    'domain': brand.get('domain', ''),
    'description': brand.get('description', ''),
    'slogan': brand.get('slogan', ''),
    'email': brand.get('email', ''),
    'phone': brand.get('phone', ''),
    'is_nsfw': brand.get('is_nsfw', False)
}

Extract Contact Information

import json

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

# Extract contact information
output = {
    'email': brand.get('email', ''),
    'phone': brand.get('phone', ''),
    'domain': brand.get('domain', '')
}

# Extract address information
address = brand.get('address', {})
if address:
    output.update({
        'street': address.get('street', ''),
        'city': address.get('city', ''),
        'state': address.get('state_province', ''),
        'state_code': address.get('state_code', ''),
        'country': address.get('country', ''),
        'country_code': address.get('country_code', ''),
        'postal_code': address.get('postal_code', '')
    })

Logo and Visual Assets

Extract All Logo URLs

import json

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

# Extract all logo URLs
logos = brand.get('logos', [])
logo_urls = [logo.get('url', '') for logo in logos if logo.get('url')]

output = {
    'logo_urls': logo_urls,
    'logo_count': len(logo_urls)
}

Find Best Logo for Different Use Cases

import json

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

# Initialize variables
largest_resolution = None
smallest_resolution = None
best_icon_dark = None
best_icon_light = None
best_backdrop_dark = None
best_backdrop_light = None

# Process logos
for logo in brand.get('logos', []):
    resolution = logo.get('resolution', {})
    width = resolution.get('width', 0)
    height = resolution.get('height', 0)
    area = width * height
    
    # Find largest and smallest resolutions
    if largest_resolution is None or area > largest_resolution[1]:
        largest_resolution = (logo.get('url', ''), area)
    if smallest_resolution is None or area < smallest_resolution[1]:
        smallest_resolution = (logo.get('url', ''), area)
    
    # Find best icons for dark/light modes
    if logo.get('type') == 'icon':
        if logo.get('mode') == 'dark':
            best_icon_dark = logo.get('url', '')
        elif logo.get('mode') == 'light':
            best_icon_light = logo.get('url', '')

# Process backdrops for dark/light modes
for backdrop in brand.get('backdrops', []):
    if backdrop.get('mode') == 'dark':
        best_backdrop_dark = backdrop.get('url', '')
    elif backdrop.get('mode') == 'light':
        best_backdrop_light = backdrop.get('url', '')

output = {
    'largest_logo': largest_resolution[0] if largest_resolution else '',
    'smallest_logo': smallest_resolution[0] if smallest_resolution else '',
    'best_icon_dark': best_icon_dark or '',
    'best_icon_light': best_icon_light or '',
    'best_backdrop_dark': best_backdrop_dark or '',
    'best_backdrop_light': best_backdrop_light or ''
}

Extract Logo Metadata

import json

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

logos_info = []
for logo in brand.get('logos', []):
    logo_data = {
        'url': logo.get('url', ''),
        'type': logo.get('type', ''),
        'mode': logo.get('mode', ''),
        'width': logo.get('resolution', {}).get('width', 0),
        'height': logo.get('resolution', {}).get('height', 0),
        'aspect_ratio': logo.get('resolution', {}).get('aspect_ratio', 0)
    }
    logos_info.append(logo_data)

output = {
    'logos': logos_info,
    'logo_count': len(logos_info)
}

Color Palette Extraction

Extract All Colors

import json

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

# Extract all colors
colors = brand.get('colors', [])
color_list = []
for color in colors:
    color_list.append({
        'hex': color.get('hex', ''),
        'name': color.get('name', '')
    })

output = {
    'colors': color_list,
    'color_count': len(color_list)
}

Extract Primary and Secondary Colors

import json

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

colors = brand.get('colors', [])
primary_colors = []
secondary_colors = []

for color in colors:
    color_name = color.get('name', '').lower()
    if 'primary' in color_name or 'main' in color_name:
        primary_colors.append(color.get('hex', ''))
    elif 'secondary' in color_name or 'accent' in color_name:
        secondary_colors.append(color.get('hex', ''))

output = {
    'primary_colors': primary_colors,
    'secondary_colors': secondary_colors,
    'all_colors': [color.get('hex', '') for color in colors]
}

Extract Logo Colors

import json

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

logo_colors = []
for logo in brand.get('logos', []):
    for color in logo.get('colors', []):
        logo_colors.append({
            'hex': color.get('hex', ''),
            'name': color.get('name', ''),
            'logo_url': logo.get('url', '')
        })

output = {
    'logo_colors': logo_colors,
    'unique_logo_colors': list(set([color['hex'] for color in logo_colors if color['hex']]))
}

Social Media and Website Links

Extract Social Media Links

import json

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

socials = brand.get('socials', [])
social_links = {}
for social in socials:
    social_type = social.get('type', '').lower()
    social_url = social.get('url', '')
    if social_url:
        social_links[social_type] = social_url

output = {
    'social_links': social_links,
    'facebook': social_links.get('facebook', ''),
    'twitter': social_links.get('twitter', ''),
    'instagram': social_links.get('instagram', ''),
    'linkedin': social_links.get('linkedin', ''),
    'youtube': social_links.get('youtube', '')
}

Extract Important Page Links

import json

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

links = brand.get('links', {})
output = {
    'careers': links.get('careers', ''),
    'privacy': links.get('privacy', ''),
    'terms': links.get('terms', ''),
    'contact': links.get('contact', ''),
    'blog': links.get('blog', ''),
    'pricing': links.get('pricing', '')
}

Industry and Classification

Extract Industry Information

import json

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

industries = brand.get('industries', {})
eic_industries = []
for industry in industries.get('eic', []):
    eic_industries.append({
        'industry': industry.get('industry', ''),
        'subindustry': industry.get('subindustry', '')
    })

output = {
    'industries': eic_industries,
    'primary_industry': eic_industries[0].get('industry', '') if eic_industries else '',
    'primary_subindustry': eic_industries[0].get('subindustry', '') if eic_industries else ''
}

Financial Information

Extract Stock Information

import json

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

stock = brand.get('stock', {})
output = {
    'ticker': stock.get('ticker', ''),
    'exchange': stock.get('exchange', ''),
    'has_stock_info': bool(stock.get('ticker'))
}

Complete Data Extraction

Extract Everything in One Go

import json

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

# Basic information
output = {
    'company_name': brand.get('title', ''),
    'domain': brand.get('domain', ''),
    'description': brand.get('description', ''),
    'slogan': brand.get('slogan', ''),
    'email': brand.get('email', ''),
    'phone': brand.get('phone', ''),
    'is_nsfw': brand.get('is_nsfw', False)
}

# Colors
colors = brand.get('colors', [])
output['colors'] = [{'hex': c.get('hex', ''), 'name': c.get('name', '')} for c in colors]
output['primary_color'] = colors[0].get('hex', '') if colors else ''

# Logos
logos = brand.get('logos', [])
output['logo_count'] = len(logos)
output['logo_urls'] = [logo.get('url', '') for logo in logos if logo.get('url')]

# Social media
socials = brand.get('socials', [])
output['social_links'] = {s.get('type', '').lower(): s.get('url', '') for s in socials}

# Address
address = brand.get('address', {})
if address:
    output.update({
        'address_street': address.get('street', ''),
        'address_city': address.get('city', ''),
        'address_state': address.get('state_province', ''),
        'address_country': address.get('country', ''),
        'address_postal': address.get('postal_code', '')
    })

# Stock
stock = brand.get('stock', {})
output['stock_ticker'] = stock.get('ticker', '')
output['stock_exchange'] = stock.get('exchange', '')

# Industries
industries = brand.get('industries', {}).get('eic', [])
output['industries'] = [{'industry': i.get('industry', ''), 'subindustry': i.get('subindustry', '')} for i in industries]

# Links
links = brand.get('links', {})
output.update({
    'careers_url': links.get('careers', ''),
    'privacy_url': links.get('privacy', ''),
    'terms_url': links.get('terms', ''),
    'contact_url': links.get('contact', ''),
    'blog_url': links.get('blog', ''),
    'pricing_url': links.get('pricing', '')
})

Advanced Processing

Create Brand Summary

import json

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

# Create a comprehensive brand summary
summary = f"""
Company: {brand.get('title', 'N/A')}
Domain: {brand.get('domain', 'N/A')}
Description: {brand.get('description', 'N/A')}
Slogan: {brand.get('slogan', 'N/A')}
Email: {brand.get('email', 'N/A')}
Phone: {brand.get('phone', 'N/A')}
Colors: {len(brand.get('colors', []))} colors found
Logos: {len(brand.get('logos', []))} logos found
Social Media: {len(brand.get('socials', []))} platforms
"""

# Add industry info
industries = brand.get('industries', {}).get('eic', [])
if industries:
    summary += f"Primary Industry: {industries[0].get('industry', 'N/A')}\n"

# Add stock info
stock = brand.get('stock', {})
if stock.get('ticker'):
    summary += f"Stock: {stock.get('ticker', '')} on {stock.get('exchange', '')}\n"

output = {
    'brand_summary': summary.strip(),
    'company_name': brand.get('title', ''),
    'domain': brand.get('domain', ''),
    'color_count': len(brand.get('colors', [])),
    'logo_count': len(brand.get('logos', [])),
    'social_count': len(brand.get('socials', []))
}

Validate and Clean Data

import json
import re

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

def clean_text(text):
    """Clean and normalize text data"""
    if not text:
        return ''
    return re.sub(r'\s+', ' ', str(text).strip())

def validate_email(email):
    """Basic email validation"""
    if not email:
        return False
    return '@' in email and '.' in email

def validate_url(url):
    """Basic URL validation"""
    if not url:
        return False
    return url.startswith(('http://', 'https://'))

# Clean and validate data
output = {
    'company_name': clean_text(brand.get('title', '')),
    'domain': clean_text(brand.get('domain', '')),
    'description': clean_text(brand.get('description', '')),
    'slogan': clean_text(brand.get('slogan', '')),
    'email': brand.get('email', '') if validate_email(brand.get('email', '')) else '',
    'phone': clean_text(brand.get('phone', '')),
    'is_nsfw': brand.get('is_nsfw', False),
    'data_quality': {
        'has_name': bool(brand.get('title')),
        'has_domain': bool(brand.get('domain')),
        'has_description': bool(brand.get('description')),
        'has_email': validate_email(brand.get('email', '')),
        'has_phone': bool(brand.get('phone')),
        'has_colors': len(brand.get('colors', [])) > 0,
        'has_logos': len(brand.get('logos', [])) > 0
    }
}

Error Handling

Robust Data Extraction with Error Handling

import json

try:
    input_data = json.loads(input_data.get('inputData', '{}'))
    brand = input_data.get('brand', {})
    
    # Safe data extraction with defaults
    output = {
        'company_name': brand.get('title', 'Unknown Company'),
        'domain': brand.get('domain', ''),
        'description': brand.get('description', ''),
        'slogan': brand.get('slogan', ''),
        'email': brand.get('email', ''),
        'phone': brand.get('phone', ''),
        'is_nsfw': brand.get('is_nsfw', False),
        'status': 'success'
    }
    
    # Safely extract colors
    try:
        colors = brand.get('colors', [])
        output['colors'] = [{'hex': c.get('hex', ''), 'name': c.get('name', '')} for c in colors]
        output['color_count'] = len(colors)
    except Exception as e:
        output['colors'] = []
        output['color_count'] = 0
        output['color_error'] = str(e)
    
    # Safely extract logos
    try:
        logos = brand.get('logos', [])
        output['logo_urls'] = [logo.get('url', '') for logo in logos if logo.get('url')]
        output['logo_count'] = len(output['logo_urls'])
    except Exception as e:
        output['logo_urls'] = []
        output['logo_count'] = 0
        output['logo_error'] = str(e)
        
except Exception as e:
    output = {
        'status': 'error',
        'error_message': str(e),
        'company_name': 'Error',
        'domain': '',
        'description': '',
        'slogan': '',
        'email': '',
        'phone': '',
        'is_nsfw': False
    }

Usage Tips

Best Practices

  1. Always validate input data before processing
  2. Use error handling to prevent Zap failures
  3. Extract only what you need to improve performance
  4. Test with different companies to ensure robustness
  5. Handle missing data gracefully with default values (see the sketch after this list)
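
One detail worth noting for default values: dict.get() only uses its fallback when the key is missing entirely. If the API returns an explicit null (for example, a brand with no slogan), brand.get('slogan', '') still comes back as None, which can trip up later string handling. A minimal sketch using a small helper (the safe_get name is illustrative, not part of any API):

import json

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

def safe_get(data, key, default=''):
    """Return the default when the key is missing or its value is null."""
    value = data.get(key)
    return value if value is not None else default

output = {
    'company_name': safe_get(brand, 'title'),
    'slogan': safe_get(brand, 'slogan'),
    'email': safe_get(brand, 'email'),
    'is_nsfw': safe_get(brand, 'is_nsfw', False)
}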

Performance Optimization

  1. Extract specific fields rather than everything
  2. Use list comprehensions for better performance
  3. Avoid nested loops when possible
  4. Cache frequently used data if processing multiple items (see the sketch after this list)
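
For example, when a Zap only needs a few fields, reading the nested lists into local variables once and building the output with comprehensions keeps the Code step short. A minimal sketch:

import json

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

# Cache the nested lists once instead of calling brand.get() repeatedly
colors = brand.get('colors', [])
logos = brand.get('logos', [])

# Pull only the fields this Zap actually uses
output = {
    'company_name': brand.get('title', ''),
    'hex_codes': [c.get('hex', '') for c in colors if c.get('hex')],
    'logo_urls': [logo.get('url', '') for logo in logos if logo.get('url')]
}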

Common Pitfalls

  1. Missing error handling can cause Zap failures
  2. Not validating input can lead to unexpected results
  3. Over-extracting data can slow down processing
  4. Ignoring data types can cause formatting issues (see the sketch after this list)
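
As an example of the data-type pitfall: later steps that expect plain text may not handle lists or booleans the way you intend. If a downstream field needs simple strings, flattening values explicitly avoids surprises. A minimal sketch, assuming comma-separated text and a yes/no flag are what the next step wants:

import json

input_data = json.loads(input_data.get('inputData', '{}'))
brand = input_data.get('brand', {})

hex_codes = [c.get('hex', '') for c in brand.get('colors', []) if c.get('hex')]

output = {
    # Join the list into one text field for simple mapping in later steps
    'color_list': ', '.join(hex_codes),
    # Turn the boolean into explicit text instead of True/False
    'is_nsfw': 'yes' if brand.get('is_nsfw') else 'no',
    # Force the numeric response code to a string for text fields
    'response_code': str(input_data.get('code', ''))
}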

Need help with a specific extraction? Check out our Setup Guide for configuration help or contact support for assistance.