New skills covering 10 categories:

- **Security & Audit**: 007 (STRIDE/PASTA/OWASP), cred-omega (secrets management)
- **AI Personas**: Karpathy, Hinton, Sutskever, LeCun (4 sub-skills), Altman, Musk, Gates, Jobs, Buffett
- **Multi-agent Orchestration**: agent-orchestrator, task-intelligence, multi-advisor
- **Code Analysis**: matematico-tao (Terence Tao-inspired mathematical code analysis)
- **Social & Messaging**: Instagram Graph API, Telegram Bot, WhatsApp Cloud API, social-orchestrator
- **Image Generation**: AI Studio (Gemini), Stability AI, ComfyUI Gateway, image-studio router
- **Brazilian Domain**: 6 auction specialist modules, 2 legal advisors, auctioneers data scraper
- **Product & Growth**: design, invention, monetization, analytics, growth engine
- **DevOps & LLM Ops**: Docker/CI-CD/AWS, RAG/embeddings/fine-tuning
- **Skill Governance**: installer, sentinel auditor, context management

Each skill includes:

- Standardized YAML frontmatter (name, description, risk, source, tags, tools)
- Structured sections (Overview, When to Use, How it Works, Best Practices)
- Python scripts and reference documentation where applicable
- Cross-platform compatibility (Claude Code, Antigravity, Cursor, Gemini CLI, Codex CLI)

Co-authored-by: ProgramadorBrasil <214873561+ProgramadorBrasil@users.noreply.github.com>
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
64 lines
2.2 KiB
Python
64 lines
2.2 KiB
Python
"""
|
|
Scraper JUCEC — Junta Comercial do Estado do Ceará
|
|
URL: https://www.jucec.ce.gov.br/leiloeiros/
|
|
Método: httpx + BeautifulSoup
|
|
"""
|
|
from __future__ import annotations
|
|
|
|
from typing import List
|
|
|
|
from .base_scraper import AbstractJuntaScraper, Leiloeiro
|
|
|
|
|
|
class JucecScraper(AbstractJuntaScraper):
    """Scraper for JUCEC — the commercial registry (Junta Comercial) of Ceará.

    Parses the public auctioneers ("leiloeiros") listing page. Records are
    extracted primarily from HTML tables, matching columns by fuzzy header
    substrings; if no table yields results, a list-item fallback captures
    raw entries with only the name populated.
    """

    estado = "CE"  # two-letter state code (Ceará)
    junta = "JUCEC"  # registry acronym
    url = "https://www.jucec.ce.gov.br/leiloeiros/"

    def _col_value(self, cells, col, frags):
        """Return the cleaned text of the first cell whose header matches.

        ``col`` maps lowercased header text to column index; ``frags`` is a
        list of substrings to look for in the header text. Returns ``None``
        when no matching column exists or its index is out of range for this
        row (rows with colspan/missing cells can be shorter than the header).
        """
        for header, idx in col.items():
            if any(frag in header for frag in frags) and idx < len(cells):
                return self.clean(cells[idx].get_text())
        return None

    async def parse_leiloeiros(self) -> List[Leiloeiro]:
        """Fetch the JUCEC listing page and return all auctioneers found.

        Returns an empty list when the page cannot be fetched. Tables with
        fewer than two rows (header only, or empty) are skipped; rows whose
        name is missing or shorter than 3 characters are treated as noise.
        """
        soup = await self.fetch_page()
        if not soup:
            return []

        results: List[Leiloeiro] = []

        for table in soup.find_all("table"):
            rows = table.find_all("tr")
            if len(rows) < 2:
                continue

            # First row is assumed to be the header; build a lowercased
            # header-text -> column-index map for fuzzy column matching.
            headers = [self.clean(th.get_text()) for th in rows[0].find_all(["th", "td"])]
            col = {(h or "").lower(): i for i, h in enumerate(headers)}

            for row in rows[1:]:
                cells = row.find_all(["td", "th"])
                if not cells:
                    continue
                # Fall back to the first cell when no "nome" column matched.
                nome = self._col_value(cells, col, ["nome"]) or self.clean(cells[0].get_text())
                if not nome or len(nome) < 3:
                    continue
                results.append(self.make_leiloeiro(
                    nome=nome,
                    matricula=self._col_value(cells, col, ["matr", "registro", "núm"]),
                    cpf_cnpj=self._col_value(cells, col, ["cpf", "cnpj"]),
                    situacao=self._col_value(cells, col, ["situ", "status"]),
                    municipio=self._col_value(cells, col, ["munic", "cidade", "fortaleza"]),
                    telefone=self._col_value(cells, col, ["tel", "fone"]),
                    email=self._col_value(cells, col, ["email"]),
                    endereco=self._col_value(cells, col, ["ender", "logr"]),
                ))

        if not results:
            # Fallback: some registries publish a plain list instead of a
            # table; capture each item's full text as the name only.
            for el in soup.select("ul li, ol li, .leiloeiro"):
                text = self.clean(el.get_text(" | "))
                if text and len(text) > 5:
                    results.append(self.make_leiloeiro(nome=text))

        return results
|