Files
hyungi_document_server/app/services/search/grounding_check.py
Hyungi Ahn b2306c3afd feat(ask): Phase 3.5b guardrails — verifier + telemetry + grounding 강화
Phase 3.5a(classifier+refusal gate+grounding) 위에 4개 Item 추가:

Item 0: ask_events telemetry 배선
- AskEvent ORM 모델 + record_ask_event() — ask_events INSERT 완성
- defense_layers에 input_snapshot(query, chunks, answer) 저장
- refused/normal 두 경로 모두 telemetry 호출

Item 3: evidence 간 numeric conflict detection
- 동일 단위 다른 숫자 → weak flag
- "이상/이하/초과/미만" threshold 표현 → skip (FP 방지)

Item 4: fabricated_number normalization 개선
- 단위 접미사 건/원 추가, 범위 표현(10~20%) 양쪽 추출
- bare number 2자리 이상만 (1자리 FP 제거)

Item 1: exaone semantic verifier (판단권 잠금 배선)
- verifier_service.py — 3s timeout, circuit breaker, severity 3단계
- direct_negation만 strong, numeric/intent→medium, 나머지→weak
- verifier strong 단독 refuse 금지 — grounding과 교차 필수
- 6-tier re-gate (4라운드 리뷰 확정)
- grounding strong 2+ OR max_score<0.2 → verifier skip

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-10 09:49:56 +09:00

220 lines
7.7 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
"""Grounding check — post-synthesis 검증 (Phase 3.5a).
Strong/weak flag 분리:
- **Strong** (→ partial 강등 or refuse): fabricated_number, intent_misalignment(important)
- **Weak** (→ confidence lower only): uncited_claim, low_overlap, intent_misalignment(generic)
Re-gate 로직 (Phase 3.5a 9라운드 토론 결과):
- strong 1개 → partial 강등
- strong 2개 이상 → refuse
- weak → confidence "low"
Intent alignment (rule-based):
- query 의 핵심 명사가 answer 에 등장하는지 확인
- "처벌" 같은 중요 키워드 누락은 strong
- "주요", "관련" 같은 generic 은 무시
"""
from __future__ import annotations

import re
from dataclasses import dataclass
from typing import TYPE_CHECKING

from core.utils import setup_logger

if TYPE_CHECKING:
    # Import for type annotations only — avoids a runtime import cycle.
    from .evidence_service import EvidenceItem

logger = setup_logger("grounding")
# "주요", "관련" 등 intent alignment 에서 제외할 generic 단어
GENERIC_TERMS = frozenset({
"주요", "관련", "내용", "정의", "기준", "방법", "설명", "개요",
"대한", "위한", "대해", "무엇", "어떤", "어떻게", "있는",
"하는", "되는", "이런", "그런", "이것", "그것",
})
@dataclass(slots=True)
class GroundingResult:
    """Outcome of the grounding check.

    Strong flags can demote the answer to partial or trigger a refuse;
    weak flags only lower the reported confidence.
    """

    # e.g. "fabricated_number:100명", "intent_misalignment:처벌"
    strong_flags: list[str]
    # e.g. "uncited_claim:...", "low_overlap:0.25", "evidence_numeric_conflict:..."
    weak_flags: list[str]
_UNIT_CHARS = r'명인개%년월일조항호세건원'
# "이상/이하/초과/미만" — threshold 표현 (numeric conflict 에서 skip 대상)
_THRESHOLD_SUFFIXES = re.compile(r'이상|이하|초과|미만')
def _extract_number_literals(text: str) -> set[str]:
"""숫자 + 단위 추출 + normalize (Phase 3.5b 개선)."""
# 1. 숫자 + 한국어 단위 접미사
raw = set(re.findall(rf'\d[\d,.]*\s*[{_UNIT_CHARS}]\w{{0,2}}', text))
# 2. 범위 표현 (10~20%, 100-200명 등) — 양쪽 숫자 각각 추출
for m in re.finditer(
rf'(\d[\d,.]*)\s*[~\-]\s*(\d[\d,.]*)\s*([{_UNIT_CHARS}])',
text,
):
raw.add(m.group(1) + m.group(3))
raw.add(m.group(2) + m.group(3))
# 3. normalize
normalized = set()
for r in raw:
normalized.add(r.strip())
num_only = re.match(r'[\d,.]+', r)
if num_only:
normalized.add(num_only.group().replace(',', ''))
# 4. 단독 숫자 (2자리 이상만 — 1자리는 오탐 과다)
for d in re.findall(r'\b(\d{2,})\b', text):
normalized.add(d)
return normalized
def _extract_content_tokens(text: str) -> set[str]:
"""한국어 2자 이상 명사 + 영어 3자 이상 단어."""
return set(re.findall(r'[가-힣]{2,}|[a-zA-Z]{3,}', text))
def _parse_number_with_unit(literal: str) -> tuple[str, str] | None:
"""숫자 리터럴에서 (digits_only, unit) 분리. 단위 없으면 None."""
m = re.match(rf'([\d,.]+)\s*([{_UNIT_CHARS}])', literal)
if not m:
return None
digits = m.group(1).replace(',', '')
unit = m.group(2)
return (digits, unit)
def _check_evidence_numeric_conflicts(evidence: list["EvidenceItem"]) -> list[str]:
"""evidence 간 숫자 충돌 감지 (Phase 3.5b). evidence >= 2 일 때만 활성.
동일 단위, 다른 숫자 → weak flag. "이상/이하/초과/미만" 포함 시 skip.
bare number 는 비교 안 함 (조항 번호 등 false positive 방지).
"""
if len(evidence) < 2:
return []
# 각 evidence 에서 단위 있는 숫자 + threshold 여부 추출
# {evidence_idx: [(digits, unit, has_threshold), ...]}
per_evidence: dict[int, list[tuple[str, str, bool]]] = {}
for idx, ev in enumerate(evidence):
nums = re.findall(
rf'\d[\d,.]*\s*[{_UNIT_CHARS}]\w{{0,4}}',
ev.span_text,
)
entries = []
for raw in nums:
parsed = _parse_number_with_unit(raw)
if not parsed:
continue
has_thr = bool(_THRESHOLD_SUFFIXES.search(raw))
entries.append((parsed[0], parsed[1], has_thr))
if entries:
per_evidence[idx] = entries
if len(per_evidence) < 2:
return []
# 단위별로 evidence 간 숫자 비교
# {unit: {digits: [evidence_idx, ...]}}
unit_map: dict[str, dict[str, list[int]]] = {}
for idx, entries in per_evidence.items():
for digits, unit, has_thr in entries:
if has_thr:
continue # threshold 표현은 skip
if unit not in unit_map:
unit_map[unit] = {}
if digits not in unit_map[unit]:
unit_map[unit][digits] = []
if idx not in unit_map[unit][digits]:
unit_map[unit][digits].append(idx)
flags: list[str] = []
for unit, digits_map in unit_map.items():
distinct_values = list(digits_map.keys())
if len(distinct_values) >= 2:
# 가장 많이 등장하는 2개 비교
top2 = sorted(distinct_values, key=lambda d: len(digits_map[d]), reverse=True)[:2]
flags.append(
f"evidence_numeric_conflict:{top2[0]}{unit}_vs_{top2[1]}{unit}"
)
return flags
def check(
    query: str,
    answer: str,
    evidence: list[EvidenceItem],
) -> GroundingResult:
    """Verify answer-vs-evidence grounding plus query intent alignment.

    Args:
        query: the user question.
        answer: the synthesized answer (may contain [n] citation markers).
        evidence: evidence items backing the answer.

    Returns:
        GroundingResult with strong flags (partial/refuse re-gate) and
        weak flags (confidence lowering only). Empty result when either
        the answer or the evidence is missing.
    """
    strong: list[str] = []
    weak: list[str] = []
    if not answer or not evidence:
        return GroundingResult([], [])
    evidence_text = " ".join(e.span_text for e in evidence)

    # ── Strong 1: fabricated number (digit equality, not substring) ──
    # Strip citation markers [n] before extracting numbers — otherwise
    # [1][2][3] would themselves be flagged as fabricated.
    answer_clean = re.sub(r'\[\d+\]', '', answer)
    answer_nums = _extract_number_literals(answer_clean)
    evidence_nums = _extract_number_literals(evidence_text)
    evidence_digits = {re.sub(r'[^\d]', '', en) for en in evidence_nums}
    evidence_digits.discard('')
    for num in answer_nums:
        digits_only = re.sub(r'[^\d]', '', num)
        if digits_only and digits_only not in evidence_digits:
            strong.append(f"fabricated_number:{num}")

    # ── Strong/Weak 2: query-answer intent alignment ──
    query_content = _extract_content_tokens(query)
    answer_content = _extract_content_tokens(answer)
    if query_content:
        missing_terms = query_content - answer_content
        # FIX: exact token-set membership flagged inflected forms as
        # missing (query "처벌" vs answer token "처벌은" → false strong).
        # Additionally require the term not to appear as a substring of
        # the raw answer before calling it an important miss.
        important_missing = [
            t for t in missing_terms
            if t not in GENERIC_TERMS and len(t) >= 2 and t not in answer
        ]
        if important_missing:
            strong.append(
                f"intent_misalignment:{','.join(important_missing[:3])}"
            )
        elif len(missing_terms) > len(query_content) * 0.5:
            # Over half the query terms absent, all generic → weak only.
            weak.append(
                f"intent_misalignment_generic:"
                f"missing({','.join(list(missing_terms)[:5])})"
            )

    # ── Weak 1: uncited claim — sentences > 20 chars with no [n] marker ──
    sentences = re.split(r'(?<=[.!?。])\s+', answer)
    for s in sentences:
        if len(s.strip()) > 20 and not re.search(r'\[\d+\]', s):
            weak.append(f"uncited_claim:{s[:40]}")

    # ── Weak: numeric conflicts between evidence items (Phase 3.5b) ──
    weak.extend(_check_evidence_numeric_conflicts(evidence))

    # ── Weak 2: content-token overlap between answer and evidence ──
    answer_tokens = _extract_content_tokens(answer)
    evidence_tokens = _extract_content_tokens(evidence_text)
    if answer_tokens:
        overlap = len(answer_tokens & evidence_tokens) / len(answer_tokens)
        if overlap < 0.4:
            weak.append(f"low_overlap:{overlap:.2f}")

    if strong or weak:
        logger.info(
            "grounding query=%r strong=%d weak=%d flags=%s",
            query[:60],
            len(strong),
            len(weak),
            ",".join(strong[:3] + weak[:3]),
        )
    return GroundingResult(strong, weak)