---
Plannerland:
  - Query Planner:
      description: "The high-level engine that translates feature requests into a structured data retrieval strategy."
      flow:
        - Query Input:
            description: "Initial request (Online or Offline) specifying required features and input 'givens'."
        - Planning:
            description: "The core coordination layer in engine/chalkengine/planner/."
            modules:
              - Resolver Selection: "Analyzes the feature graph to find the most efficient path of resolvers to satisfy the query."
              - Operator Tree Construction: "Generates a DAG of Python-based operators representing the sequence of data operations."
              - C++ Plan Binding: "Translates the high-level Python operators into a native C++ LogicalTableNode representation via local_plan_factory.py."
        - LogicalTableNode:
            description: "The intermediate C++ representation of the query plan, serving as the common language for execution and distribution."

  - Query Executor:
      description: "The performance-critical backend that compiles logical plans and executes them via the Velox engine."
      entrypoints:
        - "[ChalkSQL, ChalkDF]":
            description: "Direct user-facing APIs that bypass the Query Planner to construct LogicalTableNode plans from SQL strings or the fluent DataFrame API."
        - LogicalTableNode:
            description: "The primary input for execution, arriving from the Query Planner or as a rewritten plan from the Metaplanner."
      execution_pipeline:
        description: "The transformation from logical intent to physical data results in libchalk/src/libchalk/chalktable/."
        modules:
          - Plan Optimization: "Applies relational algebra rewrites (e.g., filter pushdown, join reordering) to the LTN tree."
          - Velox Plan Compilation: "Maps the optimized LTN nodes to physical Velox operators and execution pipelines."
          - Velox Runtime: "Orchestrates the low-level execution of the plan against data sources, managing memory and concurrency."
        results:
          description: "The final materialized data (Arrow/RecordBatches) returned to the user."

  - Metaplanner:
      description: "The distribution engine that enables horizontal scaling by partitioning and rewriting monolithic query plans."
      input:
        - LogicalTableNode:
            description: "The monolithic logical plan that needs to be sharded or distributed."
      metaplanning_process:
        description: "Distributed transformation logic found in engine/chalkengine/metaplanner/."
        modules:
          - Analysis: "Scans the plan to identify sharding opportunities and data-driven partitioning keys."
          - Unloaded Resolvers: "Optimizes shard jobs by pre-computing expensive resolvers into Parquet 'unloads' that are scanned as simple files during sharding."
          - Autosharding: "Determines the optimal number of shards and partitions data sources based on the volume of input 'givens'."
          - Job Scheduling: "Orchestrates the execution lifecycle, ensuring unloads complete before sharded queries begin on the Job Queue."
      output:
        - LogicalTableNode:
            description: "The rewritten, sharded plan where original compute nodes are replaced with distributed scan and aggregation operations."
