Merge pull request #1719 from CCOSTAN/codex/optimize-fully-kiosk-tablet

Update kiosk access and infrastructure cleanup guidance
This commit is contained in:
Carlo Costanzo
2026-04-26 13:28:11 -04:00
committed by GitHub
6 changed files with 17 additions and 16 deletions

View File

@@ -1 +1 @@
2026.4.3
2026.4.4

View File

@@ -21,6 +21,7 @@ homeassistant:
# - !secret external_ip
trusted_users:
192.168.10.17: be280a93c9d7416e98d25d0470f414be
192.168.10.65: 9543feb5402742a0ab90f301f09f1c0e
192.168.10.88: be280a93c9d7416e98d25d0470f414be
allow_bypass_login: true
- type: homeassistant

View File

@@ -7,13 +7,13 @@
# Reusable list extracted from a view for smaller diffs and safer edits.
# -------------------------------------------------------------------
# Notes: Extracted from config/dashboards/kiosk/views/01_kiosk_oveview.yaml key `sections`.
# Keep this Fire 7 kiosk view on built-in cards; avoid live camera/custom-card resources on the old WebView.
######################################################################
- type: grid
cards:
- type: picture-glance
camera_image: camera.frontdoorbell
camera_view: auto
entities: []
grid_options:
columns: 12
@@ -24,49 +24,42 @@
cards:
- type: picture-glance
camera_image: camera.garagecam
camera_view: auto
entities: []
grid_options:
columns: 6
rows: auto
- type: picture-glance
camera_image: camera.driveway
camera_view: auto
entities: []
grid_options:
columns: 6
rows: auto
- type: picture-glance
camera_image: camera.kidsgate
camera_view: auto
entities: []
grid_options:
columns: 6
rows: auto
- type: picture-glance
camera_image: camera.frontlawn
camera_view: auto
entities: []
grid_options:
columns: 6
rows: auto
- type: picture-glance
camera_image: camera.bedroomgate
camera_view: auto
entities: []
grid_options:
columns: 6
rows: auto
- type: picture-glance
camera_image: camera.filtergate
camera_view: auto
entities: []
grid_options:
columns: 6
rows: auto
- type: picture-glance
camera_image: camera.poolcam
camera_view: auto
entities: []
grid_options:
columns: 6

View File

@@ -32,8 +32,6 @@
url: /hacsfiles/power-flow-card-plus/power-flow-card-plus.js?hacstag=618081815026
- type: module
url: /hacsfiles/mini-graph-card/mini-graph-card-bundle.js?hacstag=1512800620130
- type: module
url: /hacsfiles/advanced-camera-card/advanced-camera-card.js?hacstag=3940825527260
- type: module
url: /hacsfiles/search-card/search-card.js?hacstag=19775918003
- type: module

View File

@@ -50,7 +50,7 @@ Live collection of plug-and-play Home Assistant packages. Each YAML file in this
| [github_watched_repo_scout.yaml](github_watched_repo_scout.yaml) | Nightly Joanna dispatch that reviews unread notifications from watched GitHub repos, recommends HA-config ideas, refreshes strong-candidate issues, and marks processed watched-repo notifications read. | `automation.github_watched_repo_scout_nightly`, `script.joanna_dispatch`, `script.send_to_logbook` |
| [proxmox.yaml](proxmox.yaml) | Proxmox runtime and disk pressure monitoring with Repairs + Joanna dispatch for sustained node degradations, plus nightly Frigate reboot. | `binary_sensor.proxmox*_runtime_healthy`, `sensor.proxmox*_disk_used_percentage`, `repairs.create`, `script.joanna_dispatch`, `button.qemu_docker2_101_reboot` |
| [synology_dsm.yaml](synology_dsm.yaml) | Synology DSM integration health normalization for Carlo-NAS01 and Carlo-NVR, with Repairs + Joanna dispatch on sustained integration, security, or storage problems. | `binary_sensor.carlo_*_synology_problem`, `sensor.carlo_*_synology_problem_summary`, `repairs.create`, `script.joanna_dispatch` |
| [infrastructure.yaml](infrastructure.yaml) | Normalized WAN/DNS/backup/domain/cert health, Glances-backed Docker host disk pressure, and website uptime/latency SLO signals for Infrastructure dashboards, plus nightly backup verification and monthly Joanna HA log hygiene review with GitHub issue follow-up. | `sensor.docker_*_disk_used_percentage`, `automation.docker_host_disk_pressure_monitor`, `binary_sensor.infra_website_uptime_slo_breach`, `binary_sensor.infra_website_latency_degraded`, `automation.infra_backup_nightly_verification`, `script.joanna_dispatch` |
| [infrastructure.yaml](infrastructure.yaml) | Normalized WAN/DNS/backup/domain/cert health, Glances-backed Docker host disk pressure with bounded safe Joanna cleanup, and website uptime/latency SLO signals for Infrastructure dashboards, plus nightly backup verification and monthly Joanna HA log hygiene review with GitHub issue follow-up. | `sensor.docker_*_disk_used_percentage`, `automation.docker_host_disk_pressure_monitor`, `binary_sensor.infra_website_uptime_slo_breach`, `binary_sensor.infra_website_latency_degraded`, `automation.infra_backup_nightly_verification`, `script.joanna_dispatch` |
| [onenote_indexer.yaml](onenote_indexer.yaml) | OneNote indexer health/status monitoring for Joanna, failure-repair automation, and a daily duplicate-delete maintenance request. | `sensor.onenote_indexer_last_job_status`, `binary_sensor.onenote_indexer_last_job_successful` |
| [mqtt_status.yaml](mqtt_status.yaml) | Command-line MQTT broker reachability probe with Spook Repairs escalation and Joanna troubleshooting dispatch on outage. | `binary_sensor.mqtt_status_raw`, `binary_sensor.mqtt_broker_problem`, `repairs.create`, `rest_command.bearclaw_command` |
| [mariadb.yaml](mariadb.yaml) | MariaDB recorder health and capacity snapshots with hourly live metrics, weekly admin/recorder polling, and stats-ready numeric sensors. | `sensor.mariadb_status`, `sensor.database_size` |

View File

@@ -13,6 +13,7 @@
# Notes: Monthly HA log hygiene review requests Telegram + GitHub issue follow-up only; Joanna must wait for approval before any changes.
# Notes: Numeric WAN telemetry exposes state_class so recorder can keep long-term statistics.
# Notes: Docker host root disk usage uses Glances-backed normalized sensors; raw Glances sensors are recorder/logbook-filtered.
# Notes: Disk-pressure dispatch allows bounded safe cleanup of disposable caches and old generated backup artifacts, but not live data or restarts.
######################################################################
input_text:
@@ -461,9 +462,11 @@ automation:
disk_used={{ states(used_entity) }},
threshold=90
request: >-
Investigate critical disk pressure on {{ host_name }} and recommend safe remediation.
Investigate critical disk pressure on {{ host_name }} and perform safe remediation when confidence is high.
Check Docker build cache, image/container volumes, logs, backups, and large files first.
Do not delete data, prune containers, or reboot the host unless explicitly requested.
Allowed without confirmation: clear disposable caches, remove unused build cache, and rotate or delete old generated backup artifacts when newer retained copies exist.
Do not delete live application data, remove the only copy of a backup, prune active or in-use Docker resources, stop critical services, or reboot the host without explicit approval.
Reply with resolved=true/false, action_taken, verification, and next_action_required=true/false.
- service: script.send_to_logbook
data:
topic: "DOCKER"
@@ -506,9 +509,11 @@ automation:
disk_used={{ states(used_entity) }},
threshold=80
request: >-
Investigate elevated disk usage on {{ host_name }} and recommend safe cleanup before it becomes critical.
Investigate elevated disk usage on {{ host_name }} and, when confidence is high, perform safe low-risk cleanup before it becomes critical.
Check Docker build cache, image/container volumes, logs, backups, and large files first.
Do not delete data, prune containers, or reboot the host unless explicitly requested.
Allowed without confirmation: clear disposable caches, remove unused build cache, and rotate or delete old generated backup artifacts when newer retained copies exist.
Do not delete live application data, remove the only copy of a backup, prune active or in-use Docker resources, stop critical services, or reboot the host without explicit approval.
Reply with resolved=true/false, action_taken, verification, and next_action_required=true/false.
- service: script.send_to_logbook
data:
topic: "DOCKER"
@@ -557,6 +562,10 @@ automation:
value: "normal"
- conditions: "{{ current_band == 'normal' and previous_band not in ['normal', 'warning', 'critical'] }}"
sequence:
- service: repairs.remove
continue_on_error: true
data:
issue_id: "{{ issue_id }}"
- service: input_text.set_value
target:
entity_id: "{{ band_entity }}"