Compare commits

...

9 commits

Author SHA1 Message Date
Nick Touran
410ccd662c Reorg some procedures. 2026-01-05 12:08:27 -05:00
Nick Touran
e69b80b52c Add alembic to manage data migrations 2026-01-05 12:05:32 -05:00
Nick Touran
8c73123862 Add postgresql persistence of data in data dict
Moved back to sqlmodel because we do need some way
of putting this info into a database.
2026-01-05 12:04:22 -05:00
Nick Touran
f7ee72a66b AMS: adjust schedule format
Add projects, add MDDL
2025-12-29 12:12:38 -05:00
Nick Touran
f13e1e2ee2 Validate doc type data during build 2025-12-29 12:11:38 -05:00
Nick Touran
373dfe4c3b Lots more Document data definition
Schedule updates:

* Defined Schedule types
* Updated schedule loader to validate with pydantic
* Added ability to specify predecessor type and lead/lag

Other structural/outline stuff as well

Oh and added a unit test.
2025-12-19 14:15:07 -05:00
Nick Touran
36fcb5f260 Add initial data dict 2025-12-12 09:41:25 -05:00
Nick Touran
fb28c6c5c5 [General] Add API docs and pydantic for Data Dict
Fix schedule for latex
2025-12-12 09:40:02 -05:00
Nick Touran
e474c140ee Some AMS specific stuff, and plant work 2025-12-01 17:04:45 -05:00
70 changed files with 3270 additions and 209 deletions

7
.gitignore vendored
@@ -1,4 +1,7 @@
+# project stuff
+documents/api
 **/generated_assets/
 # Typical Python stuff:
 # Byte-compiled / optimized / DLL files
 __pycache__/
@@ -160,3 +163,7 @@ cython_debug/
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 #.idea/
+
+# Sphinx
+**/_build
+**/_generated

18
.vscode/settings.json vendored
@@ -5,4 +5,22 @@
"source.organizeImports": "explicit" "source.organizeImports": "explicit"
} }
}, },
"files.watcherExclude": {
"**/_build/**": true,
"**/__pycache__": true
},
"search.exclude": {
"**/__pycache__": true,
"**/.pytest_cache": true,
"**/.mypy_cache": true,
"**/build": true,
"**/dist": true,
"**/_build": true,
"**/_build/**": true
},
"files.exclude": {
"**/_build": true,
"**/*.egg-info": true,
"**/__pycache__": true
}
} }

147
alembic.ini Normal file
@@ -0,0 +1,147 @@
# A generic, single database configuration.
[alembic]
# path to migration scripts.
# this is typically a path given in POSIX (e.g. forward slashes)
# format, relative to the token %(here)s which refers to the location of this
# ini file
script_location = %(here)s/alembic
# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
# Uncomment the line below if you want the files to be prepended with date and time
# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
# for all available tokens
# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
# sys.path path, will be prepended to sys.path if present.
# defaults to the current working directory. for multiple paths, the path separator
# is defined by "path_separator" below.
prepend_sys_path = .
# timezone to use when rendering the date within the migration file
# as well as the filename.
# If specified, requires the tzdata library which can be installed by adding
# `alembic[tz]` to the pip requirements.
# string value is passed to ZoneInfo()
# leave blank for localtime
# timezone =
# max length of characters to apply to the "slug" field
# truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; This defaults
# to <script_location>/versions. When using multiple version
# directories, initial revisions must be specified with --version-path.
# The path separator used here should be the separator specified by "path_separator"
# below.
# version_locations = %(here)s/bar:%(here)s/bat:%(here)s/alembic/versions
# path_separator; This indicates what character is used to split lists of file
# paths, including version_locations and prepend_sys_path within configparser
# files such as alembic.ini.
# The default rendered in new alembic.ini files is "os", which uses os.pathsep
# to provide os-dependent path splitting.
#
# Note that in order to support legacy alembic.ini files, this default does NOT
# take place if path_separator is not present in alembic.ini. If this
# option is omitted entirely, fallback logic is as follows:
#
# 1. Parsing of the version_locations option falls back to using the legacy
# "version_path_separator" key, which if absent then falls back to the legacy
# behavior of splitting on spaces and/or commas.
# 2. Parsing of the prepend_sys_path option falls back to the legacy
# behavior of splitting on spaces, commas, or colons.
#
# Valid values for path_separator are:
#
# path_separator = :
# path_separator = ;
# path_separator = space
# path_separator = newline
#
# Use os.pathsep. Default configuration used for new projects.
path_separator = os
# set to 'true' to search source files recursively
# in each "version_locations" directory
# new in Alembic version 1.10
# recursive_version_locations = false
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
# database URL. This is consumed by the user-maintained env.py script only.
# other means of configuring database URLs may be customized within the env.py
# file.
sqlalchemy.url = driver://user:pass@localhost/dbname
[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples
# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks = black
# black.type = console_scripts
# black.entrypoint = black
# black.options = -l 79 REVISION_SCRIPT_FILENAME
# lint with attempts to fix using "ruff" - use the module runner, against the "ruff" module
# hooks = ruff
# ruff.type = module
# ruff.module = ruff
# ruff.options = check --fix REVISION_SCRIPT_FILENAME
# Alternatively, use the exec runner to execute a binary found on your PATH
# hooks = ruff
# ruff.type = exec
# ruff.executable = ruff
# ruff.options = check --fix REVISION_SCRIPT_FILENAME
# Logging configuration. This is also consumed by the user-maintained
# env.py script only.
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARNING
handlers = console
qualname =
[logger_sqlalchemy]
level = WARNING
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

1
alembic/README Normal file
@@ -0,0 +1 @@
pyproject configuration, based on the generic configuration.

80
alembic/env.py Normal file
@@ -0,0 +1,80 @@
from logging.config import fileConfig
from sqlalchemy import engine_from_config, pool
from sqlmodel import SQLModel
from alembic import context
from nrsk.db import get_engine
from nrsk.models import Document, User
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name is not None:
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# allow autogeneration of models
target_metadata = SQLModel.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline() -> None:
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online() -> None:
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# connectable = engine_from_config(
# config.get_section(config.config_ini_section, {}),
# prefix="sqlalchemy.",
# poolclass=pool.NullPool,
# )
connectable = get_engine()
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
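A minimal sketch of driving these migrations programmatically (the same thing the ``alembic`` CLI does), assuming it is run from the repository root where the new ``alembic.ini`` lives::

   # Sketch: invoke the migrations defined above via Alembic's command API.
   # Assumes the working directory contains the alembic.ini added in this diff.
   from alembic import command
   from alembic.config import Config

   cfg = Config("alembic.ini")

   # Autogenerate a revision by diffing SQLModel.metadata (see env.py above)
   # against the live database, then apply all pending revisions.
   command.revision(cfg, message="describe the change", autogenerate=True)
   command.upgrade(cfg, "head")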

29
alembic/script.py.mako Normal file
@@ -0,0 +1,29 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
import sqlmodel
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision: str = ${repr(up_revision)}
down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)}
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
def upgrade() -> None:
"""Upgrade schema."""
${upgrades if upgrades else "pass"}
def downgrade() -> None:
"""Downgrade schema."""
${downgrades if downgrades else "pass"}

alembic/versions/3791144a7ad2_initial_setup.py Normal file

@@ -0,0 +1,192 @@
"""initial_setup
Revision ID: 3791144a7ad2
Revises:
Create Date: 2026-01-05 10:24:53.993818
"""
from typing import Sequence, Union
import sqlalchemy as sa
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "3791144a7ad2"
down_revision: Union[str, Sequence[str], None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
"""Upgrade schema."""
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"informationtype",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("abbrev", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("examples", sa.JSON(), nullable=True),
sa.Column("description", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("retention", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("record", sa.Boolean(), nullable=False),
sa.Column("use_cases", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("notes", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("parent_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["parent_id"],
["informationtype.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"organization",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("abbreviation", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("website", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("is_active", sa.Boolean(), nullable=False),
sa.Column("parent_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["parent_id"],
["organization.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_organization_abbreviation"),
"organization",
["abbreviation"],
unique=False,
)
op.create_index(
op.f("ix_organization_name"), "organization", ["name"], unique=False
)
op.create_table(
"user",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("given_name", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("family_name", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("preferred_name", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("previous_name", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("email", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("joined_on", sa.DateTime(), nullable=True),
sa.Column("deactivated_on", sa.DateTime(), nullable=True),
sa.Column("organization", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("title", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"document",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("number", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("title", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("revision", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("originating_organization_id", sa.Integer(), nullable=True),
sa.Column(
"originator_number", sqlmodel.sql.sqltypes.AutoString(), nullable=True
),
sa.Column(
"originator_revision", sqlmodel.sql.sqltypes.AutoString(), nullable=True
),
sa.Column("type_id", sa.Integer(), nullable=False),
sa.Column(
"revision_comment", sqlmodel.sql.sqltypes.AutoString(), nullable=True
),
sa.Column(
"status",
sa.Enum(
"RESERVED",
"IN_PROGRESS",
"IN_REVIEW",
"REJECTED",
"AUTHORIZED",
"REFERENCE",
"NATIVE",
"APPROVED",
"QUARANTINED",
"SUPERSEDED",
"REVISED",
"VOIDED",
"CLOSED",
name="status",
),
nullable=False,
),
sa.Column(
"usage",
sa.Enum(
"FOR_INFORMATION",
"FOR_STAGE_APPROVAL",
"FOR_BID",
"FOR_CONSTRUCTION",
"FOR_OPERATION",
"AS_BUILT",
name="usage",
),
nullable=False,
),
sa.Column(
"retention_plan", sa.Enum("LIFETIME", name="retention"), nullable=False
),
sa.Column(
"restriction_codes", sqlmodel.sql.sqltypes.AutoString(), nullable=False
),
sa.Column("actual_reviewed_date", sa.DateTime(), nullable=True),
sa.Column("actual_approved_date", sa.DateTime(), nullable=True),
sa.Column(
"filenames", sa.JSON(), server_default=sa.text("'[]'"), nullable=False
),
sa.Column(
"file_notes", sa.JSON(), server_default=sa.text("'[]'"), nullable=False
),
sa.Column(
"checksums", sa.JSON(), server_default=sa.text("'[]'"), nullable=False
),
sa.Column(
"physical_location", sqlmodel.sql.sqltypes.AutoString(), nullable=True
),
sa.Column("notes", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.ForeignKeyConstraint(
["originating_organization_id"],
["organization.id"],
),
sa.ForeignKeyConstraint(
["type_id"],
["informationtype.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"documentuserlink",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("position", sa.Integer(), nullable=False),
sa.Column("role_note", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("document_id", sa.Integer(), nullable=False),
sa.Column("user_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["document_id"],
["document.id"],
),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("id", "document_id", "user_id"),
)
# ### end Alembic commands ###
def downgrade() -> None:
"""Downgrade schema."""
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("documentuserlink")
op.drop_table("document")
op.drop_table("user")
op.drop_index(op.f("ix_organization_name"), table_name="organization")
op.drop_index(op.f("ix_organization_abbreviation"), table_name="organization")
op.drop_table("organization")
op.drop_table("informationtype")
# ### end Alembic commands ###

90
documents/AMS.bib Normal file
@@ -0,0 +1,90 @@
@techreport{barrieroInformationManagementProcess2010,
title = {Information {{Management Process Description Guideline}}},
author = {Barriero, Amy},
year = 2010,
number = {PDG01-2010},
institution = {NIRMA},
url = {https://international.anl.gov/training/materials/6H/Gilbert/PDG02%20Documents%20and%20Records%20Process%20Description.pdf},
langid = {american},
file = {/pool/Reading/Nuclear/institutions/nirma/PDG01 Information Management Process Description.pdf}
}
@misc{cahillDesignPhilosophyBrief2025,
title = {Design {{Philosophy Brief}}},
author = {Cahill, William},
year = 2025,
publisher = {AMS},
url = {https://maritimesai.kiteworks.com/#/file/8b92c7cb-4444-4a3e-aba6-fdf328f7d2f8?currentPage=1}
}
@techreport{cloverDocumentControlRecords2010,
title = {Document {{Control}} and {{Records Management Process Description}}},
author = {Clover, Bill},
year = 2010,
number = {PDG02-2010},
institution = {NIRMA},
url = {https://international.anl.gov/training/materials/6H/Gilbert/PDG02%20Documents%20and%20Records%20Process%20Description.pdf},
langid = {american},
file = {/pool/Reading/Nuclear/institutions/nirma/PDG02 Documents and Records Process Description.pdf}
}
@techreport{fleerReactorTechnologyStudy25,
title = {Reactor {{Technology Study}}},
author = {Fleer, D and Edens, A and Ciocco, S and Jacqueline, K},
year = 2025,
month = nov,
number = {B4M-ES-121043},
institution = {BWXT},
url = {https://kiteworks.bwxt.com/web/file/416b69b9-4c5c-44c9-9605-40a25e181493?currentPage=1},
copyright = {Export Controlled},
file = {/home/nick/pool/Users/Nick/Documents/2025/What is Nuclear LLC/jobs/Marine/AMS docs/B4M-ES-121043_Rev001.pdf}
}
@techreport{halpinInformationManagementNuclear1978d,
title = {Information Management for Nuclear Power Stations: Project Description},
shorttitle = {Information Management for Nuclear Power Stations},
author = {Halpin, D. W.},
year = 1978,
month = mar,
number = {ORO-5270-1},
institution = {Georgia Inst. of Tech., Atlanta (USA). School of Civil Engineering},
doi = {10.2172/6543303},
url = {https://www.osti.gov/biblio/6543303},
abstract = {A study of the information management structure required to support nuclear power plant construction was performed by a joint university-industry group under the sponsorship of the Department of Energy (DOE), formerly the Energy Research and Development Administration (ERDA). The purpose of this study was (1) to study methods for the control of information during the construction and start-up of nuclear power plants, and (2) identify those data elements intrinsic to nuclear power plants which must be maintained in a structured format for quick access and retrieval. Maintenance of the massive amount of data needed for control of a nuclear project during design, procurement, construction, start-up/testing, and operational phases requires a structuring which allows immediate update and retrieval based on a wide variety of access criteria. The objective of the research described has been to identify design concepts which support the development of an information control system responsive to these requirements. A conceptual design of a Management Information Data Base System which can meet the project control and information exchange needs of today's large nuclear power plant construction projects has been completed and an approach recommended for development and implementation of a complete operational system.},
langid = {english},
file = {/pool/Reading/Nuclear/process/configuration management/Information Management for Nuclear Power Stations 1978/Halpin - 1978 - Information management for nuclear power stations project description.pdf}
}
@misc{imoCodeSafetyNuclear1982,
title = {Code of {{Safety}} for {{Nuclear Merchant Ships}}},
author = {IMO},
year = 1982,
month = jun,
number = {A XII/Res.491},
publisher = {International Maritime Organization},
url = {https://wwwcdn.imo.org/localresources/en/KnowledgeCentre/IndexofIMOResolutions/AssemblyDocuments/A.491(12).pdf}
}
@techreport{renuartAdvancedNuclearTechnology2014,
title = {Advanced {{Nuclear Technology}}: {{Data-Centric Configuration Management}} for {{Efficiency}} and {{Cost Reduction}}: {{An Economic Basis}} for {{Implementation}}},
author = {Renuart, R.},
year = 2014,
month = dec,
number = {3002003126},
pages = {170},
institution = {EPRI},
url = {https://www.epri.com/research/products/3002003126},
abstract = {The Electric Power Research Institute (EPRI) Advanced Nuclear Technology (ANT) Program has been working on defining the tools that can be a part of an effective configuration management (CM) system. This includes the potential use of modern digital data management tools that can be useful not only across the plant life cycle, including engineering, procurement, construction (EPC), and decommissioning, but also for the management of plant configuration—control of the licensing basis, plant operation, and input and control of many plant programs.},
langid = {american},
file = {/home/nick/pool/Reading/Nuclear/process/configuration management/Advanced Nuclear Technology: Data-Centric Configuration Management for Efficiency and Cost Reduction 000000003002003126.pdf}
}
@misc{SQLModel,
title = {{{SQLModel}}},
url = {https://sqlmodel.tiangolo.com/},
abstract = {SQLModel, SQL databases in Python, designed for simplicity, compatibility, and robustness.},
langid = {english},
file = {/home/nick/Zotero/storage/MA9HAJ52/sqlmodel.tiangolo.com.html}
}

documents/_data/doc-types.yaml Normal file

@@ -0,0 +1,135 @@
# Many of these came from cloverDocumentControlRecords2010 originally.
# Other sources include IEC 61355 https://en.wikipedia.org/wiki/IEC_61355
# and ANSI-N45-2-9-74 https://jetsquality.com/wp-content/uploads/2019/01/ANSI-N45-2-9-74.pdf,
# which has >100 record types nicely sorted into use-case categories (Design, Procurement, Manufacture, ...).
# I like these the best, but maybe they can be compressed by considering USAGE.
- name: Calculation
abbrev: CALC
use_cases: Documenting an analysis
record: True
retention:
- name: Design Report
abbrev: DREP
use_cases: Documenting a design
record: True
retention:
- name: Design Review Report
abbrev: DREV
use_cases: Documenting the review of a design
record: True
retention:
- name: System Design Description
abbrev: SDD
use_cases: Describing a system
record: True
retention:
- name: Correspondence
abbrev: CSP
use_cases: Communications
record: False
retention:
- name: Drawing
abbrev: DRW
use_cases: Describing SSCs, includes many engineering deliverables
record: True
retention:
- name: Engineering Change Package
abbrev: ECP
use_cases: Describing a formal change to the system configuration
record: True
retention:
- name: Equipment Data Sheets
abbrev: EDS
use_cases: Define technical requirements and operating boundaries
record: True
retention:
- name: Environmental Qualification Package
abbrev: EQP
use_cases: >
Documents describing environmental qualifications of equipment such as
lab reports, thermal aging analyses, radiation resistance data supporting
10 CFR 50.49
record: True
retention:
- name: Form
abbrev: FORM
use_cases: A reusable starting point for other Documents/Records, or for collecting data
record: False
retention: Lifetime
notes: Forms are blank documents.
- name: Instructions
abbrev: INSTR
use_cases: Explanations of how to use systems or equipment
record: True
retention:
- name: Native File
abbrev: NTV
use_cases: A native file, i.e. one from proprietary authoring software
record: False
notes: >
Native files are kept for ease of revision. They may also be kept as an
additional file attachment alongside the document/record.
- name: Policy
abbrev: POL
use_cases: A policy
record: True
- name: Business Practice/Desk Guide
abbrev: BPDG
record: False
- name: Procedure
abbrev: PROC
use_cases: Defining and dictating how work is done
record: True
retention: Lifetime
- name: Procurement
abbrev: PCMT
use_cases: Related to purchases
record: True
retention: Lifetime
- name: Program Manual/Plan
abbrev: PMAN
use_cases: >
High-level governance documents that describe how the plant will manage
a specific program area (e.g., Radiation Protection, In-Service
Inspection, or Fire Protection).
record: True
retention: Lifetime
- name: Quality Classification List
abbrev: QLST
use_cases: Categorizes every SSC based on importance to safety
record: True
retention:
- name: Radiation Protection Survey
abbrev: RPS
record: True
retention:
- name: Records Transmittal Instructions/Indexing Guide
abbrev: RTI
record: True
retention:
- name: Regulatory Documents
abbrev: REG
use_cases: Safety Analysis Report, Technical Specifications, etc.
record: True
retention:
- name: Setpoints
abbrev: SET
record: True
retention:
- name: Specifications
abbrev: SPEC
record: True
retention:
- name: Training
abbrev: TRN
record: True
retention:
- name: Vendor Drawings
abbrev: VDRW
record: True
retention:
- name: Vendor Information
abbrev: VNFO
record: True
retention:
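Entries like the above lend themselves to schema validation at build time; ``conf.py`` later in this diff wires a ``validate_doc_types`` hook whose internals are not shown. A plausible sketch with pydantic, assuming field names that mirror the YAML::

   # Sketch only: one plausible pydantic schema for the entries above.
   # The project's real validator (nrsk.documents.validate) is not shown here.
   from typing import Optional

   import yaml  # PyYAML, assumed available
   from pydantic import BaseModel


   class DocType(BaseModel):
       name: str
       abbrev: str
       record: bool
       use_cases: Optional[str] = None
       retention: Optional[str] = None
       notes: Optional[str] = None


   def load_doc_types(path: str) -> list[DocType]:
       """Parse and validate every entry, failing loudly on bad data."""
       with open(path) as f:
           raw = yaml.safe_load(f)
       return [DocType(**entry) for entry in raw]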

documents/_data/it-systems.yaml

@@ -1,10 +1,14 @@
----
-rmdc-systems:
+# This file contains a listing of specific software systems
+# used to implement information management. Data from this file
+# is brought into procedures as appropriate.
+RMDC:
 - name: NukeVault
   description: Specialized commercial records management system
-  use-cases: Storing Documents and Records generated during design of Project X
+  use_cases: Storing Documents and Records generated during design of Project X
   location: https://nukevault.opennucleonics.org
 - name: Supplier Portal
   description: A place where our suppliers can get documents
-  use-cases: External suppliers send documents/records to us
+  use_cases: External suppliers send documents/records to us
   location: Online
+Data Management:
+  - name: Data Dictionary

@@ -0,0 +1,16 @@
- name: Power Range
val_low: 0.750
val_high: 16
units: MWe
req: R_AMS_1
tags: [electrical]
- name: Backbone Voltage
val: 3.6
units: kVAC
req: R_AMS_2
tags: [electrical]
- name: Backbone Frequency
val: 60
units: Hz
req: R_AMS_2
tags: [electrical]

@@ -0,0 +1,193 @@
- name: "TTP"
color: "#1f77b4"
tasks:
- name: "Ship Design Cycle 1"
duration_days: 120
predecessors:
- id: RX0
id: TTP1
- name: "Nuke-Ready Guide"
is_milestone: true
predecessors:
- id: TTP1
- name: "Ship Design Cycle 2"
id: TTP2
predecessors:
- id: TTP1
- id: RX1
duration_days: 120
- name: "RFI (non-stealth)"
is_milestone: true
predecessors:
- id: TTP2
- name: "Ship Design Cycle 3"
id: TTP3
predecessors:
- id: TTP2
duration_days: 120
- name: "RFP"
is_milestone: true
predecessors:
- id: TTP3
- name: "Evaluate Proposals"
id: TTP4
duration_days: 90
predecessors:
- id: TTP3
- name: "Ship Award"
is_milestone: true
predecessors:
- id: TTP4
- name: "Detailed Design & Construction"
id: TTP5
duration_days: 900
predecessors:
- id: TTP4
- name: "TTP Ship Delivery"
is_milestone: true
predecessors:
- id: TTP5
- name: "Test/Trials/Acceptance"
id: TTP6
duration_days: 90
predecessors:
- id: TTP5
- name: "Transit to Homeport"
id: TTP7
duration_days: 30
predecessors:
- id: TTP6
- name: "NEPP Integration"
id: TTP8
duration_days: 60
predecessors:
- id: TTP7
- id: RX5
- id: SY7
- name: "Hot plant testing"
id: TTP9
duration_days: 180
predecessors:
- id: TTP8
- id: SY7
- name: "Shipyard"
color: "#ff7f0e"
tasks:
- name: "Shipyard conceptual design"
id: SY1
start: 2026-01-01
duration_days: 120
predecessors:
- id: RX0
- name: "Real Estate Purchase"
id: SY2
predecessors:
- id: SY1
duration_days: 270
- name: "Shipyard Design Ph 1"
duration_days: 540
id: SY3
predecessors:
- id: SY1
- id: RX1
- name: "Regulatory review"
duration_days: 270
id: SY4
predecessors:
- id: SY1
- name: "Reg Approval 1"
is_milestone: true
predecessors:
- id: SY4
- name: "Shipyard Design Ph 2"
id: SY5
duration_days: 365
predecessors:
- id: SY3
- name: "Reg Approval 2"
is_milestone: true
predecessors:
- id: SY5
- name: "Shipyard Construction Ph 1"
id: SY6
duration_days: 635
predecessors:
- id: SY2
- name: "Shipyard License"
is_milestone: true
predecessors:
- id: SY6
- name: "Shipyard Construction Ph 2"
id: SY7
duration_days: 270
predecessors:
- id: SY6
- name: "Reactor"
color: "#2ca02c"
tasks:
- name: "Rx concept design cycle 1"
id: RX0
duration_days: 50
- name: "Rx concept design cycle 2"
id: RX1
duration_days: 100
predecessors:
- id: RX0
- name: "Rx concept design cycle 3"
id: RX15
duration_days: 300
predecessors:
- id: RX1
- name: "Reactor prelim design"
id: RX2
duration_days: 360
predecessors:
- id: RX15
- name: "Reactor detailed design"
id: RX4
duration_days: 500
predecessors:
- id: RX2
- name: "Reactor manufacturing"
id: RX5
duration_days: 480
predecessors:
- id: RX3
- name: "Reactor License"
tasks:
- name: "Pre-licensing activities"
predecessors:
- id: RX1
- id: RX15
type: FF
duration_days: 450
id: LI1
- name: "Submit Construction Permit Application"
is_milestone: true
predecessors:
- id: RX2
- id: LI1
- name: "CP review by NRC"
id: RX3
duration_days: 450
predecessors:
- id: RX2
- name: "Receive Construction Permit"
is_milestone: true
predecessors:
- id: RX3
- name: "OL review by NRC"
# TODO: should end at end of hot plant testing
predecessors:
- id: RX4
duration_days: 365
- name: "Receive Operating License"
is_milestone: true
predecessors:
- id: RX5
- id: TTP9
# - name: "Fuel Handling License"
# tasks
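Per commit 373dfe4c3b, this format is validated with pydantic; the real models in ``nrsk.schedule`` are not shown in this diff. A sketch of a matching schema, assuming a predecessor ``type`` vocabulary of FS/SS/FF/SF with a finish-start default::

   # Sketch: pydantic models mirroring the YAML fields above. Defaults and
   # the FS/SS/FF/SF vocabulary are assumptions; lead/lag fields are omitted.
   from datetime import date
   from typing import Literal, Optional

   from pydantic import BaseModel


   class Predecessor(BaseModel):
       id: str
       type: Literal["FS", "SS", "FF", "SF"] = "FS"


   class Task(BaseModel):
       name: str
       id: Optional[str] = None
       start: Optional[date] = None
       duration_days: int = 0
       is_milestone: bool = False
       predecessors: list[Predecessor] = []


   class TaskGroup(BaseModel):
       name: str
       color: Optional[str] = None
       tasks: list[Task]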

@@ -12,3 +12,9 @@ html[data-theme="dark"] {
   --pst-color-primary: rgb(115, 199, 164);
   --pst-color-secondary: rgb(21, 197, 124);
 }
+
+/* Additional customizations for nrsk */
+dl.py.attribute {
+  /* Reduce the space between autodoc attrs */
+  margin-bottom: 0.5em !important;
+}

Binary file not shown (new image, 128 KiB).

@@ -0,0 +1,27 @@
.. Sadly can't get this to work with the same TOC level in additional info.
{{ data['name'] }} ({{ data['abbrev']}})
{{ "=" * (data['name'] | length + data['abbrev'] | length + 3) }}
Abbrev: {{data['abbrev']}}
{{ data['desc'] }}
Functions
---------
{% for item in data['functions'] %}
.. req:: {{item}}
:id: R_{{data['abbrev']}}_{{loop.index}}
{% endfor %}
Parameters
----------
{% if data['params'] %}
{{ make_list_table_from_mappings(
[('Parameter', 'name'), ('Value', 'val'), ('Tags', 'tags')],
data['params'],
title='System Parameters',
) }}
{% endif %}

documents/conf.py

@@ -10,13 +10,18 @@
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 #
-# import os
-# import sys
-# sys.path.insert(0, os.path.abspath('.'))
 import datetime
+import os
+import sys
+
+SRC = os.path.abspath("../src")
+sys.path.insert(0, SRC)
+
+from nrsk.documents.validate import validate_doc_types

 # -- Project information -----------------------------------------------------
-company_name = "Open Nucleonics"
+company_name = "Applied Maritime Sciences, LLC"
+project_name = "Project 1959"
 project = f"{company_name} Governing Documents"
 author = company_name
 release = "1.0"
@@ -39,11 +44,19 @@ extensions = [
     "sphinx.ext.imgmath",
     "sphinxcontrib.datatemplates",
     "sphinxcontrib.mermaid",
+    "sphinxcontrib.apidoc",
     "sphinx.ext.graphviz",
     # "sphinx.ext.imgconverter",  # SVG to png but rasterizes and bad
     "sphinxcontrib.inkscapeconverter",  # SVG to pdf without rasterizing
     "sphinx_timeline",
+    "sphinx.ext.autodoc",
+    "sphinx.ext.napoleon",
+    "sphinx_autodoc_typehints",
+    "sphinx.ext.autosummary",
+    "sphinxcontrib.autodoc_pydantic",
+    "sphinx.ext.intersphinx",
     "nrsk.schedule.load_schedule",
+    "nrsk.plant.plant_data_table",
 ]

 # Add any paths that contain templates here, relative to this directory.
@@ -84,13 +97,35 @@ html_css_files = [
 # https://sphinx-needs.readthedocs.io/en/latest/installation.html#plantuml-support
 plantuml = "java -Djava.awt.headless=true -jar /usr/share/plantuml/plantuml.jar"
+latex_engine = "xelatex"
+latex_elements = {
+    # "fontenc": r"\usepackage[T2A]{fontenc}",
+    # "babel": r"\usepackage[english,russian]{babel}",
+    # "fontpkg": r"""
+    # \setmainfont{DejaVu Serif}
+    # \setsansfont{DejaVu Sans}
+    # \setmonofont{DejaVu Sans Mono}
+    # """,
+    "figure_align": "H",
+    "extraclassoptions": "openany",
+    # '\makeatletter\@openrightfalse\makeatother'
+    "extrapackages": r"""
+\usepackage{fancyhdr}
+\usepackage{etoolbox}
+\usepackage{pdflscape}
+\usepackage{tabulary}
+""",
+    "preamble": r"""
+\AtBeginEnvironment{figure}{\pretocmd{\hyperlink}{\protect}{}{}}
+""",
+}
 # LaTeX document generation options
 # doesn't work with sphinx-needs
 latex_documents = [
     (
         "index",
-        "nrsk.tex",
-        "Nuclear Reactor Starter Kit",
+        "ams.tex",
+        "AMS Docs",
         author,
         "manual",
         False,
@@ -123,6 +158,7 @@ latex_documents = [
 # ]

 rst_prolog = f"""
 .. |inst| replace:: **{company_name}**
+.. |project| replace:: **{project_name}**
 """

 # will need to move relevant refs somewhere
@@ -138,3 +174,34 @@ mermaid_version = "10.6.1"
 # Sphinx Needs config
 needs_include_needs = True  # turn off to hide all needs (e.g. for working docs)
 needs_extra_options = ["basis"]
+
+autodoc_typehints = "description"
+autodoc_typehints_description_target = "all"
+autodoc_default_options = {
+    "members": True,
+    "private-members": False,
+    "undoc-members": True,
+    "ignore-module-all": True,
+}
+autodoc_member_order = "bysource"
+apidoc_module_dir = SRC
+apidoc_module_first = True
+apidoc_output_dir = "api"
+apidoc_separate_modules = True
+autodoc_pydantic_model_show_field_summary = True
+autodoc_pydantic_model_show_validator_summary = True
+autodoc_pydantic_field_doc_policy = "both"
+autodoc_pydantic_field_docutils_summary = True
+set_type_checking_flag = True
+intersphinx_mapping = {
+    "pydantic": ("https://docs.pydantic.dev/latest", None),
+    "python": ("https://docs.python.org/3", None),
+}
+
+
+def setup(app):
+    app.connect("builder-inited", validate_doc_types)

documents/glossary.rst

@@ -10,6 +10,39 @@ Glossary
 .. glossary::

+   Configuration Management
+      The process of identifying and documenting the characteristics of a
+      facility's structures, systems and components (including computer
+      systems and software), and of ensuring that changes to these
+      characteristics are properly incorporated into the facility
+      documentation. :cite:p:`agencyInformationTechnologyNuclear2010`
+
+   Controlled document
+      Documents whose content is maintained uniform among the copies by an
+      administrative control system. The goal of controlling documents is to
+      ensure that work is performed using approved current information, not
+      obsolete information. Important documents to be controlled are uniquely
+      identified (including revision number, date, and specific copy number),
+      and distribution is formally controlled. Revisions to controlled
+      documents are uniquely tracked and implemented, including mandatory page
+      replacements and receipt acknowledgment. Controlled documents typically
+      include procedures for operations, surveillance, and maintenance, and
+      safety basis documents such as the SAR, and hazard and accident
+      analyses. :cite:p:`agencyInformationTechnologyNuclear2010`
+
+   Design basis
+      The range of conditions and events taken explicitly into account in the
+      design of a facility, according to established criteria, such that the
+      facility can withstand them without exceeding authorized limits by the
+      planned operation of safety systems.
+      :cite:p:`agencyInformationTechnologyNuclear2010`
+
+   Design control
+      Measures established to ensure that the information from design input
+      and design process documents for structures, systems, and components is
+      correctly translated into the final design.
+      :cite:p:`agencyInformationTechnologyNuclear2010`
+
    Document
       A written collection of information, instructions, drawings,
       specifications, etc. that is *maintained* throughout the
@@ -17,6 +50,10 @@ Glossary
       *record* in that it is expected to be *maintained* by revisions as
       needed. See :need:`R_APPB_45`

+   Electronic Document Management System
+   EDMS
+      A computerized system that holds and distributes records and documents
+
    Record
       A written collection of information providing evidence of
       work that was done at a specific time. Records are expected
@@ -24,4 +61,8 @@ Glossary
       for the lifetime of the plant or for a given number of years.
       See :need:`R_APPB_79`

+   Records Management and Document Control
+   RMDC
+      Group responsible for managing project records and documents

documents/index.rst

@@ -3,16 +3,18 @@
 .. toctree::
    :maxdepth: 2
-   :numbered:
+   :numbered: 3
    :caption: Contents:

    purpose/index
    organization/index
    procedures/index
+   plant/index
    project/index
    bibliography
    requirements/index
    glossary
+   api/nrsk

@@ -0,0 +1,12 @@
Chemical Addition System
========================
.. datatemplate:yaml:: index.yaml
:template: system.tmpl
Additional info
---------------
.. req:: Contain stuff
.. req:: Do more stuff

@@ -0,0 +1,9 @@
---
name: Chemical Addition System
abbrev: CA
safety related: false
functions:
- Preparing, storing, and transferring solutions of lithium
hydroxide (7LiOH) to maintain reactor coolant pH, and hydrazine (N2H4)
to scavenge oxygen from the reactor coolant at low temperatures
notes: That's enriched lithium-7

@@ -0,0 +1,13 @@
name: Component Cooling Water System
abbrev: CCW
functions:
- >
Transfer heat from the following components to the RPSW system by CCW
heat exchangers during normal operation, scheduled and unscheduled
shutdowns, including hot and cold maintenance, and during refueling.
* Decay heat removal system heat exchangers
* Makeup and purification system letdown heat exchangers
* Reactor coolant pump heat exchangers
* Control rod drive mechanism cooling jackets
* Suppression pool heat exchangers

@@ -0,0 +1,18 @@
name: Reactor Compartment Ventilation System
abbrev: RCV
safety class: II
functions:
- Controlling radioactive gaseous release to the environment
during both normal and emergency (post-LOCA) operation and
maintenance of a low level of airborne radioactivity in
the reactor compartment and auxiliary spaces to permit
entry during normal operation or during both scheduled and
unscheduled shutdown.
- Removing heat to the environment from sources within the reactor
compartment
- Containment purging prior to manned entry for maintenance and/or
inspection. Containment purging would be accomplished only when
the reactor is at or below hot shutdown conditions, i.e., the decay heat
system is operating.
- Providing a source of clean air to the control areas if high
radioactivity levels are present off ship.

@@ -0,0 +1,8 @@
Auxiliary Systems
#################
.. toctree::
:glob:
Chemical Addition System/cas.rst
*

@@ -0,0 +1,5 @@
name: Reactor plant control system
params:
- name: Quantity
val: 1
tags: INTERFACE

@@ -0,0 +1,14 @@
Containment
===========
.. datatemplate:yaml:: containment.yaml
:template: system.tmpl
Additional info
----------------
.. req:: Contain stuff
:id: R_RCS_A1
.. req:: Do more stuff
:id: R_RCS_A2

@@ -0,0 +1,10 @@
---
name: Reactor containment system
abbrev: RCS
functions:
- Contain pressure and radiation
- Keep people out
params:
- name: Quantity
val: 1
tags: INTERFACE

@@ -0,0 +1,5 @@
name: Fuel handling equipment
params:
- name: Quantity
val: 1
tags: INTERFACE

@@ -0,0 +1,5 @@
Primary Coolant System
======================
.. datatemplate:yaml:: pcs.yaml
:template: system.tmpl

@@ -0,0 +1,55 @@
name: Primary Coolant System
abbrev: PCS
functions:
- Remove heat from the core during normal operation
- Generate steam
equipment:
- name: Reactor vessel and closure head
params:
- name: Quantity
val: 1
tags: INTERFACE
- name: Primary coolant pump
desc: Includes motors, coolers, valves, and piping
params:
- name: Quantity
val: 4
tags: INTERFACE
- name: Reactor vessel internals
params:
- name: Quantity
val: 1
tags: INTERFACE
- name: Steam generator
params:
- name: Quantity
val: 12
tags: INTERFACE
- name: Pressurizer
desc: Pressurizer with spray and surge line
params:
- name: Quantity
val: 1
tags: INTERFACE
- name: Nuclear steam plant supports and restraints
params:
- name: Quantity
val: 1
tags: INTERFACE
- name: Control rod drive service structure
params:
- name: Quantity
val: 1
tags: INTERFACE
- name: Reactor coolant system insulation
desc: >
Includes insulation for:
* Reactor vessel and closure head
* Pressurizer
* Surge and spray line piping
* Reactor coolant pumps
params:
- name: Quantity
val: 1
tags: INTERFACE

@@ -0,0 +1,5 @@
name: Control rod drive mechanisms
params:
- name: Quantity
val: 37
tags: INTERFACE

@@ -0,0 +1,20 @@
name: Shielding System
equipment:
- name: Primary biological shielding
desc: >
Consists of shielding water tanks at
* Top
* Vertical cylinder
* Lower-inner bottom
* Pressure suppression system
params:
- name: Quantity
val: 1
tags: INTERFACE
- name: Reactor enclosure/secondary biological shielding
desc: Consists of 3-inch-thick steel bulkheads
params:
- name: Quantity
val: 1
tags: INTERFACE

@@ -0,0 +1,9 @@
Primary Systems
###############
.. toctree::
:glob:
:maxdepth: 1
Containment System/containment.rst
*

@@ -0,0 +1,10 @@
Reactor Plant
#############
.. toctree::
:glob:
:maxdepth: 2
Primary Systems/index
Auxiliary Systems/index
*

@@ -0,0 +1,6 @@
equipment:
- name: Collision barrier
params:
- name: Quantity
val: 1
tags: INTERFACE

@@ -0,0 +1,7 @@
Ship
####
.. toctree::
:glob:
**/*

@@ -0,0 +1,9 @@
Shipyard
########
.. toctree::
:glob:
**/*

26
documents/plant/index.rst Normal file
@@ -0,0 +1,26 @@
Plant
#####
.. raw:: latex
\begin{landscape}
.. plant-data-table:: plant
:columns: PBS, SSC, Abbrev, Description, Tags
:max-depth: 4
:hide-empty:
.. raw:: latex
\end{landscape}
.. toctree::
:glob:
:maxdepth: 3
Reactor/index
Ship/index
Shipyard/index
*

@@ -1,16 +0,0 @@
- name: Calculation
abbrev: CALC
use-cases: Documenting an analysis
record: False
retention: Varies
- name: Procedure
abbrev: PROC
use-cases: Defining and dictating how work is done
record: False
retention: Lifetime
- name: Form
abbrev: FORM
use-cases: Providing evidence of tasks that were done
record: True
retention: Lifetime

@@ -1,98 +0,0 @@
Records and Document Management Procedure
-----------------------------------------
This procedure governs the creation, maintenance, and retention of
:term:`Records <Record>` and :term:`Documents <Document>`.
.. impl:: Define processes for lifetime records
:links: R_GDC_1_4
.. impl:: Define processes for Document Control
:links: R_APPB_45
.. impl:: Define processes for Quality Records
:links: R_APPB_79
.. _rmdc-systems:
Systems
^^^^^^^
Documents and records are managed in the following systems.
.. datatemplate:yaml::
:source: /_data/it-systems.yaml
{{ make_list_table_from_mappings(
[('Name', 'name'), ('Use case(s)', 'use-cases'), ('Location', 'location')],
data['rmdc-systems'],
title='RMDC Systems',
) }}
.. _rmdc-origination:
Origination
^^^^^^^^^^^
New records and new or revised documents are originated as invoked by other procedures
and processes. The following steps shall be taken at such times:
* **Originator** shall specify key data defining the new record/document, including:
* Required:
* Title --- a single-line description of what the record/document is
* Record/document type --- This determines template, review, and
retention rules. Should be from :ref:`rmdc-doctypes`
* Originating organization --- the organization or group assigned primary authorship.
Internally-generated records/documents shall be contained on the :ref:`org-chart`, while
others should be the name and department of external entities.
* Optional
* Keywords --- words or phrases to assist in future searches/lookups
.. impl:: Require document index to be updated upon origination
of any new document or record
:links: R_APPB_83
Updating the index at the point of creation is a robust
way to ensure compliance that each document/record will always
be discoverable and retrievable.
.. _rmdc-doctypes:
Record/Document Types
^^^^^^^^^^^^^^^^^^^^^
One of the following record/document types should be assigned to each
record/document. The types are generally associated with specific forms or
templates that include the expected content/sections, and are often created to
satisfy the needs of a lower-level procedure.
.. impl:: Define numerous Record/Document types
There is a timeless debate about having too many vs. too few doc types.
Additional types are added to support more mature procedure sets, but then
people start to struggle to know what type they should use, leading to
cleanup efforts, followed by management declarations to reduce the number of
types.
A good solution to this problem is to be very explicit in your procedural
culture. Ensure that whenever a record or document is being originated, that
each staff member has the relevant procedure open and is following it
precisely. Ensure that the procedures are rigorous and precise about which
record/document types should be generated in each scenario. When a procedure
is written, ensure that any new record/document types are added to the list
of known types.
.. warning:: Don't make one rec/doc type for each individual checklist or form
in each procedure though. Those can all be forms. Or maybe do make them
all form subtypes? Could be nice to really have specific evidence.
Need easy way to make auto-generated forms per procedure then.
.. datatemplate:yaml::
:source: doc-types.yaml
{{ make_list_table_from_mappings(
[('Type', 'name'), ('Abbrev', 'abbrev'), ('Use case(s)', 'use-cases'),
('Record', 'record'), ('Retention', 'retention')],
data,
title='Record/Document types',
) }}

documents/procedures/index.rst

@@ -7,4 +7,5 @@ and the management of procedures themselves.
 .. toctree::
    :glob:

+   information_management/index
    *

documents/procedures/information_management/document_management.rst Normal file

@@ -0,0 +1,296 @@
.. _rmdc-proc:
Records and Document Management Procedure
=========================================
This procedure governs the creation, intake, maintenance, authorization,
distribution, and retention of :term:`Records <Record>` and :term:`Documents
<Document>`.
Purpose
-------
Systematic management of Records and Documents helps teams execute
large, long-term, and complex projects in a coordinated fashion.
Furthermore, management of quality-related documents is required by
regulations. This procedure implements the following requirements:
.. impl:: Define processes for lifetime records
:links: R_GDC_01_04
.. impl:: Define processes for Document Control
:links: R_APPB_45
.. impl:: Define processes for Quality Records
:links: R_APPB_79
Roles
-----
Roles involved in this procedure include:
Originator
Person who authors a new or revised document
Reviewer
Person who is assigned to review a submitted document, checking for quality
and content
Approver
Person in the releasing organization who is authorized to mark a document as
Approved, thereby enabling its use across the project.
Manager
Person in any organization who is authorized to request document reservations
RMDC Staff
Person responsible for receiving documents and administering :term:`RMDC` IT
systems such as the :term:`EDMS`
Staff
Person in any organization who is authorized to access or submit project
documents/records
Procedure
---------
.. _rmdc-access:
Accessing a document/record
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Internal project staff shall follow these steps when seeking documents or records
to perform project work.
* **Staff** navigates to the project :term:`EDMS` as defined in :ref:`rmdc-systems`
* **Staff** searches EDMS for desired document/record by number, title, and/or other fields
* **Staff** checks the ``status`` field and chooses a revision that is
approved for their current work task (generally this requires an APPROVED status).
* **Staff** checks the ``usage`` field and chooses a revision with a
usage marking that is appropriate for their current work task.
* **Staff** accesses the file(s) via the EDMS download or access feature
* If an expected Document/Record cannot be found, appears to have erroneous data, or
is not accessible, **Staff** contacts **RMDC Staff** for assistance
.. _rmdc-origination:
Originating a new document/record
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
New records and new or revised documents are originated as invoked by other procedures
and processes, or by third parties. The following steps shall be taken whenever a new
document, record, or document reservation is requested, created, or received:
* **Staff** shall specify document data defining the new record/document in
accordance with the content rules listed in :py:class:`nrsk.models.Document`
* Required:
* Title --- a single-line description of what the record/document is
* Record/document type --- This determines template, review, and
retention rules. Should be from :ref:`rmdc-doctypes`
* Originating organization --- the organization or group assigned primary authorship.
Internally-generated records/documents shall be contained on the :ref:`org-chart`, while
others should be the name and department of external entities.
* Optional
* Keywords --- words or phrases to assist in future searches/lookups
.. impl:: Require document index to be updated upon origination
of any new document or record
:links: R_APPB_83
Updating the index at the point of creation is a robust
way to ensure that each document/record remains
discoverable and retrievable.
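For illustration only (not a procedural step), a sketch of recording such metadata against the ``document`` table created in the initial migration, assuming SQLModel models that mirror those columns::

   # Illustrative sketch: field names come from the "document" table in the
   # initial migration; the Document constructor signature is assumed.
   from sqlmodel import Session

   from nrsk.db import get_engine
   from nrsk.models import Document

   doc = Document(
       number="TTP-PCS-CALC-00001",  # hypothetical; see Document Numbering below
       title="Primary loop heat balance",
       revision="0",
       type_id=1,  # row id of a Record/Document type, e.g. Calculation
       status="RESERVED",
       usage="FOR_INFORMATION",
       retention_plan="LIFETIME",
       restriction_codes="",
       notes="",
   )
   with Session(get_engine()) as session:
       session.add(doc)
       session.commit()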
Intake
^^^^^^
Upon receipt of Document/Records from an external party, the following process shall occur:
* **Receiver** determines whether the Document/Record is eligible for
entry into the Document Index.
* **Receiver** inspects the Document Index to ensure consistency between the Index
and the received Document
* If the received document/record is not already listed in the Document Index, then
**Receiver** takes on the role of **Originator** and follows the process in
:ref:`rmdc-origination`.
* If the data is incorrect or outdated, **Receiver** updates the data
* **Receiver** uploads the received file(s) to the EDMS.
* **Receiver** updates metadata in the Document Index, including the latest
revision number, authors, received date, and location/URL in the EDMS
* **Receiver** assigns an acceptance review task to the appropriate person based on
the work breakdown structure that the document was produced under.
* **Reviewer** views Document Index and downloads file(s) as listed
* **Reviewer** inspects files and performs acceptance review as appropriate, potentially
generating a Review Record
* **Reviewer** updates Document Index acceptance metadata field according to review
* If the document/records are not accepted, **Reviewer** informs originating
institution of deficiencies and requests update
Document/Record data management
-------------------------------
.. _rmdc-doctypes:
Record/Document Types
^^^^^^^^^^^^^^^^^^^^^
One of the following record/document types should be assigned to each
record/document. The types are generally associated with specific forms or
templates that include the expected content/sections, and are often created to
satisfy the needs of a lower-level procedure.
.. impl:: Define numerous Record/Document types
There is a timeless debate about having too many vs. too few doc types.
Additional types are added to support more mature procedure sets, but then
people start to struggle to know what type they should use, leading to
cleanup efforts, followed by management declarations to reduce the number of
types.
A good solution to this problem is to be very explicit in your procedural
culture. Ensure that whenever a record or document is being originated, that
each staff member has the relevant procedure open and is following it
precisely. Ensure that the procedures are rigorous and precise about which
record/document types should be generated in each scenario. When a procedure
is written, ensure that any new record/document types are added to the list
of known types.
.. warning:: Don't make one rec/doc type for each individual checklist or form
in each procedure though. Those can all be forms. Or maybe do make them
all form subtypes? Could be nice to really have specific evidence.
Need easy way to make auto-generated forms per procedure then.
.. datatemplate:yaml::
:source: /_data/doc-types.yaml
{{ make_list_table_from_mappings(
[('Type', 'name'), ('Abbrev', 'abbrev'), ('Use case(s)', 'use_cases'),
('Record', 'record'), ('Retention', 'retention')],
data,
title='Record/Document types',
) }}
Document Numbering
^^^^^^^^^^^^^^^^^^
Document numbers (or IDs) are human-usable codes that uniquely identify
the document/record for purposes of cross-referencing and discussion.
All projects may use |inst| corporate-level document numbering as appropriate
in addition to the project-specific policies.

For corporate-level documents/records not associated with any specific project
or system, the document numbers shall be of the form: ``AMS-{type}-00000``,
where ``{type}`` is the document type abbreviation in all CAPS, and the number
starts at 00001 and increments from there. If 100,000 or more numbers are
needed, the number simply grows additional digits.
For project-level documents/records not associated with any specific system,
numbers shall be of the same form, but with ``AMS`` replaced with the
project abbreviation, e.g. ``TTP``.
Documents/records associated with a specific project and system shall have document
numbers of the form:
``{proj}-{sys}-{type}-00000``
where ``{proj}`` is the project abbreviation and ``{sys}`` is the system abbreviation, in all CAPS.
Active projects and their abbreviations may be found in :ref:`projects`.
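As an illustrative sketch (not part of the procedure), the rules above can be composed mechanically; names here are hypothetical::

   # Sketch: compose a document number per the rules above (names hypothetical).
   def doc_number(type_abbrev: str, seq: int, proj: str | None = None,
                  sys: str | None = None) -> str:
       """doc_number("CALC", 1) -> "AMS-CALC-00001"
       doc_number("CALC", 1, proj="TTP", sys="PCS") -> "TTP-PCS-CALC-00001"
       """
       parts = [proj or "AMS"]
       if sys:
           parts.append(sys.upper())
       parts += [type_abbrev.upper(), f"{seq:05d}"]  # grows past 5 digits as needed
       return "-".join(parts)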
.. _rmdc-doc-status:
Document Statuses
^^^^^^^^^^^^^^^^^
Document Status indicates where a document or record is in its lifecycle, and
determines if and how it may be used in design or operation. Statuses fall into
three top-level categories. These statuses are derived from
:cite:p:`cloverDocumentControlRecords2010`.
.. autoclass:: nrsk.models.Document.STATUS
:no-index:
:no-members:
* **Not Yet Approved** --- The following statuses apply to Documents that
generally shall not be used to support plant design or operations:
.. autoattribute:: nrsk.models.Document.STATUS.RESERVED
:no-index:
:no-value:
.. autoattribute:: nrsk.models.Document.STATUS.IN_PROGRESS
:no-index:
:no-value:
.. autoattribute:: nrsk.models.Document.STATUS.IN_REVIEW
:no-index:
:no-value:
.. autoattribute:: nrsk.models.Document.STATUS.REJECTED
:no-index:
:no-value:
.. autoattribute:: nrsk.models.Document.STATUS.REFERENCE
:no-index:
:no-value:
.. autoattribute:: nrsk.models.Document.STATUS.NATIVE
:no-index:
:no-value:
* **Approved** --- The following statuses apply to active documents that allow
plant design or operations:
.. autoattribute:: nrsk.models.Document.STATUS.APPROVED
:no-index:
:no-value:
* **No Longer Approved** --- The following statuses apply to documents
that are no longer approved for use other than reference:
.. autoattribute:: nrsk.models.Document.STATUS.QUARANTINED
:no-index:
:no-value:
.. autoattribute:: nrsk.models.Document.STATUS.SUPERSEDED
:no-index:
:no-value:
.. autoattribute:: nrsk.models.Document.STATUS.REVISED
:no-index:
:no-value:
.. autoattribute:: nrsk.models.Document.STATUS.VOIDED
:no-index:
:no-value:
.. autoattribute:: nrsk.models.Document.STATUS.CLOSED
:no-index:
:no-value:
.. note:: These statuses are validated in the data dictionary
in :py:class:`~nrsk.models.Document`
.. _rmdc-systems:
Document/Record Management Systems
----------------------------------
Documents and records are managed in the following systems.
.. datatemplate:yaml::
:source: /_data/it-systems.yaml
{{ make_list_table_from_mappings(
[('Name', 'name'), ('Use case(s)', 'use_cases'), ('Location', 'location')],
data['RMDC'],
title='RMDC Systems',
) }}
See Also
--------
View file
@ -0,0 +1,9 @@
*********************************
Information Management Procedures
*********************************
.. toctree::
information_management_plan
document_management
View file
@ -0,0 +1,126 @@
Information Management Plan
===========================
Purpose
-------
This plan is the highest-level description of how information is
managed at |inst|. It defines the information management requirements
and explains the chosen processes and tools that meet the requirements.
Scope
-----
This plan applies to creation, storage, exchange, and retirement of project
information related to |project|. This includes plant configuration management
data as defined in :cite:p:`barrieroInformationManagementProcess2010`. The plan
is not limited to information affecting quality; it also includes business
information.
Background
----------
The potential benefits of the digital transformation are well known across all
business sectors. Numerous commercial nuclear information management studies
have further suggested that properly implemented information management can improve
efficiency and quality while reducing costs
:cite:p:`halpinInformationManagementNuclear1978d,agencyInformationTechnologyNuclear2010,barrieroInformationManagementProcess2010,renuartAdvancedNuclearTechnology2014`.
In addition, management of information related to product quality is subject
to nuclear quality regulations in all jurisdictions.
Requirements
------------
.. req:: Quality-related information shall be managed in accordance with 10 CFR 50 Appendix B
:id: R_INFO_APPB
:links: R_10CFR50_APPB
:tags: quality
:basis: Compliance with Appendix B is necessary for licensing
Note that non-quality related information is not necessarily subject
to this requirement.
.. req:: A data dictionary shall be maintained defining controlled data
:id: R_DATA_DICT
:basis: It will provide a central reference for all project members to
find specific, precise, and up-to-date data definitions to enable
unambiguous communications and collaboration.
The dictionary shall define data types, data fields, constraints on the
fields, relationships between the data, source, sensitivity, usage,
owner/steward, sample values, and transformation logic, as applicable. It
shall be revision controlled such that changes can be clearly seen and
remembered.
.. req:: Data shall be managed such that data exchanges and transformations between
parties and systems can be readily automated
:id: R_DATA_EXCHANGE
:basis: Over the project life, numerous parties and systems will ramp up
and down due to changing relationships and technologies. Automated data
exchanges are expected to improve the ease, cost, speed, and quality of
the inevitable exchanges and transformations.
This effectively requires rich data import and export capabilities
in each tool used to manage data.
.. req:: Data shall be subject to role-based access controls (RBAC) or stronger
:id: R_DATA_ACCESS
:basis: Role-based access control (RBAC) is a strong standard
covering the needs of commercial nuclear information
from export control and business sensitivity perspectives.
More sensitive data, such as Security Related Information,
may use stronger access controls such as attribute-based access
control (ABAC) or mandatory access control (MAC).
Implementation
--------------
This section defines the specific implementation of the requirements.
General principles
^^^^^^^^^^^^^^^^^^
A hub data architecture has been chosen for this project, based on
arguments and experiences in :cite:p:`agencyInformationTechnologyNuclear2010`.
.. figure:: /_static/data-hub.png
Hub architecture, from :cite:p:`agencyInformationTechnologyNuclear2010`
This is designed to enable rapid integration of a wide variety of partner
organizations, specialized information management tools, and engineering/design
tools while striving to future-proof the multi-decade project.
The underlying data layer consists of:
* Structured text (e.g. YAML, XML, JSON) controlled in version-controlled repositories
* Databases (e.g. Postgres)
* Documents/drawings (PDFs, native files, HTML) stored on corporate drives and managed
by the Records Management/Document Control system
* Technical data (3D models, simulation input/output, laser scans, schedule dumps) stored
on corporate drives, managed by the Technical Data Management system
Above the data layer sits the data authoring and manipulation layer, which includes:
* Office tools: word processors, spreadsheets, text editors, IDEs, etc., including
online collaboration tools
* PM tools: Primavera P6, HR tools
* Engineering tools: SolidWorks, ANSYS, CASMO, MCNP, Intergraph, Revit
* Construction tools
* Maintenance tools
One-way or bidirectional data exchanges between tools and institutions occur
through the API, which reads the data layer and presents data representations to
authorized users or services in clearly-defined formats over the network.
.. _info-mgmt-data-dict:
Data Dictionary
^^^^^^^^^^^^^^^
The data dictionary is defined and maintained as described in
:need:`I_DATA_DICT`.
The data dictionary itself is located at :ref:`data-dict`.
.. insert render of the data dictionary table here.
Technology stack
^^^^^^^^^^^^^^^^
.. insert render of the IT systems table here.
View file
@ -1,11 +1,10 @@
-.. thinking of putting like, all the calcs you have to do during design,
-   calibrations during commissioning,
-   work during operations. This would be where we could be like, "Hey don't forget
-   to include impurities in shielding calculations"
+.. _projects:
 
 ##################
 Project Management
 ##################
 
 .. toctree::
-   :glob:
    :maxdepth: 2
 
-   *
+   schedule
+   mddl
View file
@ -0,0 +1,50 @@
Master Document and Delivery List
#################################
The MDDL is a comprehensive, hierarchical catalog of all the documents and files
related to the test reactor project. The MDDL includes information such as
document number, revision, title, and maturity status (e.g., completion
percentage). This serves as a central reference point, providing an overview of
the project's documentation and helping team members find relevant and
up-to-date information. Furthermore, the MDDL helps maintain organization,
control, and versioning of documents.
* Program Deliverables
* Full set of QA program procedures, including engineering and
configuration management
* Design Deliverables
* Design basis events and plant functions
* System Design Documents for all systems (including requirements/functions,
equipment list, parameters)
* Long-lead equipment list
* Probabilistic Risk Assessment (PRA) model
* General arrangement (3D model and extracted drawings)
* P&ID drawings for all systems
* Engineering simulator software
* Training simulator software
* Plant safety model (i.e. RELAP)
* Core models (i.e. CASMO/SIMULATE)
* Shielding models (i.e. MCNP)
* Balance of plant model (i.e. Flownex)
* Fuel performance models (i.e. FRAPCON and FRAPTRAN)
* Pipe restraint models
* Equipment models (i.e. SolidWorks)
* Civil/structural models, coupled to ship motions (i.e. ANSYS)
* Emergency planning documents
* Plant Technical Specifications
* Plant Operating Procedures
* Project Management Deliverables
* Resource-loaded schedule out to TTP operation
* Bottoms-up parametric cost estimate model and reports
* Regulatory Submittals
* Topical Reports
* Argument that Ship is the site (and so construction can occur before CP)
* Argument that TTP is a prototype reactor (104 license option)
* Argument
* Construction Permit Application
* Preliminary Safety Analysis Reports
* Environment Report
* Operating Permit Application
View file
@ -0,0 +1,20 @@
# Data regarding the various projects the company is working on.
# The abbreviations drive document/record numbering
projects:
- name: Training and Test Platform
abbrev: TTP
description: >
The FOAK AMS nuclear ship, owned and operated by AMS and used for training and testing.
Effectively it produces the information validating the commercial readiness of
higher-powered ships. It's intended to be of the same design as the commercial
power plant, but may be downrated e.g. from 50 MWe to 10 MWe.
- name: Integration Shipyard
abbrev: ISH
description: >
The shipyard that integrates nuclear power plants into pre-built ships.
- name: Nuclear Ready
abbrev: NRDY
description: >
Standard interfacing information that defines how to make a ship that can
easily be outfitted with a nuclear power system at the Integration
Shipyard.
View file
@ -0,0 +1,4 @@
Risk Register
#############
View file
@ -1,4 +1,12 @@
-Project Schedule
-################
+.. raw:: latex
+
+   \begin{landscape}
+
+Milestone Schedule
+##################
 
 .. schedule:: _data/schedule.yaml
+
+.. raw:: latex
+
+   \end{landscape}
View file
@ -12,7 +12,8 @@ bases.
    :caption: Contents:
    :glob:
 
-   stakeholder/index
+   stakeholder/ams
    national/index
+   industry/*
    standards/index
    *
View file
@ -0,0 +1,26 @@
EPRI ALWR
=========
This page contains some high-level requirements sourced from the EPRI Advanced
Light Water Reactor Utility Requirements Document :cite:p:`mprAdvancedLightWater99a`
.. req:: Passive ALWRs shall not require safety-related AC electric power other than
inverter supplied AC power for I&C.
:id: R_ALWR_1
.. req:: 72 hours without manual operator action.
:id: R_ALWR_2
For transients and accidents analyzed under the initiating event plus
single failure Licensing Design Basis assumptions (which include loss of all ac
power), no credit for manual operator action shall be necessary to meet core
protection regulatory limits for at least 72 hours following initial indication
of the need for action (i.e., approximately the time of the initiating event).
.. req:: Only simple actions and assistance shall be necessary beyond 72 hours
:id: R_ALWR_3
.. req:: Off-site dose limits shall be maintained for at least 72 hours without
the need for off-site assistance.
:id: R_ALWR_4
View file
@ -24,7 +24,7 @@ and `RG 1.232 <https://www.nrc.gov/docs/ML1732/ML17325A611.pdf>`_.
 .. needtable:: Appendix A summary
    :filter: id.startswith("R_GDC")
-   :columns: id
+   :columns: id, title
 
 .. include:: /generated_assets/10-cfr-50-app-a-list.rst
View file
@ -0,0 +1,77 @@
AMS Stakeholder Requirements
============================
The following requirements come from the Project Leaders at AMS.
.. req:: Develop a new nuclear maritime industry.
:id: R_PROJ_1959
.. req:: The first vessel shall have an electrical power output range of 750 kWe -- 16 MWe.
:id: R_AMS_1
:links: R_PROJ_1959
:basis: Appropriate for commercial demonstrator, per Axioms
:cite:p:`cahillDesignPhilosophyBrief2025`
.. req:: The electrical backbone shall operate at 6.6 kVAC at 60 Hz
:id: R_AMS_2
:links: R_PROJ_1959
:basis: Maximum commercial compatibility, per Axioms
:cite:p:`cahillDesignPhilosophyBrief2025`
.. req:: The demonstrator vessel shall be in the water by the Summer of 2030.
:id: R_AMS_3
:links: R_PROJ_1959
:basis: Desire to move fast, per Axioms
:cite:p:`cahillDesignPhilosophyBrief2025`
.. req:: The demonstrator vessel shall be flagged to US standards.
:id: R_AMS_4
:links: R_PROJ_1959
:basis: Per Axioms
:cite:p:`cahillDesignPhilosophyBrief2025`
.. req:: The demonstrator vessel architecture shall be grounded in IMO Resolution A.491(XII)
:id: R_AMS_5
:links: R_PROJ_1959
:basis: Per Axioms document
Document reference: :cite:p:`imoCodeSafetyNuclear1982`.
Source :cite:p:`cahillDesignPhilosophyBrief2025`.
.. req:: The demonstrator vessel shall facilitate future augmentation of the power system
without massive amounts of construction.
:id: R_AMS_6
:links: R_PROJ_1959
:basis: Desire for drop-in nuclear-readiness, per Axioms
:cite:p:`cahillDesignPhilosophyBrief2025`
.. req:: The reactor system shall comply with Passive Plant safety requirements defined in EPRI ALWR URD
:id: R_AMS_7
:basis: This satisfies modern customer expectations of safety
:links: R_PROJ_1959, R_ALWR_1, R_ALWR_2, R_ALWR_3, R_ALWR_4
The EPRI ALWR URD is :cite:p:`mprAdvancedLightWater99a`
.. req:: The vessel shall have a double bottom for the full length of the ship
:basis: USCG per 3.1.2.2 of CNSG status
.. req:: The vessel shall have two-compartment subdivision
:basis: USCG and AMS rules, per 3.1.2.2 and 3.1.2.3 of CNSG status
.. needflow:: Engineering plan to develop a nuke on a ship
:alt: Engineering plan
:root_id: R_PROJ_1959
:config: lefttoright
:show_link_names:
:border_color:
[status == 'open']:FF0000,
[status == 'in progress']:0000FF,
[status == 'closed']:00FF00
View file
@ -0,0 +1,32 @@
Design Basis Events
===================
The Plant is designed to maintain fuel integrity in the following
design basis events:
External events
---------------
* Collision
* Grounding
* Flooding
* Sinking
* Heavy weather
* Fire
* Explosion
* Earthquake
Internal events
---------------
* Fire
* Large-break LOCA
* Medium-break LOCA
* Small-break LOCA
* Transient overpower
* Station blackout
Key safety functions
--------------------
* Maintain subcriticality, even inverted
* Maintain decay heat removal, even inverted
* Self-transport to remote anchoring location
View file
@ -1,5 +1,5 @@
 [project]
-name = "nuclear-reactor-starter-kit"
+name = "ams-reactor-starter-kit"
 version = "0.1.0"
 authors = [
     { name="Nick Touran", email="nick@whatisnuclear.com" },
@ -9,7 +9,7 @@ description = """\
     and tools supporting efficient nuclear energy endeavors.\
 """
 readme = "README.md"
-requires-python = ">=3.9"
+requires-python = ">=3.12"
 dependencies = [
     "openpyxl",
     "pyyaml",
@ -24,7 +24,7 @@ dependencies = [
     "sphinxcontrib-bibtex >= 2.6.1",
     "sphinxcontrib-glossaryused @ git+https://github.com/partofthething/glossaryused@bb321e6581b4c0618cd6dc4f1fd8355d314aee4d",
     "sphinx-autobuild",
-    "sphinxcontrib.datatemplates",
+    "sphinxcontrib-datatemplates",
     "sphinxcontrib-mermaid",
     "sphinxcontrib-svg2pdfconverter",
     "sphinx-timeline",
@ -32,6 +32,18 @@ dependencies = [
     "matplotlib",
     "pandas",
     "jpype1",
+    "ruamel-yaml>=0.18.16",
+    "pydantic>=2.12.5",
+    "sphinx-autodoc-typehints>=3.5.2",
+    "email-validator>=2.3.0",
+    "sphinxcontrib-apidoc>=0.6.0",
+    "autodoc-pydantic>=2.2.0",
+    "sqlmodel>=0.0.31",
+    "fastapi>=0.128.0",
+    "uvicorn>=0.38.0",
+    "python-dotenv>=1.2.1",
+    "psycopg2>=2.9.11",
+    "alembic>=1.17.2",
 ]
 classifiers = [
     "Programming Language :: Python :: 3",
@ -81,3 +93,25 @@ include_trailing_comma = true
 force_grid_wrap = 0
 line_length = 88
 profile = "black"
+
+[dependency-groups]
+dev = [
+    "ipython>=8.18.1",
+    "pytest>=9.0.2",
+]
+
+[tool.alembic]
+script_location = "%(here)s/alembic"
+# file_template = "%%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s"
+prepend_sys_path = [
+    "."
+]
+
+[[tool.alembic.post_write_hooks]]
+name = "ruff"
+type = "module"
+module = "ruff"
+options = "check --fix REVISION_SCRIPT_FILENAME"
View file
@ -0,0 +1,7 @@
"""NRSK root."""
from pathlib import Path
PACKAGE_ROOT = Path(__file__).resolve().parent
PROJECT_ROOT = PACKAGE_ROOT.parent.parent
DOCS_ROOT = PACKAGE_ROOT.parent.parent / "documents"
18
src/nrsk/db.py Normal file
View file
@ -0,0 +1,18 @@
"""Database management code."""
import os
from dotenv import load_dotenv
from sqlmodel import create_engine
load_dotenv()
POSTGRES_USER = os.getenv("POSTGRES_USER")
POSTGRES_PASSWORD = os.getenv("POSTGRES_PASSWORD")
POSTGRES_PATH = os.getenv("POSTGRES_PATH")
def get_engine():
return create_engine(
f"postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_PATH}",
)
View file

View file
@ -0,0 +1,97 @@
"""Document data intake."""
import os
from contextlib import asynccontextmanager
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse
from sqlmodel import Session, SQLModel, select
from nrsk.db import get_engine
# import others to create DB?
from nrsk.models import Document, User
engine = get_engine()
@asynccontextmanager
async def lifespan(app: FastAPI):
# --- Startup Logic ---
yield # The app runs while it's "yielding"
# --- Shutdown Logic ---
print("Shutting down safely")
app = FastAPI(lifespan=lifespan)
@app.get("/schema")
def get_schema():
# This generates the JSON Schema from your SQLModel/Pydantic model
return Document.model_json_schema(mode="serialization")
@app.post("/submit")
def submit_data(data: Document):
with Session(engine) as session:
data = Document.model_validate(data)
session.add(data)
session.commit()
return {"status": "success", "id": data.id}
@app.get("/documents/")
def read_documents(skip: int = 0, limit: int = 10):
with Session(engine) as session:
statement = select(Document).offset(skip).limit(limit)
results = session.exec(statement).all()
return results
@app.get("/", response_class=HTMLResponse)
def get_form():
return """
<!DOCTYPE html>
<html>
<head>
<title>QA Entry Form</title>
<script src="https://cdn.jsdelivr.net/npm/@json-editor/json-editor@latest/dist/jsoneditor.min.js"></script>
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0/dist/css/bootstrap.min.css">
</head>
<body class="container mt-5">
<h2>Submit QA Revision</h2>
<div id="editor_holder"></div>
<button id="submit" class="btn btn-primary mt-3">Save to Database</button>
<script>
// 1. Fetch the schema from FastAPI
fetch('/schema').then(res => res.json()).then(schema => {
const editor = new JSONEditor(document.getElementById('editor_holder'), {
schema: schema,
theme: 'bootstrap5',
iconlib: 'fontawesome5'
});
// 2. Handle Submission
document.getElementById('submit').addEventListener('click', () => {
const errors = editor.validate();
if (errors.length) {
alert("Validation Error: " + JSON.stringify(errors));
return;
}
fetch('/submit', {
method: 'POST',
headers: {'Content-Type': 'application/json'},
body: JSON.stringify(editor.getValue())
}).then(response => alert("Saved Successfully!"));
});
});
</script>
</body>
</html>
"""
View file
@ -0,0 +1,23 @@
"""Seed DB for documents, e.g. with doc types"""
from sqlmodel import Session
from nrsk import DOCS_ROOT
from nrsk.db import get_engine
from nrsk.documents.validate import validate_doc_types
def seed_doc_types():
engine = get_engine()
doc_types = validate_doc_types(DOCS_ROOT / "_data" / "doc-types.yaml")
with Session(engine) as session:
for dtype in doc_types:
session.add(dtype)
session.commit()
if __name__ == "__main__":
seed_doc_types()
print("seeded doc types")
View file
@ -0,0 +1,23 @@
"""
Validate document data during build.
In particular, check doc types.
"""
import pathlib
import yaml
from nrsk.models import InformationType, InformationTypes


def sphinx_validate_doc_types(app) -> list[InformationType]:
    """Ensure doc type data is valid."""
    fpath = pathlib.Path(app.srcdir) / "_data" / "doc-types.yaml"
    return validate_doc_types(fpath)


def validate_doc_types(fpath: str | pathlib.Path) -> list[InformationType]:
    """Validate a doc-types YAML file and return the typed list."""
    with open(fpath) as f:
        data = yaml.safe_load(f)
    return InformationTypes.validate_python(data)
666
src/nrsk/models.py Normal file
View file
@ -0,0 +1,666 @@
"""
Define the Data Dictionary.
Implementation of Data Dictionary
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. impl:: Maintain the Data Dictionary base data using Pydantic
:id: I_DATA_DICT
:links: R_DATA_DICT
The data dictionary is managed using Pydantic. Pydantic allows for
concise Python code to richly define data models and their fields. From a single
class definition, it provides data validation, automatic rich documentation (via
a Sphinx plugin), integration with FastAPI for data exchange, and
relatively easy integration with sqlalchemy for database persistence. Changes to
the schema can be managed and controlled via the revision control system, and
changes to a single source (the Python code) will automatically propagate to the
rendered documentation and, potentially, the database (e.g. using *alembic*).
Using SQLAlchemy as the database engine enables wide flexibility in underlying
database technology, including PostgreSQL, MySQL, SQLite, Oracle, and MS SQL
Server. Pydantic models allow us to validate data loaded from a database,
directly from structured text files, or from JSON data delivered via the network.
Analysis of Alternatives
^^^^^^^^^^^^^^^^^^^^^^^^
SQLModel :cite:p:`SQLModel` was considered as the data layer base, but it was
judged less mature than pydantic and sqlalchemy, with inadequate
documentation related to field validation, so Pydantic is used
directly for schema definitions.
.. _data-dict:
Data Dictionary
^^^^^^^^^^^^^^^
This is the official Data Dictionary discussed in :ref:`the Information
Management Plan <info-mgmt-data-dict>`.
"""
import re
from datetime import datetime, timedelta
from enum import StrEnum
from typing import Annotated, Any, Optional
from uuid import UUID, uuid4
# _PK_TYPE = UUID
# moving away from UUID at least temporarily b/c SQLite doesn't
# really support it, which makes adding new data via DBeaver harder
_PK_TYPE = int
from pydantic import (
AnyUrl,
BaseModel,
ConfigDict,
EmailStr,
PositiveInt,
TypeAdapter,
ValidationError,
computed_field,
field_validator,
model_validator,
)
from sqlalchemy import text
from sqlmodel import JSON, Column, Field, Relationship, SQLModel
ALL_CAPS = re.compile(r"^[A-Z]+$")
class NRSKModel(SQLModel):
id: _PK_TYPE = Field(
# default_factory=uuid4,
description="The unique ID of this object. Used as a primary key in the database.",
primary_key=True,
# schema_extra={
# "examples": ["3fa85f64-5717-4562-b3fc-2c963f66afa6"],
# },
)
class DocumentUserLink(NRSKModel, table=True):
"""Linkages between users and documents."""
position: int = Field(default=0)
"""Integer indicating order of people"""
role_note: str = Field(
default="",
)
"""Extra information about role such as 'lead' or 'section 2.4'"""
document_id: _PK_TYPE | None = Field(
foreign_key="document.id", primary_key=True, default=None
)
user_id: _PK_TYPE | None = Field(
foreign_key="user.id", primary_key=True, default=None
)
class User(NRSKModel, table=True):
"""A person involved in the Project."""
given_name: str
family_name: str
preferred_name: str | None = None
previous_name: str | None = None
email: EmailStr
joined_on: datetime | None = None
deactivated_on: datetime | None = None
organization: str | None = None
title: str | None = None
contributed: list["Document"] = Relationship(
back_populates="contributors", link_model=DocumentUserLink
)
class OpenItem(NRSKModel):
name: str
status: str
created_on: datetime
closed_on: datetime | None = None
class SSC(NRSKModel):
"""
A Structure, System, or Component in the plant.
This is a generic hierarchical object that can represent plants, units,
buildings and their structures, systems, subsystems, components,
subcomponents, etc.
A physical tree of buildings/structures/rooms may have overlapping
contents in terms of systems/components/equipment/parts
"""
name: str
pbs_code: str | None = Field(
description="An integer sequence that determines the 'system number' and also the ordering in printouts",
schema_extra={
"examples": ["1.2.3", "20.5.11"],
},
default="",
)
"""PBS code is tied closely to the structure of the PBS, obviously. If 1.2
is a category level, that's ok, but that doesn't imply that the second level
of PBS 2 is also a category level; it may be systems.
Since this can change in major PBS reorganizations, it should not be used
for cross-referencing (use ID).
"""
abbrev: str = Field(
description="A human-friendly abbreviation uniquely defining the system"
)
parent: Optional["SSC"] = None
functions: list[str] | None = Field(
    description="Functions of this system", default=None
)
@field_validator("abbrev", mode="after")
@classmethod
def abbrev_must_be_all_caps(cls, v: str) -> str: # noqa: D102
if not re.match(ALL_CAPS, v):
raise ValueError("{v} must be all CAPS")
@field_validator("pbs_code", mode="after")
@classmethod
def pbs_must_be_int_sequence(cls, v: str) -> str: # noqa: D102
if not v or re.match(r"^(\d+\.?)+$", v):
raise ValueError(f"{v} must be an integer sequence, like 1.2.3")
class SystemsList(BaseModel):
"""A flat list of Systems in the plant.
Can be used e.g. to render a snapshot of the Master Systems List.
Does not include categories like "Nuclear Island" or "Primary Systems".
We may want another structure that represents the whole tree in a
well-defined manner, or we may want to add a 'path' attr
to systems that define where they live.
"""
systems: list[SSC]
class ParamDef(NRSKModel):
"""A parameter class defining an aspect of plant design."""
name: str = Field(
schema_extra={"examples": ["Nominal gross power"]},
)
"""Name of the parameter class."""
description: str
"""Detailed description of what parameters of this type represent"""
valid_units: list[str] | None = Field(
    schema_extra={"examples": [["MW", "W", "shp"]]}, default=None
)
"""List of units allowed"""
class ParamVal(NRSKModel):
"""A particular value of a Parameter, assigned to a particular SSC."""
ssc: SSC
pdef: ParamDef
value: str
units: str | None = None
pedigree: str = Field(
description="Indication of how well it is known (rough estimate, final design, as-built)."
)
source: str = Field(description="Where this version of the value came from")
class ITSystem(NRSKModel):
"""An IT system used by the project."""
name: str
vendor: str
version: str | None = None
use_cases: list[str] = Field(
schema_extra={
"examples": [
[
"Document management",
]
],
}
)
"""One or more use cases this system is used for."""
physical_location: str = Field(description="Where the system is physically located")
url: AnyUrl | None = Field(description="Full URL to the system", default=None)
custodian: User | None = Field(
description="Person currently in charge of system", default=None
)
launched_on: datetime | None = None
retired_on: datetime | None = None
quality_related: bool
class InformationType(NRSKModel, table=True):
"""A type/kind/class of Information, Document, or Record."""
model_config = ConfigDict(extra="forbid")
name: str
abbrev: str
examples: list[str] | None = Field(
default=None,
sa_column=Column(JSON),
)
description: str = ""
retention: str | None = ""
record: bool = True
use_cases: str = ""
notes: str = ""
parent_id: _PK_TYPE | None = Field(default=None, foreign_key="informationtype.id")
# Add these two relationships for easier DB parsing in code
parent: Optional["InformationType"] = Relationship(
back_populates="subtypes",
sa_relationship_kwargs={"remote_side": "InformationType.id"},
)
subtypes: list["InformationType"] = Relationship(back_populates="parent")
InformationTypes = TypeAdapter(list[InformationType])
"""A list of document types."""
class Document(NRSKModel, table=True):
"""
Data dictionary entry for Documents and Records.
Document data is designed to satisfy the needs defined in :ref:`rmdc-proc`.
See Also
--------
* Some of the field definitions come from CFIHOS
https://www.jip36-cfihos.org/wp-content/uploads/2023/08/v.1.5.1-CFIHOS-Specification-Document-1.docx
* ISO-19650 has different Status Codes defining suitability level (for information, as-built)
https://ukbimframework.org/wp-content/uploads/2020/05/ISO19650-2Edition4.pdf
"""
class STATUS(StrEnum):
"""Document Status options."""
# Much of the wording here comes from cloverDocumentControlRecords2010.
# NOTE: if you add or remove a status, be sure to also update the
# category property below AND :ref:`rmdc-doc-status`!
## Not Yet Approved:
RESERVED = "RESERVED"
"""
A Document ID has been assigned, but the document is in development or
has not yet been started (default).
"""
IN_PROGRESS = "IN PROGRESS"
"""One or more authors are creating or revising the document."""
IN_REVIEW = "IN REVIEW"
"""A completed draft of the document has been submitted and is pending review."""
REJECTED = "REJECTED"
"""A draft that was rejected by the review team and may be revised and resubmitted."""
AUTHORIZED = "AUTHORIZED"
"""A controlled revision that has been signed but is not yet effective.
Such documents may be used for training, etc. Documents with this status may
be used for plant modifications in a work package, but not for normal operations."""
REFERENCE = "REFERENCE"
"""Document is stored in EDMS for ease of access and reference, but
there is no assertion that the information is the latest available.
Useful for Standards, engineering handbook excerpts, vendor notices."""
NATIVE = "NATIVE"
"""A document file that may be in EDMS in the native file format. Not
used in the field because they (a) may require special software to view
and (b) may not be controlled for field use (i.e. not quarantined if
errors are discovered)."""
## Approved:
APPROVED = "APPROVED"
"""A document revision that has been submitted by the releasing
organization and that is authorized for the use case defined in
the suitability code.
* A drawing with this status during operation reflects the plant configuration
* A drawing with this status before or during construction reflects that it is
ready to be fabricated/built
* A procedure with this status is effective.
"""
## No longer Approved:
QUARANTINED = "QUARANTINED"
"""(On hold, Suspended) A document revision that was previously
authorized and has been placed on hold, e.g. a procedure that cannot be
performed as written or a design that is known to have pending changes."""
SUPERSEDED = "SUPERSEDED"
"""A document that has been replaced by another document. The new
document is to be recorded in the index."""
REVISED = "REVISED"
"""A document that has been replaced by a subsequent revision of that
document."""
VOIDED = "VOIDED"
"""A document or revision that is no longer needed and there is no
revision or superseding document. This would also be used for documents
that have reached a predetermined expiration date, such as a temporary
procedure."""
CLOSED = "CLOSED"
"""(Archived) A document for which the work has been completed."""
@property
def category(self) -> str:
"""High-level status category: Not yet approved, Approved, or No Longer Approved."""
if self.value in {
self.RESERVED,
self.IN_PROGRESS,
self.IN_REVIEW,
self.REJECTED,
self.AUTHORIZED,
self.REFERENCE,
self.NATIVE,
}:
return "Not Yet Approved"
if self.value in {self.APPROVED}:
return "Approved"
return "No Longer Approved"
class USAGE(StrEnum):
"""Usage options.
Usage governs what use cases a document may be used for. It is a notion
derived from the ISO 19650 'suitability' idea, but used in combination
with the NIRMA status codes. It allows a document to be approved for
e.g. a conceptual design stage without letting it inadvertently be
released for bid or manufacture. Releasing organizations can update the
suitability as needed.
See https://ukbimframework.org/wp-content/uploads/2020/09/Guidance-Part-C_Facilitating-the-common-data-environment-workflow-and-technical-solutions_Edition-1.pdf
"""
FOR_INFORMATION = "FOR INFORMATION"
"""A document revision that may be used for information only, not for
any contractual purpose."""
FOR_STAGE_APPROVAL = "FOR STAGE APPROVAL"
"""A document revision that is considered complete for the contractual stage in
which it was created. For example, in a Preliminary Design phase, this
usage would indicate that it is at the expected usage level for
preliminary design. Most design-phase documents that are not yet ready
for bid or construction will be marked for this usage."""
FOR_BID = "FOR BID"
"""A document revision that is ready to be sent to external parties for bid.
During the bid process, changes may be expected based on vendor feedback."""
FOR_CONSTRUCTION = "FOR CONSTRUCTION"
"""A document revision that is ready to be sent to the field for manufacture,
fabrication, construction. An approved document with this usage implies
that all the quality, regulatory, and design aspects are in place, and
that work can proceed. However, what is constructed is not yet
authorized for operation."""
FOR_OPERATION = "FOR OPERATION"
"""A document revision that can be used to operate the business and/or plant.
Procedures of this usage may be used to do work or operate equipment."""
AS_BUILT = "AS BUILT"
"""A document revision that is an as-built record of construction or manufacture.
Documents of this usage may be used to operate the plant."""
class RETENTION(StrEnum):
"""Retention plan options.
Retention plans define how long the document or record is to be
kept before it is destroyed.
.. note:: May want this to actually be a timedelta
"""
LIFETIME = "LIFETIME"
"""Lifetime of the plant."""
# use_attribute_docstrings allows us to just use docstrings and get
# the same info in both the JSON Schema and also the Sphinx render
model_config = ConfigDict(use_attribute_docstrings=True)
number: str
"""The identification number meeting the document numbering rules"""
title: str = Field(
schema_extra={
"examples": ["CNSG Development and Status 1966-1977"],
},
)
"""Descriptive title explaining the contents"""
revision: str = Field(
schema_extra={
"examples": ["0", "1", "1a", "A"],
},
)
"""Revision code"""
originating_organization_id: _PK_TYPE | None = Field(
foreign_key="organization.id",
description="The organization that owns or issued this document",
default=None,
)
# This allows you to do `my_document.orginating_organization` in Python
originating_organization: "Organization" = Relationship()
originator_number: str | None = None
"""The originating organization's document number (if originated externally)."""
originator_revision: str | None = None
"""The originating organization's revision code (if originated externally)."""
type_id: _PK_TYPE = Field(
foreign_key="informationtype.id",
description="The ID of the InformationType",
)
# type: "InformationType" = Relationship()
contributors: list[User] = Relationship(
back_populates="contributed",
link_model=DocumentUserLink,
sa_relationship_kwargs={
"order_by": "DocumentUserLink.position",
"lazy": "selectin",
},
)
"""Holds all relationships with users but does not show up in JSON Schema"""
@computed_field
@property
def authors(self) -> list[dict]:
    """List of author info for the UI."""
    return [
        {"id": a.id, "name": f"{a.given_name} {a.family_name}"}
        for a in self.contributors
    ]
@computed_field
@property
def reviewers(self) -> list[dict]:
    """List of reviewer info for the UI."""
    # role data lives on DocumentUserLink.role_note; guard with getattr so
    # plain User objects without a role attribute don't raise
    return [
        {"id": a.id, "name": f"{a.given_name} {a.family_name}"}
        for a in self.contributors
        if getattr(a, "role", None) == "reviewer"
    ]
# revision_reviewers: list[RevisionReviewerLink] = Relationship(
# back_populates="reviewed",
# link_model=RevisionReviewerLink,
# sa_relationship_kwargs={
# "order_by": "RevisionReviewerLink.position",
# "cascade": "all, delete-orphan",
# },
# )
# """The reviewer(s), if any."""
# revision_approvers: list[RevisionApproverLink] = Relationship(
# back_populates="approved",
# link_model=RevisionApproverLink,
# sa_relationship_kwargs={
# "order_by": "RevisionApproverLink.position",
# "cascade": "all, delete-orphan",
# },
# )
# """The approver(s), if any."""
revision_comment: str | None = None
"""Explanation of what changed in this revision"""
status: STATUS = STATUS.RESERVED
usage: USAGE = USAGE.FOR_INFORMATION
retention_plan: RETENTION = RETENTION.LIFETIME
restriction_codes: str = Field(
description="Markings for export control, legal, etc.", default=""
)
actual_reviewed_date: datetime | None = None
actual_approved_date: datetime | None = None
# filenames may be empty at first, i.e. for RESERVED docs
filenames: list[str] = Field(
description="Filenames of files attached to this Document. Main file should be the first.",
default_factory=list,
sa_column=Column(JSON, nullable=False, server_default=text("'[]'")),
)
file_notes: list[str] = Field(
description="Short description of each file represented in filenames.",
default_factory=list,
sa_column=Column(JSON, nullable=False, server_default=text("'[]'")),
)
checksums: list[str] = Field(
description="SHA-256 checksum of each file for data integrity",
default_factory=list,
sa_column=Column(JSON, nullable=False, server_default=text("'[]'")),
)
"""Checksums are used to verify long-term data integrity against tampering
and data degradation. While BLAKE3 checksums are faster, SHA-256 is more standard
and built-in at this point. In the future, switching to BLAKE3 may make sense for
easier periodic re-verification of large data libraries."""
physical_location: str | None = Field(
description="Location of a media (only valid when not stored as an electronic file).",
default=None,
)
notes: str = Field(
description="Additional information about the Document/Record", default=""
)
@computed_field
@property
def status_category(self) -> str:
"""The top-level status category, derived from Document Status"""
return self.status.category
@model_validator(mode="after")
def cant_have_electronic_and_physical_location(self) -> "Document": # noqa: D102
has_physical_location = self.physical_location is not None
has_file = bool(self.filenames)  # filenames defaults to [], never None
if has_physical_location and has_file:
raise ValueError(
"Cannot provide both physical_location and filename(s). They are mutually exclusive."
)
return self
class Organization(NRSKModel, table=True):
"""An organization of people: companies, departments, governments, etc."""
name: str = Field(index=True)
"""Organization Name"""
abbreviation: str | None = Field(default=None, index=True)
website: str | None = None
is_active: bool = Field(default=True)
# allow it to be hierarchical to capture full org trees and refer to
# divisions
parent_id: _PK_TYPE | None = Field(
default=None,
foreign_key="organization.id",
)
"""The parent organization this org reports to"""
parent: Optional["Organization"] = Relationship(
back_populates="child_orgs",
sa_relationship_kwargs={"remote_side": "Organization.id"},
)
child_orgs: list["Organization"] = Relationship(back_populates="parent")
class PredecessorTask(NRSKModel):
"""Link to a predecessor task."""
class PRED_TYPE(StrEnum): # noqa: N801
"""Predecessor relationship type."""
FS = "FS"
"""Finish-to-start: predecessor finishes before successor starts (very common)"""
FF = "FF"
"""Finish-to-finish: predecessor finishes before successor can finish"""
SS = "SS"
"""Start-to-start: predecessor starts before successor starts"""
SF = "SF"
"""Start-to-finish: predecessor starts before successor finishes (uncommon, maybe shift change)"""
id: str
"""ID of the predecessor task."""
type: PRED_TYPE = PRED_TYPE.FS
lag: timedelta | None = Field(
description="Lag time. Negative timedelta implies negative lag "
"(lead time, starts before predecessor ends)",
default=None,
)
class ScheduledTask(NRSKModel):
"""Scheduled task, e.g. in P6."""
name: str
id: str | None = None
is_milestone: bool = False
predecessors: list[PredecessorTask] = []
duration: timedelta | None = None
actual_start: datetime | None = None
actual_end: datetime | None = None
scheduled_start: datetime | None = None
scheduled_end: datetime | None = None
@model_validator(mode="before")
@classmethod
def convert_days_to_duration(cls, data: Any) -> Any:
"""Allow input of duration_days, but convert on way in."""
if isinstance(data, dict):
days = data.get("duration_days")
if days is not None:
data["duration"] = timedelta(days=float(days))
del data["duration_days"]
return data
class ScheduleLane(BaseModel):
"""A section of a schedule."""
name: str
color: str | None = None
tasks: list[ScheduledTask]
ScheduleInput = TypeAdapter(list[ScheduleLane])
"""A list of lanes, representing full schedule input."""
View file

View file
@ -0,0 +1,97 @@
"""
Read plant information like systems, equipment, & params from a folder structure.
This reads it into the standard data structures defined via Pydantic,
which can then be used for any other purpose (reporting, etc.)
The structure here is path/to/system where the folders define the
functional hierarchy (i.e. plant, 'island', system, subsystem).
Some files can exist in the hierarchy:
* System data files *.yaml
* System documents *.rst
The documents often make use of the data in the yaml file through
system-level (or other) ``datatemplate`` directives, e.g. to print
out a list of System Functions or Parameters.
This module parses the directory tree and YAML files, combining them into one
big tree of data.
Future considerations:
* It may make sense to have ``system.yaml`` (or ``equipment.yaml``) and
``parameters.yaml`` in each of these folders for longer-term efficient
loading of just the Systems List vs. the entire Equipment List (which
will end up being more efficient in a proper database). Or not... I mean
we could just statically render everything and it'd be pretty performant
during reads. Maybe just have system, equipment, and param info in the
yaml file.
"""
import logging
from pathlib import Path
from ruamel.yaml import YAML
logger = logging.getLogger(__name__)
def load_yaml_tree(root_path: str | Path) -> dict:
"""Load a directory tree of files to represent the Plant systems and params."""
root_path = Path(root_path)
yaml = YAML(typ="safe")
tree = {}
for root, dirs, files in root_path.walk():
# Ensure empty folders get included in tree.
current = tree
rel = root.relative_to(root_path)
parts = rel.parts
logger.info(f"loading {parts}")
# drill into the part of the tree where we are
for part in parts:
if part not in current:
current[part] = {}
current = current[part]
for file in files:
if file.endswith(".yaml"):
data = yaml.load(root / file)
current.update(data or {})  # tolerate empty yaml files
if parts and not current:
current.update({"name": parts[-1]})
logger.info(f"{current}")
assign_hierarchical_code(tree)
return tree
def assign_hierarchical_code(data, current_path=""):
"""
Traverses a nested dictionary and adds a 'pbs_code' key to every
dictionary at any level of nesting, containing its hierarchical path.
The dictionary is modified in place.
Args:
data (dict): The dictionary to traverse and modify.
current_path (str): The current hierarchical path string (e.g., "1.", "2.3.").
"""
if not isinstance(data, dict):
return
item_counter = 1
keys_to_process = list(data.keys())
for key in keys_to_process:
value = data[key]
# e.g., if current_path="1.", the next item's number is "1.1"
new_path = f"{current_path}{item_counter}"
if isinstance(value, dict):
value["pbs_code"] = new_path
assign_hierarchical_code(value, new_path + ".")
item_counter += 1
View file
@ -0,0 +1,139 @@
"""Sphinx directive that makes tables of Plant Data from the PBS tree.
Since individual system-level data can be nicely handled with datatemplates,
this custom directive just looks at the whole tree and makes the PBS
structure.
This is somewhat duplicative of the TOC directive in the Plant folder,
but the automatic sphinx numbering and lack of abbrev is a bit sad.
"""
import os
from pathlib import Path
from docutils import nodes
from docutils.parsers.rst.directives.tables import Table
from sphinx.util import logging
from nrsk.plant.load_plant_data import load_yaml_tree
logger = logging.getLogger("[plant_data_table]")
class PlantBreakdownStructureTable(Table):
"""Plant Breakdown Structure Table."""
has_content = False
required_arguments = 1
optional_arguments = 0
option_spec = {
    "start-node": str,
    "key-path": str,
    "columns": lambda x: [c.strip() for c in x.split(",")],
    "max-depth": int,
    "hide-empty": lambda x: True,
}
def get_default_columns(self):
    # five columns to match the cells produced in add_row below
    return ["PBS", "Name", "Abbrev", "Description", "Tags"]
def run(self):
env = self.state.document.settings.env
pbs_path = Path(env.srcdir) / Path(self.arguments[0])
logger.info(f"[plant-data-table] Loading data from: {pbs_path}")
if not pbs_path.exists():
logger.warning(f"Input data not found: {pbs_path}")
return [nodes.paragraph(text=f"PBS data not found: {pbs_path}")]
data = load_yaml_tree(pbs_path)
# Drill down to optional key-path
if "key-path" in self.options:
keys = self.options["key-path"].split(".")
logger.info(f"Using subkey: {keys}")
for k in keys:
data = data[k]
max_depth = int(self.options.get("max-depth", 10))
hide_empty = "hide-empty" in self.options
columns = self.options.get("columns")
if not columns:
columns = self.get_default_columns()
# Build table
table_node = nodes.table()
classes = table_node.get("classes", []) # want table wider: this doesn't work
classes.append("full-width")
table_node["classes"] = classes
tgroup = nodes.tgroup(cols=len(columns))
table_node += tgroup
# Header
for _ in columns:
tgroup += nodes.colspec(colwidth=10)
head = nodes.thead()
tgroup += head
row = nodes.row()
for col in columns:
row += nodes.entry("", nodes.paragraph(text=col))
head += row
# Body
tbody = nodes.tbody()
tgroup += tbody
def walk(obj, path="", depth=0):
if depth >= max_depth:
return
if not isinstance(obj, dict):
return
for k, v in obj.items():
current_path = f"{path}.{k}" if path else k
if hide_empty and self.is_empty(v):
continue
if not isinstance(v, dict):
continue
self.add_row(tbody, columns, current_path, v, depth)
if "functions" not in obj:
# stop if you hit a system with functions
walk(v, current_path, depth + 1)
walk(data)
return [table_node]
def is_empty(self, value):
return value in ({}, [], "", None)
def add_row(self, tbody, columns, path, value, depth):
"""Add a row to the table."""
row = nodes.row()
indent1 = "\u2003" * depth  # em spaces for indentation
indent2 = "\u2003" * depth * 2
cols = []
cols.append(indent1 + value.get("pbs_code", ""))
cols.append(indent2 + value.get("name", "(noname)"))
cols.append(value.get("abbrev", ""))
cols.append(value.get("desc", ""))
cols.append(value.get("tags", ""))
for col in cols:
entry = nodes.entry()
para = nodes.paragraph()
para += nodes.Text(col)
entry += para
row += entry
tbody += row
def setup(app):
"""Setup for sphinx extension."""
app.add_directive("plant-data-table", PlantBreakdownStructureTable)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
View file
@ -0,0 +1,42 @@
"""Load plant PBS data."""
from pathlib import Path

from ruamel.yaml import YAML
def load_yaml_tree(root_path: str | Path) -> dict:
"""Load data from yaml tree."""
root_path = Path(root_path)
yaml = YAML(typ="safe")
tree = {}
for yaml_file in sorted(root_path.rglob("*.yaml")):
rel = yaml_file.relative_to(root_path).with_suffix("") # remove .yaml
parts = (
rel.parent.parts if rel.name == "index" else (*rel.parent.parts, rel.name)
)
current = tree
for part in parts[:-1]:
if part not in current:
current[part] = {}
current = current[part]
key = parts[-1]
data = yaml.load(yaml_file)
if key == "index": # treat index.yaml as folder metadata
current.update(data or {})
else:
if (
current.get(key) is not None
and isinstance(current[key], dict)
and isinstance(data, dict)
):
current[key].update(data) # merge if conflict
else:
current[key] = data
return tree
View file

View file
@ -9,13 +9,12 @@ import logging
 import os
 import re
 from datetime import datetime
-from glob import glob
+from pathlib import Path
 
 import jpype
-import jpype.imports
+import jpype.imports  # required though not explicitly 'used'
 import matplotlib.dates as mdates
 import matplotlib.pyplot as plt
-import mpl_toolkits.axisartist as axisartist
 import pandas as pd
 import yaml
 from docutils import nodes
@ -29,18 +28,13 @@ logger.setLevel(logging.DEBUG)
 # Start JVM with MPXJ jar
 jpype.startJVM(classpath=["/home/nick/repos/mpxj/mpxj-lib/*"])
 
-from java.io import File
+from java.io import File  # noqa: E402
 from java.time import LocalDateTime  # noqa: E402
 from org.mpxj import (  # noqa: E402
-    Availability,
     Duration,
-    FieldType,
     ProjectFile,
     Relation,
     RelationType,
-    Resource,
-    TaskField,
-    TaskType,
     TimeUnit,
 )
 from org.mpxj.cpm import MicrosoftScheduler, PrimaveraScheduler  # noqa: E402
@ -50,11 +44,29 @@ from org.mpxj.writer import (  # noqa:E402
     UniversalProjectWriter,
 )
 
+from nrsk.models import PredecessorTask, ScheduleInput
+
+_PT = PredecessorTask.PRED_TYPE
+RELATION_MAP = {
+    _PT.FF: RelationType.FINISH_FINISH,
+    _PT.FS: RelationType.FINISH_START,
+    _PT.SS: RelationType.START_START,
+    _PT.SF: RelationType.START_FINISH,
+}
+
 
 def create_task(parent, name, duration):
     """Make a planned task."""
     task = parent.addTask()
     task.setName(name)
-    task.setDuration(duration)
-    task.setActualDuration(Duration.getInstance(0, duration.getUnits()))
-    task.setRemainingDuration(duration)
+    if duration:
+        # apply duration in way that makes schedule solver solve for it
+        # Some tasks may not have durations but then will require appropriate
+        # predecessor links to become scheduled
+        # but then they'll need a work resource or else you'll get error:
+        # >>> Task has no duration value and no resource assignments with a work
+        #     value: [Task id=35 uniqueID=35 name=Pre-licensing activities]
+        task.setDuration(duration)
+        task.setActualDuration(Duration.getInstance(0, duration.getUnits()))
+        task.setRemainingDuration(duration)
@ -66,6 +78,7 @@ def load_from_yaml(fname: str = "schedule.yaml") -> ProjectFile:
     """Load data file in YAML format."""
     with open(fname) as f:
         data = yaml.safe_load(f)
+    data = ScheduleInput.validate_python(data)
 
     project = ProjectFile()
@ -75,35 +88,44 @@ def load_from_yaml(fname: str = "schedule.yaml") -> ProjectFile:
     predecessors = {}
     tasks_by_id = {}
 
-    for lane in data["lanes"]:
+    for lane in data:
         summary = project.addTask()
-        summary.setName(lane["name"])
-        for task_d in lane["tasks"]:
-            if task_d.get("milestone"):
+        summary.setName(lane.name)
+        for task_d in lane.tasks:
+            if task_d.is_milestone:
                 task = create_task(
-                    summary, task_d["name"], Duration.getInstance(0, TimeUnit.DAYS)
+                    summary, task_d.name, Duration.getInstance(0, TimeUnit.DAYS)
                 )
             else:
-                if not task_d.get("duration_days"):
-                    raise ValueError(f"{task_d} needs a duration")
-                task = create_task(
-                    summary,
-                    task_d["name"],
-                    Duration.getInstance(task_d["duration_days"], TimeUnit.DAYS),
-                )
+                duration = (
+                    Duration.getInstance(task_d.duration.days, TimeUnit.DAYS)
+                    if task_d.duration
+                    else None
+                )
+                task = create_task(summary, task_d.name, duration)
-            # track predecessors by ID to build after all tasks exist
-            if tid := task_d.get("id"):
+            # track predecessors to build after all tasks exist
+            if tid := task_d.id:
                 tasks_by_id[tid] = task
-            for pred_id in task_d.get("predecessors", []):
+            for pred_data in task_d.predecessors:
                 pred_ids = predecessors.get(task, [])
-                pred_ids.append(pred_id)
+                pred_ids.append(pred_data)
                 predecessors[task] = pred_ids
 
     for task in project.getTasks():
-        for pred_id in predecessors.get(task, []):
-            pred = tasks_by_id[pred_id]
-            task.addPredecessor(Relation.Builder().predecessorTask(pred))
+        for pred_data in predecessors.get(task, []):
+            pred_id = pred_data.id
+            pred_task = tasks_by_id[pred_id]
+            type = RELATION_MAP[pred_data.type]
+            # lag_duration is handled/translated by pydantic into timedelta
+            if lag := pred_data.lag:
+                lag_days = lag.days  # note that this truncates to nearest day
+            else:
+                lag_days = 0
+            lag = Duration.getInstance(lag_days, TimeUnit.DAYS)
+            task.addPredecessor(
+                Relation.Builder().predecessorTask(pred_task).lag(lag).type(type)
+            )
 
     return project
@ -196,7 +218,7 @@ def _preprocess_plot(project):
     return df, df_deps
 
 
-def plot_schedule(
+def plot_schedule(  # noqa: C901
     input_fname: str = "scheduled.xml", project=None, output_fname: str = "schedule.svg"
 ):
     """Generate plot of schedule."""
@ -270,7 +292,7 @@
     plt.title("AMS High-Level Schedule")
     # plt.tight_layout()
     plt.savefig(output_fname)
-    plt.show()
+    # plt.show()
 
 
 class ScheduleDirective(Directive):
@ -281,65 +303,69 @@
     def run(self):  # noqa: D102
         env = self.state.document.settings.env
-        builder = env.app.builder
         schedule_data = self.arguments[0]
-        schedule_data_abs = os.path.join(env.srcdir, schedule_data)
-        if not os.path.exists(schedule_data_abs):
+        schedule_data_abs = Path(env.srcdir) / schedule_data
+        if not schedule_data_abs.exists():
             logger.error(f"Schedule file not found: {schedule_data_abs}")
             return []
 
-        # Image output directory
-        gen_dir = os.path.join(env.app.srcdir, "generated_assets")
+        # put image within _static so html builder knows to copy it over.
+        gen_dir = Path(env.app.srcdir) / "_static" / "generated_assets"
         ensuredir(gen_dir)
-        ensuredir(os.path.join(env.app.outdir, "_downloads"))
 
         # Name of the generated file
         base = os.path.splitext(os.path.basename(schedule_data))[0]
-        out_image = os.path.join(gen_dir, f"{base}.svg")
+        out_image = gen_dir / f"{base}.svg"
 
         start_date = datetime(2026, 1, 1)
         proj = load_from_yaml(fname=schedule_data)
         solve_schedule(proj, start_date)
         plot_schedule(project=proj, output_fname=out_image)
         writer = UniversalProjectWriter(FileFormat.MSPDI)
-        writer.write(proj, os.path.join("_build", "_downloads", f"{base}_mspdi.xml"))
+        writer.write(proj, gen_dir / f"{base}_mspdi.xml")
 
         env.note_dependency(schedule_data_abs)
 
-        rel = str(os.path.relpath(out_image, env.app.srcdir))
-        # trying to mock /generated_assets/schedule.svg for the build folder
-        # but it ends up in _images actually.
-        # somewhat hacky but works in subfolders
-        abs_rel = os.path.join("/", rel)
-        image_node = nodes.image(uri=abs_rel)
-        uri = builder.get_relative_uri(env.docname, "_images/" + f"{base}.svg")
-        uri = uri.replace(".html", "")
-        ref_node = nodes.reference("", "", refuri=uri)
-        ref_node += image_node
-        ref_node["target"] = "_blank"
-        ref_node["rel"] = "noopener"
-
-        uri_dl1 = builder.get_relative_uri(
-            env.docname, "_downloads/" + f"{base}_mspdi.xml"
-        )
-        uri_dl1 = uri_dl1.replace(".html", "")
-        download1 = nodes.reference(
-            text="Download schedule in MS Project XML format",
-            refuri=uri_dl1,
-            classes=["download-link"],
-        )
-        paragraph = nodes.paragraph()
-        paragraph += ref_node
-        paragraph += download1
+        uri = f"/_static/generated_assets/{base}.svg"
+        image_node = nodes.image(uri=uri)
+        paragraph = nodes.paragraph()
+        # download link only makes sense in web env, not PDF
+        builder_name = self.state.document.settings.env.app.builder.name
+        if builder_name not in ("html", "singlehtml", "dirhtml"):
+            paragraph += image_node
+        else:
+            # add hyperlink to image. Since this may be called from a subdir we need
+            # relative paths that walk up appropriately.
+            docname = env.docname  # subdir/mydoc
+            relative_root_path = "../" * docname.count(os.sep)
+            hyperlink_uri = relative_root_path + uri[1:]
+            # Result when docname is 'subdir/mydoc':
+            # hyperlink_uri will be: ../_static/generated_assets/my_diagram.svg
+            ref_node = nodes.reference("", "", refuri=hyperlink_uri)
+            ref_node += image_node
+            ref_node["target"] = "_blank"
+            ref_node["rel"] = "noopener"
+            paragraph += ref_node
+            # and hyperlink to schedule data
+            hyperlink_uri = (
+                relative_root_path + f"_static/generated_assets/{base}_mspdi.xml"
+            )
+            download1 = nodes.reference(
+                text="Download schedule in MS Project XML format",
+                refuri=hyperlink_uri,
+                classes=["download-link"],
+            )
+            paragraph += download1
 
         return [paragraph]
 
 
-def setup(app):
-    """Setup for sphinx extension."""
+def setup(app):  # noqa: D103
     app.add_directive("schedule", ScheduleDirective)
     return {
0
tests/__init__.py Normal file
View file

View file

View file
@ -0,0 +1,53 @@
"""Tests for Document model."""
from datetime import datetime
import pytest
from pydantic import ValidationError
from nrsk.models import Document
@pytest.fixture
def valid_document_data():  # noqa: D103
    # minimal valid data per the Document model: number and type_id are required
    return {
        "id": 1,
        "number": "AMS-CALC-00001",
        "title": "Project Proposal Q4",
        "revision": "2",
        "type_id": 1,
        "status": "RESERVED",
    }
def test_document_model_success(valid_document_data):
"""Test that valid input data correctly creates a Document instance."""
doc = Document(**valid_document_data)
assert isinstance(doc, Document)
assert doc.title == "Project Proposal Q4"
assert doc.status == Document.STATUS.RESERVED
assert doc.status.value == "RESERVED"
assert doc.status_category == "Not Yet Approved"
@pytest.mark.parametrize(
"invalid_status",
[
"Reserved", # Capitalized (case sensitive)
"re-served", # Hyphenated (typo)
"finalized", # Non-existent status
123, # Wrong type (integer)
],
)
def test_document_status_invalid_enum(valid_document_data, invalid_status):
"""Tests that the model raises ValidationError for invalid status strings."""
data = valid_document_data.copy()
data["status"] = invalid_status
with pytest.raises(ValidationError) as excinfo:
Document(**data)
assert any("status" in err["loc"] for err in excinfo.value.errors())
assert "Input should be " in str(excinfo.value)