diff --git a/.gitignore b/.gitignore
index 84b05ff..2dcfed6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,4 @@
-# project stuff
-documents/api
 **/generated_assets/
-
 # Typical Python stuff:
 # Byte-compiled / optimized / DLL files
 __pycache__/
@@ -162,8 +159,4 @@ cython_debug/
 # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
-
-# Sphinx
-**/_build
-**/_generated
+#.idea/
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 1026bb8..f0099ea 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -5,22 +5,4 @@
             "source.organizeImports": "explicit"
         }
     },
-    "files.watcherExclude": {
-        "**/_build/**": true,
-        "**/__pycache__": true
-    },
-    "search.exclude": {
-        "**/__pycache__": true,
-        "**/.pytest_cache": true,
-        "**/.mypy_cache": true,
-        "**/build": true,
-        "**/dist": true,
-        "**/_build": true,
-        "**/_build/**": true
-    },
-    "files.exclude": {
-        "**/_build": true,
-        "**/*.egg-info": true,
-        "**/__pycache__": true
-    }
 }
\ No newline at end of file
diff --git a/alembic.ini b/alembic.ini
deleted file mode 100644
index 1b03b05..0000000
--- a/alembic.ini
+++ /dev/null
@@ -1,147 +0,0 @@
-# A generic, single database configuration.
-
-[alembic]
-# path to migration scripts.
-# this is typically a path given in POSIX (e.g. forward slashes)
-# format, relative to the token %(here)s which refers to the location of this
-# ini file
-script_location = %(here)s/alembic
-
-# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
-# Uncomment the line below if you want the files to be prepended with date and time
-# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
-# for all available tokens
-# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
-
-# sys.path path, will be prepended to sys.path if present.
-# defaults to the current working directory. for multiple paths, the path separator
-# is defined by "path_separator" below.
-prepend_sys_path = .
-
-
-# timezone to use when rendering the date within the migration file
-# as well as the filename.
-# If specified, requires the tzdata library which can be installed by adding
-# `alembic[tz]` to the pip requirements.
-# string value is passed to ZoneInfo()
-# leave blank for localtime
-# timezone =
-
-# max length of characters to apply to the "slug" field
-# truncate_slug_length = 40
-
-# set to 'true' to run the environment during
-# the 'revision' command, regardless of autogenerate
-# revision_environment = false
-
-# set to 'true' to allow .pyc and .pyo files without
-# a source .py file to be detected as revisions in the
-# versions/ directory
-# sourceless = false
-
-# version location specification; This defaults
-# to <script_location>/versions. When using multiple version
-# directories, initial revisions must be specified with --version-path.
-# The path separator used here should be the separator specified by "path_separator"
-# below.
-# version_locations = %(here)s/bar:%(here)s/bat:%(here)s/alembic/versions
-
-# path_separator; This indicates what character is used to split lists of file
-# paths, including version_locations and prepend_sys_path within configparser
-# files such as alembic.ini.
-# The default rendered in new alembic.ini files is "os", which uses os.pathsep -# to provide os-dependent path splitting. -# -# Note that in order to support legacy alembic.ini files, this default does NOT -# take place if path_separator is not present in alembic.ini. If this -# option is omitted entirely, fallback logic is as follows: -# -# 1. Parsing of the version_locations option falls back to using the legacy -# "version_path_separator" key, which if absent then falls back to the legacy -# behavior of splitting on spaces and/or commas. -# 2. Parsing of the prepend_sys_path option falls back to the legacy -# behavior of splitting on spaces, commas, or colons. -# -# Valid values for path_separator are: -# -# path_separator = : -# path_separator = ; -# path_separator = space -# path_separator = newline -# -# Use os.pathsep. Default configuration used for new projects. -path_separator = os - -# set to 'true' to search source files recursively -# in each "version_locations" directory -# new in Alembic version 1.10 -# recursive_version_locations = false - -# the output encoding used when revision files -# are written from script.py.mako -# output_encoding = utf-8 - -# database URL. This is consumed by the user-maintained env.py script only. -# other means of configuring database URLs may be customized within the env.py -# file. -sqlalchemy.url = driver://user:pass@localhost/dbname - - -[post_write_hooks] -# post_write_hooks defines scripts or Python functions that are run -# on newly generated revision scripts. See the documentation for further -# detail and examples - -# format using "black" - use the console_scripts runner, against the "black" entrypoint -# hooks = black -# black.type = console_scripts -# black.entrypoint = black -# black.options = -l 79 REVISION_SCRIPT_FILENAME - -# lint with attempts to fix using "ruff" - use the module runner, against the "ruff" module -# hooks = ruff -# ruff.type = module -# ruff.module = ruff -# ruff.options = check --fix REVISION_SCRIPT_FILENAME - -# Alternatively, use the exec runner to execute a binary found on your PATH -# hooks = ruff -# ruff.type = exec -# ruff.executable = ruff -# ruff.options = check --fix REVISION_SCRIPT_FILENAME - -# Logging configuration. This is also consumed by the user-maintained -# env.py script only. -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARNING -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARNING -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/alembic/README b/alembic/README deleted file mode 100644 index fdacc05..0000000 --- a/alembic/README +++ /dev/null @@ -1 +0,0 @@ -pyproject configuration, based on the generic configuration. \ No newline at end of file diff --git a/alembic/env.py b/alembic/env.py deleted file mode 100644 index b2c509d..0000000 --- a/alembic/env.py +++ /dev/null @@ -1,80 +0,0 @@ -from logging.config import fileConfig - -from sqlalchemy import engine_from_config, pool -from sqlmodel import SQLModel - -from alembic import context -from nrsk.db import get_engine -from nrsk.models import Document, User - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. 
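A note on the env.py being deleted here: the seemingly unused ``Document`` and ``User`` imports above are load-bearing. Importing SQLModel table classes is what registers their tables on ``SQLModel.metadata``, which the ``target_metadata`` assignment further down hands to autogenerate; drop the imports and Alembic would see an empty schema. A minimal sketch of that registration side effect (hypothetical model, not the actual nrsk one):

    from sqlmodel import Field, SQLModel

    class User(SQLModel, table=True):
        # Defining (or importing) the class is enough to register the table;
        # no instance is ever created.
        id: int | None = Field(default=None, primary_key=True)

    # The table is now visible to Alembic through the shared metadata:
    print(SQLModel.metadata.tables.keys())  # dict_keys(['user'])

The deleted file continues: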
-config = context.config - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -if config.config_file_name is not None: - fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -# target_metadata = mymodel.Base.metadata -# allow autogeneration of models -target_metadata = SQLModel.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline() -> None: - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - url = config.get_main_option("sqlalchemy.url") - context.configure( - url=url, - target_metadata=target_metadata, - literal_binds=True, - dialect_opts={"paramstyle": "named"}, - ) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online() -> None: - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - # connectable = engine_from_config( - # config.get_section(config.config_ini_section, {}), - # prefix="sqlalchemy.", - # poolclass=pool.NullPool, - # ) - connectable = get_engine() - - with connectable.connect() as connection: - context.configure(connection=connection, target_metadata=target_metadata) - - with context.begin_transaction(): - context.run_migrations() - - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/alembic/script.py.mako b/alembic/script.py.mako deleted file mode 100644 index 697cf67..0000000 --- a/alembic/script.py.mako +++ /dev/null @@ -1,29 +0,0 @@ -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision | comma,n} -Create Date: ${create_date} - -""" -from typing import Sequence, Union - -from alembic import op -import sqlalchemy as sa -import sqlmodel -${imports if imports else ""} - -# revision identifiers, used by Alembic. -revision: str = ${repr(up_revision)} -down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)} -branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} -depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} - - -def upgrade() -> None: - """Upgrade schema.""" - ${upgrades if upgrades else "pass"} - - -def downgrade() -> None: - """Downgrade schema.""" - ${downgrades if downgrades else "pass"} diff --git a/alembic/versions/3791144a7ad2_initial_setup.py b/alembic/versions/3791144a7ad2_initial_setup.py deleted file mode 100644 index 292ff33..0000000 --- a/alembic/versions/3791144a7ad2_initial_setup.py +++ /dev/null @@ -1,192 +0,0 @@ -"""initial_setup - -Revision ID: 3791144a7ad2 -Revises: -Create Date: 2026-01-05 10:24:53.993818 - -""" - -from typing import Sequence, Union - -import sqlalchemy as sa -import sqlmodel - -from alembic import op - -# revision identifiers, used by Alembic. -revision: str = "3791144a7ad2" -down_revision: Union[str, Sequence[str], None] = None -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None - - -def upgrade() -> None: - """Upgrade schema.""" - # ### commands auto generated by Alembic - please adjust! 
### - op.create_table( - "informationtype", - sa.Column("id", sa.Integer(), nullable=False), - sa.Column("name", sqlmodel.sql.sqltypes.AutoString(), nullable=False), - sa.Column("abbrev", sqlmodel.sql.sqltypes.AutoString(), nullable=False), - sa.Column("examples", sa.JSON(), nullable=True), - sa.Column("description", sqlmodel.sql.sqltypes.AutoString(), nullable=False), - sa.Column("retention", sqlmodel.sql.sqltypes.AutoString(), nullable=True), - sa.Column("record", sa.Boolean(), nullable=False), - sa.Column("use_cases", sqlmodel.sql.sqltypes.AutoString(), nullable=False), - sa.Column("notes", sqlmodel.sql.sqltypes.AutoString(), nullable=False), - sa.Column("parent_id", sa.Integer(), nullable=True), - sa.ForeignKeyConstraint( - ["parent_id"], - ["informationtype.id"], - ), - sa.PrimaryKeyConstraint("id"), - ) - op.create_table( - "organization", - sa.Column("id", sa.Integer(), nullable=False), - sa.Column("name", sqlmodel.sql.sqltypes.AutoString(), nullable=False), - sa.Column("abbreviation", sqlmodel.sql.sqltypes.AutoString(), nullable=True), - sa.Column("website", sqlmodel.sql.sqltypes.AutoString(), nullable=True), - sa.Column("is_active", sa.Boolean(), nullable=False), - sa.Column("parent_id", sa.Integer(), nullable=True), - sa.ForeignKeyConstraint( - ["parent_id"], - ["organization.id"], - ), - sa.PrimaryKeyConstraint("id"), - ) - op.create_index( - op.f("ix_organization_abbreviation"), - "organization", - ["abbreviation"], - unique=False, - ) - op.create_index( - op.f("ix_organization_name"), "organization", ["name"], unique=False - ) - op.create_table( - "user", - sa.Column("id", sa.Integer(), nullable=False), - sa.Column("given_name", sqlmodel.sql.sqltypes.AutoString(), nullable=False), - sa.Column("family_name", sqlmodel.sql.sqltypes.AutoString(), nullable=False), - sa.Column("preferred_name", sqlmodel.sql.sqltypes.AutoString(), nullable=True), - sa.Column("previous_name", sqlmodel.sql.sqltypes.AutoString(), nullable=True), - sa.Column("email", sqlmodel.sql.sqltypes.AutoString(), nullable=False), - sa.Column("joined_on", sa.DateTime(), nullable=True), - sa.Column("deactivated_on", sa.DateTime(), nullable=True), - sa.Column("organization", sqlmodel.sql.sqltypes.AutoString(), nullable=True), - sa.Column("title", sqlmodel.sql.sqltypes.AutoString(), nullable=True), - sa.PrimaryKeyConstraint("id"), - ) - op.create_table( - "document", - sa.Column("id", sa.Integer(), nullable=False), - sa.Column("number", sqlmodel.sql.sqltypes.AutoString(), nullable=False), - sa.Column("title", sqlmodel.sql.sqltypes.AutoString(), nullable=False), - sa.Column("revision", sqlmodel.sql.sqltypes.AutoString(), nullable=False), - sa.Column("originating_organization_id", sa.Integer(), nullable=True), - sa.Column( - "originator_number", sqlmodel.sql.sqltypes.AutoString(), nullable=True - ), - sa.Column( - "originator_revision", sqlmodel.sql.sqltypes.AutoString(), nullable=True - ), - sa.Column("type_id", sa.Integer(), nullable=False), - sa.Column( - "revision_comment", sqlmodel.sql.sqltypes.AutoString(), nullable=True - ), - sa.Column( - "status", - sa.Enum( - "RESERVED", - "IN_PROGRESS", - "IN_REVIEW", - "REJECTED", - "AUTHORIZED", - "REFERENCE", - "NATIVE", - "APPROVED", - "QUARANTINED", - "SUPERSEDED", - "REVISED", - "VOIDED", - "CLOSED", - name="status", - ), - nullable=False, - ), - sa.Column( - "usage", - sa.Enum( - "FOR_INFORMATION", - "FOR_STAGE_APPROVAL", - "FOR_BID", - "FOR_CONSTRUCTION", - "FOR_OPERATION", - "AS_BUILT", - name="usage", - ), - nullable=False, - ), - sa.Column( - "retention_plan", 
sa.Enum("LIFETIME", name="retention"), nullable=False - ), - sa.Column( - "restriction_codes", sqlmodel.sql.sqltypes.AutoString(), nullable=False - ), - sa.Column("actual_reviewed_date", sa.DateTime(), nullable=True), - sa.Column("actual_approved_date", sa.DateTime(), nullable=True), - sa.Column( - "filenames", sa.JSON(), server_default=sa.text("'[]'"), nullable=False - ), - sa.Column( - "file_notes", sa.JSON(), server_default=sa.text("'[]'"), nullable=False - ), - sa.Column( - "checksums", sa.JSON(), server_default=sa.text("'[]'"), nullable=False - ), - sa.Column( - "physical_location", sqlmodel.sql.sqltypes.AutoString(), nullable=True - ), - sa.Column("notes", sqlmodel.sql.sqltypes.AutoString(), nullable=False), - sa.ForeignKeyConstraint( - ["originating_organization_id"], - ["organization.id"], - ), - sa.ForeignKeyConstraint( - ["type_id"], - ["informationtype.id"], - ), - sa.PrimaryKeyConstraint("id"), - ) - op.create_table( - "documentuserlink", - sa.Column("id", sa.Integer(), nullable=False), - sa.Column("position", sa.Integer(), nullable=False), - sa.Column("role_note", sqlmodel.sql.sqltypes.AutoString(), nullable=False), - sa.Column("document_id", sa.Integer(), nullable=False), - sa.Column("user_id", sa.Integer(), nullable=False), - sa.ForeignKeyConstraint( - ["document_id"], - ["document.id"], - ), - sa.ForeignKeyConstraint( - ["user_id"], - ["user.id"], - ), - sa.PrimaryKeyConstraint("id", "document_id", "user_id"), - ) - # ### end Alembic commands ### - - -def downgrade() -> None: - """Downgrade schema.""" - # ### commands auto generated by Alembic - please adjust! ### - op.drop_table("documentuserlink") - op.drop_table("document") - op.drop_table("user") - op.drop_index(op.f("ix_organization_name"), table_name="organization") - op.drop_index(op.f("ix_organization_abbreviation"), table_name="organization") - op.drop_table("organization") - op.drop_table("informationtype") - # ### end Alembic commands ### diff --git a/documents/AMS.bib b/documents/AMS.bib deleted file mode 100644 index 4b0fc6f..0000000 --- a/documents/AMS.bib +++ /dev/null @@ -1,90 +0,0 @@ -@techreport{barrieroInformationManagementProcess2010, - title = {Information {{Management Process Description Guideline}}}, - author = {Barriero, Amy}, - year = 2010, - number = {PDG01-2010}, - institution = {NIRMA}, - url = {https://international.anl.gov/training/materials/6H/Gilbert/PDG02%20Documents%20and%20Records%20Process%20Description.pdf}, - langid = {american}, - file = {/pool/Reading/Nuclear/institutions/nirma/PDG01 Information Management Process Description.pdf} -} - -@misc{cahillDesignPhilosophyBrief2025, - title = {Design {{Philosophy Brief}}}, - author = {Cahill, William}, - year = 2025, - publisher = {AMS}, - url = {https://maritimesai.kiteworks.com/#/file/8b92c7cb-4444-4a3e-aba6-fdf328f7d2f8?currentPage=1} -} - -@techreport{cloverDocumentControlRecords2010, - title = {Document {{Control}} and {{Records Management Process Description}}}, - author = {Clover, Bill}, - year = 2010, - number = {PDG02-2010}, - institution = {NIRMA}, - url = {https://international.anl.gov/training/materials/6H/Gilbert/PDG02%20Documents%20and%20Records%20Process%20Description.pdf}, - langid = {american}, - file = {/pool/Reading/Nuclear/institutions/nirma/PDG02 Documents and Records Process Description.pdf} -} - -@techreport{fleerReactorTechnologyStudy25, - title = {Reactor {{Technology Study}}}, - author = {Fleer, D and Edens, A and Ciocco, S and Jacqueline, K}, - year = 25, - month = nov, - number = {B4M-ES-121043}, - institution = 
{BWXT}, - url = {https://kiteworks.bwxt.com/web/file/416b69b9-4c5c-44c9-9605-40a25e181493?currentPage=1}, - copyright = {Export Controlled}, - file = {/home/nick/pool/Users/Nick/Documents/2025/What is Nuclear LLC/jobs/Marine/AMS docs/B4M-ES-121043_Rev001.pdf} -} - -@techreport{halpinInformationManagementNuclear1978d, - title = {Information Management for Nuclear Power Stations: Project Description}, - shorttitle = {Information Management for Nuclear Power Stations}, - author = {Halpin, D. W.}, - year = 1978, - month = mar, - number = {ORO-5270-1}, - institution = {Georgia Inst. of Tech., Atlanta (USA). School of Civil Engineering}, - doi = {10.2172/6543303}, - url = {https://www.osti.gov/biblio/6543303}, - abstract = {A study of the information management structure required to support nuclear power plant construction was performed by a joint university-industry group under the sponsorship of the Department of Energy (DOE), formerly the Energy Research and Development Administration (ERDA). The purpose of this study was (1) to study methods for the control of information during the construction and start-up of nuclear power plants, and (2) identify those data elements intrinsic to nuclear power plants which must be maintained in a structured format for quick access and retrieval. Maintenance of the massive amount of data needed for control of a nuclear project during design, procurement, construction, start-up/testing, and operational phases requires a structuring which allows immediate update and retrieval based on a wide variety of access criteria. The objective of the research described has been to identify design concepts which support the development of an information control system responsive to these requirements. A conceptual design of a Management Information Data Base System which can meet the project control and information exchange needs of today's large nuclear power plant construction projects has been completed and an approach recommended for development and implementation of a complete operational system.}, - langid = {english}, - file = {/pool/Reading/Nuclear/process/configuration management/Information Management for Nuclear Power Stations 1978/Halpin - 1978 - Information management for nuclear power stations project description.pdf} -} - -@misc{imoCodeSafetyNuclear1982, - title = {Code of {{Safety}} for {{Nuclear Merchant Ships}}}, - author = {IMO}, - year = 1982, - month = jun, - number = {A XII/Res.491}, - publisher = {Internaional Maritime Organization}, - url = {https://wwwcdn.imo.org/localresources/en/KnowledgeCentre/IndexofIMOResolutions/AssemblyDocuments/A.491(12).pdf} -} - -@techreport{renuartAdvancedNuclearTechnology2014, - title = {Advanced {{Nuclear Technology}}: {{Data-Centric Configuration Management}} for {{Efficiency}} and {{Cost Reduction}}: {{An Economic Basis}} for {{Implementation}}}, - author = {Renuart, R.}, - year = 2014, - month = dec, - number = {3002003126}, - pages = {170}, - institution = {EPRI}, - url = {https://www.epri.com/research/products/3002003126}, - abstract = {The Electric Power Research Institute (EPRI) Advanced Nuclear Technology (ANT) Program has been working on defining the tools that can be a part of an effective configuration management (CM) system. 
This includes the potential use of modern digital data management tools that can be useful not only across the plant life cycle, including engineering, procurement, construction (EPC), and decommissioning, but also for the management of plant configuration—control of the licensing basis, plant operation, and input and control of many plant programs.}, - langid = {american}, - file = {/home/nick/pool/Reading/Nuclear/process/configuration management/Advanced Nuclear Technology: -Data-Centric Configuration Management for -Efficiency and Cost Reduction 000000003002003126.pdf} -} - -@misc{SQLModel, - title = {{{SQLModel}}}, - url = {https://sqlmodel.tiangolo.com/}, - abstract = {SQLModel, SQL databases in Python, designed for simplicity, compatibility, and robustness.}, - langid = {english}, - file = {/home/nick/Zotero/storage/MA9HAJ52/sqlmodel.tiangolo.com.html} -} diff --git a/documents/_data/doc-types.yaml b/documents/_data/doc-types.yaml deleted file mode 100644 index 93ccd98..0000000 --- a/documents/_data/doc-types.yaml +++ /dev/null @@ -1,135 +0,0 @@ -# Many of these came from cloverDocumentControlRecords2010 originally -# Other sources include IEC 631355 https://en.wikipedia.org/wiki/IEC_61355 -# and ANSI-N45-2-9-74 https://jetsquality.com/wp-content/uploads/2019/01/ANSI-N45-2-9-74.pdf -# has like >100 record types nicely sorted into use case categories (Design, Procurement, Manufacture, ...) -# I like these the best, but maybe they can be compressed with considering USAGE. - -- name: Calculation - abbrev: CALC - use_cases: Documenting an analysis - record: True - retention: -- name: Design Report - abbrev: DREP - use_cases: Documenting a design - record: True - retention: -- name: Design Review Report - abbrev: DREV - use_cases: Documenting the review of a design - record: True - retention: -- name: System Design Description - abbrev: SDD - use_cases: Describing a system - record: True - retention: -- name: Correspondence - abbrev: CSP - use_cases: Communications - record: False - retention: -- name: Drawing - abbrev: DRW - use_cases: Describing SSCs, includes many engineering deliverables - record: True - retention: -- name: Engineering Change Package - abbrev: ECP - use_cases: Describing a formal change to the system configuration - record: True - retention: -- name: Equipment Data Sheets - abbrev: EDS - use_cases: Define technical requirements and operating boundaries - record: True - retention: -- name: Environmental Qualification Package - abbrev: EQP - use_cases: > - Documents describing environmental qualifications of equipment such as - lab reports, thermal aging analyses, radiation resistance data supporting - 10 CFR 50.49 - record: True - retention: -- name: Form - abbrev: FORM - use_cases: A reusable starting point for other Documents/Records, or for collecting data - record: False - retention: Lifetime - notes: Forms are blank documents. -- name: Instructions - abbrev: INSTR - use_cases: Explanations of how to use systems or equipment - record: True - retention: -- name: Native File - abbrev: NTV - use_cases: A native file i.e. from a proprietary authoring software - record: False - notes: > - Native files are kept for ease of revision. They may also be kept as - additional file attachment alongside the document/record. 
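Related: conf.py, later in this diff, also removes a ``validate_doc_types`` hook wired to Sphinx's ``builder-inited`` event, which presumably sanity-checked listings like this one. A rough sketch of what such a check can look like; the required-field rules here are assumptions, not the actual ``nrsk.documents.validate`` implementation:

    from pathlib import Path

    import yaml  # PyYAML, assumed available in the docs build environment

    REQUIRED_KEYS = {"name", "abbrev", "record"}

    def validate_doc_types(app):
        """Abort the Sphinx build early if a doc-type entry is malformed."""
        path = Path(app.srcdir) / "_data" / "doc-types.yaml"
        for entry in yaml.safe_load(path.read_text()):
            missing = REQUIRED_KEYS - entry.keys()
            if missing:
                name = entry.get("name", "?")
                raise ValueError(f"doc type {name!r} is missing {sorted(missing)}")

The deleted type listing continues: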
-- name: Policy - abbrev: POL - use_cases: A policy - record: True -- name: Business Practice/Desk Guide - abbrev: BPDG - record: False -- name: Procedure - abbrev: PROC - use_cases: Defining and dictating how work is done - record: True - retention: Lifetime -- name: Procurement - abbrev: PCMT - use_cases: Related to purchases - record: True - retention: Lifetime -- name: Program Manual/Plan - abbrev: PMAN - use_cases: > - High-level governance documents that describes how the plant will manage - a specific program area (e.g., Radiation Protection, In-Service - Inspection, or Fire Protection). - record: True - retention: Lifetime -- name: Quality Classification List - abbrev: QLST - use_cases: Categorizes every SSC based on importance to safety - record: True - retention: -- name: Radiation Protection Survey - abbrev: RPS - record: True - retention: -- name: Records Transmittal Instructions/Indexing Guide - abbrev: RTI - record: True - retention: -- name: Regulatory Documents - abbrev: REG - use_cases: Safety Analysis Report, Technical Specifications, etc. - record: True - retention: -- name: Setpoints - abbrev: SET - record: True - retention: -- name: Specifications - abbrev: SPEC - record: True - retention: -- name: Training - abbrev: TRN - record: True - retention: -- name: Vendor Drawings - abbrev: VDRW - record: True - retention: -- name: Vendor Information - abbrev: VNFO - record: True - retention: diff --git a/documents/_data/it-systems.yaml b/documents/_data/it-systems.yaml index c3d6881..8052e0e 100644 --- a/documents/_data/it-systems.yaml +++ b/documents/_data/it-systems.yaml @@ -1,14 +1,10 @@ -# This file contains a listing of specific software systems -# used to implement information management. Data from this file -# is brought into procedures as appropriate. 
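The hunk in progress here renames the top-level ``RMDC:`` key to ``rmdc-systems:`` and ``use_cases`` to ``use-cases``. Both renames are load-bearing: the procedure page added later in this diff looks the data up under exactly those names, via ``data['rmdc-systems']`` and a ``'use-cases'`` column mapping. A quick standalone check that the new layout parses as that template expects (``yaml`` is PyYAML, an assumed dev dependency):

    import yaml

    with open("documents/_data/it-systems.yaml") as f:
        data = yaml.safe_load(f)

    # Mirrors the lookups made by make_list_table_from_mappings in the new
    # documents/procedures/administration/document_management.rst.
    for system in data["rmdc-systems"]:
        print(f"{system['name']}: {system['use-cases']}")

The renamed lines follow: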
-RMDC: +--- +rmdc-systems: - name: NukeVault description: Specialized commercial records management system - use_cases: Storing Documents and Records generated during design of Project X + use-cases: Storing Documents and Records generated during design of Project X location: https://nukevault.opennucleonics.org - name: Supplier Portal description: A place where our suppliers can get documents - use_cases: External suppliers send documents/records to us + use-cases: External suppliers send documents/records to us location: Online -Data Management: - - name: Data Dictionary diff --git a/documents/_data/plant-parameters.yaml b/documents/_data/plant-parameters.yaml deleted file mode 100644 index 1e999b6..0000000 --- a/documents/_data/plant-parameters.yaml +++ /dev/null @@ -1,16 +0,0 @@ -- name: Power Range - val_low: 0.750 - val_high: 16 - units: MWe - req: R_AMS_1 - tags: [electrical] -- name: Backbone Voltage - val: 3.6 - units: kVAC - req: R_AMS_2 - tags: [electrical] -- name: Backbone Frequency - val: 60 - units: Hz - req: R_AMS_2 - tags: [electrical] diff --git a/documents/_data/schedule.yaml b/documents/_data/schedule.yaml deleted file mode 100644 index 3413595..0000000 --- a/documents/_data/schedule.yaml +++ /dev/null @@ -1,193 +0,0 @@ -- name: "TTP" - color: "#1f77b4" - tasks: - - name: "Ship Design Cycle 1" - duration_days: 120 - predecessors: - - id: RX0 - id: TTP1 - - name: "Nuke-Ready Guide" - is_milestone: true - predecessors: - - id: TTP1 - - name: "Ship Design Cycle 2" - id: TTP2 - predecessors: - - id: TTP1 - - id: RX1 - duration_days: 120 - - name: "RFI (non-stealth)" - is_milestone: true - predecessors: - - id: TTP2 - - name: "Ship Design Cycle 3" - id: TTP3 - predecessors: - - id: TTP2 - duration_days: 120 - - name: "RFP" - is_milestone: true - predecessors: - - id: TTP3 - - name: "Evaluate Proposals" - id: TTP4 - duration_days: 90 - predecessors: - - id: TTP3 - - name: "Ship Award" - is_milestone: true - predecessors: - - id: TTP4 - - name: "Detailed Design & Construction" - id: TTP5 - duration_days: 900 - predecessors: - - id: TTP4 - - name: "TTP Ship Delivery" - is_milestone: true - predecessors: - - id: TTP5 - - name: "Test/Trials/Acceptance" - id: TTP6 - duration_days: 90 - predecessors: - - id: TTP5 - - name: "Transit to Homeport" - id: TTP7 - duration_days: 30 - predecessors: - - id: TTP6 - - name: "NEPP Integration" - id: TTP8 - duration_days: 60 - predecessors: - - id: TTP7 - - id: RX5 - - id: SY7 - - name: "Hot plant testing" - id: TTP9 - duration_days: 180 - predecessors: - - id: TTP8 - - id: SY7 - -- name: "Shipyard" - color: "#ff7f0e" - tasks: - - name: "Shipyard conceptual design" - id: SY1 - start: 2026-01-01 - duration_days: 120 - predecessors: - - id: RX0 - - name: "Real Estate Purchase" - id: SY2 - predecessors: - - id: SY1 - duration_days: 270 - - name: "Shipyard Design Ph 1" - duration_days: 540 - id: SY3 - predecessors: - - id: SY1 - - id: RX1 - - name: "Regulatory review" - duration_days: 270 - id: SY4 - predecessors: - - id: SY1 - - name: "Reg Approval 1" - is_milestone: true - predecessors: - - id: SY4 - - name: "Shipyard Design Ph 2" - id: SY5 - duration_days: 365 - predecessors: - - id: SY3 - - name: "Reg Approval 2" - is_milestone: true - predecessors: - - id: SY5 - - name: "Shipyard Construction Ph 1" - id: SY6 - duration_days: 635 - predecessors: - - id: SY2 - - name: "Shipyard License" - is_milestone: true - predecessors: - - id: SY6 - - name: "Shipyard Construction Ph 2" - id: SY7 - duration_days: 270 - predecessors: - - id: SY6 - -- name: 
"Reactor" - color: "#2ca02c" - tasks: - - name: "Rx concept design cycle 1" - id: RX0 - duration_days: 50 - - name: "Rx concept design cycle 2" - id: RX1 - duration_days: 100 - predecessors: - - id: RX0 - - name: "Rx concept design cycle 3" - id: RX15 - duration_days: 300 - predecessors: - - id: RX1 - - name: "Reactor prelim design" - id: RX2 - duration_days: 360 - predecessors: - - id: RX15 - - name: "Reactor detailed design" - id: RX4 - duration_days: 500 - predecessors: - - id: RX2 - - name: "Reactor manufacturing" - id: RX5 - duration_days: 480 - predecessors: - - id: RX3 - -- name: "Reactor License" - tasks: - - name: "Pre-licensing activities" - predecessors: - - id: RX1 - - id: RX15 - type: FF - duration_days: 450 - id: LI1 - - name: "Submit Construction Permit Application" - is_milestone: true - predecessors: - - id: RX2 - - id: LI1 - - name: "CP review by NRC" - id: RX3 - duration_days: 450 - predecessors: - - id: RX2 - - name: "Receive Construction Permit" - is_milestone: true - predecessors: - - id: RX3 - - name: "OL review by NRC" - # TODO: should end at end of hot plant testing - predecessors: - - id: RX4 - duration_days: 365 - - name: "Receive Operating License" - is_milestone: true - predecessors: - - id: RX5 - - id: TTP9 -# - name: "Fuel Handling License" -# tasks diff --git a/documents/_static/css/custom.css b/documents/_static/css/custom.css index ab954f4..f084d46 100644 --- a/documents/_static/css/custom.css +++ b/documents/_static/css/custom.css @@ -12,9 +12,3 @@ html[data-theme="dark"] { --pst-color-primary: rgb(115, 199, 164); --pst-color-secondary: rgb(21, 197, 124); } - -/* Additional customizations for nrsk */ -dl.py.attribute { - /* Reduce the space between autodoc attrs */ - margin-bottom: 0.5em !important; -} \ No newline at end of file diff --git a/documents/_static/data-hub.png b/documents/_static/data-hub.png deleted file mode 100644 index 92387a5..0000000 Binary files a/documents/_static/data-hub.png and /dev/null differ diff --git a/documents/_templates/system.tmpl b/documents/_templates/system.tmpl deleted file mode 100644 index 9d9de25..0000000 --- a/documents/_templates/system.tmpl +++ /dev/null @@ -1,27 +0,0 @@ - -.. Sadly can't get this to work with the same TOC level in additional info. - {{ data['name'] }} ({{ data['abbrev']}}) - {{ "=" * (data['name'] | length + data['abbrev'] | length + 3) }} - -Abbrev: {{data['abbrev']}} - -{{ data['desc'] }} - -Functions ---------- - -{% for item in data['functions'] %} -.. req:: {{item}} - :id: R_{{data['abbrev']}}_{{loop.index}} -{% endfor %} - - -Parameters ----------- -{% if data['params'] %} -{{ make_list_table_from_mappings( - [('Parameter', 'name'), ('Value', 'val'), ('Tags', 'tags')], - data['params'], - title='System Parameters', - ) }} -{% endif %} \ No newline at end of file diff --git a/documents/conf.py b/documents/conf.py index bf7bc5a..47509a3 100644 --- a/documents/conf.py +++ b/documents/conf.py @@ -10,18 +10,13 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) import datetime -import os -import sys - -SRC = os.path.abspath("../src") -sys.path.insert(0, SRC) - -from nrsk.documents.validate import validate_doc_types # -- Project information ----------------------------------------------------- -company_name = "Applied Maritime Sciences, LLC" -project_name = "Project 1959" +company_name = "Open Nucleonics" project = f"{company_name} Governing Documents" author = company_name release = "1.0" @@ -44,19 +39,11 @@ extensions = [ "sphinx.ext.imgmath", "sphinxcontrib.datatemplates", "sphinxcontrib.mermaid", - "sphinxcontrib.apidoc", "sphinx.ext.graphviz", # "sphinx.ext.imgconverter", # SVG to png but rasterizes and bad "sphinxcontrib.inkscapeconverter", # SVG to pdf without rasterizing "sphinx_timeline", - "sphinx.ext.autodoc", - "sphinx.ext.napoleon", - "sphinx_autodoc_typehints", - "sphinx.ext.autosummary", - "sphinxcontrib.autodoc_pydantic", - "sphinx.ext.intersphinx", "nrsk.schedule.load_schedule", - "nrsk.plant.plant_data_table", ] # Add any paths that contain templates here, relative to this directory. @@ -97,35 +84,13 @@ html_css_files = [ # https://sphinx-needs.readthedocs.io/en/latest/installation.html#plantuml-support plantuml = "java -Djava.awt.headless=true -jar /usr/share/plantuml/plantuml.jar" -latex_engine = "xelatex" -latex_elements = { - # "fontenc": r"\usepackage[T2A]{fontenc}", - # "babel": r"\usepackage[english,russian]{babel}", - # "fontpkg": r""" - # \setmainfont{DejaVu Serif} - # \setsansfont{DejaVu Sans} - # \setmonofont{DejaVu Sans Mono} - # """, - "figure_align": "H", - "extraclassoptions": "openany", - #'\makeatletter\@openrightfalse\makeatother' - "extrapackages": r""" - \usepackage{fancyhdr} - \usepackage{etoolbox} - \usepackage{pdflscape} - \usepackage{tabulary} - """, - "preamble": r""" - \AtBeginEnvironment{figure}{\pretocmd{\hyperlink}{\protect}{}{}} - """, -} # LaTeX document generation options # doesn't work with sphinx-needs latex_documents = [ ( "index", - "ams.tex", - "AMS Docs", + "nrsk.tex", + "Nuclear Reactor Starter Kit", author, "manual", False, @@ -158,7 +123,6 @@ latex_documents = [ # ] rst_prolog = f""" .. |inst| replace:: **{company_name}** -.. |project| replace:: **{project_name}** """ # will need to move relevant refs somewhere @@ -174,34 +138,3 @@ mermaid_version = "10.6.1" # Sphinx Needs config needs_include_needs = True # turn off to hide all needs (e.g. for working docs) needs_extra_options = ["basis"] - -autodoc_typehints = "description" -autodoc_typehints_description_target = "all" -autodoc_default_options = { - "members": True, - "private-members": False, - "undoc-members": True, - "ignore-module-all": True, -} -autodoc_member_order = "bysource" - -apidoc_module_dir = SRC -apidoc_module_first = True -apidoc_output_dir = "api" -apidoc_separate_modules = True - -autodoc_pydantic_model_show_field_summary = True -autodoc_pydantic_model_show_validator_summary = True -autodoc_pydantic_field_doc_policy = "both" -autodoc_pydantic_field_docutils_summary = True - -set_type_checking_flag = True - -intersphinx_mapping = { - "pydantic": ("https://docs.pydantic.dev/latest", None), - "python": ("https://docs.python.org/3", None), -} - - -def setup(app): - app.connect("builder-inited", validate_doc_types) diff --git a/documents/glossary.rst b/documents/glossary.rst index 2a73852..a7d4952 100644 --- a/documents/glossary.rst +++ b/documents/glossary.rst @@ -10,39 +10,6 @@ Glossary .. 
glossary:: - Configuration Management - The process of identifying and documenting the characteristics of a - facility's structures, systems and components (including computer - systems and software), and of ensuring that changes to these - characteristics are properly incorporated into the facility - documentation. :cite:p:`agencyInformationTechnologyNuclear2010` - - Controlled document - Documents whose content is maintained uniform among the copies by an - administrative control system. The goal of controlling documents is to - ensure that work is performed using approved current information, not - obsolete information. Important documents to be controlled are uniquely - identified (including revision number, date, and specific copy number), - and distribution is formally controlled. Revisions to controlled - documents are uniquely tracked and implemented, including mandatory page - replacements and receipt acknowledgment. Controlled documents typically - include procedures for operations, surveillance, and maintenance, and - safety basis documents such as the SAR, and hazard and accident - analyses. :cite:p:`agencyInformationTechnologyNuclear2010` - - Design basis - The range of conditions and events taken explicitly into account in the - design of a facility, according to established criteria, such that the - facility can withstand them without exceeding authorized limits by the - planned operation of safety systems. - :cite:p:`agencyInformationTechnologyNuclear2010` - - Design control - Measures established to ensure that the information from design input - and design process documents for structures, systems, and components is - correctly translated into the final design. - :cite:p:`agencyInformationTechnologyNuclear2010` - Document A written collection of information, instructions, drawings, specifications, etc. that is *maintained* throughout the @@ -50,19 +17,11 @@ Glossary *record* in that it is expected to be *maintained* by revisions as needed. See :need:`R_APPB_45` - Electronic Document Management System - EDMS - A computerized system that holds and distributes records and documents - Record A written collection of information providing evidence of work that was done at a specific time. Records are expected to be *retained* for a certain retention period, e.g. for the lifetime of the plant or for a given number of years. See :need:`R_APPB_79` - - Records Management and Document Control - RMDC - Group responsible for managing project records and documents \ No newline at end of file diff --git a/documents/index.rst b/documents/index.rst index 11c792f..7ca4a02 100644 --- a/documents/index.rst +++ b/documents/index.rst @@ -3,18 +3,16 @@ .. toctree:: :maxdepth: 2 - :numbered: 3 + :numbered: :caption: Contents: purpose/index organization/index procedures/index - plant/index project/index bibliography requirements/index glossary - api/nrsk diff --git a/documents/plant/Reactor/Auxiliary Systems/Chemical Addition System/cas.rst b/documents/plant/Reactor/Auxiliary Systems/Chemical Addition System/cas.rst deleted file mode 100644 index ab288f2..0000000 --- a/documents/plant/Reactor/Auxiliary Systems/Chemical Addition System/cas.rst +++ /dev/null @@ -1,12 +0,0 @@ -Chemical Addition System -======================== - -.. datatemplate:yaml:: index.yaml - :template: system.tmpl - -Additional info ---------------- - -.. req:: Contain stuff - -.. 
req:: Do more stuff \ No newline at end of file diff --git a/documents/plant/Reactor/Auxiliary Systems/Chemical Addition System/index.yaml b/documents/plant/Reactor/Auxiliary Systems/Chemical Addition System/index.yaml deleted file mode 100644 index cbaf121..0000000 --- a/documents/plant/Reactor/Auxiliary Systems/Chemical Addition System/index.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -name: Chemical Addition System -abbrev: CA -safety related: false -functions: - - Preparing, storing, and transferring solutions of lithium - hydroxide (7LiOH) to maintain reactor coolant pH, and hydrazine (N2H2) - to scavenge oxygen from the reactor coolant at low temperatures -notes: That's enriched lithium-7 diff --git a/documents/plant/Reactor/Auxiliary Systems/Component Cooling Water system/index.yaml b/documents/plant/Reactor/Auxiliary Systems/Component Cooling Water system/index.yaml deleted file mode 100644 index 7053f01..0000000 --- a/documents/plant/Reactor/Auxiliary Systems/Component Cooling Water system/index.yaml +++ /dev/null @@ -1,13 +0,0 @@ -name: Component Cooling Water System -abbrev: CCW -functions: - - > - Transfer heat from the following components to the RPSW system by CCW - heat exchangers during normal operation, scheduled and unscheduled - shutdowns, including hot and cold maintenance, and during refueling. - - * Decay heat removal system heat exchangers - * Makeup and purification system letdown heat exchangers - * Reactor coolant pump heat exchangers - * Control rod drive mechanism cooling jackets - * Suppression pool heat exchangers diff --git a/documents/plant/Reactor/Auxiliary Systems/Reactor Compartment Ventilation System/index.yaml b/documents/plant/Reactor/Auxiliary Systems/Reactor Compartment Ventilation System/index.yaml deleted file mode 100644 index c1afdec..0000000 --- a/documents/plant/Reactor/Auxiliary Systems/Reactor Compartment Ventilation System/index.yaml +++ /dev/null @@ -1,18 +0,0 @@ -name: Reactor Compartment Ventilation System -abbrev: RCV -safety class: II -functions: - - Controlling radioactive gaseous release to the environment - during both normal and emergency (post-LOCA) operation and - maintenance of a low level of airborne radioactivity in - the reactor compartment and auxiliary spaces to permit - entry during normal operation or during both schedule and - unscheduled shutdown. - - Removing heat to the environment from sources within the reactor - compartment - - Containment purging prior to manned entry for maintenance and/or - inspection. Containment purging would be accomplished only when - the reactor is at or below hot shutdown conditions; i.e. decay heat - system is operating. - - Provide a source of clean air to the control areas if high - radioactivity levels are present off ship. diff --git a/documents/plant/Reactor/Auxiliary Systems/index.rst b/documents/plant/Reactor/Auxiliary Systems/index.rst deleted file mode 100644 index 3c96b81..0000000 --- a/documents/plant/Reactor/Auxiliary Systems/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -Auxiliary Systems -################# - -.. 
toctree:: - :glob: - - Chemical Addition System/cas.rst - * \ No newline at end of file diff --git a/documents/plant/Reactor/I&C Systems/Plant Control System/index.yaml b/documents/plant/Reactor/I&C Systems/Plant Control System/index.yaml deleted file mode 100644 index aa79c14..0000000 --- a/documents/plant/Reactor/I&C Systems/Plant Control System/index.yaml +++ /dev/null @@ -1,5 +0,0 @@ -name: Reactor plant control system -params: - - name: Quantity - val: 1 - tags: INTERFACE diff --git a/documents/plant/Reactor/Primary Systems/Containment System/containment.rst b/documents/plant/Reactor/Primary Systems/Containment System/containment.rst deleted file mode 100644 index df6c1bc..0000000 --- a/documents/plant/Reactor/Primary Systems/Containment System/containment.rst +++ /dev/null @@ -1,14 +0,0 @@ -Containment -=========== - -.. datatemplate:yaml:: containment.yaml - :template: system.tmpl - -Additional info ----------------- - -.. req:: Contain stuff - :id: R_RCS_A1 - -.. req:: Do more stuff - :id: R_RCS_A2 \ No newline at end of file diff --git a/documents/plant/Reactor/Primary Systems/Containment System/containment.yaml b/documents/plant/Reactor/Primary Systems/Containment System/containment.yaml deleted file mode 100644 index 4d3d5e4..0000000 --- a/documents/plant/Reactor/Primary Systems/Containment System/containment.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -name: Reactor containment system -abbrev: RCS -functions: - - Contain pressure and radiation - - Keep people out -params: - - name: Quantity - val: 1 - tags: INTERFACE diff --git a/documents/plant/Reactor/Primary Systems/Fuel Handling System/index.yaml b/documents/plant/Reactor/Primary Systems/Fuel Handling System/index.yaml deleted file mode 100644 index d4f573e..0000000 --- a/documents/plant/Reactor/Primary Systems/Fuel Handling System/index.yaml +++ /dev/null @@ -1,5 +0,0 @@ -name: Fuel handling equipment -params: - - name: Quantity - val: 1 - tags: INTERFACE diff --git a/documents/plant/Reactor/Primary Systems/Reactor Coolant System/pcs.rst b/documents/plant/Reactor/Primary Systems/Reactor Coolant System/pcs.rst deleted file mode 100644 index f245d7b..0000000 --- a/documents/plant/Reactor/Primary Systems/Reactor Coolant System/pcs.rst +++ /dev/null @@ -1,5 +0,0 @@ -Primary Coolant System -====================== - -.. 
datatemplate:yaml:: pcs.yaml - :template: system.tmpl \ No newline at end of file diff --git a/documents/plant/Reactor/Primary Systems/Reactor Coolant System/pcs.yaml b/documents/plant/Reactor/Primary Systems/Reactor Coolant System/pcs.yaml deleted file mode 100644 index bee7537..0000000 --- a/documents/plant/Reactor/Primary Systems/Reactor Coolant System/pcs.yaml +++ /dev/null @@ -1,55 +0,0 @@ -name: Primary Coolant System -abbrev: PCS -functions: - - Remove heat from the core during normal operation - - Generate steam -equipment: - - name: Reactor vessel and closure head - params: - - name: Quantity - val: 1 - tags: INTERFACE - - name: Primary coolant pump - desc: Includes motors, coolers, valves, and piping - params: - - name: Quantity - val: 4 - tags: INTERFACE - - name: Reactor vessel internals - params: - - name: Quantity - val: 1 - tags: INTERFACE - - name: Steam generator - params: - - name: Quantity - val: 12 - tags: INTERFACE - - name: Pressurizer - desc: Pressurizer with spray and surge line - params: - - name: Quantity - val: 1 - tags: INTERFACE - - name: Nuclear steam plant supports and restraints - params: - - name: Quantity - val: 1 - tags: INTERFACE - - name: Control rod drive service structure - params: - - name: Quantity - val: 1 - tags: INTERFACE - - name: Reactor coolant system insulation - desc: > - Includes insulation for: - - * Reactor vessel and closure head - * Pressurizer - * Surge and spray line piping - * Reactor coolant pumps - params: - - name: Quantity - val: 1 - tags: INTERFACE diff --git a/documents/plant/Reactor/Primary Systems/Reactor Core System/index.yaml b/documents/plant/Reactor/Primary Systems/Reactor Core System/index.yaml deleted file mode 100644 index 3df1082..0000000 --- a/documents/plant/Reactor/Primary Systems/Reactor Core System/index.yaml +++ /dev/null @@ -1,5 +0,0 @@ -name: Control rod drive mechanisms -params: - - name: Quantity - val: 37 - tags: INTERFACE diff --git a/documents/plant/Reactor/Primary Systems/Shielding System/index.yaml b/documents/plant/Reactor/Primary Systems/Shielding System/index.yaml deleted file mode 100644 index 6795000..0000000 --- a/documents/plant/Reactor/Primary Systems/Shielding System/index.yaml +++ /dev/null @@ -1,20 +0,0 @@ -name: Shielding System -equipment: - - name: Primary biological shielding - desc: > - Consists of shielding water tanks at - - * Top - * Vertical cylinder - * Lower-inner bottom - * Pressure suppression system - params: - - name: Quantity - val: 1 - tags: INTERFACE - - name: Reactor enclosure/secondary biological shielding - desc: Consists of 3-inch-thick steel bulkheads - params: - - name: Quantity - val: 1 - tags: INTERFACE diff --git a/documents/plant/Reactor/Primary Systems/index.rst b/documents/plant/Reactor/Primary Systems/index.rst deleted file mode 100644 index 5033b34..0000000 --- a/documents/plant/Reactor/Primary Systems/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -Primary Systems -############### - -.. toctree:: - :glob: - :maxdepth: 1 - - Containment System/containment.rst - * \ No newline at end of file diff --git a/documents/plant/Reactor/index.rst b/documents/plant/Reactor/index.rst deleted file mode 100644 index a317ad3..0000000 --- a/documents/plant/Reactor/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -Reactor Plant -############# - -.. 
toctree::
-   :glob:
-   :maxdepth: 2
-
-   Primary Systems/index
-   Auxiliary Systems/index
-   *
\ No newline at end of file
diff --git a/documents/plant/Ship/Collision barrier/index.yaml b/documents/plant/Ship/Collision barrier/index.yaml
deleted file mode 100644
index 1ddba0c..0000000
--- a/documents/plant/Ship/Collision barrier/index.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-equipment:
-  - name: Collision barrier
-    params:
-      - name: Quantity
-        val: 1
-        tags: INTERFACE
diff --git a/documents/plant/Ship/index.rst b/documents/plant/Ship/index.rst
deleted file mode 100644
index 16c41dc..0000000
--- a/documents/plant/Ship/index.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Ship
-####
-
-.. toctree::
-   :glob:
-
-   **/*
\ No newline at end of file
diff --git a/documents/plant/Shipyard/index.rst b/documents/plant/Shipyard/index.rst
deleted file mode 100644
index d11c191..0000000
--- a/documents/plant/Shipyard/index.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-Shipyard
-########
-
-.. toctree::
-   :glob:
-
-   **/*
-
-
diff --git a/documents/plant/index.rst b/documents/plant/index.rst
deleted file mode 100644
index 354a2c7..0000000
--- a/documents/plant/index.rst
+++ /dev/null
@@ -1,26 +0,0 @@
-Plant
-#####
-
-.. raw:: latex
-
-   \begin{landscape}
-
-.. plant-data-table:: plant
-   :columns: PBS, SSC, Abbrev, Description, Tags
-   :max-depth: 4
-   :hide-empty:
-
-.. raw:: latex
-
-   \end{landscape}
-
-.. toctree::
-   :glob:
-   :maxdepth: 3
-
-   Reactor/index
-   Ship/index
-   Shipyard/index
-   *
-
-
\ No newline at end of file
diff --git a/documents/procedures/administration/doc-types.yaml b/documents/procedures/administration/doc-types.yaml
new file mode 100644
index 0000000..4314e58
--- /dev/null
+++ b/documents/procedures/administration/doc-types.yaml
@@ -0,0 +1,16 @@
+- name: Calculation
+  abbrev: CALC
+  use-cases: Documenting an analysis
+  record: False
+  retention: Varies
+- name: Procedure
+  abbrev: PROC
+  use-cases: Defining and dictating how work is done
+  record: False
+  retention: Lifetime
+- name: Form
+  abbrev: FORM
+  use-cases: Providing evidence of tasks that were done
+  record: True
+  retention: Lifetime
+
diff --git a/documents/procedures/administration/document_management.rst b/documents/procedures/administration/document_management.rst
new file mode 100644
index 0000000..33d111f
--- /dev/null
+++ b/documents/procedures/administration/document_management.rst
@@ -0,0 +1,102 @@
+Records and Document Management Procedure
+-----------------------------------------
+
+This procedure governs the creation, maintenance, and retention of
+:term:`Records <Record>` and :term:`Documents <Document>`.
+
+.. impl:: Define processes for lifetime records
+   :links: R_GDC_1_4
+
+.. impl:: Define processes for Document Control
+   :links: R_APPB_45
+
+.. impl:: Define processes for Quality Records
+   :links: R_APPB_79
+
+
+.. _rmdc-systems:
+
+Systems
+^^^^^^^
+
+Documents and records are managed in the following systems.
+
+.. datatemplate:yaml::
+   :source: /_data/it-systems.yaml
+
+   {{ make_list_table_from_mappings(
+       [('Name', 'name'), ('Use case(s)', 'use-cases'), ('Location', 'location')],
+       data['rmdc-systems'],
+       title='RMDC Systems',
+       ) }}
+
+.. _rmdc-origination:
+
+Origination
+^^^^^^^^^^^
+New records and new or revised documents are originated as invoked by other procedures
+and processes. The following steps shall be taken at such times:
+
+* **Originator** shall specify key data defining the new record/document, including:
+
+  * Required:
+
+    * Title --- a single-line description of what the record/document is
+    * Record/document type --- This determines template, review, and
+      retention rules. Should be from :ref:`rmdc-doctypes`
+    * Originating organization --- the organization or group assigned primary authorship.
+      For internally-generated records/documents this shall be an organization listed on
+      the :ref:`org-chart`; for external entities, give the name and department.
+
+  * Optional:
+
+    * Keywords --- words or phrases to assist in future searches/lookups
+
+
+.. impl:: Require document index to be updated upon origination
+   of any new document or record
+   :links: R_APPB_83
+
+   Updating the index at the point of creation is a robust
+   way to ensure that each document/record will always
+   be discoverable and retrievable.
+
+.. _rmdc-doctypes:
+
+Record/Document Types
+^^^^^^^^^^^^^^^^^^^^^
+One of the following record/document types should be assigned to each
+record/document. The types are generally associated with specific forms or
+templates that include the expected content/sections, and are often created to
+satisfy the needs of a lower-level procedure.
+
+.. impl:: Define numerous Record/Document types
+
+   There is a timeless debate about having too many vs. too few doc types.
+   Additional types are added to support more mature procedure sets, but then
+   people start to struggle to know what type they should use, leading to
+   cleanup efforts, followed by management declarations to reduce the number of
+   types.
+
+   A good solution to this problem is to be very explicit in your procedural
+   culture. Ensure that whenever a record or document is being originated,
+   each staff member has the relevant procedure open and is following it
+   precisely. Ensure that the procedures are rigorous and precise about which
+   record/document types should be generated in each scenario. When a procedure
+   is written, ensure that any new record/document types are added to the list
+   of known types.
+
+   .. warning:: Don't make one rec/doc type for each individual checklist or form
+      in each procedure though. Those can all be forms. Or maybe do make them
+      all form subtypes? Could be nice to really have specific evidence.
+      Need easy way to make auto-generated forms per procedure then.
+
+.. datatemplate:yaml::
+   :source: doc-types.yaml
+
+   {{ make_list_table_from_mappings(
+       [('Type', 'name'), ('Abbrev', 'abbrev'), ('Use case(s)', 'use-cases'),
+        ('Record', 'record'), ('Retention', 'retention')],
+       data,
+       title='Record/Document types',
+       ) }}
\ No newline at end of file
diff --git a/documents/procedures/administration/index.rst b/documents/procedures/administration/index.rst
index 43d42df..89ad3d2 100644
--- a/documents/procedures/administration/index.rst
+++ b/documents/procedures/administration/index.rst
@@ -7,5 +7,4 @@ and the management of procedures themselves.
 .. toctree::
    :glob:
 
-   information_management/index
    *
\ No newline at end of file
diff --git a/documents/procedures/administration/information_management/document_management.rst b/documents/procedures/administration/information_management/document_management.rst
deleted file mode 100644
index eee7106..0000000
--- a/documents/procedures/administration/information_management/document_management.rst
+++ /dev/null
@@ -1,296 +0,0 @@
-.. _rmdc-proc:
-
-Records and Document Management Procedure
-=========================================
-This procedure governs the creation, intake, maintenance, authorization,
-distribution, and retention of :term:`Records <Record>` and :term:`Documents
-<Document>`.
-
-Purpose
--------
-Systematic management of Records and Documents helps teams execute
-large, long-term, and complex projects in a coordinated fashion.
-Furthermore, management of quality-related documents is required by
-regulations. This procedure implements the following requirements:
-
-.. impl:: Define processes for lifetime records
-   :links: R_GDC_01_04
-
-.. impl:: Define processes for Document Control
-   :links: R_APPB_45
-
-.. impl:: Define processes for Quality Records
-   :links: R_APPB_79
-
-Roles
------
-Roles involved in this procedure include:
-
-Originator
-   Person who authors a new or revised document
-
-Reviewer
-   Person who is assigned to review a submitted document, checking for quality
-   and content
-
-Approver
-   Person in the releasing organization who is authorized to mark a document as
-   Approved, thereby enabling its use across the project.
-
-Manager
-   Person in any organization who is authorized to request document reservations
-
-RMDC Staff
-   Person responsible for receiving documents and administering :term:`RMDC` IT
-   systems such as the :term:`EDMS`
-
-Staff
-   Person in any organization who is authorized to access or submit project
-   documents/records
-
-Procedure
----------
-
-.. _rmdc-access:
-
-Accessing a document/record
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Internal project staff shall follow these steps when seeking documents or records
-to perform project work.
-
-* **Staff** navigates to the project :term:`EDMS` as defined in :ref:`rmdc-systems`
-* **Staff** searches EDMS for desired document/record by number, title, and/or other fields
-* **Staff** checks ``status`` field and ensures they choose a revision that is
-  approved for their current work task (generally this requires an APPROVED status).
-* **Staff** checks the ``usage`` field and ensures to choose a revision with a
-  usage marking that is appropriate for their current work task.
-* **Staff** accesses the file(s) via the EDMS download or access feature
-* If an expected Document/Record cannot be found, appears to have erroneous data, or
-  is not accessible, **Staff** contacts **RMDC Staff** for assistance
-
-
-.. _rmdc-origination:
-
-Originating a new document/record
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-New records and new or revised documents are originated as invoked by other procedures
-and processes, or by third parties. The following steps shall be taken whenever a new
-document or record or document reservation is requested, created, or received:
-
-* **Staff** shall specify document data defining the new record/document in
-  accordance with the content rules listed in :py:class:`nrsk.models.Document`
-
-  * Required:
-
-    * Title --- a single-line description of what the record/document is
-    * Record/document type --- This determines template, review, and
-      retention rules. Should be from :ref:`rmdc-doctypes`
-    * Originating organization --- the organization or group assigned primary authorship.
-      Internally-generated records/documents shall be contained on the :ref:`org-chart`, while
-      others should be the name and department of external entities.
-
-  * Optional
-
-    * Keywords --- words or phrases to assist in future searches/lookups
-
-
-.. 
impl:: Require document index to be updated upon origination - of any new document or record - :links: R_APPB_83 - - Updating the index at the point of creation is a robust - way to ensure that each document/record will always - be discoverable and retrievable. - - -Intake -^^^^^^ -Upon receipt of Document/Records from an external party, the following process shall occur: - -* **Receiver** determines whether the Document/Record is eligible for - entry into the Document Index. -* **Receiver** inspects the Document Index to ensure consistency between the Index and the - received Document - - * If the received document/record is not already listed in the Document Index, then - **Receiver** takes on the role of **Originator** and follows the process in - :ref:`rmdc-origination`. - * If the data is incorrect or outdated, **Receiver** updates the data - -* **Receiver** uploads the received file(s) to the EDMS. -* **Receiver** updates metadata in the Document Index, including the latest - revision number, authors, received date, and location/URL in the EDMS -* **Receiver** assigns acceptance review task to appropriate person based on - the work breakdown structure that the document was produced under. -* **Reviewer** views Document Index and downloads file(s) as listed -* **Reviewer** inspects files and performs acceptance review as appropriate, potentially - generating a Review Record -* **Reviewer** updates Document Index acceptance metadata field according to review -* If the document/records are not accepted, **Reviewer** informs the originating - institution of deficiencies and requests an update - -Document/Record data management ------------------------------- - -.. _rmdc-doctypes: - -Record/Document Types -^^^^^^^^^^^^^^^^^^^^^ -One of the following record/document types should be assigned to each -record/document. The types are generally associated with specific forms or -templates that include the expected content/sections, and are often created to -satisfy the needs of a lower-level procedure. - -.. impl:: Define numerous Record/Document types - - There is a timeless debate about having too many vs. too few doc types. - Additional types are added to support more mature procedure sets, but then - people start to struggle to know what type they should use, leading to - cleanup efforts, followed by management declarations to reduce the number of - types. - - A good solution to this problem is to be very explicit in your procedural - culture. Ensure that whenever a record or document is being originated, - each staff member has the relevant procedure open and is following it - precisely. Ensure that the procedures are rigorous and precise about which - record/document types should be generated in each scenario. When a procedure - is written, ensure that any new record/document types are added to the list - of known types. - - .. warning:: Don't make one rec/doc type for each individual checklist or form - in each procedure though. Those can all be forms, or perhaps form - subtypes; subtypes would provide more specific evidence, but would - require an easy way to auto-generate forms per procedure.
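
For reference, an entry in the ``doc-types.yaml`` file behind the table below
might look like the following sketch; the ``Procedure``/``PROC`` type and its
field values are hypothetical illustrations, not items from the controlled
type list::

    - name: Procedure
      abbrev: PROC
      use_cases: Step-by-step instructions for performing controlled work
      record: true
      retention: LIFETIME

- -.. 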
datatemplate:yaml:: - :source: /_data/doc-types.yaml - - {{ make_list_table_from_mappings( - [('Type', 'name'), ('Abbrev', 'abbrev'), ('Use case(s)', 'use_cases'), - ('Record', 'record'), ('Retention', 'retention')], - data, - title='Record/Document types', - ) }} - - -Document Numbering -^^^^^^^^^^^^^^^^^^ - -Document numbers (or IDs) are human-usable codes that uniquely identify -the document/record for purposes of cross-referencing and discussion. - -All projects may use |inst| corporate-level document numbering as appropriate -in addition to the project-specific policies. - -For corporate-level documents/records not associated with any specific project -or system, the document numbers shall be of the form: ``AMS-{type}-00000``, -where ``{type}`` is the document type abbreviation in all CAPS, and the number -starts at 00001 and increments from there. When 100,000 or more numbers are -needed, the number simply grows beyond five digits. - -For project-level documents/records not associated with any specific system, -numbers shall be of the same form, but with ``AMS`` replaced with the -project abbreviation, e.g. ``TTP``. - -Documents/records associated with a specific project and system shall have document -numbers of the form: - -``{proj}-{sys}-{type}-00000`` - -where ``{sys}`` is the system abbreviation in all CAPS. - -Active projects and their abbreviations may be found in :ref:`projects`. - - -.. _rmdc-doc-status: - -Document Statuses -^^^^^^^^^^^^^^^^^ -Document Status indicates where a document or record is in its lifecycle, and -determines if and how it may be used in design or operation. Statuses fall into -three top-level categories. These statuses are derived from -:cite:p:`cloverDocumentControlRecords2010`. - -.. autoclass:: nrsk.models.Document.STATUS - :no-index: - :no-members: - - * **Not Yet Approved** --- The following statuses apply to Documents that - generally shall not be used to support plant design or operations: - - .. autoattribute:: nrsk.models.Document.STATUS.RESERVED - :no-index: - :no-value: - - .. autoattribute:: nrsk.models.Document.STATUS.IN_PROGRESS - :no-index: - :no-value: - - .. autoattribute:: nrsk.models.Document.STATUS.IN_REVIEW - :no-index: - :no-value: - - .. autoattribute:: nrsk.models.Document.STATUS.REJECTED - :no-index: - :no-value: - - .. autoattribute:: nrsk.models.Document.STATUS.AUTHORIZED - :no-index: - :no-value: - - .. autoattribute:: nrsk.models.Document.STATUS.REFERENCE - :no-index: - :no-value: - - .. autoattribute:: nrsk.models.Document.STATUS.NATIVE - :no-index: - :no-value: - - * **Approved** --- The following statuses apply to active documents that allow - plant design or operations: - - .. autoattribute:: nrsk.models.Document.STATUS.APPROVED - :no-index: - :no-value: - - * **No Longer Approved** --- The following statuses apply to documents - that are no longer approved for use other than reference: - - .. autoattribute:: nrsk.models.Document.STATUS.QUARANTINED - :no-index: - :no-value: - - .. autoattribute:: nrsk.models.Document.STATUS.SUPERSEDED - :no-index: - :no-value: - - .. autoattribute:: nrsk.models.Document.STATUS.REVISED - :no-index: - :no-value: - - .. autoattribute:: nrsk.models.Document.STATUS.VOIDED - :no-index: - :no-value: - - .. autoattribute:: nrsk.models.Document.STATUS.CLOSED - :no-index: - :no-value: - -.. note:: These statuses are validated in the data dictionary - in :py:class:`~nrsk.models.Document` - -.. _rmdc-systems: - -Document/Record Management Systems ----------------------------------- - -Documents and records are managed in the following systems. - -.. 
datatemplate:yaml:: - :source: /_data/it-systems.yaml - - {{ make_list_table_from_mappings( - [('Name', 'name'), ('Use case(s)', 'use_cases'), ('Location', 'location')], - data['RMDC'], - title='RMDC Systems', - ) }} - - -See Also --------- diff --git a/documents/procedures/administration/information_management/index.rst b/documents/procedures/administration/information_management/index.rst deleted file mode 100644 index 96f9997..0000000 --- a/documents/procedures/administration/information_management/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -********************************* -Information Management Procedures -********************************* - -.. toctree:: - - information_management_plan - document_management - \ No newline at end of file diff --git a/documents/procedures/administration/information_management/information_management_plan.rst b/documents/procedures/administration/information_management/information_management_plan.rst deleted file mode 100644 index 5115672..0000000 --- a/documents/procedures/administration/information_management/information_management_plan.rst +++ /dev/null @@ -1,126 +0,0 @@ -Information Management Plan -=========================== - -Purpose ------- -This plan is the highest-level description of how information is -managed at |inst|. It defines the information management requirements -and explains the chosen processes and tools that meet the requirements. - -Scope ----- -This plan applies to creation, storage, exchange, and retirement of project -information related to |project|. This includes plant configuration management -data as defined in :cite:p:`barrieroInformationManagementProcess2010`. The plan -is not limited to information affecting quality; it also includes business -information. - - -Background ---------- -The potential benefits of the digital transformation are well known across all -business sectors. Numerous commercial nuclear information management studies -have further suggested that properly implemented information management can improve -efficiency and quality while reducing costs -:cite:p:`halpinInformationManagementNuclear1978d,agencyInformationTechnologyNuclear2010,barrieroInformationManagementProcess2010,renuartAdvancedNuclearTechnology2014`. -In addition, management of information related to product quality is subject -to nuclear quality regulations in all jurisdictions. - -Requirements ------------- - -.. req:: Quality-related information shall be managed in accordance with 10 CFR 50 Appendix B - :id: R_INFO_APPB - :links: R_10CFR50_APPB - :tags: quality - :basis: Compliance with Appendix B is necessary for licensing - - Note that non-quality-related information is not necessarily subject - to this requirement. - -.. req:: A data dictionary shall be maintained defining controlled data - :id: R_DATA_DICT - :basis: It will provide a central reference for all project members to - find specific, precise, and up-to-date data definitions to enable - unambiguous communications and collaboration. - - The dictionary shall define data types, data fields, constraints on the - fields, relationships between the data, source, sensitivity, usage, - owner/steward, sample values, and transformation logic, as applicable. It - shall be revision controlled such that changes can be clearly seen and - remembered. - -.. 
req:: Data shall be managed such that data exchanges and transformations between - parties and systems can be readily automated - :id: R_DATA_EXCHANGE - :basis: Over the project life, numerous parties and systems will ramp up - and down due to changing relationships and technologies. Automated data - exchanges are expected to improve the ease, cost, speed, and quality of - the inevitable exchanges and transformations. - - This effectively requires rich data import and export capabilities - in each tool used to manage data. - -.. req:: Data shall be subject to role-based access controls (RBAC) or stronger - :id: R_DATA_ACCESS - :basis: Role-based access control (RBAC) is a strong standard - covering the needs of commercial nuclear information - from export control and business sensitivity perspectives. - - More sensitive data, such as Security Related Information, - may use stronger access controls such as ABAC or MAC. - -Implementation --------------- -This section defines the specific implementation of the requirements. - -General principles -^^^^^^^^^^^^^^^^^^ -A hub data architecture has been chosen for this project, based on -arguments and experiences in :cite:p:`agencyInformationTechnologyNuclear2010`. - -.. figure:: /_static/data-hub.png - - Hub architecture, from :cite:p:`agencyInformationTechnologyNuclear2010` - -This is designed to enable rapid integration of a wide variety of partner -organizations, specialized information management tools, and engineering/design -tools while striving to future-proof the multi-decade project. - -The underlying data layer consists of: - -* Structured text (e.g. YAML, XML, JSON) controlled in version-controlled repositories -* Databases (e.g. Postgres) -* Documents/drawings (PDFs, native files, HTML) stored on corporate drives and managed - by the Records Management/Document Control system -* Technical data (3D models, simulation input/output, laser scans, schedule dumps) stored - on corporate drives, managed by the Technical Data Management system - -Above the data layer sits the data authoring and manipulation layer, which includes: - -* Office tools: word processors, spreadsheets, text editors, IDEs, etc., including - online collaboration tools -* PM tools: Primavera P6, HR tools -* Engineering tools: SolidWorks, ANSYS, CASMO, MCNP, Intergraph, Revit -* Construction tools -* Maintenance tools - -One-way or bidirectional data exchanges between tools and institutions occur -through the API, which reads the data layer and presents data representations to -authorized users or services in clearly-defined formats over the network. - -.. _info-mgmt-data-dict: - -Data Dictionary -^^^^^^^^^^^^^^^ -The data dictionary is defined and maintained as described in -:need:`I_DATA_DICT`. - -The data dictionary itself is located at :ref:`data-dict`. - - -.. insert render of the data dictionary table here. - -Technology stack -^^^^^^^^^^^^^^^^ -.. insert render of the IT systems table here. diff --git a/documents/project/index.rst b/documents/project/index.rst index cc2a09b..0724714 100644 --- a/documents/project/index.rst +++ b/documents/project/index.rst @@ -1,10 +1,11 @@ -.. _projects: +.. thinking of putting like, all the calcs you have to do during design, + calibrations during commissioning, + work during operations. This would be where we could be like, "Hey don't forget + to include impurities in shielding calculations" -################## -Project Management -################## .. 
toctree:: + :glob: + :maxdepth: 2 - schedule - mddl \ No newline at end of file + * \ No newline at end of file diff --git a/documents/project/mddl.rst b/documents/project/mddl.rst deleted file mode 100644 index dcc6577..0000000 --- a/documents/project/mddl.rst +++ /dev/null @@ -1,50 +0,0 @@ -Master Document and Delivery List -################################# - -The MDDL is a comprehensive, hierarchical catalog of all the documents and files -related to the test reactor project. The MDDL includes information such as -document number, revision, title, and maturity status (e.g., completion -percentage). This serves as a central reference point, providing an overview of -the project's documentation and helping team members find relevant and -up-to-date information. Furthermore, the MDDL helps maintain organization, -control, and versioning of documents. - -* Program Deliverables - * Full set of QA program procedures, including engineering and - configuration management - -* Design Deliverables - * Design basis events and plant functions - * System Design Documents for all systems (including requirements/functions, - equipment list, parameters) - * Long-lead equipment list - * Probabilistic Risk Assessment (PRA) model - * General arrangement (3D model and extracted drawings) - * P&ID drawings for all systems - * Engineering simulator software - * Training simulator software - * Plant safety model (i.e. RELAP) - * Core models (i.e. CASMO/SIMULATE) - * Shielding models (i.e. MCNP) - * Balance of plant model (i.e. Flownex) - * Fuel performance models (i.e. FRAPCON and FRAPTRAN) - * Pipe restraint models - * Equipment models (i.e. SolidWorks) - * Civil/structural models, coupled to ship motions (i.e. ANSYS) - * Emergency planning documents - * Plant Technical Specifications - * Plant Operating Procedures - -* Project Management Deliverables - * Resource-loaded schedule out to TTP operation - * Bottom-up parametric cost estimate model and reports - -* Regulatory Submittals - * Topical Reports - * Argument that Ship is the site (and so construction can occur before CP) - * Argument that TTP is a prototype reactor (104 license option) - * Argument - * Construction Permit Application - * Preliminary Safety Analysis Reports - * Environmental Report - * Operating Permit Application \ No newline at end of file diff --git a/documents/project/projects.yaml b/documents/project/projects.yaml deleted file mode 100644 index 30382ae..0000000 --- a/documents/project/projects.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Data regarding the various projects the company is working on. -# The abbreviations drive document/record numbering -projects: - - name: Training and Test Platform - abbrev: TTP - description: > - The FOAK AMS nuclear ship, owned and operated by AMS and used for training and testing. - Effectively it produces the information validating the commercial readiness of - higher-powered ships. It's intended to be of the same design as the commercial - power plant, but may be downrated e.g. from 50 MWe to 10 MWe. - - name: Integration Shipyard - abbrev: ISH - description: > - The shipyard that integrates nuclear power plants into pre-built ships. - - name: Nuclear Ready - abbrev: NRDY - description: > - Standard interfacing information that defines how to make a ship that can - easily be outfitted with a nuclear power system at the Integration - Shipyard. 
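
The project abbreviations above drive the document numbering scheme described
in the Document Numbering section of the records management procedure. The
following minimal sketch illustrates that scheme; the system abbreviation
``RCS`` and type abbreviations ``SDD``/``PROC`` are hypothetical examples, not
entries from the controlled lists::

    import re

    # Accepts AMS-PROC-00001 (corporate), TTP-PROC-00001 (project-level), and
    # TTP-RCS-SDD-00001 (project + system) forms; sequences may grow beyond
    # five digits when more numbers are needed.
    DOC_NUMBER = re.compile(r"^[A-Z]+(-[A-Z]+)?-[A-Z]+-\d{5,}$")

    def make_doc_number(proj: str, doc_type: str, seq: int, sys: str | None = None) -> str:
        """Build a document number like TTP-RCS-SDD-00001."""
        parts = [proj] + ([sys] if sys else []) + [doc_type]
        return "-".join(parts) + f"-{seq:05d}"

    assert make_doc_number("TTP", "SDD", 1, sys="RCS") == "TTP-RCS-SDD-00001"
    assert DOC_NUMBER.match(make_doc_number("AMS", "PROC", 42))
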
diff --git a/documents/project/risks.rst b/documents/project/risks.rst deleted file mode 100644 index 86729f7..0000000 --- a/documents/project/risks.rst +++ /dev/null @@ -1,4 +0,0 @@ - -Risk Register -############# - diff --git a/documents/project/schedule.rst b/documents/project/schedule.rst index 9afc10d..baa1d10 100644 --- a/documents/project/schedule.rst +++ b/documents/project/schedule.rst @@ -1,12 +1,4 @@ -.. raw:: latex +Project Schedule +################ - \begin{landscape} - -Milestone Schedule -################## - -.. schedule:: _data/schedule.yaml - -.. raw:: latex - - \end{landscape} \ No newline at end of file +.. schedule:: _data/schedule.yaml \ No newline at end of file diff --git a/documents/requirements/index.rst b/documents/requirements/index.rst index b93c569..2612ffe 100644 --- a/documents/requirements/index.rst +++ b/documents/requirements/index.rst @@ -12,8 +12,7 @@ bases. :caption: Contents: :glob: - stakeholder/ams + stakeholder/index national/index - industry/* standards/index - + * diff --git a/documents/requirements/industry/epri_alwr_urd.rst b/documents/requirements/industry/epri_alwr_urd.rst deleted file mode 100644 index 85b24de..0000000 --- a/documents/requirements/industry/epri_alwr_urd.rst +++ /dev/null @@ -1,26 +0,0 @@ -EPRI ALWR -========= - -This page contains some high-level requirements sourced from the EPRI Advanced -Light Water Reactor Utility Requirements Document :cite:p:`mprAdvancedLightWater99a`. - -.. req:: Passive ALWRs shall not require safety-related AC electric power other than - inverter-supplied AC power for I&C. - :id: R_ALWR_1 - -.. req:: 72 hours without manual operator action. - :id: R_ALWR_2 - - For transients and accidents analyzed under the initiating event plus - single failure Licensing Design Basis assumptions (which include loss of all ac - power), no credit for manual operator action shall be necessary to meet core - protection regulatory limits for at least 72 hours following initial indication - of the need for action (i.e., approximately the time of the initiating event). - -.. req:: Only simple actions and assistance shall be necessary beyond 72 hours - :id: R_ALWR_3 - -.. req:: Off-site dose limits shall be maintained for at least 72 hours without - the need for off-site assistance. - :id: R_ALWR_4 - diff --git a/documents/requirements/national/USA/10cfr50.rst b/documents/requirements/national/USA/10cfr50.rst index d7734ec..1354882 100644 --- a/documents/requirements/national/USA/10cfr50.rst +++ b/documents/requirements/national/USA/10cfr50.rst @@ -24,7 +24,7 @@ and `RG 1.232 `_. .. needtable:: Appendix A summary :filter: id.startswith("R_GDC") - :columns: id, title + :columns: id .. include:: /generated_assets/10-cfr-50-app-a-list.rst diff --git a/documents/requirements/stakeholder/ams.rst b/documents/requirements/stakeholder/ams.rst deleted file mode 100644 index a377924..0000000 --- a/documents/requirements/stakeholder/ams.rst +++ /dev/null @@ -1,77 +0,0 @@ -AMS Stakeholder Requirements -============================ - -The following requirements come from the Project Leaders at AMS. - -.. req:: Develop a new nuclear maritime industry. - :id: R_PROJ_1959 - -.. req:: The first vessel shall have an electrical power output range of 750 kWe -- 16 MWe. - :id: R_AMS_1 - :links: R_PROJ_1959 - :basis: Appropriate for commercial demonstrator, per Axioms - - :cite:p:`cahillDesignPhilosophyBrief2025` - -.. 
req:: The electrical backbone shall operate at 6.6 kVAC at 60 Hz - :id: R_AMS_2 - :links: R_PROJ_1959 - :basis: Maximum commercial compatibility, per Axioms - - :cite:p:`cahillDesignPhilosophyBrief2025` - -.. req:: The demonstrator vessel shall be in the water by the Summer of 2030. - :id: R_AMS_3 - :links: R_PROJ_1959 - :basis: Desire to move fast, per Axioms - - :cite:p:`cahillDesignPhilosophyBrief2025` - -.. req:: The demonstrator vessel shall be flagged to US standards. - :id: R_AMS_4 - :links: R_PROJ_1959 - :basis: Per Axioms - - :cite:p:`cahillDesignPhilosophyBrief2025` - -.. req:: The demonstrator vessel architecture shall be grounded in IMO Resolution A.491(XII) - :id: R_AMS_5 - :links: R_PROJ_1959 - :basis: Per Axioms document - - Document reference: :cite:p:`imoCodeSafetyNuclear1982`. - - Source :cite:p:`cahillDesignPhilosophyBrief2025`. - -.. req:: The demonstrator vessel shall facilitate future augmentation of the power system - without massive amounts of construction. - :id: R_AMS_6 - :links: R_PROJ_1959 - :basis: Desire for drop-in nuclear-readiness, per Axioms - - :cite:p:`cahillDesignPhilosophyBrief2025` - -.. req:: The reactor system shall comply with Passive Plant safety requirements defined in EPRI ALWR URD - :id: R_AMS_7 - :basis: This satisfies modern customer expectations of safety - :links: R_PROJ_1959, R_ALWR_1, R_ALWR_2, R_ALWR_3, R_ALWR_4 - - The EPRI ALWR URD is :cite:p:`mprAdvancedLightWater99a`. - -.. req:: The vessel shall have a double bottom for the full length of the ship - :basis: USCG per 3.1.2.2 of CNSG status - -.. req:: The vessel shall have two-compartment subdivision - :basis: USCG and AMS rules, per 3.1.2.2 and 3.1.2.3 of CNSG status - - - -.. needflow:: Engineering plan to develop a nuke on a ship - :alt: Engineering plan - :root_id: R_PROJ_1959 - :config: lefttoright - :show_link_names: - :border_color: - [status == 'open']:FF0000, - [status == 'in progress']:0000FF, - [status == 'closed']:00FF00 \ No newline at end of file diff --git a/documents/safety/design-basis-events.rst b/documents/safety/design-basis-events.rst deleted file mode 100644 index 28aceba..0000000 --- a/documents/safety/design-basis-events.rst +++ /dev/null @@ -1,32 +0,0 @@ -Design Basis Events -=================== - -The Plant is designed to maintain fuel integrity in the following -design basis events: - -External events ---------------- - -* Collision -* Grounding -* Flooding -* Sinking -* Heavy weather -* Fire -* Explosion -* Earthquake - -Internal events ---------------- -* Fire -* Large-break LOCA -* Medium-break LOCA -* Small-break LOCA -* Transient overpower -* Station blackout - -Key safety functions --------------------- -* Maintain subcriticality, even inverted -* Maintain decay heat removal, even inverted -* Self-transport to remote anchoring location diff --git a/pyproject.toml b/pyproject.toml index f8c18a0..6bc7229 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [project] -name = "ams-reactor-starter-kit" +name = "nuclear-reactor-starter-kit" version = "0.1.0" authors = [ { name="Nick Touran", email="nick@whatisnuclear.com" }, ] @@ -9,7 +9,7 @@ description = """\ and tools supporting efficient nuclear energy endeavors.\ """ readme = "README.md" -requires-python = ">=3.12" +requires-python = ">=3.9" dependencies = [ "openpyxl", "pyyaml", @@ -24,7 +24,7 @@ dependencies = [ "sphinxcontrib-bibtex >= 2.6.1", "sphinxcontrib-glossaryused @ git+https://github.com/partofthething/glossaryused@bb321e6581b4c0618cd6dc4f1fd8355d314aee4d", "sphinx-autobuild", - 
"sphinxcontrib-datatemplates", + "sphinxcontrib.datatemplates", "sphinxcontrib-mermaid", "sphinxcontrib-svg2pdfconverter", "sphinx-timeline", @@ -32,18 +32,6 @@ dependencies = [ "matplotlib", "pandas", "jpype1", - "ruamel-yaml>=0.18.16", - "pydantic>=2.12.5", - "sphinx-autodoc-typehints>=3.5.2", - "email-validator>=2.3.0", - "sphinxcontrib-apidoc>=0.6.0", - "autodoc-pydantic>=2.2.0", - "sqlmodel>=0.0.31", - "fastapi>=0.128.0", - "uvicorn>=0.38.0", - "python-dotenv>=1.2.1", - "psycopg2>=2.9.11", - "alembic>=1.17.2", ] classifiers = [ "Programming Language :: Python :: 3", @@ -93,25 +81,3 @@ include_trailing_comma = true force_grid_wrap = 0 line_length = 88 profile = "black" - -[dependency-groups] -dev = [ - "ipython>=8.18.1", - "pytest>=9.0.2", -] - -[tool.alembic] -script_location = "%(here)s/alembic" -# file_template = "%%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s" - -prepend_sys_path = [ - "." -] - -[[tool.alembic.post_write_hooks]] -name = "ruff" -type = "module" -module = "ruff" -options = "check --fix REVISION_SCRIPT_FILENAME" - - diff --git a/src/nrsk/__init__.py b/src/nrsk/__init__.py index d99be3a..e69de29 100644 --- a/src/nrsk/__init__.py +++ b/src/nrsk/__init__.py @@ -1,7 +0,0 @@ -"""NRSK root.""" - -from pathlib import Path - -PACKAGE_ROOT = Path(__file__).resolve().parent -PROJECT_ROOT = PACKAGE_ROOT.parent.parent -DOCS_ROOT = PACKAGE_ROOT.parent.parent / "documents" diff --git a/src/nrsk/db.py b/src/nrsk/db.py deleted file mode 100644 index b6e44ea..0000000 --- a/src/nrsk/db.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Database management code.""" - -import os - -from dotenv import load_dotenv -from sqlmodel import Session, SQLModel, create_engine, select - -load_dotenv() - -POSTGRES_USER = os.getenv("POSTGRES_USER") -POSTGRES_PASSWORD = os.getenv("POSTGRES_PASSWORD") -POSTGRES_PATH = os.getenv("POSTGRES_PATH") - - -def get_engine(): - return create_engine( - f"postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_PATH}", - ) diff --git a/src/nrsk/documents/__init__.py b/src/nrsk/documents/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/nrsk/documents/intake.py b/src/nrsk/documents/intake.py deleted file mode 100644 index 06c8d8a..0000000 --- a/src/nrsk/documents/intake.py +++ /dev/null @@ -1,97 +0,0 @@ -"""Document data intake.""" - -import os -from contextlib import asynccontextmanager - -from fastapi import FastAPI, Request -from fastapi.responses import HTMLResponse -from sqlmodel import Session, SQLModel, select - -from nrsk.db import get_engine - -# import others to create DB? 
-from nrsk.models import Document, User - -engine = get_engine() - - -@asynccontextmanager -async def lifespan(app: FastAPI): - # --- Startup Logic --- - - yield # The app runs while it's "yielding" - - # --- Shutdown Logic --- - print("Shutting down safely") - - -app = FastAPI(lifespan=lifespan) - - -@app.get("/schema") -def get_schema(): - # This generates the JSON Schema from your SQLModel/Pydantic model - return Document.model_json_schema(mode="serialization") - - -@app.post("/submit") -def submit_data(data: Document): - with Session(engine) as session: - data = Document.model_validate(data) - session.add(data) - session.commit() - return {"status": "success", "id": data.id} - - -@app.get("/documents/") -def read_documents(skip: int = 0, limit: int = 10): - with Session(engine) as session: - statement = select(Document).offset(skip).limit(limit) - results = session.exec(statement).all() - return results - - -@app.get("/", response_class=HTMLResponse) -def get_form(): - # Minimal placeholder markup; the original page's HTML tags were lost in - # extraction. Only the page title and submit label are recoverable. - return """ - <!DOCTYPE html> - <html> - <head><title>QA Entry Form</title></head> - <body> - <button type="submit">Submit QA Revision</button> - </body> - </html>
- """ diff --git a/src/nrsk/documents/seed_doc_db.py b/src/nrsk/documents/seed_doc_db.py deleted file mode 100644 index 014919a..0000000 --- a/src/nrsk/documents/seed_doc_db.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Seed DB for documents, e.g. with doc types""" - -from sqlmodel import Session - -from nrsk import DOCS_ROOT -from nrsk.db import get_engine -from nrsk.documents.validate import validate_doc_types - - -def seed_doc_types(): - engine = get_engine() - doc_types = validate_doc_types(DOCS_ROOT / "_data" / "doc-types.yaml") - - with Session(engine) as session: - for dtype in doc_types: - session.add(dtype) - - session.commit() - - -if __name__ == "__main__": - seed_doc_types() - print("seeded doc types") diff --git a/src/nrsk/documents/validate.py b/src/nrsk/documents/validate.py deleted file mode 100644 index 438e129..0000000 --- a/src/nrsk/documents/validate.py +++ /dev/null @@ -1,23 +0,0 @@ -""" -Validate document data during build. - -In particular, check doc types. -""" - -import pathlib - -import yaml - -from nrsk.models import InformationTypes - - -def sphinx_validate_doc_types(app) -> list["InformationType"]: - """Ensure doc type data is valid.""" - fpath = pathlib.Path(app.srcdir) / "_data" / "doc-types.yaml" - return validate_doc_types(fpath) - - -def validate_doc_types(fpath: str | pathlib.Path) -> list["InformationType"]: - with open(fpath) as f: - data = yaml.safe_load(f) - return InformationTypes.validate_python(data) diff --git a/src/nrsk/models.py b/src/nrsk/models.py deleted file mode 100644 index e0fb9db..0000000 --- a/src/nrsk/models.py +++ /dev/null @@ -1,666 +0,0 @@ -""" -Define the Data Dictionary. - -Implementation of Data Dictionary -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. impl:: Maintain the Data Dictionary base data using Pydantic - :id: I_DATA_DICT - :links: R_DATA_DICT - - The data dictionary is managed using Pydantic. Pydantic allows for - concise Python code to richly define data models and their fields. From a single - class definition, it provides data validation, automatic rich documentation (via - a Sphinx plugin), an integration with FastAPI for data exchange, and - relatively easy integration with sqlalchemy for database persistence. Changes to - the schema can be managed and controlled via the revision control system, and - changes to a single source (the Python code) will automatically propagate to the - rendered documentation and, potentially, the database (e.g. using *alembic*). - - Using SQLAlchemy as the database engine enables wide flexibility in underlying - database technology, including PostgreSQL, MySQL, SQLite, Oracle, and MS SQL - Server. Pydantic models allow us to validate data loaded from a database, - directly from structured text files, or from JSON data delivered via the network. - -Analysis of Alternatives -^^^^^^^^^^^^^^^^^^^^^^^^ -SQLModel :cite:p:`SQLModel` was considered as the data layer base, but it was -determined to be less mature than pydantic and sqlalchemy, with inadequate -documentation related to field validation, so Pydantic is used directly -for schema definitions. - -.. _data-dict: - -Data Dictionary -^^^^^^^^^^^^^^^ -This is the official Data Dictionary discussed in :ref:`the Information -Management Plan `. 
-
""" -
-import re -from datetime import datetime, timedelta -from enum import StrEnum -from typing import Annotated, Any, Optional -from uuid import UUID, uuid4 - -# _PK_TYPE = UUID -# moving away from UUID at least temporarily b/c SQLite doesn't -# really support it, which makes adding new data via DBeaver harder -_PK_TYPE = int - -from pydantic import ( - AnyUrl, - BaseModel, - ConfigDict, - EmailStr, - PositiveInt, - TypeAdapter, - ValidationError, - computed_field, - field_validator, - model_validator, -) -from sqlalchemy import text -from sqlmodel import JSON, Column, Field, Relationship, SQLModel - -# one or more capital letters, nothing else -ALL_CAPS = re.compile(r"^[A-Z]+$") - - -class NRSKModel(SQLModel): - id: _PK_TYPE = Field( - # default_factory=uuid4, - description="The unique ID of this object. Used as a primary key in the database.", - primary_key=True, - # schema_extra={ - # "examples": ["3fa85f64-5717-4562-b3fc-2c963f66afa6"], - # }, - ) - - -class DocumentUserLink(NRSKModel, table=True): - """Linkages between users and documents.""" - - position: int = Field(default=0) - """Integer indicating order of people""" - - role_note: str = Field( - default="", - ) - """Extra information about role such as 'lead' or 'section 2.4'""" - - document_id: _PK_TYPE | None = Field( - foreign_key="document.id", primary_key=True, default=None - ) - user_id: _PK_TYPE | None = Field( - foreign_key="user.id", primary_key=True, default=None - ) - - -class User(NRSKModel, table=True): - """A person involved in the Project.""" - - given_name: str - family_name: str - preferred_name: str | None = None - previous_name: str | None = None - email: EmailStr - joined_on: datetime | None - deactivated_on: datetime | None - organization: str | None - title: str | None - - contributed: list["Document"] = Relationship( - back_populates="contributors", link_model=DocumentUserLink - ) - - -class OpenItem(NRSKModel): - name: str - status: str - created_on: datetime - closed_on: datetime | None = None - - -class SSC(NRSKModel): - """ - A Structure, System, or Component in the plant. - - This is a generic hierarchical object that can represent plants, units, - buildings and their structures, systems, subsystems, components, - subcomponents, etc. - - A physical tree of buildings/structures/rooms may have overlapping - contents in terms of systems/components/equipment/parts - """ - - name: str - pbs_code: str | None = Field( - description="An integer sequence that determines the 'system number' and also the ordering in printouts", - schema_extra={ - "examples": ["1.2.3", "20.5.11"], - }, - default="", - ) - """PBS code is tied closely to the structure of the PBS, obviously. If 1.2 - is a category level, that's ok, but that doesn't imply that the second level - of PBS 2 is also a category level; it may be systems. - Since this can change in major PBS reorganizations, it should not be used - for cross-referencing (use ID). 
- """ - - abbrev: str = Field( - description="A human-friendly abbreviation uniquely defining the system" - ) - parent: Optional["SSC"] = None - functions: list[str | None] = Field( - description="Functions of this system", default=None - ) - - @field_validator("abbrev", mode="after") - @classmethod - def abbrev_must_be_all_caps(cls, v: str) -> str: # noqa: D102 - if not re.match(ALL_CAPS, v): - raise ValueError("{v} must be all CAPS") - - @field_validator("pbs_code", mode="after") - @classmethod - def pbs_must_be_int_sequence(cls, v: str) -> str: # noqa: D102 - if not v or re.match(r"^(\d+\.?)+$", v): - raise ValueError(f"{v} must be an integer sequence, like 1.2.3") - - -class SystemsList(BaseModel): - """A flat list of Systems in the plant. - - Can be used e.g. to render a snapshot of the Master Systems List. - - Does not include categories like "Nuclear Island" or "Primary Systems". - - We may want another structure that represents the whole tree in a - well-defined manner, or we may want to add a 'path' attr - to systems that define where they live. - """ - - systems: list[SSC] - - -class ParamDef(NRSKModel): - """A parameter class defining an aspect of plant design.""" - - name: str = Field( - schema_extra={"examples": ["Nominal gross power"]}, - ) - """Name of the parameter class.""" - description: str - """Detailed description of what parameters of this type represent""" - - valid_units: list[str | None] = Field( - schema_extra={"examples": ["MW", "W", "shp"]}, default=None - ) - """List of units allowed""" - - -class ParamVal(NRSKModel): - """A particular value of a Parameter, assigned to a particular SSC.""" - - ssc: SSC - pdef: ParamDef - value: str - units: str | None = None - pedigree: str = Field( - description="Indication of how well it is known (rough estimate, final design, as-built)." - ) - source: str = Field(description="Where this version of the value came from") - - -class ITSystem(NRSKModel): - """An IT system used by the project.""" - - name: str - vendor: str - version: str | None = None - use_cases: list[str] = Field( - schema_extra={ - "examples": [ - [ - "Document management", - ] - ], - } - ) - """One or more use cases this system is used for.""" - - physical_location: str = Field(description="Where the system is physically located") - url: AnyUrl | None = Field(description="Full URL to the system", default=None) - custodian: User | None = Field( - description="Person currently in charge of system", default=None - ) - launched_on: datetime | None = None - retired_on: datetime | None = None - quality_related: bool - - -class InformationType(NRSKModel, table=True): - """A type/kind/class of Information, Document, or Record.""" - - model_config = ConfigDict(extra="forbid") - - name: str - abbrev: str - examples: list[str] | None = Field( - default=None, - sa_column=Column(JSON), - ) - description: str = "" - retention: str | None = "" - record: bool = True - use_cases: str = "" - notes: str = "" - parent_id: _PK_TYPE | None = Field(default=None, foreign_key="informationtype.id") - # Add these two relationships for easier DB parsing in code - parent: Optional["InformationType"] = Relationship( - back_populates="subtypes", - sa_relationship_kwargs={"remote_side": "InformationType.id"}, - ) - subtypes: list["InformationType"] = Relationship(back_populates="parent") - - -InformationTypes = TypeAdapter(list[InformationType]) -"""A list of document types.""" - - -class Document(NRSKModel, table=True): - """ - Data dictionary entry for Documents and Records. 
- - Document data is designed to satisfy the needs defined in :ref:`rmdc-proc`. - - See Also - -------- - * Some of the field definitions come from CFIHOS - https://www.jip36-cfihos.org/wp-content/uploads/2023/08/v.1.5.1-CFIHOS-Specification-Document-1.docx - * ISO-19650 has different Status Codes defining suitability level (for information, as-built) - https://ukbimframework.org/wp-content/uploads/2020/05/ISO19650-2Edition4.pdf - """ - - class STATUS(StrEnum): - """Document Status options.""" - - # Much of the wording here comes from cloverDocumentControlRecords2010. - - # NOTE: if you add or remove a status, be sure to also update the - # category property below AND :ref:`rmdc-doc-status`! - - ## Not Yet Approved: - RESERVED = "RESERVED" - """ - A Document ID has been assigned, but the document is in development or - has not yet been started (default). - """ - - IN_PROGRESS = "IN PROGRESS" - """One or more authors are creating or revising the document.""" - - IN_REVIEW = "IN REVIEW" - """A completed draft of the document has been submitted and is pending review.""" - - REJECTED = "REJECTED" - """A draft that was rejected by the review team and may be revised and resubmitted.""" - - AUTHORIZED = "AUTHORIZED" - """A controlled revision that has been signed but is not yet effective. - Such documents may be used for training, etc. Documents with this status may - be used for plant modifications in a work package, but not for normal operations.""" - - REFERENCE = "REFERENCE" - """Document is stored in EDMS for ease of access and reference, but - there is no assertion that the information is the latest available. - Useful for Standards, engineering handbook excerpts, vendor notices.""" - - NATIVE = "NATIVE" - """A document file that may be in EDMS in the native file format. Not - used in the field because they (a) may require special software to view - and (b) may not be controlled for field use (i.e. not quarantined if - errors are discovered).""" - - ## Approved: - APPROVED = "APPROVED" - """A document revision that has been submitted by the releasing - organization and that is authorized for the use case defined in - the suitability code. - - * A drawing with this status during operation reflects the plant configuration - * A drawing with this status before or during construction reflects that it is - ready to be fabricated/built - * A procedure with this status is effective. - """ - - ## No longer Approved: - QUARANTINED = "QUARANTINED" - """(On hold, Suspended) A document revision that was previously - authorized and has been placed on hold, e.g. a procedure that cannot be - performed as written or a design that is known to have pending changes.""" - - SUPERSEDED = "SUPERSEDED" - """A document that has been replaced by another document. The new - document is to be recorded in the index.""" - - REVISED = "REVISED" - """A document that has been replaced by a subsequent revision of that - document.""" - - VOIDED = "VOIDED" - """A document or revision that is no longer needed and there is no - revision or superseding document. 
This would also be used for documents - that have reached a predetermined expiration date, such as a temporary - procedure.""" - - CLOSED = "CLOSED" - """(Archived) A document for which the work has been completed.""" - - @property - def category(self) -> str: - """High-level status category: Not yet approved, Approved, or No Longer Approved.""" - if self.value in { - self.RESERVED, - self.IN_PROGRESS, - self.IN_REVIEW, - self.REJECTED, - self.AUTHORIZED, - self.REFERENCE, - self.NATIVE, - }: - return "Not Yet Approved" - if self.value in {self.APPROVED}: - return "Approved" - return "No Longer Approved" - - class USAGE(StrEnum): - """Usage options. - - Usage governs what use cases a document may be used for. It is a notion - derived from the ISO 19650 'suitability' idea, but used in combination - with the NIRMA status codes. It allows a document to be approved for - e.g. a conceptual design stage without letting it inadvertently be - released for bid or manufacture. Releasing organizations can update the - suitability as needed. - - See https://ukbimframework.org/wp-content/uploads/2020/09/Guidance-Part-C_Facilitating-the-common-data-environment-workflow-and-technical-solutions_Edition-1.pdf - """ - - FOR_INFORMATION = "FOR INFORMATION" - """A document revision that may be used for information only, not for - any contractual purpose.""" - - FOR_STAGE_APPROVAL = "FOR STAGE APPROVAL" - """A document revision that is considered complete for the contractual stage in - which it was created. For example, in a Preliminary Design phase, this - usage would indicate that it is at the expected usage level for - preliminary design. Most design-phase documents that are not yet ready - for bid or construction will be marked for this usage.""" - - FOR_BID = "FOR BID" - """A document revision that is ready to be sent to external parties for bid. - During the bid process, changes may be expected based on vendor feedback.""" - - FOR_CONSTRUCTION = "FOR CONSTRUCTION" - """A document revision that is ready to be sent to the field for manufacture, - fabrication, construction. An approved document with this usage implies - that all the quality, regulatory, and design aspects are in place, and - that work can proceed. However, what is constructed is not yet - authorized for operation.""" - - FOR_OPERATION = "FOR OPERATION" - """A document revision that can be used to operate the business and/or plant. - Procedures of this usage may be used to do work or operate equipment.""" - - AS_BUILT = "AS BUILT" - """A document revision that is an as-built record of construction or manufacture. - Documents of this usage may be used to operate the plant.""" - - class RETENTION(StrEnum): - """Retention plan options. - - Retention plans define how long the document or record is to be - kept before it is destroyed. - - .. 
note:: May want this to actually be a timedelta - - """ - - LIFETIME = "LIFETIME" - """Lifetime of the plant.""" - - # use_attribute_docstrings allows us to just use docstrings and get - # the same info in both the JSON Schema and also the Sphinx render - model_config = ConfigDict(use_attribute_docstrings=True) - - number: str - """The identification number meeting the document numbering rules""" - - title: str = Field( - schema_extra={ - "examples": ["CNSG Development and Status 1966-1977"], - }, - ) - """Descriptive title explaining the contents""" - - revision: str = Field( - schema_extra={ - "examples": ["0", "1", "1a", "A"], - }, - ) - """Revision code""" - - originating_organization_id: _PK_TYPE | None = Field( - foreign_key="organization.id", - description="The organization that owns or issued this document", - default=None, - ) - # This allows you to do `my_document.originating_organization` in Python - originating_organization: "Organization" = Relationship() - - originator_number: str | None = None - """The originating organization's document number (if originated externally).""" - - originator_revision: str | None = None - """The originating organization's revision code (if originated externally).""" - - type_id: _PK_TYPE = Field( - foreign_key="informationtype.id", - description="The ID of the InformationType", - ) - # type: "InformationType" = Relationship() - - contributors: list[User] = Relationship( - back_populates="contributed", - link_model=DocumentUserLink, - sa_relationship_kwargs={ - "order_by": "DocumentUserLink.position", - "lazy": "selectin", - }, - ) - """Holds all relationships with users but does not show up in JSON Schema""" - - @computed_field - @property - def authors(self) -> list[dict]: - """List of author info for the UI.""" - return [ - {"id": a.id, "name": f"{a.given_name} {a.family_name}"} - for a in self.contributors - ] - - @computed_field - @property - def reviewers(self) -> list[dict]: - """List of reviewer info for the UI. - - TODO: filter to reviewers only; the role lives on - DocumentUserLink.role_note, which is not reachable from the User - objects alone (see the commented-out link relationships below). - """ - return [ - {"id": a.id, "name": f"{a.given_name} {a.family_name}"} - for a in self.contributors - ] - - # revision_reviewers: list[RevisionReviewerLink] = Relationship( - # back_populates="reviewed", - # link_model=RevisionReviewerLink, - # sa_relationship_kwargs={ - # "order_by": "RevisionReviewerLink.position", - # "cascade": "all, delete-orphan", - # }, - # ) - # """The reviewer(s), if any.""" - - # revision_approvers: list[RevisionApproverLink] = Relationship( - # back_populates="approved", - # link_model=RevisionApproverLink, - # sa_relationship_kwargs={ - # "order_by": "RevisionApproverLink.position", - # "cascade": "all, delete-orphan", - # }, - # ) - # """The approver(s), if any.""" - - revision_comment: str | None = None - """Explanation of what changed in this revision""" - - status: STATUS = STATUS.RESERVED - usage: USAGE = USAGE.FOR_INFORMATION - retention_plan: RETENTION = RETENTION.LIFETIME - restriction_codes: str = Field( - description="Markings for export control, legal, etc.", default="" - ) - - actual_reviewed_date: datetime | None = None - actual_approved_date: datetime | None = None - - # filenames may be empty at first, i.e. for RESERVED docs - filenames: list[str] = Field( - description="Filenames of files attached to this Document. 
Main file should be the first.", - default_factory=list, - sa_column=Column(JSON, nullable=False, server_default=text("'[]'")), - ) - file_notes: list[str] = Field( - description="Short description of each file represented in filenames.", - default_factory=list, - sa_column=Column(JSON, nullable=False, server_default=text("'[]'")), - ) - checksums: list[str] = Field( - description="SHA-256 checksum of each file for data integrity", - default_factory=list, - sa_column=Column(JSON, nullable=False, server_default=text("'[]'")), - ) - """Checksums are used to verify long-term data integrity against tampering - and data degradation. While BLAKE3 checksums are faster, SHA-256 is more standard - and built-in at this point. In the future, switching to BLAKE3 may make sense for - easier periodic re-verification of large data libraries.""" - - physical_location: str | None = Field( - description="Location of physical media (only valid when not stored as an electronic file).", - default=None, - ) - notes: str = Field( - description="Additional information about the Document/Record", default="" - ) - - @computed_field - @property - def status_category(self) -> str: - """The top-level status category, derived from Document Status""" - return self.status.category - - @model_validator(mode="after") - def cant_have_electronic_and_physical_location(self) -> "Document": # noqa: D102 - has_physical_location = self.physical_location is not None - # filenames defaults to an empty list, so check truthiness, not None - has_file = bool(self.filenames) - - if has_physical_location and has_file: - raise ValueError( - "Cannot provide both physical_location and filename(s). They are mutually exclusive." - ) - - return self - - -class Organization(NRSKModel, table=True): - """An organization of people: companies, departments, governments, etc.""" - - name: str = Field(index=True) - """Organization Name""" - - abbreviation: str | None = Field(default=None, index=True) - website: str | None = None - is_active: bool = Field(default=True) - - # allow it to be hierarchical to capture full org trees and refer to - # divisions - parent_id: _PK_TYPE | None = Field( - default=None, - foreign_key="organization.id", - ) - """The parent organization this org reports to""" - - parent: Optional["Organization"] = Relationship( - back_populates="child_orgs", - sa_relationship_kwargs={"remote_side": "Organization.id"}, - ) - child_orgs: list["Organization"] = Relationship(back_populates="parent") - - -class PredecessorTask(NRSKModel): - """Link to a predecessor task.""" - - class PRED_TYPE(StrEnum): # noqa: N801 - """Predecessor relationship type.""" - - FS = "FS" - """Finish-to-start: predecessor finishes before successor starts (very common)""" - FF = "FF" - """Finish-to-finish: predecessor finishes before successor can finish""" - SS = "SS" - """Start-to-start: predecessor starts before successor starts""" - SF = "SF" - """Start-to-finish: predecessor starts before successor finishes (uncommon, maybe shift change)""" - - id: str - """ID of the predecessor task.""" - type: PRED_TYPE = PRED_TYPE.FS - lag: timedelta | None = Field( - description="Lag time. Negative timedelta implies negative lag " - "(lead time, starts before predecessor ends)", - default=None, - )
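 - -# Hypothetical illustration (not project data): "start 10 days after -# predecessor A finishes" would be expressed as -# PredecessorTask(id="A", type=PredecessorTask.PRED_TYPE.FS, lag=timedelta(days=10)) - - -class ScheduledTask(NRSKModel): - """Scheduled task, e.g. 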
in P6.""" - - name: str - id: str | None = None - is_milestone: bool = False - predecessors: list[PredecessorTask] = [] - duration: timedelta | None = None - actual_start: datetime | None = None - actual_end: datetime | None = None - scheduled_start: datetime | None = None - scheduled_end: datetime | None = None - - @model_validator(mode="before") - @classmethod - def convert_days_to_duration(cls, data: Any) -> Any: - """Allow input of duration_days, but convert on way in.""" - if isinstance(data, dict): - days = data.get("duration_days") - if days is not None: - data["duration"] = timedelta(days=float(days)) - del data["duration_days"] - return data - - -class ScheduleLane(BaseModel): - """A section of a schedule.""" - - name: str - color: str | None = None - tasks: list[ScheduledTask] - - -ScheduleInput = TypeAdapter(list[ScheduleLane]) -"""A list of lanes, representing full schedule input.""" diff --git a/src/nrsk/plant/__init__.py b/src/nrsk/plant/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/nrsk/plant/load_plant_data.py b/src/nrsk/plant/load_plant_data.py deleted file mode 100644 index 5d2abac..0000000 --- a/src/nrsk/plant/load_plant_data.py +++ /dev/null @@ -1,97 +0,0 @@ -""" -Read plant information like systems, equipment, & params from a folder structure. - -This reads it into the standard data structures defined via Pydantic, -which can then be used for any other purpose (reporting, etc.) - -The structure here is path/to/system where the folders define the -functional hierarchy (i.e. plant, 'island', system, subsystem). - -Some files can exist in the hierarchy: - -* System data files *.yaml -* System documents *.rst - -The documents often make use of the data in the yaml file through -system-level (or other) ``datatemplate`` directives, e.g. to print -out a list of System Functions or Parameters. - -This module parses the directory tree and YAML files, combining them into one -big tree of data. - -Future considerations: - -* It may make sense to have ``system.yaml`` (or ``equipment.yaml``) and - ``parameters.yaml`` in each of these folders for longer-term efficient - loading of just the Systems List vs. the entire Equipment List (which - will end up being more efficient in a proper database). Or not... I mean - we could just statically render everything and it'd be pretty performant - during reads. Maybe just have system, equipment, and param info in the - yaml file. - -""" - -import logging -from pathlib import Path - -from ruamel.yaml import YAML - -logger = logging.getLogger(__name__) - - -def load_yaml_tree(root_path: str | Path) -> dict: - """Load a directory tree of files to represent the Plant systems and params.""" - root_path = Path(root_path) - yaml = YAML(typ="safe") - tree = {} - - for root, dirs, files in root_path.walk(): - # Ensure empty folders get included in tree. 
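- # (Path.walk() is available in Python 3.12+ and yields every directory - # top-down, so even folders containing no YAML files get a tree node.)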
- current = tree - rel = root.relative_to(root_path) - parts = rel.parts - logger.info(f"loading {parts}") - # drill into the part of the tree where we are - for part in parts: - if part not in current: - current[part] = {} - current = current[part] - for file in files: - if file.endswith(".yaml"): - data = yaml.load(root / file) - current.update(data or {}) # empty YAML files load as None - if parts and not current: - current.update({"name": parts[-1]}) - logger.info(f"{current}") - - assign_hierarchical_code(tree) - return tree - - -def assign_hierarchical_code(data, current_path=""): - """ - Traverses a nested dictionary and adds a 'pbs_code' key to every - dictionary at any level of nesting, containing its hierarchical path. - - The dictionary is modified in place. - - Args: - data (dict): The dictionary to traverse and modify. - current_path (str): The current hierarchical path string (e.g., "1.", "2.3."). - """ - if not isinstance(data, dict): - return - - item_counter = 1 - keys_to_process = list(data.keys()) - - for key in keys_to_process: - value = data[key] - - # e.g., if current_path="1.", the next item's number is "1.1" - new_path = f"{current_path}{item_counter}" - - if isinstance(value, dict): - value["pbs_code"] = new_path - assign_hierarchical_code(value, new_path + ".") - item_counter += 1 diff --git a/src/nrsk/plant/plant_data_table.py b/src/nrsk/plant/plant_data_table.py deleted file mode 100644 index 32d973e..0000000 --- a/src/nrsk/plant/plant_data_table.py +++ /dev/null @@ -1,139 +0,0 @@ -"""Sphinx directive that makes tables of Plant Data from the PBS tree. - -Since individual system-level data can be nicely handled with datatemplates, -this custom directive just looks at the whole tree and makes the PBS -structure. - -This is somewhat duplicative of the TOC directive in the Plant folder, -but the automatic sphinx numbering and the lack of abbrevs are a bit sad. 
-
""" -
-import os -from pathlib import Path - -from docutils import nodes -from docutils.parsers.rst.directives.tables import Table -from sphinx.util import logging - -from nrsk.plant.load_plant_data import load_yaml_tree - -logger = logging.getLogger("[plant_data_table]") - - -class PlantBreakdownStructureTable(Table): - """Plant Breakdown Structure Table.""" - - has_content = False - required_arguments = 1 - optional_arguments = 0 - option_spec = { - "key-path": str, - "columns": lambda x: [c.strip() for c in x.split(",")], - "max-depth": int, - "hide-empty": lambda x: True, - } - - def get_default_columns(self): - # Must match the five cells built per row in add_row below. - return ["PBS", "Name", "Abbrev", "Description", "Tags"] - - def run(self): - env = self.state.document.settings.env - pbs_path = Path(env.srcdir) / Path(self.arguments[0]) - logger.info(f"[plant-data-table] Loading data from: {pbs_path}") - - if not pbs_path.exists(): - logger.warning(f"Input data not found: {pbs_path}") - return [nodes.paragraph(text=f"PBS data not found: {pbs_path}")] - - data = load_yaml_tree(pbs_path) - - # Drill down to optional key-path - if "key-path" in self.options: - keys = self.options["key-path"].split(".") - logger.info(f"Using subkey: {keys}") - for k in keys: - data = data[k] - - max_depth = int(self.options.get("max-depth", 10)) - hide_empty = "hide-empty" in self.options - columns = self.options.get("columns") - if not columns: - columns = self.get_default_columns() - - # Build table - table_node = nodes.table() - classes = table_node.get("classes", []) # want table wider: this doesn't work - classes.append("full-width") - table_node["classes"] = classes - tgroup = nodes.tgroup(cols=len(columns)) - table_node += tgroup - - # Header - for _ in columns: - tgroup += nodes.colspec(colwidth=10) - head = nodes.thead() - tgroup += head - row = nodes.row() - for col in columns: - row += nodes.entry("", nodes.paragraph(text=col)) - head += row - - # Body - tbody = nodes.tbody() - tgroup += tbody - - def walk(obj, path="", depth=0): - if depth >= max_depth: - return - if not isinstance(obj, dict): - return - for k, v in obj.items(): - current_path = f"{path}.{k}" if path else k - if hide_empty and self.is_empty(v): - continue - if not isinstance(v, dict): - continue - self.add_row(tbody, columns, current_path, v, depth) - if "functions" not in obj: - # stop if you hit a system with functions - walk(v, current_path, depth + 1) - - walk(data) - - return [table_node] - - def is_empty(self, value): - return value in ({}, [], "", None) - - def add_row(self, tbody, columns, path, value, depth): - """Add a row to the table.""" - row = nodes.row() - indent1 = " " * depth # em spaces for indentation - indent2 = " " * depth * 2 - - cols = [] - cols.append(indent1 + value.get("pbs_code", "")) - cols.append(indent2 + value.get("name", "(noname)")) - cols.append(value.get("abbrev", "")) - cols.append(value.get("desc", "")) - cols.append(value.get("tags", "")) - - for col in cols: - entry = nodes.entry() - para = nodes.paragraph() - para += nodes.Text(col) - entry += para - row += entry - - tbody += row - - -def setup(app): - """Setup for sphinx extension.""" - app.add_directive("plant-data-table", PlantBreakdownStructureTable) - - return { - "version": "0.1", - "parallel_read_safe": True, - "parallel_write_safe": True, - } diff --git a/src/nrsk/plant/plantdata.py b/src/nrsk/plant/plantdata.py deleted file mode 100644 index ebf50b0..0000000 --- a/src/nrsk/plant/plantdata.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Load plant PBS data.""" - -from collections import defaultdict -from pathlib 
diff --git a/src/nrsk/plant/plantdata.py b/src/nrsk/plant/plantdata.py
deleted file mode 100644
index ebf50b0..0000000
--- a/src/nrsk/plant/plantdata.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Load plant PBS data."""
-
-from collections import defaultdict
-from pathlib import Path
-
-from ruamel.yaml import YAML
-
-
-def load_yaml_tree(root_path: str | Path) -> dict:
-    """Load data from yaml tree."""
-    root_path = Path(root_path)
-    yaml = YAML(typ="safe")
-    tree = {}
-
-    for yaml_file in sorted(root_path.rglob("*.yaml")):
-        rel = yaml_file.relative_to(root_path).with_suffix("")  # remove .yaml
-        parts = (
-            rel.parent.parts if rel.name == "index" else (*rel.parent.parts, rel.name)
-        )
-
-        current = tree
-        for part in parts[:-1]:
-            if part not in current:
-                current[part] = {}
-            current = current[part]
-
-        key = parts[-1]
-        data = yaml.load(yaml_file)
-
-        if key == "index":  # treat index.yaml as folder metadata
-            current.update(data or {})
-        else:
-            if (
-                current.get(key) is not None
-                and isinstance(current[key], dict)
-                and isinstance(data, dict)
-            ):
-                current[key].update(data)  # merge if conflict
-            else:
-                current[key] = data
-
-    return tree
diff --git a/src/nrsk/schedule/__init__.py b/src/nrsk/schedule/__init__.py
deleted file mode 100644
index e69de29..0000000
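
Note how the deleted loader derives all nesting from file paths, folding each index.yaml into its enclosing folder's node rather than creating an "index" key. That key derivation can be exercised in isolation (paths illustrative):

    from pathlib import Path

    def key_parts(path: str) -> tuple:
        rel = Path(path).with_suffix("")  # strip .yaml, as in load_yaml_tree
        if rel.name == "index":
            return rel.parent.parts
        return (*rel.parent.parts, rel.name)

    assert key_parts("reactor/core/index.yaml") == ("reactor", "core")
    assert key_parts("reactor/pumps.yaml") == ("reactor", "pumps")
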
"schedule.yaml") -> ProjectFile: predecessors = {} tasks_by_id = {} - for lane in data: + for lane in data["lanes"]: summary = project.addTask() - summary.setName(lane.name) + summary.setName(lane["name"]) - for task_d in lane.tasks: - if task_d.is_milestone: + for task_d in lane["tasks"]: + if task_d.get("milestone"): task = create_task( - summary, task_d.name, Duration.getInstance(0, TimeUnit.DAYS) + summary, task_d["name"], Duration.getInstance(0, TimeUnit.DAYS) ) else: - duration = ( - Duration.getInstance(task_d.duration.days, TimeUnit.DAYS) - if task_d.duration - else None + if not task_d.get("duration_days"): + raise ValueError(f"{task_d} needs a duration") + task = create_task( + summary, + task_d["name"], + Duration.getInstance(task_d["duration_days"], TimeUnit.DAYS), ) - task = create_task(summary, task_d.name, duration) - # track predecessors to build after all tasks exist - if tid := task_d.id: + # track predecessors by ID to build after all tasks exist + if tid := task_d.get("id"): tasks_by_id[tid] = task - for pred_data in task_d.predecessors: + for pred_id in task_d.get("predecessors", []): pred_ids = predecessors.get(task, []) - pred_ids.append(pred_data) + pred_ids.append(pred_id) predecessors[task] = pred_ids for task in project.getTasks(): - for pred_data in predecessors.get(task, []): - pred_id = pred_data.id - pred_task = tasks_by_id[pred_id] - type = RELATION_MAP[pred_data.type] - # lag_duration is handled/translated by pydantic into timedelta - if lag := pred_data.lag: - lag_days = lag.days # note that this truncates to nearest day - else: - lag_days = 0 - lag = Duration.getInstance(lag_days, TimeUnit.DAYS) - task.addPredecessor( - Relation.Builder().predecessorTask(pred_task).lag(lag).type(type) - ) + for pred_id in predecessors.get(task, []): + pred = tasks_by_id[pred_id] + task.addPredecessor(Relation.Builder().predecessorTask(pred)) return project @@ -218,7 +196,7 @@ def _preprocess_plot(project): return df, df_deps -def plot_schedule( # noqa: C901 +def plot_schedule( input_fname: str = "scheduled.xml", project=None, output_fname: str = "schedule.svg" ): """Generate plot of schedule.""" @@ -292,7 +270,7 @@ def plot_schedule( # noqa: C901 plt.title("AMS High-Level Schedule") # plt.tight_layout() plt.savefig(output_fname) - # plt.show() + plt.show() class ScheduleDirective(Directive): @@ -303,69 +281,65 @@ class ScheduleDirective(Directive): def run(self): # noqa: D102 env = self.state.document.settings.env + builder = env.app.builder schedule_data = self.arguments[0] - schedule_data_abs = Path(env.srcdir) / schedule_data + schedule_data_abs = os.path.join(env.srcdir, schedule_data) - if not schedule_data_abs.exists(): + if not os.path.exists(schedule_data_abs): logger.error(f"Schedule file not found: {schedule_data_abs}") return [] - # put image within _static so html builder knows to copy it over. 
@@ -78,7 +66,6 @@ def load_from_yaml(fname: str = "schedule.yaml") -> ProjectFile:
     """Load data file in YAML format."""
     with open(fname) as f:
         data = yaml.safe_load(f)
-    data = ScheduleInput.validate_python(data)
 
     project = ProjectFile()
 
@@ -88,44 +75,35 @@ def load_from_yaml(fname: str = "schedule.yaml") -> ProjectFile:
     predecessors = {}
     tasks_by_id = {}
 
-    for lane in data:
+    for lane in data["lanes"]:
         summary = project.addTask()
-        summary.setName(lane.name)
+        summary.setName(lane["name"])
-        for task_d in lane.tasks:
-            if task_d.is_milestone:
+        for task_d in lane["tasks"]:
+            if task_d.get("milestone"):
                 task = create_task(
-                    summary, task_d.name, Duration.getInstance(0, TimeUnit.DAYS)
+                    summary, task_d["name"], Duration.getInstance(0, TimeUnit.DAYS)
                 )
             else:
-                duration = (
-                    Duration.getInstance(task_d.duration.days, TimeUnit.DAYS)
-                    if task_d.duration
-                    else None
+                if not task_d.get("duration_days"):
+                    raise ValueError(f"{task_d} needs a duration")
+                task = create_task(
+                    summary,
+                    task_d["name"],
+                    Duration.getInstance(task_d["duration_days"], TimeUnit.DAYS),
                 )
-                task = create_task(summary, task_d.name, duration)
 
-            # track predecessors to build after all tasks exist
-            if tid := task_d.id:
+            # track predecessors by ID to build after all tasks exist
+            if tid := task_d.get("id"):
                 tasks_by_id[tid] = task
 
-            for pred_data in task_d.predecessors:
+            for pred_id in task_d.get("predecessors", []):
                 pred_ids = predecessors.get(task, [])
-                pred_ids.append(pred_data)
+                pred_ids.append(pred_id)
                 predecessors[task] = pred_ids
 
     for task in project.getTasks():
-        for pred_data in predecessors.get(task, []):
-            pred_id = pred_data.id
-            pred_task = tasks_by_id[pred_id]
-            type = RELATION_MAP[pred_data.type]
-            # lag_duration is handled/translated by pydantic into timedelta
-            if lag := pred_data.lag:
-                lag_days = lag.days  # note that this truncates to nearest day
-            else:
-                lag_days = 0
-            lag = Duration.getInstance(lag_days, TimeUnit.DAYS)
-            task.addPredecessor(
-                Relation.Builder().predecessorTask(pred_task).lag(lag).type(type)
-            )
+        for pred_id in predecessors.get(task, []):
+            pred = tasks_by_id[pred_id]
+            task.addPredecessor(Relation.Builder().predecessorTask(pred))
 
     return project
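
With the pydantic validation gone, the YAML shape load_from_yaml accepts is implied only by the dict accesses above: a top-level "lanes" list, each lane with "name" and "tasks", and each task carrying "name" plus either "milestone: true" or "duration_days", with optional "id" and "predecessors". A minimal input document (lane and task names invented):

    EXAMPLE_SCHEDULE = """
    lanes:
      - name: Licensing
        tasks:
          - name: Submit application
            id: submit
            duration_days: 10
          - name: Approval received
            milestone: true
            predecessors: [submit]
    """

    import yaml
    data = yaml.safe_load(EXAMPLE_SCHEDULE)  # same parse as load_from_yaml
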
@@ -218,7 +196,7 @@ def _preprocess_plot(project):
     return df, df_deps
 
 
-def plot_schedule(  # noqa: C901
+def plot_schedule(
     input_fname: str = "scheduled.xml", project=None, output_fname: str = "schedule.svg"
 ):
     """Generate plot of schedule."""
@@ -292,7 +270,7 @@ def plot_schedule(
     plt.title("AMS High-Level Schedule")
     # plt.tight_layout()
     plt.savefig(output_fname)
-    # plt.show()
+    plt.show()
 
 
 class ScheduleDirective(Directive):
@@ -303,69 +281,65 @@ def run(self):  # noqa: D102
         env = self.state.document.settings.env
+        builder = env.app.builder
         schedule_data = self.arguments[0]
-        schedule_data_abs = Path(env.srcdir) / schedule_data
+        schedule_data_abs = os.path.join(env.srcdir, schedule_data)
 
-        if not schedule_data_abs.exists():
+        if not os.path.exists(schedule_data_abs):
             logger.error(f"Schedule file not found: {schedule_data_abs}")
             return []
 
-        # put image within _static so html builder knows to copy it over.
-        gen_dir = Path(env.app.srcdir) / "_static" / "generated_assets"
+        # Image output directory
+        gen_dir = os.path.join(env.app.srcdir, "generated_assets")
         ensuredir(gen_dir)
+        ensuredir(os.path.join(env.app.outdir, "_downloads"))
 
         # Name of the generated file
         base = os.path.splitext(os.path.basename(schedule_data))[0]
-        out_image = gen_dir / f"{base}.svg"
+        out_image = os.path.join(gen_dir, f"{base}.svg")
 
         start_date = datetime(2026, 1, 1)
         proj = load_from_yaml(fname=schedule_data)
         solve_schedule(proj, start_date)
         plot_schedule(project=proj, output_fname=out_image)
         writer = UniversalProjectWriter(FileFormat.MSPDI)
-        writer.write(proj, gen_dir / f"{base}_mspdi.xml")
+        writer.write(proj, os.path.join("_build", "_downloads", f"{base}_mspdi.xml"))
 
         env.note_dependency(schedule_data_abs)
 
+        rel = str(os.path.relpath(out_image, env.app.srcdir))
+        # trying to mock /generated_assets/schedule.svg for the build folder
+        # but it ends up in _images actually.
+        # somewhat hacky but works in subfolders
+        abs_rel = os.path.join("/", rel)
+        image_node = nodes.image(uri=abs_rel)
+        uri = builder.get_relative_uri(env.docname, "_images/" + f"{base}.svg")
+        uri = uri.replace(".html", "")
+
+        ref_node = nodes.reference("", "", refuri=uri)
+        ref_node += image_node
+        ref_node["target"] = "_blank"
+        ref_node["rel"] = "noopener"
+
+        uri_dl1 = builder.get_relative_uri(
+            env.docname, "_downloads/" + f"{base}_mspdi.xml"
+        )
+        uri_dl1 = uri_dl1.replace(".html", "")
+        download1 = nodes.reference(
+            text="Download schedule in MS Project XML format",
+            refuri=uri_dl1,
+            classes=["download-link"],
+        )
 
-        uri = f"/_static/generated_assets/{base}.svg"
-        image_node = nodes.image(uri=uri)
         paragraph = nodes.paragraph()
-
-        # download link only makes sense in web env, not PDF
-        builder_name = self.state.document.settings.env.app.builder.name
-        if builder_name not in ("html", "singlehtml", "dirhtml"):
-            paragraph += image_node
-        else:
-            # add hyperlink to image. Since this may be called from a subdir we need
-            # relative paths that walk up appropriately.
-            docname = env.docname  # subdir/mydoc
-            relative_root_path = "../" * docname.count(os.sep)
-            hyperlink_uri = relative_root_path + uri[1:]
-
-            # Result when docname is 'subdir/mydoc':
-            # hyperlink_uri will be: ../_static/generated_assets/my_diagram.svg
-            ref_node = nodes.reference("", "", refuri=hyperlink_uri)
-            ref_node += image_node
-            ref_node["target"] = "_blank"
-            ref_node["rel"] = "noopener"
-            paragraph += ref_node
-
-            # and hyperlink to schedule data
-            hyperlink_uri = (
-                relative_root_path + f"_static/generated_assets/{base}_mspdi.xml"
-            )
-            download1 = nodes.reference(
-                text="Download schedule in MS Project XML format",
-                refuri=hyperlink_uri,
-                classes=["download-link"],
-            )
-            paragraph += download1
+        paragraph += ref_node
+        paragraph += download1
 
         return [paragraph]
 
 
-def setup(app):  # noqa: D103
+def setup(app):
+    """Setup for sphinx extension."""
     app.add_directive("schedule", ScheduleDirective)
 
     return {
diff --git a/tests/__init__.py b/tests/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tests/unit/models/__init__.py b/tests/unit/models/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tests/unit/models/test_document_model.py b/tests/unit/models/test_document_model.py
deleted file mode 100644
index a473683..0000000
--- a/tests/unit/models/test_document_model.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""Tests for Document model."""
-
-from datetime import datetime
-
-import pytest
-from pydantic import ValidationError
-
-from nrsk.models import Document
-
-
-@pytest.fixture
-def valid_document_data():  # noqa: D103
-    return {
-        "uuid": "2deac04a-d1d1-4e42-b1a7-cc941d9da9b5",
-        "title": "Project Proposal Q4",
-        "revision": "2",
-        "type": "CALC",
-        "originators": ["jane@example.com"],
-        "review_status": "IN REVIEW",
-        "status": "RESERVED",
-    }
-
-
-def test_document_model_success(valid_document_data):
-    """Test that valid input data correctly creates a Document instance."""
-    doc = Document(**valid_document_data)
-
-    assert isinstance(doc, Document)
-    assert doc.title == "Project Proposal Q4"
-    assert doc.status == Document.STATUS.RESERVED
-    assert doc.status.value == "RESERVED"
-    assert doc.status_category == "Not Yet Approved"
-
-
-@pytest.mark.parametrize(
-    "invalid_status",
-    [
-        "Reserved",  # Capitalized (case sensitive)
-        "re-served",  # Hyphenated (typo)
-        "finalized",  # Non-existent status
-        123,  # Wrong type (integer)
-    ],
-)
-def test_document_status_invalid_enum(valid_document_data, invalid_status):
-    """Tests that the model raises ValidationError for invalid status strings."""
-    data = valid_document_data.copy()
-    data["status"] = invalid_status
-
-    with pytest.raises(ValidationError) as excinfo:
-        Document(**data)
-
-    assert any("status" in err["loc"] for err in excinfo.value.errors())
-    assert "Input should be " in str(excinfo.value)
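
The deleted test file was the only executable specification of the Document model's status contract: exact-value enum matching exposed as Document.STATUS, plus a derived status_category. A hedged reconstruction of just enough model to satisfy those assertions (the real nrsk.models.Document surely has more statuses and fields; everything below is inferred from the test, not the source):

    from enum import Enum
    from typing import ClassVar

    from pydantic import BaseModel

    class _Status(str, Enum):
        RESERVED = "RESERVED"  # other members unknown from the test alone

    class Document(BaseModel):
        uuid: str
        title: str
        revision: str
        type: str
        originators: list[str]
        review_status: str
        status: _Status

        STATUS: ClassVar[type[_Status]] = _Status  # mirrors Document.STATUS.RESERVED

        @property
        def status_category(self) -> str:
            # invented mapping; the test only pins RESERVED -> "Not Yet Approved"
            return "Not Yet Approved" if self.status is _Status.RESERVED else "Approved"

With an enum-typed field, pydantic rejects near-misses like "Reserved" or 123 with "Input should be 'RESERVED'", which is exactly what the parametrized test above checked.
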