mirror of
https://github.com/enjoy-digital/litex.git
synced 2025-01-04 09:52:26 -05:00
commit
5ff02e23a0
7 changed files with 1095 additions and 0 deletions
177
litex/soc/doc/__init__.py
Normal file
177
litex/soc/doc/__init__.py
Normal file
|
@ -0,0 +1,177 @@
|
|||
# This file is Copyright (c) 2020 Sean Cross <sean@xobs.io>
|
||||
# License: BSD
|
||||
|
||||
import os
|
||||
import pathlib
|
||||
import datetime
|
||||
|
||||
from litex.soc.interconnect.csr import _CompoundCSR
|
||||
from litex.soc.integration import export
|
||||
from .csr import DocumentedCSRRegion
|
||||
from .module import gather_submodules, ModuleNotDocumented, DocumentedModule, DocumentedInterrupts
|
||||
from .rst import reflow
|
||||
|
||||
default_sphinx_configuration = """
|
||||
project = '{}'
|
||||
copyright = '{}, {}'
|
||||
author = '{}'
|
||||
extensions = [
|
||||
'sphinx.ext.autosectionlabel',
|
||||
'sphinxcontrib.wavedrom',{}
|
||||
]
|
||||
templates_path = ['_templates']
|
||||
exclude_patterns = []
|
||||
offline_skin_js_path = "https://wavedrom.com/skins/default.js"
|
||||
offline_wavedrom_js_path = "https://wavedrom.com/WaveDrom.js"
|
||||
html_theme = 'alabaster'
|
||||
html_static_path = ['_static']
|
||||
"""
|
||||
|
||||
def generate_svd(soc, buildpath, filename=None, name="soc", **kwargs):
    """Export the SoC's register map as a CMSIS-SVD file.

    Arguments:
        soc: The SoC instance to document.
        buildpath (str): Directory in which to write the SVD file.
        filename (str, optional): Output filename; defaults to ``<name>.svd``.
        name (str): Device name, forwarded to the SVD exporter.
        **kwargs: Extra arguments passed through to ``export.get_svd()``.
    """
    if filename is None:
        filename = name + ".svd"
    kwargs["name"] = name
    # os.path.join is portable and tolerates a trailing separator in
    # buildpath, unlike the previous manual "/" concatenation.
    with open(os.path.join(buildpath, filename), "w", encoding="utf-8") as svd:
        svd.write(export.get_svd(soc, **kwargs))
|
||||
|
||||
|
||||
def generate_docs(soc, base_dir, project_name="LiteX SoC Project",
                  author="Anonymous", sphinx_extensions=None, quiet=False, note_pulses=False,
                  from_scratch=True):
    """Generate Sphinx documentation for `soc` under `base_dir`.

    Writes a ``conf.py``, an ``index.rst``, and one ``.rst`` file per CSR
    region and per documented non-CSR module, plus the wavedrom JavaScript
    assets needed to render register diagrams.

    Arguments:
        soc: The SoC instance to document.
        base_dir (str): Output directory; created if it doesn't exist.
        project_name (str): Name used in page titles and ``conf.py``.
        author (str): Author / copyright holder written to ``conf.py``.
        sphinx_extensions (list of str, optional): Extra Sphinx extensions
            to enable.  Possible extra extensions::

                [
                    'm2r',
                    'recommonmark',
                    'sphinx_rtd_theme',
                    'sphinx_autodoc_typehints',
                ]

        quiet (bool): Suppress the "how to build" hint printed to stdout.
        note_pulses (bool): Note self-clearing (`pulse`) bits in field docs.
        from_scratch (bool): Rewrite ``conf.py``/``index.rst`` even if they
            already exist.
    """
    # Use a None sentinel instead of a mutable [] default so every call
    # gets its own list (avoids the shared-mutable-default pitfall).
    if sphinx_extensions is None:
        sphinx_extensions = []

    # Ensure the target directory is a full path
    if base_dir[-1] != '/':
        base_dir = base_dir + '/'

    # Ensure the output directory exists
    pathlib.Path(base_dir + "/_static").mkdir(parents=True, exist_ok=True)

    # Create the sphinx configuration file if the user has requested,
    # or if it doesn't exist already.
    if from_scratch or not os.path.isfile(base_dir + "conf.py"):
        with open(base_dir + "conf.py", "w", encoding="utf-8") as conf:
            year = datetime.datetime.now().year
            sphinx_ext_str = ""
            for ext in sphinx_extensions:
                sphinx_ext_str += "\n    \"{}\",".format(ext)
            print(default_sphinx_configuration.format(project_name, year,
                author, author, sphinx_ext_str), file=conf)

    if not quiet:
        print("Generate the documentation by running `sphinx-build -M html {} {}_build`".format(base_dir, base_dir))

    # Gather all interrupts so we can easily map IRQ numbers to CSR sections
    interrupts = {}
    for csr, irq in sorted(soc.soc_interrupt_map.items()):
        interrupts[csr] = irq

    # Convert each CSR region into a DocumentedCSRRegion.
    # This process will also expand each CSR into a DocumentedCSR,
    # which means that CompoundCSRs (such as CSRStorage and CSRStatus)
    # that are larger than the buswidth will be turned into multiple
    # DocumentedCSRs.
    documented_regions = []
    seen_modules = set()
    regions = []

    # Previously, litex contained a function to gather csr regions.
    if hasattr(soc, "get_csr_regions"):
        regions = soc.get_csr_regions()
    else:
        # Now we just access the regions directly.
        for region_name, region in soc.csr_regions.items():
            regions.append((region_name, region.origin,
                            region.busword, region.obj))

    for csr_region in regions:
        module = None
        if hasattr(soc, csr_region[0]):
            module = getattr(soc, csr_region[0])
            seen_modules.add(module)
        submodules = gather_submodules(module)

        documented_region = DocumentedCSRRegion(
            csr_region, module, submodules, csr_data_width=soc.csr_data_width)
        if documented_region.name in interrupts:
            documented_region.document_interrupt(
                soc, submodules, interrupts[documented_region.name])
        documented_regions.append(documented_region)

    # Document any modules that are not CSRs.
    # TODO: Add memory maps here.
    additional_modules = [
        DocumentedInterrupts(interrupts),
    ]
    for (mod_name, mod) in soc._submodules:
        if mod not in seen_modules:
            try:
                additional_modules.append(DocumentedModule(mod_name, mod))
            except ModuleNotDocumented:
                pass

    # Create index.rst containing links to all of the generated files.
    # If the user has set `from_scratch=False`, then skip this step.
    if from_scratch or not os.path.isfile(base_dir + "index.rst"):
        with open(base_dir + "index.rst", "w", encoding="utf-8") as index:
            print("""
Documentation for {}
{}

""".format(project_name, "="*len("Documentation for " + project_name)), file=index)

            if len(additional_modules) > 0:
                print("""
Modules
=======

.. toctree::
    :maxdepth: 1
""", file=index)
                for module in additional_modules:
                    print("    {}".format(module.name), file=index)

            if len(documented_regions) > 0:
                print("""
Register Groups
===============

.. toctree::
    :maxdepth: 1
""", file=index)
                for region in documented_regions:
                    print("    {}".format(region.name), file=index)

            print("""
Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
""", file=index)

    # Create a Region file for each of the documented CSR regions.
    for region in documented_regions:
        with open(base_dir + region.name + ".rst", "w", encoding="utf-8") as outfile:
            region.print_region(outfile, base_dir, note_pulses)

    # Create a Region file for each additional non-CSR module
    for region in additional_modules:
        with open(base_dir + region.name + ".rst", "w", encoding="utf-8") as outfile:
            region.print_region(outfile, base_dir, note_pulses)

    # Copy over wavedrom javascript and configuration files
    with open(os.path.dirname(__file__) + "/static/WaveDrom.js", "r") as wd_in:
        with open(base_dir + "/_static/WaveDrom.js", "w") as wd_out:
            wd_out.write(wd_in.read())
    with open(os.path.dirname(__file__) + "/static/default.js", "r") as wd_in:
        with open(base_dir + "/_static/default.js", "w") as wd_out:
            wd_out.write(wd_in.read())
|
471
litex/soc/doc/csr.py
Normal file
471
litex/soc/doc/csr.py
Normal file
|
@ -0,0 +1,471 @@
|
|||
# This file is Copyright (c) 2020 Sean Cross <sean@xobs.io>
|
||||
# License: BSD
|
||||
|
||||
from migen import *
|
||||
from migen.util.misc import xdir
|
||||
from migen.fhdl.specials import Memory
|
||||
|
||||
from litex.soc.integration.doc import ModuleDoc
|
||||
from litex.soc.interconnect.csr_bus import SRAM
|
||||
from litex.soc.interconnect.csr import _CompoundCSR, CSRStatus, CSRStorage, CSRField, _CSRBase
|
||||
from litex.soc.interconnect.csr_eventmanager import _EventSource, SharedIRQ, EventManager, EventSourceLevel, EventSourceProcess, EventSourcePulse
|
||||
|
||||
import textwrap
|
||||
|
||||
from .rst import print_table, reflow
|
||||
|
||||
class DocumentedCSRField:
    """Documentation-oriented snapshot of a ``CSRField``.

    Copies the attributes relevant for documentation out of a live field
    so they can be edited (e.g. during window splitting) without touching
    the hardware description.
    """

    # Attributes copied verbatim from the source field.
    _COPIED = ("name", "size", "offset", "description", "access", "pulse", "values")

    def __init__(self, field):
        for attr in self._COPIED:
            setattr(self, attr, getattr(field, attr))
        # The reset value lives behind the field's reset signal.
        self.reset_value = field.reset.value

        # If this is part of a sub-CSR, this value will be different
        self.start = None
|
||||
|
||||
class DocumentedCSR:
    """Flat, documentation-oriented view of a single CSR (or sub-CSR word).

    Attributes mirror the constructor arguments; ``description`` and every
    field description are re-flowed for reStructuredText output via
    :func:`reflow`.
    """

    def trim(self, docstring):
        # Normalize reST whitespace; pass None through untouched.
        if docstring is not None:
            return reflow(docstring)
        return None

    def __init__(self, name, address, short_numbered_name="", short_name="", reset=0,
                 offset=0, size=8, description=None, access="read-write", fields=None):
        """Record one documented CSR.

        Arguments:
            name (str): Fully-qualified register name (REGION_CSR[N]).
            address (int): Absolute bus address of this word.
            short_numbered_name (str): CSR name with sub-word suffix.
            short_name (str): CSR name without region prefix.
            reset (int): Reset value of this word.
            offset (int): Bit offset of this word within the full CSR.
            size (int): Width of this word in bits.
            description (str, optional): reST description, re-flowed.
            access (str): "read-only" or "read-write".
            fields (list, optional): DocumentedCSRField entries.
        """
        # Use a None sentinel instead of the previous mutable `fields=[]`
        # default, which was one shared list observed by every caller that
        # omitted the argument.
        if fields is None:
            fields = []
        self.name = name
        self.short_name = short_name
        self.short_numbered_name = short_numbered_name
        self.address = address
        self.offset = offset
        self.size = size
        if size == 0:
            print("!!! Warning: creating CSR of size 0 {}".format(name))
        self.description = self.trim(description)
        self.reset_value = reset
        self.fields = fields
        self.access = access
        for f in self.fields:
            f.description = self.trim(f.description)
|
||||
|
||||
class DocumentedCSRRegion:
    """Documentation for one CSR bus region (one peripheral's registers).

    Wraps a ``(name, origin, busword, obj)`` csr_region tuple and expands
    its CSRs into flat :obj:`DocumentedCSR` entries ready for reST output.
    """

    def __init__(self, csr_region, module=None, submodules=[], csr_data_width=8):
        # NOTE(review): `submodules` is accepted but never used here, and its
        # `[]` default is a shared mutable object — confirm before removing.
        # csr_region unpacks as (name, origin, busword, raw_csrs).
        (self.name, self.origin, self.busword, self.raw_csrs) = csr_region
        # Running address advanced by document_csr() as registers are laid out.
        self.current_address = self.origin
        self.sections = []
        self.csrs = []
        self.csr_data_width = csr_data_width

        # If the section has extra documentation, gather it.
        if isinstance(module, ModuleDoc):
            self.sections.append(module)
        if module is not None and hasattr(module, "get_module_documentation"):
            docs = module.get_module_documentation()
            for doc in docs:
                self.sections.append(doc)

        # The region payload may be an SRAM, a list of CSRs, or a Memory;
        # only CSR lists are actually documented, the rest just log.
        if isinstance(self.raw_csrs, SRAM):
            print("{}@{:x}: Found SRAM: {}".format(self.name, self.origin, self.raw_csrs))
        elif isinstance(self.raw_csrs, list):
            for csr in self.raw_csrs:
                if isinstance(csr, _CSRBase):
                    self.document_csr(csr)
                elif isinstance(csr, SRAM):
                    print("{}: Found SRAM in the list: {}".format(self.name, csr))
                else:
                    print("{}: Unknown module: {}".format(self.name, csr))
        elif isinstance(self.raw_csrs, Memory):
            # Memories get a single placeholder entry instead of full docs.
            self.csrs.append(DocumentedCSR(
                self.name.upper(), self.origin, short_numbered_name=self.name.upper(), short_name=self.name.upper(), reset=0, size=self.raw_csrs.width,
                description="{} x {}-bit memory".format(self.raw_csrs.width, self.raw_csrs.depth)
            ))
            print("{}@{:x}: Found memory that's {} x {} (but memories aren't documented yet)".format(self.name, self.origin, self.raw_csrs.width, self.raw_csrs.depth))
        else:
            print("{}@{:x}: Unexpected item on the CSR bus: {}".format(self.name, self.origin, self.raw_csrs))
|
||||
|
||||
def bit_range(self, start, end, empty_if_zero=False):
|
||||
end -= 1
|
||||
if start == end:
|
||||
if empty_if_zero:
|
||||
return ""
|
||||
return "[{}]".format(start)
|
||||
else:
|
||||
return "[{}:{}]".format(end, start)
|
||||
|
||||
def document_interrupt(self, soc, submodules, irq):
|
||||
managers = submodules["event_managers"]
|
||||
for m in managers:
|
||||
sources_u = [y for x, y in xdir(m, True) if isinstance(y, _EventSource)]
|
||||
sources = sorted(sources_u, key=lambda x: x.duid)
|
||||
|
||||
def source_description(src):
|
||||
if hasattr(src, "name") and src.name is not None:
|
||||
base_text = "`1` if a `{}` event occurred. ".format(src.name)
|
||||
else:
|
||||
base_text = "`1` if a this particular event occurred. "
|
||||
if hasattr(src, "description") and src.description is not None:
|
||||
return src.description
|
||||
elif isinstance(src, EventSourceLevel):
|
||||
return base_text + "This Event is **level triggered** when the signal is **high**."
|
||||
elif isinstance(src, EventSourcePulse):
|
||||
return base_text + "This Event is triggered on a **rising** edge."
|
||||
elif isinstance(src, EventSourceProcess):
|
||||
return base_text + "This Event is triggered on a **falling** edge."
|
||||
else:
|
||||
return base_text + "This Event uses an unknown method of triggering."
|
||||
|
||||
# Patch the DocumentedCSR to add our own Description, if one doesn't exist.
|
||||
for dcsr in self.csrs:
|
||||
short_name = dcsr.short_name.upper()
|
||||
if short_name == m.status.name.upper():
|
||||
if dcsr.fields is None or len(dcsr.fields) == 0:
|
||||
fields = []
|
||||
for i, source in enumerate(sources):
|
||||
if hasattr(source, "name") and source.name is not None:
|
||||
fields.append(DocumentedCSRField(CSRField(source.name, offset=i, description="Level of the `{}` event".format(source.name))))
|
||||
else:
|
||||
fields.append(DocumentedCSRField(CSRField("event{}".format(i), offset=i, description="Level of the `event{}` event".format(i))))
|
||||
dcsr.fields = fields
|
||||
if dcsr.description is None:
|
||||
dcsr.description = "This register contains the current raw level of the Event trigger. Writes to this register have no effect."
|
||||
elif short_name == m.pending.name.upper():
|
||||
if dcsr.fields is None or len(dcsr.fields) == 0:
|
||||
fields = []
|
||||
for i, source in enumerate(sources):
|
||||
if hasattr(source, "name") and source.name is not None:
|
||||
fields.append(DocumentedCSRField(CSRField(source.name, offset=i, description=source_description(source))))
|
||||
else:
|
||||
fields.append(DocumentedCSRField(CSRField("event{}".format(i), offset=i, description=source_description(source))))
|
||||
dcsr.fields = fields
|
||||
if dcsr.description is None:
|
||||
dcsr.description = "When an Event occurs, the corresponding bit will be set in this register. To clear the Event, set the corresponding bit in this register."
|
||||
elif short_name == m.enable.name.upper():
|
||||
if dcsr.fields is None or len(dcsr.fields) == 0:
|
||||
fields = []
|
||||
for i, source in enumerate(sources):
|
||||
if hasattr(source, "name") and source.name is not None:
|
||||
fields.append(DocumentedCSRField(CSRField(source.name, offset=i, description="Write a `1` to enable the `{}` Event".format(source.name))))
|
||||
else:
|
||||
fields.append(DocumentedCSRField(CSRField("event{}".format(i), offset=i, description="Write a `1` to enable the `{}` Event".format(i))))
|
||||
dcsr.fields = fields
|
||||
if dcsr.description is None:
|
||||
dcsr.description = "This register enables the corresponding Events. Write a `0` to this register to disable individual events."
|
||||
|
||||
def sub_csr_bit_range(self, csr, offset):
|
||||
nwords = (csr.size + self.busword - 1)//self.busword
|
||||
i = nwords - offset - 1
|
||||
nbits = min(csr.size - i*self.busword, self.busword) - 1
|
||||
name = (csr.name + str(i) if nwords > 1 else csr.name).upper()
|
||||
origin = i*self.busword
|
||||
return (origin, nbits, name)
|
||||
|
||||
    def split_fields(self, fields, start, end):
        """Split `fields` into a sub-list that only contains the fields
        between `start` and `end`.

        This means that sometimes registers will get truncated. For example,
        if we're going from [8:15] and we have a register that spans [7:15],
        the bottom bit will be cut off. To account for this, we set the `.start`
        property of the resulting split field to `1`, the `.offset` to `0`, and the
        `.size` to 7.
        """
        split_f = []
        for field in fields:
            # Skip fields that lie entirely outside the [start, end] window.
            if field.offset > end:
                continue
            if field.offset + field.size < start:
                continue
            # Work on a copy so the caller's field objects stay untouched.
            new_field = DocumentedCSRField(field)

            # Re-base the field onto this window.
            new_field.offset -= start
            if new_field.offset < 0:
                # Field begins below the window: cut off its bottom bits and
                # remember how many were cut in `.start`.
                underflow_amount = -new_field.offset
                new_field.offset = 0
                new_field.size -= underflow_amount
                new_field.start = underflow_amount
            # If it extends past the range, clamp the size to the range
            if new_field.offset + new_field.size > (end - start):
                new_field.size = (end - start) - new_field.offset + 1
                if new_field.start is None:
                    new_field.start = 0
            split_f.append(new_field)
        return split_f
|
||||
|
||||
    def print_reg(self, reg, stream):
        """Emit a wavedrom register diagram for `reg` as an rST directive.

        Writes a ``.. wavedrom::`` block whose JSON body describes each
        documented field (or, for a field-less register, one named span),
        padding undocumented bit ranges with anonymous ``{"bits": n}``
        entries so the diagram always covers the full bus word.
        """
        print("", file=stream)
        print("    .. wavedrom::", file=stream)
        print("        :caption: {}".format(reg.name), file=stream)
        print("", file=stream)
        print("        {", file=stream)
        print("            \"reg\": [", file=stream)
        if len(reg.fields) > 0:
            # One JSON entry per field, in ascending bit order.
            bit_offset = 0
            for field in reg.fields:
                field_name = field.name
                attr_str = ""
                # Non-zero reset values are shown via wavedrom's "attr".
                if field.reset_value != 0:
                    attr_str = "\"attr\": '" + str(field.reset_value) + "', "
                type_str = ""
                # wavedrom "type": 4 colors pulse (self-clearing) bits.
                if field.pulse:
                    type_str = "\"type\": 4, "
                # Fields truncated by sub-CSR splitting carry a `.start`;
                # annotate the name with the surviving bit range.
                if hasattr(field, "start") and field.start is not None:
                    field_name = "{}{}".format(field.name, self.bit_range(field.start, field.size + field.start, empty_if_zero=True))
                term=","
                # Pad any gap between the previous field and this one.
                if bit_offset != field.offset:
                    print("                {\"bits\": " + str(field.offset - bit_offset) + "},", file=stream)
                # The last entry of the JSON array must not end with a comma.
                if field.offset + field.size == self.busword:
                    term=""
                print("                {\"name\": \"" + field_name + "\", " + type_str + attr_str + "\"bits\": " + str(field.size) + "}" + term, file=stream)
                bit_offset = field.offset + field.size
            # Pad the remainder of the bus word, if any.
            if bit_offset != self.busword:
                print("                {\"bits\": " + str(self.busword - bit_offset) + "}", file=stream)
        else:
            # No fields: draw the register as one named span plus padding.
            term=""
            if reg.size != self.csr_data_width:
                term=","
            attr_str = ""
            if reg.reset_value != 0:
                attr_str = "\"attr\": 'reset: " + str(reg.reset_value) + "', "
            print("                {\"name\": \"" + reg.short_name.lower() + self.bit_range(reg.offset, reg.offset + reg.size, empty_if_zero=True) + "\", " + attr_str + "\"bits\": " + str(reg.size) + "}" + term, file=stream)
            if reg.size != self.csr_data_width:
                print("                {\"bits\": " + str(self.csr_data_width - reg.size) + "},", file=stream)
        print("            ], \"config\": {\"hspace\": 400, \"bits\": " + str(self.busword) + ", \"lanes\": 1 }, \"options\": {\"hspace\": 400, \"bits\": " + str(self.busword) + ", \"lanes\": 1}", file=stream)
        print("        }", file=stream)
        print("", file=stream)
|
||||
|
||||
def get_csr_reset(self, csr):
|
||||
reset = 0
|
||||
if hasattr(csr, "fields"):
|
||||
for f in csr.fields.fields:
|
||||
reset = reset | (f.reset_value << f.offset)
|
||||
elif hasattr(csr, "storage"):
|
||||
reset = int(csr.storage.reset.value)
|
||||
elif hasattr(csr, "status"):
|
||||
reset = int(csr.status.reset.value)
|
||||
return reset
|
||||
|
||||
def get_csr_size(self, csr):
|
||||
nbits = 0
|
||||
if hasattr(csr, "fields"):
|
||||
for f in csr.fields.fields:
|
||||
nbits = max(nbits, f.size + f.offset)
|
||||
elif hasattr(csr, "storage"):
|
||||
nbits = int(csr.storage.nbits)
|
||||
elif hasattr(csr, "status"):
|
||||
nbits = int(csr.status.nbits)
|
||||
elif hasattr(csr ,"r"):
|
||||
nbits = int(csr.r.nbits)
|
||||
elif hasattr(csr, "value"):
|
||||
nbits = int(csr.value.nbits)
|
||||
else:
|
||||
raise ValueError("Internal error: can't determine CSR size of {}".format(csr))
|
||||
return nbits
|
||||
|
||||
def document_csr(self, csr):
|
||||
"""Generates one or more DocumentedCSR, which will get appended
|
||||
to self.csrs"""
|
||||
fields = []
|
||||
description = None
|
||||
atomic_write = False
|
||||
full_name = self.name.upper() + "_" + csr.name.upper()
|
||||
reset = 0
|
||||
if isinstance(csr, CSRStatus):
|
||||
access = "read-only"
|
||||
else:
|
||||
access = "read-write"
|
||||
|
||||
if hasattr(csr, "fields"):
|
||||
fields = csr.fields.fields
|
||||
if hasattr(csr, "description"):
|
||||
description = csr.description
|
||||
if hasattr(csr, "atomic_write"):
|
||||
atomic_write = csr.atomic_write
|
||||
size = self.get_csr_size(csr)
|
||||
reset = self.get_csr_reset(csr)
|
||||
|
||||
# If the CSR is composed of multiple sub-CSRs, document each
|
||||
# one individually.
|
||||
if isinstance(csr, _CompoundCSR) and len(csr.simple_csrs) > 1:
|
||||
for i in range(len(csr.simple_csrs)):
|
||||
(start, length, name) = self.sub_csr_bit_range(csr, i)
|
||||
sub_name = self.name.upper() + "_" + name
|
||||
bits_str = "Bits {}-{} of `{}`.".format(start, start+length, full_name)
|
||||
if atomic_write:
|
||||
if i == (range(len(csr.simple_csrs))-1):
|
||||
bits_str += "Writing this register triggers an update of " + full_name
|
||||
else:
|
||||
bits_str += "The value won't take effect until `" + full_name + "0` is written."
|
||||
if i == 0:
|
||||
d = description
|
||||
if description is None:
|
||||
d = bits_str
|
||||
else:
|
||||
d = bits_str + " " + reflow(d)
|
||||
self.csrs.append(DocumentedCSR(
|
||||
sub_name, self.current_address, short_numbered_name=name.upper(), short_name=csr.name.upper(), reset=(reset>>start)&((2**length)-1),
|
||||
offset=start, size=self.csr_data_width,
|
||||
description=d, fields=self.split_fields(fields, start, start + length), access=access
|
||||
))
|
||||
else:
|
||||
self.csrs.append(DocumentedCSR(
|
||||
sub_name, self.current_address, short_numbered_name=name.upper(), short_name=csr.name.upper(), reset=(reset>>start)&((2**length)-1),
|
||||
offset=start, size=self.csr_data_width,
|
||||
description=bits_str, fields=self.split_fields(fields, start, start + length), access=access
|
||||
))
|
||||
self.current_address += 4
|
||||
else:
|
||||
self.csrs.append(DocumentedCSR(
|
||||
full_name, self.current_address, short_numbered_name=csr.name.upper(), short_name=csr.name.upper(), reset=reset, size=size,
|
||||
description=description, fields=fields, access=access
|
||||
))
|
||||
self.current_address += 4
|
||||
|
||||
def make_value_table(self, values):
|
||||
ret = ""
|
||||
max_value_width=len("Value")
|
||||
max_description_width=len("Description")
|
||||
for v in values:
|
||||
(value, name, description) = (None, None, None)
|
||||
if len(v) == 2:
|
||||
(value, description) = v
|
||||
elif len(v) == 3:
|
||||
(value, name, description) = v
|
||||
else:
|
||||
raise ValueError("Unexpected length of CSRField's value tuple")
|
||||
|
||||
# Ensure the value is a string
|
||||
if not isinstance(value, str):
|
||||
value = "{}".format(value)
|
||||
|
||||
max_value_width = max(max_value_width, len(value))
|
||||
for d in description.splitlines():
|
||||
max_description_width = max(max_description_width, len(d))
|
||||
ret += "\n"
|
||||
ret += "+-" + "-"*max_value_width + "-+-" + "-"*max_description_width + "-+\n"
|
||||
ret += "| " + "Value".ljust(max_value_width) + " | " + "Description".ljust(max_description_width) + " |\n"
|
||||
ret += "+=" + "="*max_value_width + "=+=" + "="*max_description_width + "=+\n"
|
||||
for v in values:
|
||||
(value, name, description) = (None, None, None)
|
||||
if len(v) == 2:
|
||||
(value, description) = v
|
||||
elif len(v) == 3:
|
||||
(value, name, description) = v
|
||||
else:
|
||||
raise ValueError("Unexpected length of CSRField's value tuple")
|
||||
|
||||
# Ensure the value is a string
|
||||
if not isinstance(value, str):
|
||||
value = "{}".format(value)
|
||||
|
||||
value = value.ljust(max_value_width)
|
||||
first_line = True
|
||||
for d in description.splitlines():
|
||||
if first_line:
|
||||
ret += "| {} | {} |\n".format(value, d.ljust(max_description_width))
|
||||
first_line = False
|
||||
else:
|
||||
ret += "| {} | {} |\n".format(" ".ljust(max_value_width), d.ljust(max_description_width))
|
||||
ret += "+-" + "-"*max_value_width + "-+-" + "-"*max_description_width + "-+\n"
|
||||
return ret
|
||||
|
||||
    def print_region(self, stream, base_dir, note_pulses):
        """Write this region's full reST page to `stream`.

        Emits: the region title, each documentation section gathered from
        the module (rST inline, markdown via ``.. mdinclude::``), a register
        summary table, and a detailed entry (address, description, wavedrom
        diagram, field table) for every documented CSR.

        Arguments:
            stream: Destination text file object.
            base_dir (str): Output directory, used to cache inline markdown
                sections as temporary files for mdinclude.
            note_pulses (bool): Append a note to `pulse` field descriptions.
        """
        title = "{}".format(self.name.upper())
        print(title, file=stream)
        print("=" * len(title), file=stream)
        print("", file=stream)

        for section in self.sections:
            title = textwrap.dedent(section.title())
            body = textwrap.dedent(section.body())
            print("{}".format(title), file=stream)
            print("-" * len(title), file=stream)

            if section.format() == "rst":
                # reST can be emitted inline.
                print(body, file=stream)
            elif section.format() == "md":
                # Markdown must go through mdinclude; use the section's own
                # file when it has one, otherwise cache the body to disk.
                filename = section.path()
                if filename is not None:
                    print(".. mdinclude:: " + filename, file=stream)
                else:
                    # hash(title) keeps cached section filenames unique
                    # within this region.
                    temp_filename = self.name + '-' + str(hash(title)) + "." + section.format()
                    with open(base_dir + "/" + temp_filename, "w") as cache:
                        print(body, file=cache)
                    print(".. mdinclude:: " + temp_filename, file=stream)
            print("", file=stream)

        if len(self.csrs) > 0:
            title = "Register Listing for {}".format(self.name.upper())
            print(title, file=stream)
            print("-" * len(title), file=stream)

            # Summary table linking each register to its detailed entry.
            csr_table = [["Register", "Address"]]
            for csr in self.csrs:
                csr_table.append([":ref:`{} <{}>`".format(csr.name, csr.name), ":ref:`0x{:08x} <{}>`".format(csr.address, csr.name)])
            print_table(csr_table, stream)

            for csr in self.csrs:
                print("{}".format(csr.name), file=stream)
                print("^" * len(csr.name), file=stream)
                print("", file=stream)
                print("`Address: 0x{:08x} + 0x{:x} = 0x{:08x}`".format(self.origin, csr.address - self.origin, csr.address), file=stream)
                print("", file=stream)
                if csr.description is not None:
                    print(textwrap.indent(csr.description, prefix="    "), file=stream)
                self.print_reg(csr, stream)
                if len(csr.fields) > 0:
                    # First pass over the fields: measure the column widths
                    # of the Field/Name/Description grid table.
                    max_field_width=len("Field")
                    max_name_width=len("Name")
                    max_description_width=len("Description")
                    value_tables = {}

                    for f in csr.fields:
                        field = self.bit_range(f.offset, f.offset + f.size)
                        max_field_width = max(max_field_width, len(field))

                        name = f.name
                        # Fields split across sub-CSRs carry `.start` and are
                        # shown with their surviving bit range.
                        if hasattr(f, "start") and f.start is not None:
                            name = "{}{}".format(f.name, self.bit_range(f.start, f.size + f.start))
                        max_name_width = max(max_name_width, len(name))

                        description = f.description
                        if description is None:
                            description = ""
                        if note_pulses and f.pulse:
                            description = description + "\n\nWriting a 1 to this bit triggers the function."
                        for d in description.splitlines():
                            max_description_width = max(max_description_width, len(d))
                        if f.values is not None:
                            # Value tables are rendered once and nested in
                            # the Description column.
                            value_tables[f.name] = self.make_value_table(f.values)
                            for d in value_tables[f.name].splitlines():
                                max_description_width = max(max_description_width, len(d))
                    # Second pass: emit the grid table itself.
                    print("", file=stream)
                    print("+-" + "-"*max_field_width + "-+-" + "-"*max_name_width + "-+-" + "-"*max_description_width + "-+", file=stream)
                    print("| " + "Field".ljust(max_field_width) + " | " + "Name".ljust(max_name_width) + " | " + "Description".ljust(max_description_width) + " |", file=stream)
                    print("+=" + "="*max_field_width + "=+=" + "="*max_name_width + "=+=" + "="*max_description_width + "=+", file=stream)
                    for f in csr.fields:
                        field = self.bit_range(f.offset, f.offset + f.size).ljust(max_field_width)

                        name = f.name.upper()
                        if hasattr(f, "start") and f.start is not None:
                            name = "{}{}".format(f.name.upper(), self.bit_range(f.start, f.size + f.start))
                        name = name.ljust(max_name_width)

                        description = f.description
                        if description is None:
                            description = ""
                        if note_pulses and f.pulse:
                            description = description + "\n\nWriting a 1 to this bit triggers the function."

                        if f.name in value_tables:
                            description += "\n" + value_tables[f.name]

                        # Multi-line descriptions leave Field/Name blank on
                        # continuation lines.
                        first_line = True
                        for d in description.splitlines():
                            if first_line:
                                print("| {} | {} | {} |".format(field, name, d.ljust(max_description_width)), file=stream)
                                first_line = False
                            else:
                                print("| {} | {} | {} |".format(" ".ljust(max_field_width), " ".ljust(max_name_width), d.ljust(max_description_width)), file=stream)
                    print("+-" + "-"*max_field_width + "-+-" + "-"*max_name_width + "-+-" + "-"*max_description_width + "-+", file=stream)
                    print("", file=stream)
|
120
litex/soc/doc/module.py
Normal file
120
litex/soc/doc/module.py
Normal file
|
@ -0,0 +1,120 @@
|
|||
# This file is Copyright (c) 2020 Sean Cross <sean@xobs.io>
|
||||
# License: BSD
|
||||
|
||||
from migen.fhdl.module import DUID
|
||||
from migen.util.misc import xdir
|
||||
|
||||
from litex.soc.interconnect.csr_eventmanager import EventManager
|
||||
from litex.soc.integration.doc import ModuleDoc
|
||||
|
||||
import textwrap
|
||||
|
||||
from .rst import print_table, print_rst
|
||||
|
||||
def gather_submodules_inner(module, depth, seen_modules, submodules):
    """Recursive worker for gather_submodules().

    Walks ``module._submodules``, recording every EventManager and ModuleDoc
    encountered into the `submodules` dict.  `seen_modules` guards against
    visiting (and recursing into) the same submodule twice.
    """
    if module is None:
        return submodules
    if depth == 0:
        # The root module itself may carry documentation.
        if isinstance(module, ModuleDoc):
            # print("{} is an instance of ModuleDoc".format(module))
            submodules["module_doc"].append(module)
    for k,v in module._submodules:
        # print("{}Submodule {} {}".format(" "*(depth*4), k, v))
        if v not in seen_modules:
            seen_modules.add(v)
            if isinstance(v, EventManager):
                # print("{}{} appears to be an EventManager".format(" "*(depth*4), k))
                submodules["event_managers"].append(v)

            if isinstance(v, ModuleDoc):
                submodules["module_doc"].append(v)

            # Recurse into this submodule's own submodules.
            gather_submodules_inner(v, depth + 1, seen_modules, submodules)
    return submodules
|
||||
|
||||
def gather_submodules(module):
    """Collect all EventManager and ModuleDoc submodules of `module`.

    Returns a dict with "event_managers" and "module_doc" lists; both are
    empty when `module` is None or has no matching submodules.
    """
    return gather_submodules_inner(
        module,
        0,       # start at the root
        set(),   # nothing visited yet
        {
            "event_managers": [],
            "module_doc": [],
        },
    )
|
||||
|
||||
class ModuleNotDocumented(Exception):
    """Indicates a Module has no documentation or sub-documentation.

    Raised by :obj:`DocumentedModule` when neither the module itself nor
    any of its documentation sections provide content.
    """
    pass
|
||||
|
||||
class DocumentedModule:
    """Multi-section Documentation of a Module"""

    def __init__(self, name, module, has_documentation=False):
        """Gather documentation sections from `module`.

        Arguments:
            name (str): Name used for titles and the output .rst filename.
            module: Object to document; may be a ModuleDoc and/or provide
                ``get_module_documentation()``.
            has_documentation (bool): Force-accept the module even when no
                sections are found (used by subclasses such as
                DocumentedInterrupts).

        Raises:
            ModuleNotDocumented: if no documentation sections were found.
        """
        self.name = name
        self.sections = []

        # The module object itself may be a documentation section.
        if isinstance(module, ModuleDoc):
            has_documentation = True
            self.sections.append(module)

        # Plus any additional sections it explicitly exposes.
        if hasattr(module, "get_module_documentation"):
            for doc in module.get_module_documentation():
                has_documentation = True
                self.sections.append(doc)

        if not has_documentation:
            raise ModuleNotDocumented()

    def print_region(self, stream, base_dir, note_pulses=False):
        """Write this module's documentation page to `stream`.

        `base_dir` and `note_pulses` are accepted for interface parity with
        DocumentedCSRRegion.print_region but are not used here.
        """
        title = "{}".format(self.name.upper())
        print(title, file=stream)
        print("=" * len(title), file=stream)
        print("", file=stream)

        # One subsection per gathered documentation section.
        for section in self.sections:
            title = textwrap.dedent(section.title())
            body = textwrap.dedent(section.body())
            print("{}".format(title), file=stream)
            print("-" * len(title), file=stream)
            print(textwrap.dedent(body), file=stream)
            print("", file=stream)
|
||||
|
||||
class DocumentedInterrupts(DocumentedModule):
    """A :obj:`DocumentedModule` that automatically documents interrupts in an SoC

    This creates a :obj:`DocumentedModule` object that prints out the contents
    of the interrupt map of an SoC.
    """
    def __init__(self, interrupts):
        # interrupts maps module name -> IRQ number.
        DocumentedModule.__init__(self, "interrupts", None, has_documentation=True)

        # Pre-build the table rows; header first, then one row per IRQ
        # linking back to the owning module's page.
        self.irq_table = [["Interrupt", "Module"]]
        for module_name, irq_no in interrupts.items():
            self.irq_table.append([str(irq_no), ":doc:`{} <{}>`".format(module_name.upper(), module_name)])

    def print_region(self, stream, base_dir, note_pulses=False):
        """Write the interrupt-controller page to `stream`.

        `base_dir` and `note_pulses` are accepted for interface parity and
        are unused here.
        """
        title = "Interrupt Controller"
        print(title, file=stream)
        print("=" * len(title), file=stream)
        print("", file=stream)

        print_rst(stream,
            """
            This device has an ``EventManager``-based interrupt
            system.  Individual modules generate `events` which are wired
            into a central interrupt controller.

            When an interrupt occurs, you should look the interrupt number up
            in the CPU-specific interrupt table and then call the relevant
            module.
            """)

        section_title = "Assigned Interrupts"
        print("{}".format(section_title), file=stream)
        print("-" * len(section_title), file=stream)
        print("", file=stream)

        print("The following interrupts are assigned on this system:", file=stream)
        print_table(self.irq_table, stream)
|
||||
|
||||
|
169
litex/soc/doc/rst.py
Normal file
169
litex/soc/doc/rst.py
Normal file
|
@ -0,0 +1,169 @@
|
|||
# This file is Copyright (c) 2020 Sean Cross <sean@xobs.io>
|
||||
# License: BSD
|
||||
|
||||
import textwrap
|
||||
|
||||
def make_table(t):
    """Make a reStructured Text grid table.

    Arguments
    ---------

    t (:obj:`list` of :obj:`list`): A list of rows in the table.
        Each row has several columns; the first row is the table header.
        Every row must have the same number of columns.

    Returns
    -------

    A string containing a reStructured Text table (with a leading and a
    trailing blank line), or just a newline if `t` is empty.
    """
    table = "\n"
    if not t:
        return table

    # Each column is as wide as its widest cell (header included).
    column_widths = [0] * len(t[0])
    for row in t:
        for i, column in enumerate(row):
            column_widths[i] = max(column_widths[i], len(column))

    def border(fill):
        # One "+---+---+"-style separator line built from `fill`.
        line = "+"
        for width in column_widths:
            line += fill + fill * width + fill + "+"
        return line + "\n"

    def cells(row):
        # One "| a | b |" content line, cells padded to the column width.
        line = "|"
        for i, column in enumerate(row):
            line += " " + column.ljust(column_widths[i]) + " |"
        return line + "\n"

    # Header row, separated from the body by an "="-filled rule.
    # Bug fix: the original used `t.pop(0)`, mutating the caller's list.
    header, *body = t
    table += border("-")
    table += cells(header)
    table += border("=")

    for row in body:
        table += cells(row)
        table += border("-")
    table += "\n"

    return table
|
||||
|
||||
def print_table(table, stream):
    """Print a reStructured Text grid table.

    Arguments
    ---------

    table (:obj:`list` of :obj:`list`): A list of rows in the table.
        Each row has several columns; the first row is the table header.
        Every row must have the same number of columns.

    stream (:obj:`io`): Destination output file.
    """
    print("", file=stream)
    if not table:
        return

    # Each column is as wide as its widest cell (header included).
    column_widths = [0] * len(table[0])
    for row in table:
        for i, column in enumerate(row):
            column_widths[i] = max(column_widths[i], len(column))

    def print_border(fill):
        # One "+---+---+"-style separator line built from `fill`.
        print("+", file=stream, end="")
        for width in column_widths:
            print(fill + fill * width + fill + "+", file=stream, end="")
        print("", file=stream)

    def print_cells(row):
        # One "| a | b |" content line, cells padded to the column width.
        print("|", file=stream, end="")
        for i, column in enumerate(row):
            print(" " + column.ljust(column_widths[i]) + " |", file=stream, end="")
        print("", file=stream)

    # Header row, separated from the body by an "="-filled rule.
    # Bug fix: the original used `table.pop(0)`, mutating the caller's list.
    header, *body = table
    print_border("-")
    print_cells(header)
    print_border("=")

    for row in body:
        print_cells(row)
        print_border("-")
    print("", file=stream)
|
||||
|
||||
def pad_first_line_if_necessary(s):
    """Indent the first line of *s* to match its second line.

    Docstring text often has an unindented first line followed by deeper
    indented continuation lines, which defeats ``textwrap.dedent()``.
    When the second line is indented more than the first, pad the first
    line so both share the same left margin. Non-strings, single-line
    strings, and strings whose first line is blank are returned unchanged.
    """
    if not isinstance(s, str):
        return s

    lines = s.split("\n")

    # Nothing to align against without a second line.
    if len(lines) < 2:
        return s

    # A blank first line means the text effectively starts on line two.
    if lines[0].strip() == "":
        return s

    # Pad line one only when line two is indented deeper.
    first, second = lines[0], lines[1]
    indent_first = len(first) - len(first.lstrip(' '))
    indent_second = len(second) - len(second.lstrip(' '))
    if indent_second > 0 and indent_second > indent_first:
        lines[0] = " " * (indent_second - indent_first) + first
        return "\n".join(lines)
    return s
|
||||
|
||||
def reflow(s, width=80):
    """Dedent and re-wrap jagged docstring text.

    The text is split into paragraphs on blank lines; each paragraph is
    dedented, stripped, and re-filled to *width* columns. Paragraph
    breaks are preserved in the output. Non-strings are returned
    unchanged.
    """
    if not isinstance(s, str):
        return s

    # Align a first line that is less indented than the second one, so
    # that textwrap.dedent() below can strip the common margin.
    lines = s.split("\n")
    if len(lines) >= 2 and lines[0].strip() != "":
        pad0 = len(lines[0]) - len(lines[0].lstrip(' '))
        pad1 = len(lines[1]) - len(lines[1].lstrip(' '))
        if pad1 > 0 and pad1 > pad0:
            lines[0] = " " * (pad1 - pad0) + lines[0]
            s = "\n".join(lines)

    # Re-fill each paragraph individually, then rejoin with blank lines.
    paragraphs = textwrap.dedent(s).split("\n\n")
    return "\n\n".join(
        textwrap.fill(textwrap.dedent(piece).strip(), width=width)
        for piece in paragraphs
    )
|
||||
|
||||
def _reflow(s, width=80):
    """Module-level alias for :func:`reflow`.

    :func:`print_rst` takes a parameter named ``reflow`` that shadows
    the module-level function, so it calls through this alias instead.
    """
    return reflow(s, width=width)
|
||||
|
||||
def print_rst(stream, s, reflow=True):
    """Print the string *s* to *stream* as reStructured Text.

    Arguments
    ---------

    stream (:obj:`io`): Destination output file.

    s (:obj:`str`): Text to print.

    reflow (:obj:`bool`): When True (the default), dedent and re-wrap
        the text via :func:`reflow` before printing. Bug fix: this flag
        was previously accepted but ignored, so the text was always
        reflowed.
    """
    print(_reflow(s) if reflow else s, file=stream)
    print("", file=stream)
|
3
litex/soc/doc/static/WaveDrom.js
vendored
Normal file
3
litex/soc/doc/static/WaveDrom.js
vendored
Normal file
File diff suppressed because one or more lines are too long
3
litex/soc/doc/static/default.js
vendored
Normal file
3
litex/soc/doc/static/default.js
vendored
Normal file
File diff suppressed because one or more lines are too long
|
@ -23,6 +23,11 @@ from litex.soc.interconnect.csr import CSRStatus
|
|||
|
||||
from litex.build.tools import generated_banner
|
||||
|
||||
from litex.soc.doc.rst import reflow
|
||||
from litex.soc.doc.module import gather_submodules, ModuleNotDocumented, DocumentedModule, DocumentedInterrupts
|
||||
from litex.soc.doc.csr import DocumentedCSRRegion
|
||||
from litex.soc.interconnect.csr import _CompoundCSR
|
||||
|
||||
# CPU files ----------------------------------------------------------------------------------------
|
||||
|
||||
def get_cpu_mak(cpu, compile_software):
|
||||
|
@ -273,3 +278,150 @@ def get_csr_csv(csr_regions={}, constants={}, mem_regions={}):
|
|||
d["memories"][name]["type"],
|
||||
)
|
||||
return r
|
||||
|
||||
# SVD Export --------------------------------------------------------------------------------------
|
||||
|
||||
def get_svd(soc, vendor="litex", name="soc", description=None):
    """Export the SoC's CSR map as a CMSIS-SVD XML document.

    Arguments
    ---------

    soc: the SoC instance; its CSR regions (via ``get_csr_regions()`` or
        the ``csr_regions`` attribute) and ``soc_interrupt_map`` are read.

    vendor (:obj:`str`): value of the SVD ``<vendor>`` element.

    name (:obj:`str`): device name, upper-cased into ``<name>``.

    description (:obj:`str` or None): optional device description,
        reflowed and embedded as CDATA when given.

    Returns
    -------

    A string containing the complete SVD document.

    NOTE(review): the leading whitespace inside the XML string literals
    below may have been collapsed by extraction; it affects only the
    pretty-printing of the generated SVD, not its structure — confirm
    against the upstream file.
    """
    def sub_csr_bit_range(busword, csr, offset):
        # For a CSR wider than one bus word, compute which bits of the
        # logical CSR the `offset`-th bus word covers. Words are stored
        # most-significant first, hence the reversed index `i`.
        # Returns (origin bit, number of bits - 1, upper-cased word name).
        nwords = (csr.size + busword - 1)//busword
        i = nwords - offset - 1
        nbits = min(csr.size - i*busword, busword) - 1
        name = (csr.name + str(i) if nwords > 1 else csr.name).upper()
        origin = i*busword
        return (origin, nbits, name)

    def print_svd_register(csr, csr_address, description, length, svd):
        # Append one <register> element (including its <fields>) to `svd`.
        svd.append(' <register>')
        svd.append(' <name>{}</name>'.format(csr.short_numbered_name))
        if description is not None:
            svd.append(' <description><![CDATA[{}]]></description>'.format(description))
        svd.append(' <addressOffset>0x{:04x}</addressOffset>'.format(csr_address))
        svd.append(' <resetValue>0x{:02x}</resetValue>'.format(csr.reset_value))
        svd.append(' <size>{}</size>'.format(length))
        svd.append(' <access>{}</access>'.format(csr.access))
        csr_address = csr_address + 4
        svd.append(' <fields>')
        if hasattr(csr, "fields") and len(csr.fields) > 0:
            # Documented bit fields: one <field> per declared field.
            for field in csr.fields:
                svd.append(' <field>')
                svd.append(' <name>{}</name>'.format(field.name))
                svd.append(' <msb>{}</msb>'.format(field.offset +
                    field.size - 1))
                svd.append(' <bitRange>[{}:{}]</bitRange>'.format(
                    field.offset + field.size - 1, field.offset))
                svd.append(' <lsb>{}</lsb>'.format(field.offset))
                svd.append(' <description><![CDATA[{}]]></description>'.format(
                    reflow(field.description)))
                svd.append(' </field>')
        else:
            # No declared fields: synthesize one field spanning the whole
            # register.
            field_size = csr.size
            field_name = csr.short_name.lower()
            # Strip off "ev_" from eventmanager fields
            if field_name == "ev_enable":
                field_name = "enable"
            elif field_name == "ev_pending":
                field_name = "pending"
            elif field_name == "ev_status":
                field_name = "status"
            svd.append(' <field>')
            svd.append(' <name>{}</name>'.format(field_name))
            svd.append(' <msb>{}</msb>'.format(field_size - 1))
            svd.append(' <bitRange>[{}:{}]</bitRange>'.format(field_size - 1, 0))
            svd.append(' <lsb>{}</lsb>'.format(0))
            svd.append(' </field>')
        svd.append(' </fields>')
        svd.append(' </register>')

    # Interrupt map: module name -> IRQ number, sorted for stable output.
    interrupts = {}
    for csr, irq in sorted(soc.soc_interrupt_map.items()):
        interrupts[csr] = irq

    documented_regions = []

    # Collect CSR regions; support both the older get_csr_regions() API
    # and the newer csr_regions attribute.
    raw_regions = []
    if hasattr(soc, "get_csr_regions"):
        raw_regions = soc.get_csr_regions()
    else:
        for region_name, region in soc.csr_regions.items():
            raw_regions.append((region_name, region.origin,
                                region.busword, region.obj))
    for csr_region in raw_regions:
        documented_regions.append(DocumentedCSRRegion(
            csr_region, csr_data_width=soc.csr_data_width))

    # Document header and device-wide defaults.
    svd = []
    svd.append('<?xml version="1.0" encoding="utf-8"?>')
    svd.append('')
    svd.append('<device schemaVersion="1.1" xmlns:xs="http://www.w3.org/2001/XMLSchema-instance" xs:noNamespaceSchemaLocation="CMSIS-SVD.xsd" >')
    svd.append(' <vendor>{}</vendor>'.format(vendor))
    svd.append(' <name>{}</name>'.format(name.upper()))
    if description is not None:
        svd.append(' <description><![CDATA[{}]]></description>'.format(reflow(description)))
    svd.append('')
    svd.append(' <addressUnitBits>8</addressUnitBits>')
    svd.append(' <width>32</width>')
    svd.append(' <size>32</size>')
    svd.append(' <access>read-write</access>')
    svd.append(' <resetValue>0x00000000</resetValue>')
    svd.append(' <resetMask>0xFFFFFFFF</resetMask>')
    svd.append('')
    svd.append(' <peripherals>')

    # One <peripheral> per CSR region.
    for region in documented_regions:
        csr_address = 0
        svd.append(' <peripheral>')
        svd.append(' <name>{}</name>'.format(region.name.upper()))
        svd.append(' <baseAddress>0x{:08X}</baseAddress>'.format(region.origin))
        svd.append(' <groupName>{}</groupName>'.format(region.name.upper()))
        if len(region.sections) > 0:
            # Use the region's first documentation section as the
            # peripheral description.
            svd.append(' <description><![CDATA[{}]]></description>'.format(
                reflow(region.sections[0].body())))
        svd.append(' <registers>')
        for csr in region.csrs:
            description = None
            if hasattr(csr, "description"):
                description = csr.description
            if isinstance(csr, _CompoundCSR) and len(csr.simple_csrs) > 1:
                # A compound CSR spans several bus words: emit one
                # register per word, annotating which bits of the logical
                # CSR each word holds. The full description is attached
                # only to the first word.
                is_first = True
                for i in range(len(csr.simple_csrs)):
                    (start, length, name) = sub_csr_bit_range(
                        region.busword, csr, i)
                    if length > 0:
                        bits_str = "Bits {}-{} of `{}`.".format(
                            start, start+length, csr.name)
                    else:
                        bits_str = "Bit {} of `{}`.".format(
                            start, csr.name)
                    if is_first:
                        if description is not None:
                            print_svd_register(
                                csr.simple_csrs[i], csr_address, bits_str + " " + description, length, svd)
                        else:
                            print_svd_register(
                                csr.simple_csrs[i], csr_address, bits_str, length, svd)
                        is_first = False
                    else:
                        print_svd_register(
                            csr.simple_csrs[i], csr_address, bits_str, length, svd)
                    csr_address = csr_address + 4
            else:
                # Single-word CSR: round its size up to a whole bus word.
                length = ((csr.size + region.busword - 1) //
                          region.busword) * region.busword
                print_svd_register(
                    csr, csr_address, description, length, svd)
                csr_address = csr_address + 4
        svd.append(' </registers>')
        # Address block covering all registers emitted above.
        svd.append(' <addressBlock>')
        svd.append(' <offset>0</offset>')
        svd.append(' <size>0x{:x}</size>'.format(csr_address))
        svd.append(' <usage>registers</usage>')
        svd.append(' </addressBlock>')
        if region.name in interrupts:
            svd.append(' <interrupt>')
            svd.append(' <name>{}</name>'.format(region.name))
            svd.append(' <value>{}</value>'.format(interrupts[region.name]))
            svd.append(' </interrupt>')
        svd.append(' </peripheral>')
    svd.append(' </peripherals>')
    svd.append('</device>')
    return "\n".join(svd)
|
||||
|
|
Loading…
Reference in a new issue