f4pga/utils: all python scripts moved from xc7 arch-defs tarballs
Signed-off-by: Unai Martinez-Corral <umartinezcorral@antmicro.com>
This commit is contained in: parent de9ed1f3db, commit 873ea7e61b
|
@@ -91,7 +91,7 @@ xc7a50t: &xc7
 params:
 stage_name: ioplace
 interpreter: '${python3}'
-script: '${shareDir}/scripts/prjxray_create_ioplace.py'
+script: ['-m', 'f4pga.utils.xc7.create_ioplace']
 outputs:
 io_place:
 mode: stdout
@@ -107,7 +107,7 @@ xc7a50t: &xc7
 params:
 stage_name: place_constraints
 interpreter: '${python3}'
-script: '${shareDir}/scripts/prjxray_create_place_constraints.py'
+script: ['-m', 'f4pga.utils.xc7.create_place_constraints']
 outputs:
 place_constraints:
 mode: stdout
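For context: with interpreter '${python3}' and the list-style script entry, these stages now run installed modules instead of scripts under ${shareDir}/scripts. A minimal sketch of the resulting ioplace invocation (file names are placeholders; the flags come from the argparse definition of create_ioplace further below):

import subprocess

# Hypothetical inputs: top.eblif, top.net and pinmap.csv; the io.place text is
# captured from stdout, matching the 'mode: stdout' output above.
with open("top.ioplace", "w") as out:
    subprocess.run(
        ["python3", "-m", "f4pga.utils.xc7.create_ioplace",
         "--blif", "top.eblif", "--net", "top.net", "--map", "pinmap.csv"],
        stdout=out,
        check=True,
    )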
@ -0,0 +1,96 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2019-2022 F4PGA Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Top level keywords defining the begin of a cell definition.
|
||||
top_level = [
|
||||
"model",
|
||||
"inputs",
|
||||
"outputs",
|
||||
"names",
|
||||
"latch",
|
||||
"subckt",
|
||||
]
|
||||
|
||||
# Keywords defining cell attributes / parameters. Those can be specified for
|
||||
# each cell multiple times. Parameter names and values are stored in a dict
|
||||
# under the parsed blif data.
|
||||
#
|
||||
# For example: the construct ".param MODE SYNC" will add to the dict under
|
||||
# the key "param" entry "MODE":"SYNC".
|
||||
#
|
||||
sub_level = [
|
||||
"attr",
|
||||
"param",
|
||||
]
|
||||
|
||||
|
||||
def parse_blif(f):
|
||||
current = None
|
||||
|
||||
data = {}
|
||||
|
||||
def add(d):
|
||||
if d["type"] not in data:
|
||||
data[d["type"]] = []
|
||||
data[d["type"]].append(d)
|
||||
|
||||
current = None
|
||||
for oline in f:
|
||||
line = oline
|
||||
if "#" in line:
|
||||
line = line[: line.find("#")]
|
||||
line = line.strip()
|
||||
if not line:
|
||||
continue
|
||||
|
||||
if line.startswith("."):
|
||||
args = line.split(" ", maxsplit=1)
|
||||
if len(args) < 2:
|
||||
args.append("")
|
||||
|
||||
ctype = args.pop(0)
|
||||
assert ctype.startswith("."), ctype
|
||||
ctype = ctype[1:]
|
||||
|
||||
if ctype in top_level:
|
||||
if current:
|
||||
add(current)
|
||||
current = {
|
||||
"type": ctype,
|
||||
"args": args[-1].split(),
|
||||
"data": [],
|
||||
}
|
||||
elif ctype in sub_level:
|
||||
if ctype not in current:
|
||||
current[ctype] = {}
|
||||
key, value = args[-1].split(maxsplit=1)
|
||||
current[ctype][key] = value
|
||||
else:
|
||||
current[ctype] = args[-1].split()
|
||||
continue
|
||||
current["data"].append(line.strip().split())
|
||||
|
||||
if current:
|
||||
add(current)
|
||||
|
||||
assert len(data["inputs"]) == 1
|
||||
data["inputs"] = data["inputs"][0]
|
||||
assert len(data["outputs"]) == 1
|
||||
data["outputs"] = data["outputs"][0]
|
||||
return data
|
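A minimal usage sketch of parse_blif; the eBLIF text below is a made-up example, not part of the commit:

from io import StringIO

eblif = StringIO("""\
.model top
.inputs a b
.outputs y
.names a b y  # 2-input AND
11 1
.end
""")

data = parse_blif(eblif)
print(data["model"][0]["args"])  # ['top']
print(data["inputs"]["args"])    # ['a', 'b']
print(data["names"][0]["data"])  # [['11', '1']]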
|
@ -0,0 +1,63 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2019-2022 F4PGA Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
"""
|
||||
Supported PCF commands:
|
||||
* set_io <net> <pad> - constrain a given <net> to a given physical <pad> in eFPGA pinout.
|
||||
* set_clk <pin> <net> - constrain a given global clock <pin> to a given <net>
|
||||
Every tile where <net> is present will be constrained to use a given global clock.
|
||||
"""
|
||||
|
||||
from collections import namedtuple
|
||||
import re
|
||||
|
||||
PcfIoConstraint = namedtuple("PcfIoConstraint", "net pad line_str line_num")
|
||||
PcfClkConstraint = namedtuple("PcfClkConstraint", "pin net")
|
||||
|
||||
|
||||
def parse_simple_pcf(f):
|
||||
"""Parse a simple PCF file object and yield PcfIoConstraint objects."""
|
||||
for line_number, line in enumerate(f):
|
||||
line_number += 1
|
||||
|
||||
# Remove comments.
|
||||
args = re.sub(r"#.*", "", line.strip()).split()
|
||||
|
||||
if not args:
|
||||
continue
|
||||
|
||||
# Ignore option arguments (those starting with "-").
|
||||
args = [arg for arg in args if arg[0] != "-"]
|
||||
assert len(args) == 3, args
|
||||
|
||||
if args[0] == "set_io":
|
||||
|
||||
yield PcfIoConstraint(
|
||||
net=args[1],
|
||||
pad=args[2],
|
||||
line_str=line.strip(),
|
||||
line_num=line_number,
|
||||
)
|
||||
|
||||
if args[0] == "set_clk":
|
||||
|
||||
yield PcfClkConstraint(
|
||||
pin=args[1],
|
||||
net=args[2],
|
||||
)
|
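A short usage sketch of parse_simple_pcf with a made-up PCF; set_io lines yield PcfIoConstraint and set_clk lines yield PcfClkConstraint:

from io import StringIO

pcf = StringIO("""\
# made-up constraints
set_io led D1
set_clk clk0 sys_clk
""")

for constraint in parse_simple_pcf(pcf):
    print(constraint)
# PcfIoConstraint(net='led', pad='D1', line_str='set_io led D1', line_num=2)
# PcfClkConstraint(pin='clk0', net='sys_clk')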
|
@ -0,0 +1,264 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2019-2022 F4PGA Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import print_function
|
||||
from collections import OrderedDict, namedtuple
|
||||
import itertools
|
||||
import re
|
||||
import lxml.etree as ET
|
||||
|
||||
from f4pga.utils.eblif import parse_blif
|
||||
|
||||
IoConstraint = namedtuple("IoConstraint", "name x y z comment")
|
||||
|
||||
HEADER_TEMPLATE = """\
|
||||
#{name:<{nl}} x y z pcf_line
|
||||
#{s:-^{nl}} -- -- - ----"""
|
||||
|
||||
CONSTRAINT_TEMPLATE = "{name:<{nl}} {x: 3} {y: 3} {z: 2} # {comment}"
|
||||
INOUT_REGEX = re.compile(r"^(.+)(_\$inp|_\$out)(.*)$")
|
||||
NETNAME_REGEX = re.compile(r"(.+?)(\[[0-9]+\]$|$)")
|
||||
|
||||
|
||||
class IoPlace(object):
|
||||
def __init__(self):
|
||||
self.constraints = OrderedDict()
|
||||
self.inputs = set()
|
||||
self.outputs = set()
|
||||
self.net_to_block = None
|
||||
self.net_map = {}
|
||||
self.block_to_inst = {}
|
||||
self.inout_nets = set()
|
||||
self.net_to_pad = set()
|
||||
self.net_file_io = set()
|
||||
|
||||
def read_io_loc_pairs(self, blif):
|
||||
"""
|
||||
Read IO_LOC_PAIRS parameters from the eblif, which carry the information about
|
||||
which package pin a given top-level port is constrained to, e.g. IO_LOC_PAIRS = "portA:D1"
|
||||
In case of differential inputs/outputs, the parameter holds two pairs,
|
||||
i.e. IO_LOC_PAIRS = "portA_p:D2,portA_n:D4"
|
||||
"""
|
||||
if "subckt" not in blif:
|
||||
return
|
||||
for attr in blif["subckt"]:
|
||||
if "param" not in attr:
|
||||
continue
|
||||
if "IO_LOC_PAIRS" in attr["param"]:
|
||||
locs = attr["param"]["IO_LOC_PAIRS"][1:-1].split(",")
|
||||
if "NONE" in locs:
|
||||
continue
|
||||
for loc in locs:
|
||||
net, pad = loc.split(":")
|
||||
self.net_to_pad.add((net, pad))
|
||||
|
||||
def read_io_list_from_eblif(self, eblif_file):
|
||||
blif = parse_blif(eblif_file)
|
||||
|
||||
self.inputs = set(blif["inputs"]["args"])
|
||||
self.outputs = set(blif["outputs"]["args"])
|
||||
|
||||
# Build a net name map that maps products of an inout port split into
|
||||
# their former name.
|
||||
#
|
||||
# For example, an inout port 'A' is split into 'A_$inp' and 'A_$out',
|
||||
# port B[2] into 'B_$inp[2]' and 'B_$out[2]'.
|
||||
self.net_map = {}
|
||||
self.inout_nets = set()
|
||||
for net in itertools.chain(self.inputs, self.outputs):
|
||||
match = INOUT_REGEX.match(net)
|
||||
if match:
|
||||
alias = match.group(1) + match.group(3)
|
||||
self.inout_nets.add(alias)
|
||||
self.net_map[net] = alias
|
||||
else:
|
||||
self.net_map[net] = net
|
||||
self.read_io_loc_pairs(blif)
|
||||
|
||||
def load_block_names_from_net_file(self, net_file):
|
||||
"""
|
||||
.place files expect top-level block (cluster) names, not net names, so
|
||||
build a mapping from net names to block names from the .net file.
|
||||
"""
|
||||
net_xml = ET.parse(net_file)
|
||||
net_root = net_xml.getroot()
|
||||
self.net_to_block = {}
|
||||
|
||||
for block in net_root.xpath("//block"):
|
||||
instance = block.attrib["instance"]
|
||||
if instance != "inpad[0]" and instance != "outpad[0]":
|
||||
continue
|
||||
|
||||
top_block = block.getparent()
|
||||
assert top_block is not None
|
||||
while top_block.getparent() is not net_root:
|
||||
assert top_block is not None
|
||||
top_block = top_block.getparent()
|
||||
self.net_to_block[block.get("name")] = top_block.get("name")
|
||||
|
||||
# Loop over all top-level blocks. Store block name to its instance
|
||||
# correspondences.
|
||||
for block_xml in net_root.findall("block"):
|
||||
name = block_xml.attrib["name"]
|
||||
inst = block_xml.attrib["instance"]
|
||||
|
||||
assert name not in self.block_to_inst, block_xml.attrib
|
||||
self.block_to_inst[name] = inst
|
||||
|
||||
def load_net_file_ios(self, net_file):
|
||||
"""
|
||||
Loads input and outputs net names from the netlist file.
|
||||
"""
|
||||
|
||||
def get_ios(net_root, io_types):
|
||||
"Get the top level io signals from the netlist XML root."
|
||||
for io_type in io_types:
|
||||
io = net_root.xpath("/block/{}/text ()".format(io_type))
|
||||
|
||||
if len(io) == 1:
|
||||
yield io[0]
|
||||
|
||||
net_xml = ET.parse(net_file)
|
||||
net_root = net_xml.getroot()
|
||||
|
||||
for io_line in get_ios(net_root, ["inputs", "outputs"]):
|
||||
io_list = io_line.split(" ")
|
||||
for io in io_list:
|
||||
self.net_file_io.add(io.replace("out:", ""))
|
||||
|
||||
def get_top_level_block_instance_for_net(self, net_name):
|
||||
"""
|
||||
Returns a name of the top-level block instance for the given net
|
||||
name.
|
||||
"""
|
||||
assert self.is_net(net_name)
|
||||
|
||||
# VPR prefixes output constraints with "out:"
|
||||
if net_name in self.outputs:
|
||||
net_name = "out:" + net_name
|
||||
|
||||
# This is an inout net
|
||||
if net_name in self.inout_nets:
|
||||
block_names = set()
|
||||
|
||||
for prefix, suffix in zip(["", "out:"], ["_$inp", "_$out"]):
|
||||
match = NETNAME_REGEX.match(net_name)
|
||||
name = prefix + match.group(1) + suffix + match.group(2)
|
||||
block_names.add(self.net_to_block[name])
|
||||
|
||||
# Both parts of the net should point to the same block
|
||||
assert len(block_names) == 1, (net_name, block_names)
|
||||
return self.block_to_inst[list(block_names)[0]]
|
||||
|
||||
# A regular net
|
||||
else:
|
||||
if net_name in self.net_to_block:
|
||||
block_name = self.net_to_block[net_name]
|
||||
return self.block_to_inst[block_name]
|
||||
else:
|
||||
return None
|
||||
|
||||
def constrain_net(self, net_name, loc, comment=""):
|
||||
assert len(loc) == 3
|
||||
assert net_name not in self.constraints
|
||||
|
||||
assert self.is_net(net_name), "net {} not in eblif".format(net_name)
|
||||
|
||||
# VPR prefixes output constraints with "out:"
|
||||
if net_name in self.outputs:
|
||||
net_name = "out:" + net_name
|
||||
|
||||
# This is an inout net
|
||||
if net_name in self.inout_nets:
|
||||
for prefix, suffix in zip(["", "out:"], ["_$inp", "_$out"]):
|
||||
|
||||
match = NETNAME_REGEX.match(net_name)
|
||||
name = prefix + match.group(1) + suffix + match.group(2)
|
||||
|
||||
self.constraints[name] = IoConstraint(
|
||||
name=name,
|
||||
x=loc[0],
|
||||
y=loc[1],
|
||||
z=loc[2],
|
||||
comment=comment,
|
||||
)
|
||||
|
||||
# A regular net
|
||||
else:
|
||||
self.constraints[net_name] = IoConstraint(
|
||||
name=net_name,
|
||||
x=loc[0],
|
||||
y=loc[1],
|
||||
z=loc[2],
|
||||
comment=comment,
|
||||
)
|
||||
|
||||
def output_io_place(self, f):
|
||||
max_name_length = max(len(c.name) for c in self.constraints.values())
|
||||
print(HEADER_TEMPLATE.format(name="Block Name", nl=max_name_length, s=""), file=f)
|
||||
|
||||
constrained_blocks = {}
|
||||
|
||||
for vpr_net, constraint in self.constraints.items():
|
||||
name = constraint.name
|
||||
name = self.net_to_block.get(name) if self.net_to_block else name
|
||||
|
||||
# This block is already constrained, check if there is no
|
||||
# conflict there.
|
||||
if name in constrained_blocks:
|
||||
existing = constrained_blocks[name]
|
||||
|
||||
if existing.x != constraint.x or existing.y != constraint.y or existing.z != constraint.z:
|
||||
|
||||
print("Error: block '{}' has multiple conflicting constraints!".format(name))
|
||||
print("", constrained_blocks[name])
|
||||
print("", constraint)
|
||||
exit(-1)
|
||||
|
||||
# Don't write the second constraint.
|
||||
continue
|
||||
|
||||
# omit if no corresponding block name for the net
|
||||
if name is not None:
|
||||
print(
|
||||
CONSTRAINT_TEMPLATE.format(
|
||||
name=name,
|
||||
nl=max_name_length,
|
||||
x=constraint.x,
|
||||
y=constraint.y,
|
||||
z=constraint.z,
|
||||
comment=constraint.comment,
|
||||
),
|
||||
file=f,
|
||||
)
|
||||
|
||||
# Add to constrained block list
|
||||
constrained_blocks[name] = constraint
|
||||
|
||||
def is_net(self, net):
|
||||
return net in self.net_map.values()
|
||||
|
||||
def is_net_packed(self, net):
|
||||
return net in self.net_file_io
|
||||
|
||||
def get_nets(self):
|
||||
for net in self.inputs:
|
||||
yield self.net_map[net]
|
||||
for net in self.outputs:
|
||||
yield self.net_map[net]
|
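A small sketch of the inout-net renaming that read_io_list_from_eblif performs with INOUT_REGEX; the net names below are made up:

import re

INOUT_REGEX = re.compile(r"^(.+)(_\$inp|_\$out)(.*)$")  # same pattern as in the class above

for net in ["A_$inp", "A_$out", "B_$inp[2]", "clk"]:
    m = INOUT_REGEX.match(net)
    alias = m.group(1) + m.group(3) if m else net
    print(net, "->", alias)
# A_$inp    -> A
# A_$out    -> A
# B_$inp[2] -> B[2]
# clk       -> clk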
|
@ -0,0 +1,175 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2019-2022 F4PGA Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
|
||||
"""
|
||||
Convert a PCF file into a VPR io.place file.
|
||||
"""
|
||||
|
||||
|
||||
from argparse import ArgumentParser, FileType
|
||||
from pathlib import Path
|
||||
from csv import DictReader as csv_DictReader
|
||||
from sys import stdout, stderr, exit as sys_exit
|
||||
from json import dump as json_dump, load as json_load
|
||||
|
||||
from f4pga.utils.vpr_io_place import IoPlace
|
||||
from f4pga.utils.pcf import parse_simple_pcf
|
||||
|
||||
|
||||
def p_main(blif, map, net, pcf=None, output=stdout, iostandard_defs_file=None, iostandard="LVCMOS33", drive=12):
|
||||
io_place = IoPlace()
|
||||
io_place.read_io_list_from_eblif(blif)
|
||||
io_place.load_block_names_from_net_file(net)
|
||||
|
||||
# Map of pad names to VPR locations.
|
||||
pad_map = {}
|
||||
|
||||
for pin_map_entry in csv_DictReader(map):
|
||||
pad_map[pin_map_entry["name"]] = (
|
||||
(
|
||||
int(pin_map_entry["x"]),
|
||||
int(pin_map_entry["y"]),
|
||||
int(pin_map_entry["z"]),
|
||||
),
|
||||
pin_map_entry["is_output"],
|
||||
pin_map_entry["iob"],
|
||||
pin_map_entry["real_io_assoc"],
|
||||
)
|
||||
|
||||
iostandard_defs = {}
|
||||
|
||||
# Load iostandard constraints. This is a temporary workaround that allows
|
||||
# passing them into fasm2bels. As soon as there is support for XDC, this
|
||||
# will not be needed anymore.
|
||||
# If there is a JSON file with the same name as the PCF file then it is
|
||||
# loaded and used as the iostandard constraint source, NOT for the design but
|
||||
# to be used in fasm2bels.
|
||||
iostandard_constraints = {}
|
||||
|
||||
if pcf is not None:
|
||||
fname = Path(pcf.name.replace(".pcf", ".json"))
|
||||
if fname.is_file():
|
||||
with fname.open("r") as fp:
|
||||
iostandard_constraints = json_load(fp)
|
||||
net_to_pad = io_place.net_to_pad
|
||||
if pcf is not None:
|
||||
net_to_pad |= set((constr.net, constr.pad) for constr in parse_simple_pcf(pcf))
|
||||
# Check for conflicting pad constraints
|
||||
net_to_pad_map = dict()
|
||||
for (net, pad) in net_to_pad:
|
||||
if net not in net_to_pad_map:
|
||||
net_to_pad_map[net] = pad
|
||||
elif pad != net_to_pad_map[net]:
|
||||
print(
|
||||
f"ERROR: Conflicting pad constraints for net {net}:\n{pad}\n{net_to_pad_map[net]}",
|
||||
file=stderr,
|
||||
)
|
||||
sys_exit(1)
|
||||
|
||||
# Constrain nets
|
||||
for net, pad in net_to_pad:
|
||||
if not io_place.is_net(net):
|
||||
nets = "\n".join(io_place.get_nets())
|
||||
print(
|
||||
f"ERROR: Constrained net {net} is not in available netlist:\n{nets}",
|
||||
file=stderr,
|
||||
)
|
||||
sys_exit(1)
|
||||
|
||||
if pad not in pad_map:
|
||||
pads = "\n".join(sorted(pad_map.keys()))
|
||||
print(
|
||||
f"ERROR: Constrained pad {pad} is not in available pad map:\n{pads}",
|
||||
file=stderr,
|
||||
)
|
||||
sys_exit(1)
|
||||
|
||||
loc, is_output, iob, real_io_assoc = pad_map[pad]
|
||||
|
||||
io_place.constrain_net(net_name=net, loc=loc, comment="set_property LOC {} [get_ports {{{}}}]".format(pad, net))
|
||||
if real_io_assoc == "True":
|
||||
iostandard_defs[iob] = (
|
||||
iostandard_constraints[pad]
|
||||
if pad in iostandard_constraints
|
||||
else ({"DRIVE": drive, "IOSTANDARD": iostandard} if is_output else {"IOSTANDARD": iostandard})
|
||||
)
|
||||
|
||||
io_place.output_io_place(output)
|
||||
|
||||
# Write iostandard definitions
|
||||
if iostandard_defs_file is not None:
|
||||
with Path(iostandard_defs_file).open("w") as f:
|
||||
json_dump(iostandard_defs, f, indent=2)
|
||||
|
||||
|
||||
def main(
|
||||
blif,
|
||||
map,
|
||||
net,
|
||||
pcf=None,
|
||||
output=None,
|
||||
iostandard_defs_file=None,
|
||||
iostandard="LVCMOS33",
|
||||
drive=12,
|
||||
):
|
||||
p_main(
|
||||
blif=Path(blif).open("r"),
|
||||
map=Path(map).open("r"),
|
||||
net=Path(net).open("r"),
|
||||
pcf=None if pcf is None else Path(pcf).open("r"),
|
||||
output=stdout if output is None else Path(output).open("w"),
|
||||
iostandard_defs_file=iostandard_defs_file,
|
||||
iostandard=iostandard,
|
||||
drive=drive,
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = ArgumentParser(description="Convert a PCF file into a VPR io.place file.")
|
||||
parser.add_argument("--pcf", "-p", "-P", type=FileType("r"), required=False, help="PCF input file")
|
||||
parser.add_argument("--blif", "-b", type=FileType("r"), required=True, help="BLIF / eBLIF file")
|
||||
parser.add_argument("--map", "-m", "-M", type=FileType("r"), required=True, help="Pin map CSV file")
|
||||
parser.add_argument("--output", "-o", "-O", type=FileType("w"), default=stdout, help="The output io.place file")
|
||||
parser.add_argument("--iostandard_defs", help="(optional) Output IOSTANDARD def file")
|
||||
parser.add_argument(
|
||||
"--iostandard",
|
||||
default="LVCMOS33",
|
||||
help="Default IOSTANDARD to use for pins",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--drive",
|
||||
type=int,
|
||||
default=12,
|
||||
help="Default drive to use for pins",
|
||||
)
|
||||
parser.add_argument("--net", "-n", type=FileType("r"), required=True, help="top.net file")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
p_main(
|
||||
blif=args.blif,
|
||||
map=args.map,
|
||||
net=args.net,
|
||||
pcf=args.pcf,
|
||||
output=args.output,
|
||||
iostandard_defs_file=args.iostandard_defs,
|
||||
iostandard=args.iostandard,
|
||||
drive=args.drive,
|
||||
)
|
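The pad-map loop in p_main expects a pinmap CSV with name/x/y/z/is_output/iob/real_io_assoc columns; a made-up two-row example and how csv.DictReader consumes it:

from csv import DictReader
from io import StringIO

# Hypothetical pinmap rows; real files ship with the architecture packages.
pinmap_csv = StringIO("""\
name,x,y,z,is_output,iob,real_io_assoc
D1,12,26,0,False,IOB_X0Y12,True
E3,12,27,0,False,IOB_X0Y13,True
""")

for row in DictReader(pinmap_csv):
    print(row["name"], (int(row["x"]), int(row["y"]), int(row["z"])), row["iob"])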
(File diff suppressed because it is too large.)
|
@ -0,0 +1,609 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2019-2022 F4PGA Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
"""
|
||||
Script for addressing CARRY4 output congestion in elaborated netlists.
|
||||
|
||||
Usage:
|
||||
|
||||
python3 fix_carry.py < input-netlist-json > output-netlist-json
|
||||
|
||||
Description:
|
||||
|
||||
In the 7-series SLICEL (and SLICEM) sites, there can be output congestion
|
||||
if both the CO and O of the CARRY4 are used. This congestion can be
|
||||
avoided by using a transparent/open latch or register on the output of the
|
||||
CARRY4.
|
||||
|
||||
VPR does not currently support either of those options, so for now, if
|
||||
both CO and O are used, the CO output is converted into a LUT equation to
|
||||
recompute the CO output from O, DI and S. See carry_map.v and
|
||||
clean_carry_map.v for details.
|
||||
|
||||
If VPR could emit the transparent/open latch on output congestion, this
|
||||
would no longer be required. The major problem with transparent latch
|
||||
support is that it requires constants to be routed to the G/GE/CLR/PRE
|
||||
ports, which VPR cannot express as a result of packing.
|
||||
|
||||
This script identifies CARRY4 chains in the netlist, identifies if there
|
||||
is output congestion on the O and CO ports, and marks the congestion by
|
||||
changing CARRY_CO_DIRECT (e.g. directly use the CO port) to CARRY_CO_LUT
|
||||
(compute the CO value using a LUT equation).
|
||||
|
||||
|
||||
Diagram showing one row of the 7-series CLE, focusing on O/CO congestion.
|
||||
This diagram shows that if both the O and CO outputs are needed, one must
|
||||
pass through the flip flop (xFF in the diagram).
|
||||
|
||||
CLE Row
|
||||
|
||||
+--------------------------------------------------------------------------+
|
||||
| |
|
||||
| |
|
||||
| +---+ |
|
||||
| | + |
|
||||
| | + |
|
||||
| +-------->+ O + |
|
||||
| CO CHAIN | | + |
|
||||
| | | +---------------------> xMUX
|
||||
| ^ | +---->+ CO + |
|
||||
| | | | | + |
|
||||
| | | | | + |
|
||||
| +---------+----------+ | | | + |
|
||||
| | | | | +---+ |
|
||||
| | CARRY ROW | | | |
|
||||
| +--->+ S O +--------+ | xOUTMUX |
|
||||
| | | | | |
|
||||
| | | + | |
|
||||
| +--->+ DI CO +-------+o+--+ |
|
||||
| | CI CHAIN | + | |
|
||||
| | | | | |
|
||||
| +---------+----------+ | | xFFMUX |
|
||||
| ^ | | |
|
||||
| | | | +---+ |
|
||||
| + | | | + |
|
||||
| | + | + +-----------+ |
|
||||
| +--+o+--->+ O + | | |
|
||||
| + | + | xFF | |
|
||||
| | | +->--D---- Q +------> xQ
|
||||
| | | + | | |
|
||||
| +---->+ CO + | | |
|
||||
| | + +-----------+ |
|
||||
| | + |
|
||||
| +---+ |
|
||||
| |
|
||||
| |
|
||||
+--------------------------------------------------------------------------+
|
||||
|
||||
|
||||
This script operates on a slightly different cell structure than a plain CARRY4.
|
||||
carry_map.v converts the CARRY4 into:
|
||||
|
||||
+------------------+ +-----------------+
|
||||
| | | |
|
||||
| CO3 +->+ CARRY_CO_DIRECT |
|
||||
| | | |
|
||||
| DI3 | +-----------------+
|
||||
| |
|
||||
| S3 O3 |
|
||||
| |
|
||||
| DI2 | +-----------------+
|
||||
| | | |
|
||||
| S2 CO2 +->+ CARRY_CO_DIRECT |
|
||||
| | | |
|
||||
| DI1 | +-----------------+
|
||||
| |
|
||||
| S1 O2 |
|
||||
| CARRY4 |
|
||||
| DI0 (chained) | +-----------------+
|
||||
| | | |
|
||||
| S0 CO1 +->+ CARRY_CO_DIRECT |
|
||||
| | | |
|
||||
| CYINIT | +-----------------+
|
||||
| |
|
||||
+-----------------+ | O1 |
|
||||
| | | |
|
||||
+->+ CARRY_COUT_PLUG +->+ CI | +-----------------+
|
||||
| | | | | | |
|
||||
| +-----------------+ | CO0 +->+ CARRY_CO_DIRECT |
|
||||
| | | | |
|
||||
| | | +-----------------+
|
||||
+-------------------+ | |
|
||||
| | O0 |
|
||||
+------------------+ +-----------------+ | | |
|
||||
| | | | | +------------------+
|
||||
| CO3 +->+ CARRY_CO_DIRECT +-+
|
||||
| | | |
|
||||
| DI3 | +-----------------+
|
||||
| |
|
||||
| S3 O3 |
|
||||
| |
|
||||
| DI2 | +-----------------+
|
||||
| | | |
|
||||
| S2 CO2 +->+ CARRY_CO_DIRECT |
|
||||
| | | |
|
||||
| DI1 | +-----------------+
|
||||
| |
|
||||
| S1 O2 |
|
||||
| CARRY4 |
|
||||
| DI0 (root) | +-----------------+
|
||||
| | | |
|
||||
| S0 CO1 +->+ CARRY_CO_DIRECT |
|
||||
| | | |
|
||||
| CYINIT | +-----------------+
|
||||
| |
|
||||
| O1 |
|
||||
| |
|
||||
| CI | +-----------------+
|
||||
| | | |
|
||||
| CO0 +->+ CARRY_CO_DIRECT |
|
||||
| | | |
|
||||
| | +-----------------+
|
||||
| |
|
||||
| O0 |
|
||||
| |
|
||||
+------------------+
|
||||
|
||||
Each CARRY4 spans the 4 rows of the SLICEL/SLICEM.
|
||||
Row 0 is the S0/DI0/O0/CO0 ports, row 1 is S1/DI1/O1/CO1 ports, etc.
|
||||
|
||||
So there are five cases the script has to handle:
|
||||
|
||||
- No congestion is present between O and CO ->
|
||||
|
||||
Do nothing.
|
||||
|
||||
- Congestion is present on rows 0-2 and the row above is in use ->
|
||||
|
||||
Change CARRY_CO_DIRECT to CARRY_CO_LUT.
|
||||
|
||||
Routing and LUT delays are incurred in this case.
|
||||
|
||||
- Congestion is present on rows 0-2 and the row above is not in use ->
|
||||
|
||||
Remap CO to O from the row above, and set S on the next row to 0 to
|
||||
ensure O outputs CI from the row below.
|
||||
|
||||
No additional delays for this change.
|
||||
|
||||
- Congestion is present on row 3 and CO3 is not connected to another CARRY ->
|
||||
|
||||
Change CARRY_CO_DIRECT to CARRY_CO_TOP_POP. This adds 1 dummy layer to
|
||||
the carry chain to output the CO.
|
||||
|
||||
No additional delays for this change.
|
||||
|
||||
- Congestion is present on row 3 and CO3 is connected directly to another
|
||||
CARRY4 ->
|
||||
|
||||
Change CARRY_CO_DIRECT to CARRY_CO_LUT *and* change the chained
|
||||
CARRY_COUT_PLUG to be directly connected to the previous CO3.
|
||||
|
||||
Routing and LUT delays are incurred in this case.
|
||||
|
||||
Diagram for this case:
|
||||
|
||||
+-------------------+ +-----------------+
|
||||
| | | |
|
||||
| CO3 +->+ CARRY_CO_DIRECT |
|
||||
| | | |
|
||||
| DI3 | +-----------------+
|
||||
| |
|
||||
| S3 O3 |
|
||||
| |
|
||||
| DI2 | +-----------------+
|
||||
| | | |
|
||||
| S2 CO2 +->+ CARRY_CO_DIRECT |
|
||||
| | | |
|
||||
| DI1 | +-----------------+
|
||||
| |
|
||||
| S1 O2 |
|
||||
| CARRY4 |
|
||||
| DI0 (chained) | +-----------------+
|
||||
| | | |
|
||||
| S0 CO1 +->+ CARRY_CO_DIRECT |
|
||||
| | | |
|
||||
| CYINIT | +-----------------+
|
||||
| |
|
||||
+-----------------+ | O1 |
|
||||
| | | |
|
||||
+->+ CARRY_COUT_PLUG +--->+ CI | +-----------------+
|
||||
| | | | | | |
|
||||
| +-----------------+ | CO0 +->+ CARRY_CO_DIRECT |
|
||||
| | | | |
|
||||
+-------------------+ | +-----------------+ | | +-----------------+
|
||||
| | | | | | |
|
||||
| CO3 +--+->+ CARRY_CO_LUT +-+ | O0 |
|
||||
| | | | | | |
|
||||
| DI3 | +-----------------+ | +-------------------+
|
||||
| | |
|
||||
| S3 O3 | +------>
|
||||
| |
|
||||
| DI2 | +-----------------+
|
||||
| | | |
|
||||
| S2 CO2 +---->+ CARRY_CO_DIRECT |
|
||||
| | | |
|
||||
| DI1 | +-----------------+
|
||||
| |
|
||||
| S1 O2 |
|
||||
| CARRY4 |
|
||||
| DI0 (root) | +-----------------+
|
||||
| | | |
|
||||
| S0 CO1 +---->+ CARRY_CO_DIRECT |
|
||||
| | | |
|
||||
| CYINIT | +-----------------+
|
||||
| |
|
||||
| O1 |
|
||||
| |
|
||||
| CI | +-----------------+
|
||||
| | | |
|
||||
| CO0 +---->+ CARRY_CO_DIRECT |
|
||||
| | | |
|
||||
| | +-----------------+
|
||||
| |
|
||||
| O0 |
|
||||
| |
|
||||
+-------------------+
|
||||
|
||||
After this script is run, clean_carry_map.v is used to convert CARRY_CO_DIRECT
|
||||
into a direct connection, and CARRY_CO_LUT is mapped to a LUT to compute the
|
||||
carry output.
|
||||
|
||||
"""
|
||||
|
||||
import json
|
||||
from sys import stdin, stdout
|
||||
|
||||
|
||||
def find_top_module(design):
|
||||
"""
|
||||
Looks for the top-level module in the design. Returns its name. Throws
|
||||
an exception if none was found.
|
||||
"""
|
||||
|
||||
for name, module in design["modules"].items():
|
||||
attrs = module["attributes"]
|
||||
if "top" in attrs and int(attrs["top"]) == 1:
|
||||
return name
|
||||
|
||||
raise RuntimeError("No top-level module found in the design!")
|
||||
|
||||
|
||||
def find_carry4_chains(design, top_module, bit_to_cells):
|
||||
"""Identify CARRY4 carry chains starting from the root CARRY4.
|
||||
|
||||
All non-root CARRY4 cells should end up as part of a chain, otherwise
|
||||
an assertion is raised.
|
||||
|
||||
Arguments:
|
||||
design (dict) - "design" field from Yosys JSON format
|
||||
top_module (str) - Name of top module.
|
||||
bit_to_cells (dict) - Map of net bit identifier and cell information.
|
||||
Computed in "create_bit_to_cell_map".
|
||||
|
||||
Returns:
|
||||
list of list of strings - List of CARRY4 chains. Each chain is a list
|
||||
of cellnames. The cells are listed in chain order, starting from
|
||||
the root.
|
||||
|
||||
"""
|
||||
cells = design["modules"][top_module]["cells"]
|
||||
|
||||
used_carry4s = set()
|
||||
root_carry4s = []
|
||||
nonroot_carry4s = {}
|
||||
for cellname in cells:
|
||||
cell = cells[cellname]
|
||||
if cell["type"] != "CARRY4_VPR":
|
||||
continue
|
||||
connections = cell["connections"]
|
||||
|
||||
if "CIN" in connections:
|
||||
cin_connections = connections["CIN"]
|
||||
assert len(cin_connections) == 1
|
||||
|
||||
# Go to the driver of CIN; it should be a CARRY_COUT_PLUG.
|
||||
plug_cellname, port, bit_idx = bit_to_cells[cin_connections[0]][0]
|
||||
plug_cell = cells[plug_cellname]
|
||||
assert plug_cell["type"] == "CARRY_COUT_PLUG", plug_cellname
|
||||
assert port == "COUT"
|
||||
|
||||
plug_connections = plug_cell["connections"]
|
||||
|
||||
cin_connections = plug_connections["CIN"]
|
||||
assert len(cin_connections) == 1
|
||||
|
||||
# Go to the driver of CIN; it should be a CARRY_CO_DIRECT.
|
||||
direct_cellname, port, bit_idx = bit_to_cells[cin_connections[0]][0]
|
||||
direct_cell = cells[direct_cellname]
|
||||
assert direct_cell["type"] == "CARRY_CO_DIRECT", direct_cellname
|
||||
assert port == "OUT"
|
||||
|
||||
direct_connections = direct_cell["connections"]
|
||||
|
||||
co_connections = direct_connections["CO"]
|
||||
assert len(co_connections) == 1
|
||||
|
||||
nonroot_carry4s[co_connections[0]] = cellname
|
||||
else:
|
||||
used_carry4s.add(cellname)
|
||||
root_carry4s.append(cellname)
|
||||
|
||||
# Walk from each root CARRY4 to each child CARRY4 module.
|
||||
chains = []
|
||||
for cellname in root_carry4s:
|
||||
chain = [cellname]
|
||||
|
||||
while True:
|
||||
# Follow CO3 to the next CARRY4, if any.
|
||||
cell = cells[cellname]
|
||||
connections = cell["connections"]
|
||||
|
||||
co3_connections = connections.get("CO3", None)
|
||||
if co3_connections is None:
|
||||
# No next CARRY4, stop here.
|
||||
break
|
||||
|
||||
found_next_link = False
|
||||
for connection in co3_connections:
|
||||
next_cellname = nonroot_carry4s.get(connection, None)
|
||||
if next_cellname is not None:
|
||||
cellname = next_cellname
|
||||
used_carry4s.add(cellname)
|
||||
chain.append(cellname)
|
||||
found_next_link = True
|
||||
break
|
||||
|
||||
if not found_next_link:
|
||||
break
|
||||
|
||||
chains.append(chain)
|
||||
|
||||
# Make sure all non-root CARRY4s were used.
|
||||
for bit, cellname in nonroot_carry4s.items():
|
||||
assert cellname in used_carry4s, (bit, cellname)
|
||||
|
||||
return chains
|
||||
|
||||
|
||||
def create_bit_to_cell_map(design, top_module):
|
||||
"""Create map from net bit identifier to cell information.
|
||||
|
||||
Arguments:
|
||||
design (dict) - "design" field from Yosys JSON format
|
||||
top_module (str) - Name of top module.
|
||||
|
||||
Returns:
|
||||
bit_to_cells (dict) - Map of net bit identifier and cell information.
|
||||
|
||||
The map keys are the net bit identifiers used to mark which net a cell port
|
||||
is connected to. The map values are a list of cell ports that are in the
|
||||
net. The first element of the list is the driver port, and the remaining
|
||||
elements are sink ports.
|
||||
|
||||
The list elements are 3-tuples with:
|
||||
cellname (str) - The name of the cell this port belongs to
|
||||
port (str) - The name of the port this element is connected to.
|
||||
bit_idx (int) - For multi bit ports, a 0-based index into the port.
|
||||
|
||||
"""
|
||||
bit_to_cells = {}
|
||||
|
||||
cells = design["modules"][top_module]["cells"]
|
||||
|
||||
for cellname in cells:
|
||||
cell = cells[cellname]
|
||||
port_directions = cell["port_directions"]
|
||||
for port, connections in cell["connections"].items():
|
||||
is_output = port_directions[port] == "output"
|
||||
for bit_idx, bit in enumerate(connections):
|
||||
|
||||
list_of_cells = bit_to_cells.get(bit, None)
|
||||
if list_of_cells is None:
|
||||
list_of_cells = [None]
|
||||
bit_to_cells[bit] = list_of_cells
|
||||
|
||||
if is_output:
|
||||
# First element of list of cells is net driver.
|
||||
assert list_of_cells[0] is None, (bit, list_of_cells[0], cellname)
|
||||
list_of_cells[0] = (cellname, port, bit_idx)
|
||||
else:
|
||||
list_of_cells.append((cellname, port, bit_idx))
|
||||
|
||||
return bit_to_cells
|
||||
|
||||
|
||||
def is_bit_used(bit_to_cells, bit):
|
||||
"""Is the net bit specified used by any sinks?"""
|
||||
list_of_cells = bit_to_cells[bit]
|
||||
return len(list_of_cells) > 1
|
||||
|
||||
|
||||
def is_bit_used_other_than_carry4_cin(design, top_module, bit, bit_to_cells):
|
||||
"""Is the net bit specified used by any sinks other than a carry chain?"""
|
||||
cells = design["modules"][top_module]["cells"]
|
||||
list_of_cells = bit_to_cells[bit]
|
||||
assert len(list_of_cells) == 2, bit
|
||||
|
||||
direct_cellname, port, _ = list_of_cells[1]
|
||||
direct_cell = cells[direct_cellname]
|
||||
assert direct_cell["type"] == "CARRY_CO_DIRECT"
|
||||
assert port == "CO"
|
||||
|
||||
# Follow to output
|
||||
connections = direct_cell["connections"]["OUT"]
|
||||
assert len(connections) == 1
|
||||
|
||||
for cellname, port, bit_idx in bit_to_cells[connections[0]][1:]:
|
||||
cell = cells[cellname]
|
||||
if cell["type"] == "CARRY_COUT_PLUG" and port == "CIN":
|
||||
continue
|
||||
else:
|
||||
return True, direct_cellname
|
||||
|
||||
return False, direct_cellname
|
||||
|
||||
|
||||
def create_bit_to_net_map(design, top_module):
|
||||
"""Create map from net bit identifier to net information.
|
||||
|
||||
Arguments:
|
||||
design (dict) - "design" field from Yosys JSON format
|
||||
top_module (str) - Name of top module.
|
||||
|
||||
Returns:
|
||||
bit_to_nets (dict) - Map of net bit identifier to net information.
|
||||
"""
|
||||
bit_to_nets = {}
|
||||
|
||||
nets = design["modules"][top_module]["netnames"]
|
||||
|
||||
for net in nets:
|
||||
for bit_idx, bit in enumerate(nets[net]["bits"]):
|
||||
bit_to_nets[bit] = (net, bit_idx)
|
||||
|
||||
return bit_to_nets
|
||||
|
||||
|
||||
def fixup_cin(design, top_module, bit_to_cells, co_bit, direct_cellname):
|
||||
"""Move connection from CARRY_CO_LUT.OUT -> CARRY_COUT_PLUG.CIN to
|
||||
directly to the preceding CARRY4.
|
||||
"""
|
||||
cells = design["modules"][top_module]["cells"]
|
||||
|
||||
direct_cell = cells[direct_cellname]
|
||||
assert direct_cell["type"] == "CARRY_CO_LUT"
|
||||
|
||||
# Follow to output
|
||||
connections = direct_cell["connections"]["OUT"]
|
||||
assert len(connections) == 1
|
||||
|
||||
for cellname, port, bit_idx in bit_to_cells[connections[0]][1:]:
|
||||
cell = cells[cellname]
|
||||
if cell["type"] == "CARRY_COUT_PLUG" and port == "CIN":
|
||||
assert bit_idx == 0
|
||||
|
||||
cells[cellname]["connections"]["CIN"][0] = co_bit
|
||||
|
||||
|
||||
def fixup_congested_rows(design, top_module, bit_to_cells, bit_to_nets, chain):
|
||||
"""Walk the specified carry chain, and identify if any outputs are congested.
|
||||
|
||||
Arguments:
|
||||
design (dict) - "design" field from Yosys JSON format
|
||||
top_module (str) - Name of top module.
|
||||
bit_to_cells (dict) - Map of net bit identifier and cell information.
|
||||
Computed in "create_bit_to_cell_map".
|
||||
bit_to_nets (dict) - Map of net bit identifier to net information.
|
||||
Computed in "create_bit_to_net_map".
|
||||
chain (list of str) - List of cells in the carry chain.
|
||||
|
||||
"""
|
||||
cells = design["modules"][top_module]["cells"]
|
||||
|
||||
O_ports = ["O0", "O1", "O2", "O3"]
|
||||
CO_ports = ["CO0", "CO1", "CO2", "CO3"]
|
||||
|
||||
def check_if_rest_of_carry4_is_unused(cellname, cell_idx):
|
||||
assert cell_idx < len(O_ports)
|
||||
|
||||
cell = cells[cellname]
|
||||
connections = cell["connections"]
|
||||
|
||||
for o, co in zip(O_ports[cell_idx:], CO_ports[cell_idx:]):
|
||||
o_conns = connections[o]
|
||||
assert len(o_conns) == 1
|
||||
o_bit = o_conns[0]
|
||||
if is_bit_used(bit_to_cells, o_bit):
|
||||
return False
|
||||
|
||||
co_conns = connections[co]
|
||||
assert len(co_conns) == 1
|
||||
co_bit = co_conns[0]
|
||||
if is_bit_used(bit_to_cells, co_bit):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
# Carry chain is congested if both O and CO are used at the same level.
|
||||
# CO to next element in the chain is fine.
|
||||
for chain_idx, cellname in enumerate(chain):
|
||||
cell = cells[cellname]
|
||||
connections = cell["connections"]
|
||||
for cell_idx, (o, co) in enumerate(zip(O_ports, CO_ports)):
|
||||
o_conns = connections[o]
|
||||
assert len(o_conns) == 1
|
||||
o_bit = o_conns[0]
|
||||
|
||||
co_conns = connections[co]
|
||||
assert len(co_conns) == 1
|
||||
co_bit = co_conns[0]
|
||||
|
||||
is_o_used = is_bit_used(bit_to_cells, o_bit)
|
||||
is_co_used, direct_cellname = is_bit_used_other_than_carry4_cin(design, top_module, co_bit, bit_to_cells)
|
||||
|
||||
if is_o_used and is_co_used:
|
||||
# Output at this row is congested.
|
||||
direct_cell = cells[direct_cellname]
|
||||
|
||||
if co == "CO3" and chain_idx == len(chain) - 1:
|
||||
# This congestion is on the top of the carry chain,
|
||||
# emit a dummy layer to the chain.
|
||||
direct_cell["type"] = "CARRY_CO_TOP_POP"
|
||||
assert int(direct_cell["parameters"]["TOP_OF_CHAIN"]) == 1
|
||||
# If this is the last CARRY4 in the chain, see if the
|
||||
# remaining part of the chain is idle.
|
||||
elif chain_idx == len(chain) - 1 and check_if_rest_of_carry4_is_unused(cellname, cell_idx + 1):
|
||||
|
||||
# Because the rest of the CARRY4 is idle, it is safe to
|
||||
# use the next row up to output the top of the carry.
|
||||
connections["S{}".format(cell_idx + 1)] = ["1'b0"]
|
||||
|
||||
next_o_conns = connections[O_ports[cell_idx + 1]]
|
||||
assert len(next_o_conns) == 1
|
||||
direct_cell["connections"]["CO"][0] = next_o_conns[0]
|
||||
|
||||
netname, bit_idx = bit_to_nets[next_o_conns[0]]
|
||||
assert bit_idx == 0
|
||||
|
||||
# Update annotation that this net is now in use.
|
||||
net = design["module"][top_module]["netnames"][netname]
|
||||
assert net["attributes"].get("unused_bits", None) == "0 "
|
||||
del net["attributes"]["unused_bits"]
|
||||
else:
|
||||
# The previous two strategies (use another layer of carry)
|
||||
# only work for the top of the chain. This appears to be
|
||||
# in the middle of the chain, so just spill it out to a
|
||||
# LUT, and fixup the direct carry chain (if any).
|
||||
direct_cell["type"] = "CARRY_CO_LUT"
|
||||
|
||||
fixup_cin(design, top_module, bit_to_cells, co_bit, direct_cellname)
|
||||
|
||||
|
||||
def main(design):
|
||||
top_module = find_top_module(design)
|
||||
bit_to_cells = create_bit_to_cell_map(design, top_module)
|
||||
bit_to_nets = create_bit_to_net_map(design, top_module)
|
||||
for chain in find_carry4_chains(design, top_module, bit_to_cells):
|
||||
fixup_congested_rows(design, top_module, bit_to_cells, bit_to_nets, chain)
|
||||
return design
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
json.dump(main(json.load(stdin)), stdout, indent=2)
|
|
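A minimal, made-up dictionary in the shape of Yosys' write_json output that exercises the helpers above (find_top_module, create_bit_to_cell_map, create_bit_to_net_map); real inputs come from the Yosys flow:

design = {
    "modules": {
        "top": {
            "attributes": {"top": 1},
            "cells": {
                "c4": {
                    "type": "CARRY4_VPR",
                    "port_directions": {"O0": "output", "S0": "input"},
                    "connections": {"O0": [5], "S0": [4]},
                },
            },
            "netnames": {
                "o0": {"bits": [5], "attributes": {}},
                "s0": {"bits": [4], "attributes": {}},
            },
        }
    }
}

top = find_top_module(design)                  # -> "top"
bit_to_cells = create_bit_to_cell_map(design, top)
print(bit_to_cells[5])                         # [('c4', 'O0', 0)]
print(create_bit_to_net_map(design, top)[5])   # ('o0', 0)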
@ -37,6 +37,11 @@ isQuickLogic = FPGA_FAM != "xc7"
|
|||
SH_SUBDIR = "quicklogic" if isQuickLogic else FPGA_FAM
|
||||
|
||||
|
||||
if not isQuickLogic:
|
||||
from f4pga.utils.xc7.create_ioplace import main as xc7_create_ioplace
|
||||
from f4pga.utils.xc7.create_place_constraints import main as xc7_create_place_constraints
|
||||
|
||||
|
||||
# Helper functions
|
||||
|
||||
|
||||
|
@ -472,26 +477,22 @@ fi
|
|||
)
|
||||
else:
|
||||
(eblif, net, part, device, arch_def) = sys_argv[1:6]
|
||||
pcf_opts = f"'--pcf' '{sys_argv[6]}'" if len(sys_argv) > 6 else ""
|
||||
ioplace_file = f"{Path(eblif).stem}.ioplace"
|
||||
p_run_bash_cmds(
|
||||
f"""
|
||||
set -e
|
||||
python3 '{F4PGA_SHARE_DIR}/scripts/prjxray_create_ioplace.py' \
|
||||
--blif '{eblif}' \
|
||||
--net '{net}' {pcf_opts} \
|
||||
--map '{F4PGA_SHARE_DIR}/arch/{device}/{part}/pinmap.csv' \
|
||||
> '{ioplace_file}'
|
||||
python3 '{F4PGA_SHARE_DIR}'/scripts/prjxray_create_place_constraints.py \
|
||||
--blif '{eblif}' \
|
||||
--net '{net}' \
|
||||
--arch '{arch_def}' \
|
||||
--part '{part}' \
|
||||
--vpr_grid_map '{F4PGA_SHARE_DIR}/arch/{device}/vpr_grid_map.csv' \
|
||||
--input '{ioplace_file}' \
|
||||
--db_root "${{DATABASE_DIR:-$(prjxray-config)}}" \
|
||||
> constraints.place
|
||||
"""
|
||||
xc7_create_ioplace(
|
||||
blif=eblif,
|
||||
map=f"{F4PGA_SHARE_DIR}/arch/{device}/{part}/pinmap.csv",
|
||||
net=net,
|
||||
output=ioplace_file,
|
||||
pcf=Path(sys_argv[6]).open("r") if len(sys_argv) > 6 else None,
|
||||
)
|
||||
xc7_create_place_constraints(
|
||||
blif=eblif,
|
||||
net=net,
|
||||
arch=arch_def,
|
||||
part=part,
|
||||
vpr_grid_map=f"{F4PGA_SHARE_DIR}/arch/{device}/vpr_grid_map.csv",
|
||||
input=ioplace_file,
|
||||
output="constraints.place",
|
||||
)
|
||||
|
||||
|
||||
|
|
|
@ -191,7 +191,7 @@ techmap -map $::env(TECHMAP_PATH)/carry_map.v
|
|||
clean_processes
|
||||
write_json $::env(OUT_JSON).carry_fixup.json
|
||||
|
||||
exec $::env(PYTHON3) $::env(UTILS_PATH)/fix_xc7_carry.py < $::env(OUT_JSON).carry_fixup.json > $::env(OUT_JSON).carry_fixup_out.json
|
||||
exec $::env(PYTHON3) -m f4pga.utils.xc7.fix_xc7_carry < $::env(OUT_JSON).carry_fixup.json > $::env(OUT_JSON).carry_fixup_out.json
|
||||
design -push
|
||||
read_json $::env(OUT_JSON).carry_fixup_out.json
|
||||
|
||||
|
|