From 873ea7e61b8dce445315f160dc7f6bcbb3a5639d Mon Sep 17 00:00:00 2001
From: Unai Martinez-Corral
Date: Mon, 29 Aug 2022 23:29:34 +0200
Subject: [PATCH] f4pga/utils: all python scripts moved from xc7 arch-defs
 tarballs

Signed-off-by: Unai Martinez-Corral
---
 f4pga/flows/platforms.yml                   |    4 +-
 f4pga/utils/eblif.py                        |   96 ++
 f4pga/utils/pcf.py                          |   63 +
 f4pga/utils/vpr_io_place.py                 |  264 +++++
 f4pga/utils/xc7/create_ioplace.py           |  175 +++
 f4pga/utils/xc7/create_place_constraints.py | 1181 +++++++++++++++++++
 f4pga/utils/xc7/fix_xc7_carry.py            |  609 ++++++++++
 f4pga/wrappers/sh/__init__.py               |   39 +-
 f4pga/wrappers/tcl/xc7/synth.f4pga.tcl      |    2 +-
 9 files changed, 2411 insertions(+), 22 deletions(-)
 create mode 100644 f4pga/utils/eblif.py
 create mode 100644 f4pga/utils/pcf.py
 create mode 100644 f4pga/utils/vpr_io_place.py
 create mode 100644 f4pga/utils/xc7/create_ioplace.py
 create mode 100644 f4pga/utils/xc7/create_place_constraints.py
 create mode 100644 f4pga/utils/xc7/fix_xc7_carry.py

diff --git a/f4pga/flows/platforms.yml b/f4pga/flows/platforms.yml
index f44299d..7b6e1b6 100644
--- a/f4pga/flows/platforms.yml
+++ b/f4pga/flows/platforms.yml
@@ -91,7 +91,7 @@ xc7a50t: &xc7
       params:
         stage_name: ioplace
         interpreter: '${python3}'
-        script: '${shareDir}/scripts/prjxray_create_ioplace.py'
+        script: ['-m', 'f4pga.utils.xc7.create_ioplace']
       outputs:
         io_place:
           mode: stdout
@@ -107,7 +107,7 @@ xc7a50t: &xc7
       params:
         stage_name: place_constraints
         interpreter: '${python3}'
-        script: '${shareDir}/scripts/prjxray_create_place_constraints.py'
+        script: ['-m', 'f4pga.utils.xc7.create_place_constraints']
       outputs:
         place_constraints:
           mode: stdout
diff --git a/f4pga/utils/eblif.py b/f4pga/utils/eblif.py
new file mode 100644
index 0000000..0d5d6d4
--- /dev/null
+++ b/f4pga/utils/eblif.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019-2022 F4PGA Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Top level keywords defining the beginning of a cell definition.
+top_level = [
+    "model",
+    "inputs",
+    "outputs",
+    "names",
+    "latch",
+    "subckt",
+]
+
+# Keywords defining cell attributes / parameters. These can be specified
+# multiple times for each cell. Parameter names and values are stored in a
+# dict under the parsed blif data.
+#
+# For example: the construct ".param MODE SYNC" will add, under the key
+# "param", the entry "MODE":"SYNC".
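+#
+# As an illustrative sketch (a hypothetical cell, not taken from any real
+# design), the eblif fragment
+#
+#   .subckt CARRY4_VPR CI=ci S0=s0 O0=o0
+#   .param MODE SYNC
+#
+# is parsed by parse_blif() below into roughly:
+#
+#   {"type": "subckt", "args": ["CARRY4_VPR", "CI=ci", "S0=s0", "O0=o0"],
+#    "data": [], "param": {"MODE": "SYNC"}}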
+#
+sub_level = [
+    "attr",
+    "param",
+]
+
+
+def parse_blif(f):
+    current = None
+
+    data = {}
+
+    def add(d):
+        if d["type"] not in data:
+            data[d["type"]] = []
+        data[d["type"]].append(d)
+
+    current = None
+    for oline in f:
+        line = oline
+        if "#" in line:
+            line = line[: line.find("#")]
+        line = line.strip()
+        if not line:
+            continue
+
+        if line.startswith("."):
+            args = line.split(" ", maxsplit=1)
+            if len(args) < 2:
+                args.append("")
+
+            ctype = args.pop(0)
+            assert ctype.startswith("."), ctype
+            ctype = ctype[1:]
+
+            if ctype in top_level:
+                if current:
+                    add(current)
+                current = {
+                    "type": ctype,
+                    "args": args[-1].split(),
+                    "data": [],
+                }
+            elif ctype in sub_level:
+                if ctype not in current:
+                    current[ctype] = {}
+                key, value = args[-1].split(maxsplit=1)
+                current[ctype][key] = value
+            else:
+                current[ctype] = args[-1].split()
+            continue
+        current["data"].append(line.strip().split())
+
+    if current:
+        add(current)
+
+    assert len(data["inputs"]) == 1
+    data["inputs"] = data["inputs"][0]
+    assert len(data["outputs"]) == 1
+    data["outputs"] = data["outputs"][0]
+    return data
diff --git a/f4pga/utils/pcf.py b/f4pga/utils/pcf.py
new file mode 100644
index 0000000..6c779d9
--- /dev/null
+++ b/f4pga/utils/pcf.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019-2022 F4PGA Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""
+Supported PCF commands:
+* set_io  - constrain a given <net> to a given physical <pad> in the eFPGA pinout.
+* set_clk - constrain a given global clock <pin> to a given <net>.
+  Every tile where <pin> is present will be constrained to use the given global clock.
+"""
+
+from collections import namedtuple
+import re
+
+PcfIoConstraint = namedtuple("PcfIoConstraint", "net pad line_str line_num")
+PcfClkConstraint = namedtuple("PcfClkConstraint", "pin net")
+
+
+def parse_simple_pcf(f):
+    """Parse a simple PCF file object and yield PcfIoConstraint and PcfClkConstraint objects."""
+    for line_number, line in enumerate(f):
+        line_number += 1
+
+        # Remove comments.
+        args = re.sub(r"#.*", "", line.strip()).split()
+
+        if not args:
+            continue
+
+        # Ignore option arguments (tokens starting with '-').
+        args = [arg for arg in args if arg[0] != "-"]
+        assert len(args) == 3, args
+
+        if args[0] == "set_io":
+
+            yield PcfIoConstraint(
+                net=args[1],
+                pad=args[2],
+                line_str=line.strip(),
+                line_num=line_number,
+            )
+
+        if args[0] == "set_clk":
+
+            yield PcfClkConstraint(
+                pin=args[1],
+                net=args[2],
+            )
diff --git a/f4pga/utils/vpr_io_place.py b/f4pga/utils/vpr_io_place.py
new file mode 100644
index 0000000..216da00
--- /dev/null
+++ b/f4pga/utils/vpr_io_place.py
@@ -0,0 +1,264 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019-2022 F4PGA Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+from collections import OrderedDict, namedtuple
+import itertools
+import re
+import lxml.etree as ET
+
+from f4pga.utils.eblif import parse_blif
+
+IoConstraint = namedtuple("IoConstraint", "name x y z comment")
+
+HEADER_TEMPLATE = """\
+#{name:<{nl}} x y z pcf_line
+#{s:-^{nl}} -- -- - ----"""
+
+CONSTRAINT_TEMPLATE = "{name:<{nl}} {x: 3} {y: 3} {z: 2} # {comment}"
+INOUT_REGEX = re.compile(r"^(.+)(_\$inp|_\$out)(.*)$")
+NETNAME_REGEX = re.compile(r"(.+?)(\[[0-9]+\]$|$)")
+
+
+class IoPlace(object):
+    def __init__(self):
+        self.constraints = OrderedDict()
+        self.inputs = set()
+        self.outputs = set()
+        self.net_to_block = None
+        self.net_map = {}
+        self.block_to_inst = {}
+        self.inout_nets = set()
+        self.net_to_pad = set()
+        self.net_file_io = set()
+
+    def read_io_loc_pairs(self, blif):
+        """
+        Read IO_LOC_PAIRS parameters from the eblif, which carry the information
+        to which package pin a given top-level port is constrained, e.g.
+        IO_LOC_PAIRS = "portA:D1".
+        In case of differential inputs/outputs there are two pairs in the
+        parameter, i.e. IO_LOC_PAIRS = "portA_p:D2,portA_n:D4".
+        """
+        if "subckt" not in blif:
+            return
+        for attr in blif["subckt"]:
+            if "param" not in attr:
+                continue
+            if "IO_LOC_PAIRS" in attr["param"]:
+                locs = attr["param"]["IO_LOC_PAIRS"][1:-1].split(",")
+                if "NONE" in locs:
+                    continue
+                for loc in locs:
+                    net, pad = loc.split(":")
+                    self.net_to_pad.add((net, pad))
+
+    def read_io_list_from_eblif(self, eblif_file):
+        blif = parse_blif(eblif_file)
+
+        self.inputs = set(blif["inputs"]["args"])
+        self.outputs = set(blif["outputs"]["args"])
+
+        # Build a net name map that maps products of an inout port split into
+        # their former name.
+        #
+        # For example, an inout port 'A' is split into 'A_$inp' and 'A_$out',
+        # port B[2] into 'B_$inp[2]' and 'B_$out[2]'.
+        self.net_map = {}
+        self.inout_nets = set()
+        for net in itertools.chain(self.inputs, self.outputs):
+            match = INOUT_REGEX.match(net)
+            if match:
+                alias = match.group(1) + match.group(3)
+                self.inout_nets.add(alias)
+                self.net_map[net] = alias
+            else:
+                self.net_map[net] = net
+        self.read_io_loc_pairs(blif)
+
+    def load_block_names_from_net_file(self, net_file):
+        """
+        .place files expect top-level block (cluster) names, not net names, so
+        build a mapping from net names to block names from the .net file.
+        """
+        net_xml = ET.parse(net_file)
+        net_root = net_xml.getroot()
+        self.net_to_block = {}
+
+        for block in net_root.xpath("//block"):
+            instance = block.attrib["instance"]
+            if instance != "inpad[0]" and instance != "outpad[0]":
+                continue
+
+            top_block = block.getparent()
+            assert top_block is not None
+            while top_block.getparent() is not net_root:
+                assert top_block is not None
+                top_block = top_block.getparent()
+            self.net_to_block[block.get("name")] = top_block.get("name")
+
+        # Loop over all top-level blocks and store the correspondence of each
+        # block name to its instance.
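+        #
+        # As a rough, hypothetical sketch of the .net structure assumed here
+        # (block and instance names are made up for illustration):
+        #
+        #   <block name="top.net" instance="FPGA_packed_netlist[0]">
+        #     <block name="pad_a" instance="BLK-TL-IOPAD[0]">
+        #       <block name="pad_a" instance="inpad[0]"/>
+        #     </block>
+        #   </block>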
+ for block_xml in net_root.findall("block"): + name = block_xml.attrib["name"] + inst = block_xml.attrib["instance"] + + assert name not in self.block_to_inst, block_xml.attrib + self.block_to_inst[name] = inst + + def load_net_file_ios(self, net_file): + """ + Loads input and outputs net names from the netlist file. + """ + + def get_ios(net_root, io_types): + "Get the top level io signals from the netlist XML root." + for io_type in io_types: + io = net_root.xpath("/block/{}/text ()".format(io_type)) + + if len(io) == 1: + yield io[0] + + net_xml = ET.parse(net_file) + net_root = net_xml.getroot() + + for io_line in get_ios(net_root, ["inputs", "outputs"]): + io_list = io_line.split(" ") + for io in io_list: + self.net_file_io.add(io.replace("out:", "")) + + def get_top_level_block_instance_for_net(self, net_name): + """ + Returns a name of the top-level block instance for the given net + name. + """ + assert self.is_net(net_name) + + # VPR prefixes output constraints with "out:" + if net_name in self.outputs: + net_name = "out:" + net_name + + # This is an inout net + if net_name in self.inout_nets: + block_names = set() + + for prefix, suffix in zip(["", "out:"], ["_$inp", "_$out"]): + match = NETNAME_REGEX.match(net_name) + name = prefix + match.group(1) + suffix + match.group(2) + block_names.add(self.net_to_block[name]) + + # Both parts of the net should point to the same block + assert len(block_names) == 1, (net_name, block_names) + return self.block_to_inst[list(block_names)[0]] + + # A regular net + else: + if net_name in self.net_to_block: + block_name = self.net_to_block[net_name] + return self.block_to_inst[block_name] + else: + return None + + def constrain_net(self, net_name, loc, comment=""): + assert len(loc) == 3 + assert net_name not in self.constraints + + assert self.is_net(net_name), "net {} not in eblif".format(net_name) + + # VPR prefixes output constraints with "out:" + if net_name in self.outputs: + net_name = "out:" + net_name + + # This is an inout net + if net_name in self.inout_nets: + for prefix, suffix in zip(["", "out:"], ["_$inp", "_$out"]): + + match = NETNAME_REGEX.match(net_name) + name = prefix + match.group(1) + suffix + match.group(2) + + self.constraints[name] = IoConstraint( + name=name, + x=loc[0], + y=loc[1], + z=loc[2], + comment=comment, + ) + + # A regular net + else: + self.constraints[net_name] = IoConstraint( + name=net_name, + x=loc[0], + y=loc[1], + z=loc[2], + comment=comment, + ) + + def output_io_place(self, f): + max_name_length = max(len(c.name) for c in self.constraints.values()) + print(HEADER_TEMPLATE.format(name="Block Name", nl=max_name_length, s=""), file=f) + + constrained_blocks = {} + + for vpr_net, constraint in self.constraints.items(): + name = constraint.name + name = self.net_to_block.get(name) if self.net_to_block else name + + # This block is already constrained, check if there is no + # conflict there. 
+ if name in constrained_blocks: + existing = constrained_blocks[name] + + if existing.x != constraint.x or existing.y != constraint.y or existing.z != constraint.z: + + print("Error: block '{}' has multiple conflicting constraints!".format(name)) + print("", constrained_blocks[name]) + print("", constraint) + exit(-1) + + # Don't write the second constraing + continue + + # omit if no corresponding block name for the net + if name is not None: + print( + CONSTRAINT_TEMPLATE.format( + name=name, + nl=max_name_length, + x=constraint.x, + y=constraint.y, + z=constraint.z, + comment=constraint.comment, + ), + file=f, + ) + + # Add to constrained block list + constrained_blocks[name] = constraint + + def is_net(self, net): + return net in self.net_map.values() + + def is_net_packed(self, net): + return net in self.net_file_io + + def get_nets(self): + for net in self.inputs: + yield self.net_map[net] + for net in self.outputs: + yield self.net_map[net] diff --git a/f4pga/utils/xc7/create_ioplace.py b/f4pga/utils/xc7/create_ioplace.py new file mode 100644 index 0000000..121bf3b --- /dev/null +++ b/f4pga/utils/xc7/create_ioplace.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019-2022 F4PGA Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + + +""" +Convert a PCF file into a VPR io.place file. +""" + + +from argparse import ArgumentParser, FileType +from pathlib import Path +from csv import DictReader as csv_DictReader +from sys import stdout, stderr, exit as sys_exit +from json import dump as json_dump, load as json_load + +from f4pga.utils.vpr_io_place import IoPlace +from f4pga.utils.pcf import parse_simple_pcf + + +def p_main(blif, map, net, pcf=None, output=stdout, iostandard_defs_file=None, iostandard="LVCMOS33", drive=12): + io_place = IoPlace() + io_place.read_io_list_from_eblif(blif) + io_place.load_block_names_from_net_file(net) + + # Map of pad names to VPR locations. + pad_map = {} + + for pin_map_entry in csv_DictReader(map): + pad_map[pin_map_entry["name"]] = ( + ( + int(pin_map_entry["x"]), + int(pin_map_entry["y"]), + int(pin_map_entry["z"]), + ), + pin_map_entry["is_output"], + pin_map_entry["iob"], + pin_map_entry["real_io_assoc"], + ) + + iostandard_defs = {} + + # Load iostandard constraints. This is a temporary workaround that allows + # to pass them into fasm2bels. As soon as there is support for XDC this + # will not be needed anymore. + # If there is a JSON file with the same name as the PCF file then it is + # loaded and used as iostandard constraint source NOT for the design but + # to be used in fasm2bels. 
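+    #
+    # A minimal sketch of the expected JSON, assuming a pad named "D1" (both
+    # the file pairing and the value shape mirror how iostandard_defs is built
+    # below):
+    #
+    #   top.pcf -> top.json:
+    #     {"D1": {"IOSTANDARD": "LVCMOS33", "DRIVE": 12}}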
+ iostandard_constraints = {} + + if pcf is not None: + fname = Path(pcf.name.replace(".pcf", ".json")) + if fname.is_file(): + with fname.open("r") as fp: + iostandard_constraints = json_load(fp) + net_to_pad = io_place.net_to_pad + if pcf is not None: + net_to_pad |= set((constr.net, constr.pad) for constr in parse_simple_pcf(pcf)) + # Check for conflicting pad constraints + net_to_pad_map = dict() + for (net, pad) in net_to_pad: + if net not in net_to_pad_map: + net_to_pad_map[net] = pad + elif pad != net_to_pad_map[net]: + print( + f"ERROR: Conflicting pad constraints for net {net}:\n{pad}\n{net_to_pad_map[net]}", + file=stderr, + ) + sys_exit(1) + + # Constrain nets + for net, pad in net_to_pad: + if not io_place.is_net(net): + nets = "\n".join(io_place.get_nets()) + print( + f"ERROR: Constrained net {net} is not in available netlist:\n{nets}", + file=stderr, + ) + sys_exit(1) + + if pad not in pad_map: + pads = "\n".join(sorted(pad_map.keys())) + print( + f"ERROR: Constrained pad {pad} is not in available pad map:\n{pads}", + file=stderr, + ) + sys_exit(1) + + loc, is_output, iob, real_io_assoc = pad_map[pad] + + io_place.constrain_net(net_name=net, loc=loc, comment="set_property LOC {} [get_ports {{{}}}]".format(pad, net)) + if real_io_assoc == "True": + iostandard_defs[iob] = ( + iostandard_constraints[pad] + if pad in iostandard_constraints + else ({"DRIVE": drive, "IOSTANDARD": iostandard} if is_output else {"IOSTANDARD": iostandard}) + ) + + io_place.output_io_place(output) + + # Write iostandard definitions + if iostandard_defs_file is not None: + with Path(iostandard_defs_file).open("w") as f: + json_dump(iostandard_defs, f, indent=2) + + +def main( + blif, + map, + net, + pcf=None, + output=None, + iostandard_defs_file=None, + iostandard="LVCMOS33", + drive=12, +): + p_main( + blif=Path(blif).open("r"), + map=Path(map).open("r"), + net=Path(net).open("r"), + pcf=None if pcf is None else Path(pcf).open("r"), + output=stdout if output is None else Path(output).open("w"), + iostandard_defs_file=iostandard_defs_file, + iostandard=iostandard, + drive=drive, + ) + + +if __name__ == "__main__": + parser = ArgumentParser(description="Convert a PCF file into a VPR io.place file.") + parser.add_argument("--pcf", "-p", "-P", type=FileType("r"), required=False, help="PCF input file") + parser.add_argument("--blif", "-b", type=FileType("r"), required=True, help="BLIF / eBLIF file") + parser.add_argument("--map", "-m", "-M", type=FileType("r"), required=True, help="Pin map CSV file") + parser.add_argument("--output", "-o", "-O", type=FileType("w"), default=stdout, help="The output io.place file") + parser.add_argument("--iostandard_defs", help="(optional) Output IOSTANDARD def file") + parser.add_argument( + "--iostandard", + default="LVCMOS33", + help="Default IOSTANDARD to use for pins", + ) + parser.add_argument( + "--drive", + type=int, + default=12, + help="Default drive to use for pins", + ) + parser.add_argument("--net", "-n", type=FileType("r"), required=True, help="top.net file") + + args = parser.parse_args() + + p_main( + blif=args.blif, + map=args.map, + net=args.net, + pcf=args.pcf, + output=args.output, + iostandard_defs_file=args.iostandard_defs, + iostandard=args.iostandard, + drive=args.drive, + ) diff --git a/f4pga/utils/xc7/create_place_constraints.py b/f4pga/utils/xc7/create_place_constraints.py new file mode 100644 index 0000000..f6b9e4f --- /dev/null +++ b/f4pga/utils/xc7/create_place_constraints.py @@ -0,0 +1,1181 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- 
+# +# Copyright (C) 2019-2022 F4PGA Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + + +""" Convert a PCF file into a VPR io.place file. """ +from __future__ import print_function +import argparse +import sys +import csv +from os import environ +import lxml.etree as ET +import constraint +import prjxray.db +from collections import defaultdict, OrderedDict, namedtuple +from pathlib import Path +from subprocess import run as subprocess_run + +from f4pga.utils.eblif import parse_blif + + +PlaceConstraint = namedtuple("PlaceConstraint", "name x y z comment") + +HEADER_TEMPLATE = """\ +#{name:<{nl}} x y z pcf_line +#{s:-^{nl}} -- -- - ----""" + +CONSTRAINT_TEMPLATE = "{name:<{nl}} {x: 3} {y: 3} {z: 2} # {comment}" + + +def get_root_cluster(curr): + while True: + parent = curr.getparent() + if parent is None: + return None + + parent_parent = parent.getparent() + if parent_parent is None: + return curr + + curr = parent + + +class PlaceConstraints(object): + def __init__(self, net_file): + self.constraints = OrderedDict() + self.block_to_loc = dict() + + net_xml = ET.parse(net_file) + self.net_root = net_xml.getroot() + + def load_loc_sites_from_net_file(self): + """ + .place files expect top-level block (cluster) names, not net names, so + build a mapping from net names to block names from the .net file. + """ + self.net_to_block = {} + self.block_to_root_block = {} + + for el in self.net_root.iter("block"): + root_block = get_root_cluster(el) + if root_block is not None: + self.block_to_root_block[el.attrib["name"]] = root_block.attrib["name"] + + for attr in self.net_root.xpath("//attribute"): + name = attr.attrib["name"] + if name != "LOC": + continue + + # Get block name + top_block = attr.getparent() + assert top_block is not None + while top_block.getparent() is not self.net_root: + assert top_block is not None + top_block = top_block.getparent() + + self.block_to_loc[top_block.get("name")] = attr.text + + def constrain_block(self, block_name, loc, comment=""): + assert len(loc) == 3 + assert block_name not in self.constraints, block_name + + place_constraint = PlaceConstraint( + name=block_name, + x=loc[0], + y=loc[1], + z=loc[2], + comment=comment, + ) + + root_block = self.block_to_root_block[block_name] + + self.constraints[root_block] = place_constraint + + def output_place_constraints(self, f): + if not self.constraints: + return + + max_name_length = max(len(c.name) for c in self.constraints.values()) + + constrained_blocks = {} + + for vpr_net, constraint in self.constraints.items(): + name = constraint.name + + # This block is already constrained, check if there is no + # conflict there. 
+ if name in constrained_blocks: + existing = constrained_blocks[name] + + if existing.x != constraint.x or existing.y != constraint.y or existing.z != constraint.z: + + print("Error: block '{}' has multiple conflicting constraints!".format(name)) + print("", constrained_blocks[name]) + print("", constraint) + exit(-1) + + # Don't write the second constraing + continue + + # omit if no corresponding block name for the net + if name is not None: + print( + CONSTRAINT_TEMPLATE.format( + name=name, + nl=max_name_length, + x=constraint.x, + y=constraint.y, + z=constraint.z, + comment=constraint.comment, + ), + file=f, + ) + + # Add to constrained block list + constrained_blocks[name] = constraint + + def get_loc_sites(self): + """Yields user-constraints (block, location) pairs""" + + if self.block_to_loc is None: + return + + for loc in self.block_to_loc: + yield (loc, self.block_to_loc[loc]) + + def get_used_instances(self, instance): + """ + Returns a list containing the root clusters of the specified instances in the packed netlist + that are marked as used. + + An instance is marked as used when the XML element relative to the instance name has + children tags. + + E.g.: + + + refclk + + ... + + ... + + + """ + + instances = list() + + for el in self.net_root.iter("block"): + inst = el.attrib["instance"] + if instance in inst: + if len(el.getchildren()) != 0: + instances.append(get_root_cluster(el).attrib["name"]) + + return instances + + +CLOCKS = { + "PLLE2_ADV_VPR": { + "sinks": frozenset(("CLKFBIN", "CLKIN1", "CLKIN2", "DCLK")), + "sources": frozenset(("CLKFBOUT", "CLKOUT0", "CLKOUT1", "CLKOUT2", "CLKOUT3", "CLKOUT4", "CLKOUT5")), + "type": "PLLE2_ADV", + }, + "MMCME2_ADV_VPR": { + "sinks": frozenset(("CLKFBIN", "CLKIN1", "CLKIN2", "DCLK", "PSCLK")), + "sources": frozenset( + ( + "CLKFBOUT", + "CLKFBOUTB", + "CLKOUT0", + "CLKOUT0B", + "CLKOUT1", + "CLKOUT1B", + "CLKOUT2", + "CLKOUT2B", + "CLKOUT3", + "CLKOUT3B", + "CLKOUT4", + "CLKOUT5", + "CLKOUT6", + ) + ), + "type": "MMCME2_ADV", + }, + "BUFGCTRL_VPR": { + "sinks": frozenset(("I0", "I1")), + "sources": frozenset(("O",)), + "type": "BUFGCTRL", + }, + "PS7_VPR": { + "sinks": frozenset( + ( + "DMA0ACLK", + "DMA1ACLK", + "DMA2ACLK", + "DMA3ACLK", + "EMIOENET0GMIIRXCLK", + "EMIOENET0GMIITXCLK", + "EMIOENET1GMIIRXCLK", + "EMIOENET1GMIITXCLK", + "EMIOSDIO0CLKFB", + "EMIOSDIO1CLKFB", + "EMIOSPI0SCLKI", + "EMIOSPI1SCLKI", + "EMIOTRACECLK", + "EMIOTTC0CLKI[0]", + "EMIOTTC0CLKI[1]", + "EMIOTTC0CLKI[2]", + "EMIOTTC1CLKI[0]", + "EMIOTTC1CLKI[1]", + "EMIOTTC1CLKI[2]", + "EMIOWDTCLKI", + "FCLKCLKTRIGN[0]", + "FCLKCLKTRIGN[1]", + "FCLKCLKTRIGN[2]", + "FCLKCLKTRIGN[3]", + "MAXIGP0ACLK", + "MAXIGP1ACLK", + "SAXIACPACLK", + "SAXIGP0ACLK", + "SAXIGP1ACLK", + "SAXIHP0ACLK", + "SAXIHP1ACLK", + "SAXIHP2ACLK", + "SAXIHP3ACLK", + ) + ), + "sources": frozenset( + ( + "FCLKCLK[0]", + "FCLKCLK[1]", + "FCLKCLK[2]", + "FCLKCLK[3]", + "EMIOSDIO0CLK", + "EMIOSDIO1CLK", + # There are also EMIOSPI[01]CLKO and EMIOSPI[01]CLKTN but seem + # to be more of a GPIO outputs than clock sources for the FPGA + # fabric. 
+ ) + ), + "type": "PS7", + }, + "IBUF_VPR": { + "sources": frozenset(("O",)), + "sinks": frozenset(("I",)), + "type": "IBUF", + }, + "IBUFDS_GTE2_VPR": { + "sources": frozenset(("O",)), + "sinks": frozenset(), + "type": "IBUFDS_GTE2", + }, + "GTPE2_COMMON_VPR": { + "sources": frozenset(), + "sinks": frozenset(("GTREFCLK0", "GTREFCLK1")), + "type": "GTPE2_COMMON", + }, + "GTPE2_CHANNEL_VPR": { + "sources": frozenset(("TXOUTCLK", "RXOUTCLK")), + "sinks": frozenset(), + "type": "GTPE2_CHANNEL", + }, +} + +GTP_PRIMITIVES = ["IBUFDS_GTE2", "GTPE2_COMMON", "GTPE2_CHANNEL"] + + +def eprint(*args, **kwargs): + print(*args, file=sys.stderr, **kwargs) + + +def get_cmt(cmt_dict, loc): + """Returns the clock region of an input location.""" + for k, v in cmt_dict.items(): + for (x, y) in v["vpr_loc"]: + if x == loc[0] and y == loc[1]: + return v["clock_region"] + + return None + + +class VprGrid(object): + """This class contains a set of dictionaries helpful + to have a fast lookup at the various coordinates mapping.""" + + def __init__(self, vpr_grid_map, graph_limit): + self.site_dict = dict() + self.site_type_dict = dict() + self.cmt_dict = dict() + self.tile_dict = dict() + self.vpr_loc_cmt = dict() + self.canon_loc = dict() + + if graph_limit is not None: + limits = graph_limit.split(",") + xmin, ymin, xmax, ymax = [int(x) for x in limits] + + with open(vpr_grid_map, "r") as csv_vpr_grid: + csv_reader = csv.DictReader(csv_vpr_grid) + for row in csv_reader: + site_name = row["site_name"] + site_type = row["site_type"] + phy_tile = row["physical_tile"] + vpr_x = row["vpr_x"] + vpr_y = row["vpr_y"] + can_x = row["canon_x"] + can_y = row["canon_y"] + clk_region = row["clock_region"] + connected_to_site = row["connected_to_site"] + + if graph_limit is not None: + if int(can_x) < xmin or int(can_x) > xmax: + continue + if int(can_y) < ymin or int(can_y) > ymax: + continue + + clk_region = None if not clk_region else int(clk_region) + + # Generating the site dictionary + self.site_dict[site_name] = { + "type": site_type, + "tile": phy_tile, + "vpr_loc": (int(vpr_x), int(vpr_y)), + "canon_loc": (int(can_x), int(can_y)), + "clock_region": clk_region, + "connected_to_site": connected_to_site, + } + + # Generating site types dictionary. + if site_type not in self.site_type_dict: + self.site_type_dict[site_type] = [] + + self.site_type_dict[site_type].append((site_name, phy_tile, clk_region)) + + # Generating the cmt dictionary. + # Each entry has: + # - canonical location + # - a list of vpr coordinates + # - clock region + if phy_tile not in self.cmt_dict: + self.cmt_dict[phy_tile] = { + "canon_loc": (int(can_x), int(can_y)), + "vpr_loc": [(int(vpr_x), int(vpr_y))], + "clock_region": clk_region, + } + else: + self.cmt_dict[phy_tile]["vpr_loc"].append((int(vpr_x), int(vpr_y))) + + # Generating the tile dictionary. + # Each tile has a list of (site, site_type) pairs + if phy_tile not in self.tile_dict: + self.tile_dict[phy_tile] = [] + + self.tile_dict[phy_tile].append((site_name, site_type)) + + self.vpr_loc_cmt[(int(vpr_x), int(vpr_y))] = clk_region + + self.canon_loc[(int(vpr_x), int(vpr_y))] = (int(can_x), int(can_y)) + + def get_site_dict(self): + return self.site_dict + + def get_site_type_dict(self): + return self.site_type_dict + + def get_cmt_dict(self): + return self.cmt_dict + + def get_tile_dict(self): + return self.tile_dict + + def get_vpr_loc_cmt(self): + """ + Returns a dictionary containing the mapping between + VPR physical locations to the belonging clock region. 
+ + Dictionary content: + - key : (x, y) coordinate on the VPR grid + - value : clock region corresponding to the (x, y) coordinates + """ + return self.vpr_loc_cmt + + def get_canon_loc(self): + return self.canon_loc + + +class ClockPlacer(object): + def __init__(self, vpr_grid, io_locs, blif_data, roi, graph_limit, allow_bufg_logic_sources=False): + + self.roi = roi + self.cmt_to_bufg_tile = {} + self.bufg_from_cmt = { + "TOP": [], + "BOT": [], + } + self.pll_cmts = set() + self.mmcm_cmts = set() + self.gtp_cmts = set() + + cmt_dict = vpr_grid.get_cmt_dict() + site_type_dict = vpr_grid.get_site_type_dict() + + try: + top_cmt_tile = next(k for k, v in cmt_dict.items() if k.startswith("CLK_BUFG_TOP")) + _, thresh_top_y = cmt_dict[top_cmt_tile]["canon_loc"] + except StopIteration: + thresh_top_y = None + + try: + bot_cmt_tile = next(k for k, v in cmt_dict.items() if k.startswith("CLK_BUFG_BOT")) + _, thresh_bot_y = cmt_dict[bot_cmt_tile]["canon_loc"] + except StopIteration: + thresh_bot_y = None + + if graph_limit is None: + assert thresh_top_y is not None, "BUFG sites in the top half of the device not found" + assert thresh_bot_y is not None, "BUFG sites in the bottom half of the device not found" + else: + assert ( + thresh_top_y is not None or thresh_bot_y is not None + ), "The device grid does not contain any BUFG sites" + + for k, v in cmt_dict.items(): + clock_region = v["clock_region"] + x, y = v["canon_loc"] + if clock_region is None: + continue + elif clock_region in self.cmt_to_bufg_tile: + continue + elif thresh_top_y is not None and y <= thresh_top_y: + self.cmt_to_bufg_tile[clock_region] = "TOP" + elif thresh_bot_y is not None and y >= thresh_bot_y: + self.cmt_to_bufg_tile[clock_region] = "BOT" + + for _, _, clk_region in site_type_dict["PLLE2_ADV"]: + self.pll_cmts.add(clk_region) + + if any(site in GTP_PRIMITIVES for site in site_type_dict): + assert "IBUFDS_GTE2" in site_type_dict + for _, _, clk_region in site_type_dict["IBUFDS_GTE2"]: + self.gtp_cmts.add(clk_region) + + for _, _, clk_region in site_type_dict["MMCME2_ADV"]: + self.mmcm_cmts.add(clk_region) + + self.input_pins = {} + if not self.roi: + for input_pin in blif_data["inputs"]["args"]: + if input_pin not in io_locs.keys(): + continue + + loc = io_locs[input_pin] + cmt = get_cmt(cmt_dict, loc) + assert cmt is not None, loc + + self.input_pins[input_pin] = cmt + + self.clock_blocks = {} + + self.clock_sources = {} + self.clock_sources_cname = {} + + self.clock_cmts = {} + + if "subckt" not in blif_data.keys(): + return + + for subckt in blif_data["subckt"]: + if "cname" not in subckt: + continue + bel = subckt["args"][0] + + assert "cname" in subckt and len(subckt["cname"]) == 1, subckt + + if bel not in CLOCKS: + continue + + cname = subckt["cname"][0] + + clock = { + "name": cname, + "subckt": bel, + "sink_nets": [], + "source_nets": [], + } + + sources = CLOCKS[bel]["sources"] + + ports = dict(arg.split("=", maxsplit=1) for arg in subckt["args"][1:]) + + for source in sources: + source_net = ports[source] + if source_net == "$true" or source_net == "$false": + continue + + self.clock_sources[source_net] = [] + self.clock_sources_cname[source_net] = cname + clock["source_nets"].append(source_net) + + self.clock_blocks[cname] = clock + + # Both PS7 and BUFGCTRL has specialized constraints, + # do not bind based on input pins. 
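+            # For intuition: a (hypothetical) eblif cell such as
+            #
+            #   .subckt BUFGCTRL_VPR I0=clk_in O=clk_g
+            #   .cname bufg_inst
+            #
+            # was recorded above as clock_blocks["bufg_inst"] with
+            # source_nets == ["clk_g"]; clock_sources["clk_g"] stays empty
+            # until the sink scan below finds cells driven by "clk_g".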
+            if bel not in ["PS7_VPR", "BUFGCTRL_VPR"]:
+                for port in ports.values():
+                    if port not in io_locs:
+                        continue
+
+                    if cname in self.clock_cmts:
+                        assert_out = (cname, port, self.clock_cmts[cname], self.input_pins[port])
+                        assert self.clock_cmts[cname] == self.input_pins[port], assert_out
+                    else:
+                        self.clock_cmts[cname] = self.input_pins[port]
+
+        for subckt in blif_data["subckt"]:
+            if "cname" not in subckt:
+                continue
+
+            bel = subckt["args"][0]
+            if bel not in CLOCKS:
+                continue
+
+            sinks = CLOCKS[bel]["sinks"]
+            ports = dict(arg.split("=", maxsplit=1) for arg in subckt["args"][1:])
+
+            assert "cname" in subckt and len(subckt["cname"]) == 1, subckt
+            cname = subckt["cname"][0]
+            clock = self.clock_blocks[cname]
+
+            for sink in sinks:
+                if sink not in ports:
+                    continue
+
+                sink_net = ports[sink]
+                if sink_net == "$true" or sink_net == "$false":
+                    continue
+
+                clock["sink_nets"].append(sink_net)
+
+                if sink_net not in self.input_pins and sink_net not in self.clock_sources:
+
+                    # Allow IBUFs. At this point we cannot distinguish between
+                    # IBUFs for clock and logic.
+                    if bel == "IBUF_VPR":
+                        continue
+
+                    # Allow BUFGs to be driven by generic sources, but only
+                    # when enabled.
+                    if bel == "BUFGCTRL_VPR" and allow_bufg_logic_sources:
+                        continue
+
+                    # The clock source comes from logic; disallow that.
+                    eprint(
+                        "The clock net '{}' driving '{}' is sourced by logic, which is not allowed!".format(
+                            sink_net, bel
+                        )
+                    )
+                    exit(-1)
+
+                if sink_net in self.input_pins:
+                    if sink_net not in self.clock_sources:
+                        self.clock_sources[sink_net] = []
+
+                    self.clock_sources[sink_net].append(cname)
+
+    def assign_cmts(self, vpr_grid, blocks, block_locs):
+        """Assign CMTs to subckts that require them (e.g. BUFG/PLL/MMCM)."""
+
+        problem = constraint.Problem()
+
+        site_dict = vpr_grid.get_site_dict()
+        vpr_loc_cmt = vpr_grid.get_vpr_loc_cmt()
+
+        # Any clocks that have LOCs already defined should be respected.
+        # Store the parent CMT in clock_cmts.
+        for block, loc in block_locs.items():
+            if block in self.clock_blocks:
+
+                clock = self.clock_blocks[block]
+                if CLOCKS[clock["subckt"]]["type"] == "BUFGCTRL":
+                    pass
+                else:
+                    site = site_dict[loc.replace('"', "")]
+                    assert site is not None, (block, loc)
+
+                    clock_region_pkey = site["clock_region"]
+
+                    if block in self.clock_cmts:
+                        assert clock_region_pkey == self.clock_cmts[block], (block, clock_region_pkey)
+                    else:
+                        self.clock_cmts[block] = clock_region_pkey
+
+        # Any clocks that were previously constrained must be preserved
+        for block, (loc_x, loc_y, _) in blocks.items():
+            if block in self.clock_blocks:
+
+                clock = self.clock_blocks[block]
+                if CLOCKS[clock["subckt"]]["type"] == "BUFGCTRL":
+                    pass
+                else:
+                    cmt = vpr_loc_cmt[(loc_x, loc_y)]
+
+                    if block in self.clock_cmts:
+                        assert cmt == self.clock_cmts[block], (block, cmt)
+                    else:
+                        self.clock_cmts[block] = cmt
+
+        # Non-clock IBUFs increase the solution space if they are not
+        # constrained by a LOC. Given that non-clock IBUFs don't need to be
+        # solved for, remove them.
+ unused_blocks = set() + any_variable = False + for clock_name, clock in self.clock_blocks.items(): + used = False + if CLOCKS[clock["subckt"]]["type"] != "IBUF": + used = True + + for source_net in clock["source_nets"]: + if len(self.clock_sources[source_net]) > 0: + used = True + break + + if not used: + unused_blocks.add(clock_name) + continue + + if CLOCKS[clock["subckt"]]["type"] == "BUFGCTRL": + problem.addVariable(clock_name, list(self.bufg_from_cmt.keys())) + any_variable = True + else: + # Constrained objects have a domain of 1 + if clock_name in self.clock_cmts: + problem.addVariable(clock_name, (self.clock_cmts[clock_name],)) + any_variable = True + else: + problem.addVariable(clock_name, list(self.cmt_to_bufg_tile.keys())) + any_variable = True + + # Remove unused blocks from solutions. + for clock_name in unused_blocks: + del self.clock_blocks[clock_name] + + exclusive_clocks = defaultdict(set) + ibuf_cmt_sinks = defaultdict(set) + + for net in self.clock_sources: + + is_net_ibuf = False + + if net not in self.input_pins: + source_clock_name = self.clock_sources_cname[net] + if source_clock_name not in unused_blocks: + source_block = self.clock_blocks[source_clock_name] + if CLOCKS[source_block["subckt"]]["type"] == "IBUF": + is_net_ibuf = True + + for clock_name in self.clock_sources[net]: + + if clock_name in unused_blocks: + continue + + clock = self.clock_blocks[clock_name] + + if CLOCKS[clock["subckt"]]["type"] == "PLLE2_ADV": + problem.addConstraint(lambda cmt: cmt in self.pll_cmts, (clock_name,)) + exclusive_clocks["PLLE2_ADV"].add(clock_name) + if is_net_ibuf: + ibuf_cmt_sinks[source_clock_name].add(clock_name) + + if CLOCKS[clock["subckt"]]["type"] == "MMCME2_ADV": + problem.addConstraint(lambda cmt: cmt in self.mmcm_cmts, (clock_name,)) + exclusive_clocks["MMCME2_ADV"].add(clock_name) + if is_net_ibuf: + ibuf_cmt_sinks[source_clock_name].add(clock_name) + + if net in self.input_pins: + if CLOCKS[clock["subckt"]]["type"] == "BUFGCTRL": + # BUFGCTRL do not get a CMT, instead they get either top or + # bottom. + problem.addConstraint( + lambda clock: clock == self.cmt_to_bufg_tile[self.input_pins[net]], (clock_name,) + ) + elif CLOCKS[clock["subckt"]]["type"] != "IBUF": + problem.addConstraint(lambda clock: clock == self.input_pins[net], (clock_name,)) + else: + source_clock_name = self.clock_sources_cname[net] + source_block = self.clock_blocks[source_clock_name] + is_net_bufg = CLOCKS[source_block["subckt"]]["type"] == "BUFGCTRL" + + if is_net_bufg: + continue + + if CLOCKS[clock["subckt"]]["type"] == "BUFGCTRL": + # BUFG's need to be in the right half. + problem.addConstraint( + lambda source, sink_bufg: self.cmt_to_bufg_tile[source] == sink_bufg, + (source_clock_name, clock_name), + ) + elif not is_net_ibuf: + problem.addConstraint(lambda source, sink: source == sink, (source_clock_name, clock_name)) + + # Ensure that in case of an IBUF driving multiple CMTs at least one of + # them is in the same clock region as the IBUF. + for source, sinks in ibuf_cmt_sinks.items(): + problem.addConstraint( + lambda s, *k: s in k, + ( + source, + *sinks, + ), + ) + + # Prevent constraining exclusive clock source sets to the same resource + for typ, clocks in exclusive_clocks.items(): + problem.addConstraint(constraint.AllDifferentConstraint(), list(clocks)) + + if any_variable: + solutions = problem.getSolutions() + assert len(solutions) > 0, "No solution found!" 
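+            # A solution maps each variable (cell cname) to its assignment,
+            # e.g. (hypothetical names) {"pll_inst": 1, "bufg_inst": "TOP"}:
+            # clock-region numbers for CMT primitives and "TOP"/"BOT" halves
+            # for BUFGCTRLs; solutions[0] below applies the first one found.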
+ + self.clock_cmts.update(solutions[0]) + + def place_clocks(self, canon_grid, vpr_grid, loc_in_use, block_locs, blocks, grid_capacities): + self.assign_cmts(vpr_grid, blocks, block_locs) + + site_type_dict = vpr_grid.get_site_type_dict() + + # Key is (type, clock_region_pkey) + available_placements = {} + available_locs = {} + vpr_locs = {} + + for clock_type in CLOCKS.values(): + if clock_type == "IBUF" or clock_type["type"] not in site_type_dict: + continue + + for loc, tile_name, clock_region_pkey in site_type_dict[clock_type["type"]]: + if clock_type["type"] == "BUFGCTRL": + if "_TOP_" in tile_name: + key = (clock_type["type"], "TOP") + elif "_BOT_" in tile_name: + key = (clock_type["type"], "BOT") + else: + key = (clock_type["type"], clock_region_pkey) + + if key not in available_placements: + available_placements[key] = [] + available_locs[key] = [] + + available_placements[key].append(loc) + vpr_loc = get_vpr_coords_from_site_name(canon_grid, vpr_grid, loc, grid_capacities) + + if vpr_loc is None: + continue + + available_locs[key].append(vpr_loc) + vpr_locs[loc] = vpr_loc + + for clock_name, clock in self.clock_blocks.items(): + # All clocks should have an assigned CMTs at this point. + assert clock_name in self.clock_cmts, clock_name + + bel_type = CLOCKS[clock["subckt"]]["type"] + + # Skip LOCing the PS7. There is only one + if bel_type == "PS7": + continue + + if bel_type == "IBUF": + continue + + key = (bel_type, self.clock_cmts[clock_name]) + + if clock_name in blocks: + # This block has a LOC constraint from the user, verify that + # this LOC constraint makes sense. + assert blocks[clock_name] in available_locs[key], (clock_name, blocks[clock_name], available_locs[key]) + continue + + loc = None + for potential_loc in sorted(available_placements[key]): + # This block has no existing placement, make one + vpr_loc = vpr_locs[potential_loc] + if vpr_loc in loc_in_use: + continue + + loc_in_use.add(vpr_loc) + yield clock_name, potential_loc + loc = potential_loc + break + + # No free LOC!!! + assert loc is not None, (clock_name, available_placements[key]) + + def has_clock_nets(self): + return self.clock_sources + + +def get_tile_capacities(arch_xml_filename): + arch = ET.parse(arch_xml_filename, ET.XMLParser(remove_blank_text=True)) + root = arch.getroot() + + tile_capacities = {} + for el in root.iter("tile"): + tile_name = el.attrib["name"] + + tile_capacities[tile_name] = 0 + + for sub_tile in el.iter("sub_tile"): + capacity = 1 + + if "capacity" in sub_tile.attrib: + capacity = int(sub_tile.attrib["capacity"]) + + tile_capacities[tile_name] += capacity + + grid = {} + for el in root.iter("single"): + x = int(el.attrib["x"]) + y = int(el.attrib["y"]) + grid[(x, y)] = tile_capacities[el.attrib["type"]] + + return grid + + +def get_vpr_coords_from_site_name(canon_grid, vpr_grid, site_name, grid_capacities): + site_name = site_name.replace('"', "") + + site_dict = vpr_grid.get_site_dict() + canon_loc = vpr_grid.get_canon_loc() + + tile = site_dict[site_name]["tile"] + + capacity = 0 + x, y = site_dict[site_name]["vpr_loc"] + canon_x, canon_y = canon_loc[(x, y)] + + if (x, y) in grid_capacities.keys(): + capacity = grid_capacities[(x, y)] + + if not capacity: + # If capacity is zero it means that the site is out of + # the ROI, hence the site needs to be skipped. 
+ return None + elif capacity == 1: + return (x, y, 0) + else: + sites = list(canon_grid.gridinfo_at_loc((canon_x, canon_y)).sites.keys()) + assert capacity == len(sites), (tile, capacity, (x, y)) + + instance_idx = sites.index(site_name) + + return (x, y, instance_idx) + + +def constrain_special_ios(canon_grid, vpr_grid, io_blocks, blif_data, blocks, place_constraints): + """ + There are special IOs which need extra handling when dealing with placement constraints. + + For instance, the IBUFDS_GTE2 primitive must be placed in correspondance with the location + of its input PADs, as no other route exists other than that. + + This function reads the connectivity of the top level nets, which have been previously + constrained, and correctly constrains those blocks which require special handling. + """ + + if "subckt" not in blif_data: + return + + BEL_TYPES = ["IBUFDS_GTE2_VPR", "GTPE2_CHANNEL_VPR"] + SPECIAL_IPADS = ["IPAD_GTP_VPR"] + + special_io_map = dict() + for subckt in blif_data["subckt"]: + if "cname" not in subckt: + continue + bel = subckt["args"][0] + + if bel not in SPECIAL_IPADS: + continue + + top_net = None + renamed_net = None + for port in subckt["args"][1:]: + port_name, net = port.split("=") + + if port_name == "I": + assert net in io_blocks, (net, io_blocks) + top_net = net + elif port_name == "O": + renamed_net = net + else: + assert False, "ERROR: Special IPAD ports not recognized!" + + special_io_map[renamed_net] = top_net + + blocks_to_constrain = set() + for subckt in blif_data["subckt"]: + if "cname" not in subckt: + continue + bel = subckt["args"][0] + + if bel not in BEL_TYPES: + continue + + assert "cname" in subckt and len(subckt["cname"]) == 1, subckt + cname = subckt["cname"][0] + + for port in subckt["args"][1:]: + _, net = port.split("=") + + if net not in special_io_map: + continue + + net = special_io_map[net] + if net in io_blocks: + x, y, z = io_blocks[net] + + canon_loc = vpr_grid.get_canon_loc()[(x, y)] + + gridinfo = canon_grid.gridinfo_at_loc(canon_loc) + sites = sorted(gridinfo.sites.keys()) + site_name = sites[z] + + connected_io_site = vpr_grid.get_site_dict()[site_name]["connected_to_site"] + assert connected_io_site, (site_name, bel, cname) + + new_z = sites.index(connected_io_site) + loc = (x, y, new_z) + + blocks_to_constrain.add((cname, loc)) + + for block, vpr_loc in blocks_to_constrain: + place_constraints.constrain_block(block, vpr_loc, "Constraining block {}".format(block)) + + blocks[block] = vpr_loc + + +def p_main( + net, + vpr_grid_map, + arch, + part, + blif, + db_root, + input=sys.stdin, + output=sys.stdout, + roi=False, + allow_bufg_logic_sources=False, + graph_limit=None, +): + device_families = { + "xc7a": "artix7", + "xc7k": "kintex7", + "xc7z": "zynq7", + "xc7s": "spartan7", + } + + device_family = None + for device in device_families: + if part.startswith(device): + device_family = device_families[device] + break + + assert device_family + + db_root = str(Path(db_root) / device_family) + db = prjxray.db.Database(db_root, part) + canon_grid = db.grid() + + io_blocks = {} + loc_in_use = set() + for line in input: + output.write(line) + + if line[0] == "#": + continue + block, x, y, z = line.strip().split()[0:4] + + io_blocks[block] = (int(x), int(y), int(z)) + loc_in_use.add(io_blocks[block]) + + place_constraints = PlaceConstraints(net) + place_constraints.load_loc_sites_from_net_file() + + grid_capacities = get_tile_capacities(arch) + + eblif_data = parse_blif(blif) + + vpr_grid = VprGrid(vpr_grid_map, graph_limit) + + # 
Constrain IO blocks and LOCed resources
+    blocks = {}
+    block_locs = {}
+    for block, loc in place_constraints.get_loc_sites():
+        vpr_loc = get_vpr_coords_from_site_name(canon_grid, vpr_grid, loc, grid_capacities)
+        loc_in_use.add(vpr_loc)
+
+        if block in io_blocks:
+            assert io_blocks[block] == vpr_loc, (block, vpr_loc, io_blocks[block])
+
+        blocks[block] = vpr_loc
+        block_locs[block] = loc
+
+        place_constraints.constrain_block(block, vpr_loc, "Constraining block {}".format(block))
+
+    # Constrain blocks directly connected to IO in the same (x, y) location
+    constrain_special_ios(canon_grid, vpr_grid, io_blocks, eblif_data, blocks, place_constraints)
+
+    # Constrain clock resources
+    clock_placer = ClockPlacer(vpr_grid, io_blocks, eblif_data, roi, graph_limit, allow_bufg_logic_sources)
+    if clock_placer.has_clock_nets():
+        for block, loc in clock_placer.place_clocks(
+            canon_grid, vpr_grid, loc_in_use, block_locs, blocks, grid_capacities
+        ):
+            vpr_loc = get_vpr_coords_from_site_name(canon_grid, vpr_grid, loc, grid_capacities)
+            place_constraints.constrain_block(block, vpr_loc, "Constraining clock block {}".format(block))
+
+    """ Constrain IDELAYCTRL sites
+
+    Prior to the invocation of this script, the IDELAYCTRL sites must have been
+    replicated according to the IDELAY specifications.
+    There can be three different usage combinations of IDELAYCTRL and IDELAYs in a design:
+        1. IODELAYs and IDELAYCTRLs can be constrained to banks as needed,
+           through an in-design LOC constraint.
+           Manual replication of the constrained IDELAYCTRLs is necessary to provide a
+           controller for each bank.
+        2. IODELAYs and a single IDELAYCTRL can be left entirely unconstrained,
+           becoming a default group. The IDELAYCTRL is replicated depending on bank usage.
+           Replication must have happened prior to this step.
+        3. One or more IODELAY_GROUPs can be defined that contain IODELAYs and a single
+           IDELAYCTRL each. These components can be otherwise unconstrained and the IDELAYCTRL
+           for each group has to be replicated as needed (depending on bank usage).
+           NOTE: IODELAY_GROUPs are not enabled at the moment.
+    """
+    idelayctrl_cmts = set()
+    idelay_instances = place_constraints.get_used_instances("IDELAYE2")
+    for inst in idelay_instances:
+        x, y, z = io_blocks[inst]
+        idelayctrl_cmt = vpr_grid.get_vpr_loc_cmt()[(x, y)]
+        idelayctrl_cmts.add(idelayctrl_cmt)
+
+    idelayctrl_instances = place_constraints.get_used_instances("IDELAYCTRL")
+
+    assert len(idelayctrl_cmts) <= len(
+        idelayctrl_instances
+    ), "There are fewer IDELAYCTRL blocks than IO banks using IDELAYs."
+
+    idelayctrl_sites = dict()
+    for site_name, _, clk_region in vpr_grid.get_site_type_dict()["IDELAYCTRL"]:
+        if clk_region in idelayctrl_cmts:
+            idelayctrl_sites[clk_region] = site_name
+
+    # Check and remove user constrained IDELAYCTRLs. Iterate over a copy, as
+    # matching entries are removed from the original list while scanning.
+    for idelayctrl_block in list(idelayctrl_instances):
+        if idelayctrl_block in blocks.keys():
+            x, y, _ = blocks[idelayctrl_block]
+            idelayctrl_cmt = vpr_grid.get_vpr_loc_cmt()[(x, y)]
+
+            assert idelayctrl_cmt in idelayctrl_cmts
+
+            idelayctrl_cmts.remove(idelayctrl_cmt)
+            idelayctrl_instances.remove(idelayctrl_block)
+
+    # TODO: Add possibility to bind IDELAY banks to IDELAYCTRL sites using
+    # the IDELAY_GROUP attribute.
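+    #
+    # For reference, such groups are declared in the design sources roughly as
+    # follows (hypothetical group and instance names):
+    #
+    #   (* IODELAY_GROUP = "grp0" *) IDELAYCTRL idc_inst (...);
+    #   (* IODELAY_GROUP = "grp0" *) IDELAYE2 #(...) idly_inst (...);
+    #
+    # i.e. each group ties a set of IDELAYE2s to exactly one IDELAYCTRL.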
+    for cmt, idelayctrl_block in zip(idelayctrl_cmts, idelayctrl_instances):
+        x, y = vpr_grid.get_site_dict()[idelayctrl_sites[cmt]]["vpr_loc"]
+        vpr_loc = (x, y, 0)
+
+        place_constraints.constrain_block(
+            idelayctrl_block, vpr_loc, "Constraining idelayctrl block {}".format(idelayctrl_block)
+        )
+
+    if len(idelayctrl_instances) > 0:
+        print("Warning: IDELAY_GROUPS parameters are currently being ignored!", file=sys.stderr)
+
+    place_constraints.output_place_constraints(output)
+
+
+def main(
+    net,
+    vpr_grid_map,
+    arch,
+    part,
+    blif,
+    db_root=None,
+    input=None,
+    output=None,
+    roi=False,
+    allow_bufg_logic_sources=False,
+    graph_limit=None,
+):
+    p_main(
+        net=Path(net).open("r"),
+        vpr_grid_map=vpr_grid_map,
+        arch=arch,
+        # NOTE: subprocess_run returns a CompletedProcess, so its stdout must
+        # be decoded to obtain the database path string.
+        db_root=environ.get("DATABASE_DIR", subprocess_run("prjxray-config", capture_output=True).stdout.decode().strip())
+        if db_root is None
+        else db_root,
+        part=part,
+        blif=Path(blif).open("r"),
+        input=sys.stdin if input is None else Path(input).open("r"),
+        output=sys.stdout if output is None else Path(output).open("w"),
+        roi=roi,
+        allow_bufg_logic_sources=allow_bufg_logic_sources,
+        graph_limit=graph_limit,
+    )
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Create a VPR placement constraints file.")
+    parser.add_argument(
+        "--input", "-i", "-I", type=argparse.FileType("r"), default=sys.stdin, help="The input constraints place file"
+    )
+    parser.add_argument(
+        "--output",
+        "-o",
+        "-O",
+        type=argparse.FileType("w"),
+        default=sys.stdout,
+        help="The output constraints place file",
+    )
+    parser.add_argument("--net", "-n", type=argparse.FileType("r"), required=True, help="top.net file")
+    parser.add_argument("--vpr_grid_map", help="Map of canonical to VPR grid locations", required=True)
+    parser.add_argument("--arch", help="Arch XML", required=True)
+    parser.add_argument("--db_root", required=True)
+    parser.add_argument("--part", required=True)
+    parser.add_argument("--blif", "-b", type=argparse.FileType("r"), required=True, help="BLIF / eBLIF file")
+    parser.add_argument("--roi", action="store_true", help="Using ROI")
+    parser.add_argument(
+        "--allow-bufg-logic-sources", action="store_true", help="When set allows BUFGs to be driven by logic"
+    )
+    parser.add_argument("--graph_limit", help="Graph limit parameters")
+    args = parser.parse_args()
+
+    p_main(
+        net=args.net,
+        vpr_grid_map=args.vpr_grid_map,
+        arch=args.arch,
+        db_root=args.db_root,
+        part=args.part,
+        blif=args.blif,
+        input=args.input,
+        output=args.output,
+        roi=args.roi,
+        allow_bufg_logic_sources=args.allow_bufg_logic_sources,
+        graph_limit=args.graph_limit,
+    )
diff --git a/f4pga/utils/xc7/fix_xc7_carry.py b/f4pga/utils/xc7/fix_xc7_carry.py
new file mode 100644
index 0000000..a8ec19d
--- /dev/null
+++ b/f4pga/utils/xc7/fix_xc7_carry.py
@@ -0,0 +1,609 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019-2022 F4PGA Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +# SPDX-License-Identifier: Apache-2.0 + +""" +Script for addressing CARRY4 output congestion in elaborated netlists. + +Usage: + + python3 fix_carry.py < input-netlist-json > output-netlist-json + +Description: + + In the 7-series SLICEL (and SLICEM) sites, there can be output congestion + if both the CO and O of the CARRY4 are used. This congestion can be + avoided by using a transparent/open latch or register on the output of the + CARRY4. + + VPR does not currently support either of those options, so for now, if + both CO and O are used, the CO output is converted into a LUT equation to + recompute the CO output from O, DI and S. See carry_map.v and + clean_carry_map.v for details. + + If VPR could emit the transparent/open latch on output congestion, this + would no longer be required. The major problem with transparent latch + support is that it requires constants to be routed to the G/GE/CLR/PRE + ports, which VPR cannot express as a result of packing. + + This script identifies CARRY4 chains in the netlist, identifies if there + is output congestion on the O and CO ports, and marks the congestion by + changing CARRY_CO_DIRECT (e.g. directly use the CO port) to CARRY_CO_LUT + (compute the CO value using a LUT equation). + + +Diagram showing one row of the 7-series CLE, focusing on O/CO congestion. +This diagram shows that if both the O and CO outputs are needed, once must +pass through the flip flop (xFF in the diagram). + + CLE Row + ++--------------------------------------------------------------------------+ +| | +| | +| +---+ | +| | + | +| | + | +| +-------->+ O + | +| CO CHAIN | | + | +| | | +---------------------> xMUX +| ^ | +---->+ CO + | +| | | | | + | +| | | | | + | +| +---------+----------+ | | | + | +| | | | | +---+ | +| | CARRY ROW | | | | +| +--->+ S O +--------+ | xOUTMUX | +| | | | | | +| | | + | | +| +--->+ DI CO +-------+o+--+ | +| | CI CHAIN | + | | +| | | | | | +| +---------+----------+ | | xFFMUX | +| ^ | | | +| | | | +---+ | +| + | | | + | +| | + | + +-----------+ | +| +--+o+--->+ O + | | | +| + | + | xFF | | +| | | +->--D---- Q +------> xQ +| | | + | | | +| +---->+ CO + | | | +| | + +-----------+ | +| | + | +| +---+ | +| | +| | ++--------------------------------------------------------------------------+ + + +This script operates on a slightly different cell structure than a plain CARRY4. 
+carry_map.v converts the CARRY4 into: + + +------------------+ +-----------------+ + | | | | + | CO3 +->+ CARRY_CO_DIRECT | + | | | | + | DI3 | +-----------------+ + | | + | S3 O3 | + | | + | DI2 | +-----------------+ + | | | | + | S2 CO2 +->+ CARRY_CO_DIRECT | + | | | | + | DI1 | +-----------------+ + | | + | S1 O2 | + | CARRY4 | + | DI0 (chained) | +-----------------+ + | | | | + | S0 CO1 +->+ CARRY_CO_DIRECT | + | | | | + | CYINIT | +-----------------+ + | | + +-----------------+ | O1 | + | | | | + +->+ CARRY_COUT_PLUG +->+ CI | +-----------------+ + | | | | | | | + | +-----------------+ | CO0 +->+ CARRY_CO_DIRECT | + | | | | | + | | | +-----------------+ + +-------------------+ | | + | | O0 | ++------------------+ +-----------------+ | | | +| | | | | +------------------+ +| CO3 +->+ CARRY_CO_DIRECT +-+ +| | | | +| DI3 | +-----------------+ +| | +| S3 O3 | +| | +| DI2 | +-----------------+ +| | | | +| S2 CO2 +->+ CARRY_CO_DIRECT | +| | | | +| DI1 | +-----------------+ +| | +| S1 O2 | +| CARRY4 | +| DI0 (root) | +-----------------+ +| | | | +| S0 CO1 +->+ CARRY_CO_DIRECT | +| | | | +| CYINIT | +-----------------+ +| | +| O1 | +| | +| CI | +-----------------+ +| | | | +| CO0 +->+ CARRY_CO_DIRECT | +| | | | +| | +-----------------+ +| | +| O0 | +| | ++------------------+ + +Each CARRY4 spans the 4 rows of the SLICEL/SLICEM. +Row 0 is the S0/DI0/O0/CO0 ports, row 1 is S1/DI1/O1/CO1 ports, etc. + +So there are five cases the script has to handle: + + - No congestion is present between O and CO -> + + Do nothing. + + - Congestion is present on rows 0-2 and row above is in use -> + + Change CARRY_CO_DIRECT to CARRY_CO_LUT. + + Routing and LUT delays are incurred in this case. + + - Congestion is present on rows 0-2 and row above is not in use -> + + Remap CO to O from the row above, and set S on the next row to 0 to + ensure O outputs CI from the row below. + + No additional delays for this change. + + - Congestion is present on row 3 and CO3 is not connected to another CARRY -> + + Change CARRY_CO_DIRECT to CARRY_CO_TOP_POP. This adds 1 dummy layer to + the carry chain to output the CO. + + No additional delays for this change. + + - Congestion is present on row 3 and CO3 is connected directly to another + CARRY4 -> + + Change CARRY_CO_DIRECT to CARRY_CO_LUT *and* change the chained + CARRY_COUT_PLUG to be directly connected to the previous CO3. + + Routing and LUT delays are incurred in this case. 
+
+   Diagram for this case:
+
+ +-------------------+ +-----------------+
+ | | | |
+ | CO3 +->+ CARRY_CO_DIRECT |
+ | | | |
+ | DI3 | +-----------------+
+ | |
+ | S3 O3 |
+ | |
+ | DI2 | +-----------------+
+ | | | |
+ | S2 CO2 +->+ CARRY_CO_DIRECT |
+ | | | |
+ | DI1 | +-----------------+
+ | |
+ | S1 O2 |
+ | CARRY4 |
+ | DI0 (chained) | +-----------------+
+ | | | |
+ | S0 CO1 +->+ CARRY_CO_DIRECT |
+ | | | |
+ | CYINIT | +-----------------+
+ | |
+ +-----------------+ | O1 |
+ | | | |
+ +->+ CARRY_COUT_PLUG +--->+ CI | +-----------------+
+ | | | | | | |
+ | +-----------------+ | CO0 +->+ CARRY_CO_DIRECT |
+ | | | | |
++-------------------+ | +-----------------+ | | +-----------------+
+| | | | | | |
+| CO3 +--+->+ CARRY_CO_LUT +-+ | O0 |
+| | | | | | |
+| DI3 | +-----------------+ | +-------------------+
+| | |
+| S3 O3 | +------>
+| |
+| DI2 | +-----------------+
+| | | |
+| S2 CO2 +---->+ CARRY_CO_DIRECT |
+| | | |
+| DI1 | +-----------------+
+| |
+| S1 O2 |
+| CARRY4 |
+| DI0 (root) | +-----------------+
+| | | |
+| S0 CO1 +---->+ CARRY_CO_DIRECT |
+| | | |
+| CYINIT | +-----------------+
+| |
+| O1 |
+| |
+| CI | +-----------------+
+| | | |
+| CO0 +---->+ CARRY_CO_DIRECT |
+| | | |
+| | +-----------------+
+| |
+| O0 |
+| |
++-------------------+
+
+After this script is run, clean_carry_map.v is used to convert CARRY_CO_DIRECT
+into a direct connection, and CARRY_CO_LUT is mapped to a LUT to compute the
+carry output.
+
+"""
+
+import json
+from sys import stdin, stdout
+
+
+def find_top_module(design):
+    """
+    Looks for the top-level module in the design. Returns its name. Raises
+    an exception if none is found.
+    """
+
+    for name, module in design["modules"].items():
+        attrs = module["attributes"]
+        if "top" in attrs and int(attrs["top"]) == 1:
+            return name
+
+    raise RuntimeError("No top-level module found in the design!")
+
+
+def find_carry4_chains(design, top_module, bit_to_cells):
+    """Identify CARRY4 carry chains starting from the root CARRY4.
+
+    All non-root CARRY4 cells should end up as part of a chain, otherwise
+    an assertion is raised.
+
+    Arguments:
+        design (dict) - "design" field from Yosys JSON format
+        top_module (str) - Name of top module.
+        bit_to_cells (dict) - Map of net bit identifier and cell information.
+            Computed in "create_bit_to_cell_map".
+
+    Returns:
+        list of list of strings - List of CARRY4 chains. Each chain is a list
+            of cellnames. The cells are listed in chain order, starting from
+            the root.
+
+    """
+    cells = design["modules"][top_module]["cells"]
+
+    used_carry4s = set()
+    root_carry4s = []
+    nonroot_carry4s = {}
+    for cellname in cells:
+        cell = cells[cellname]
+        if cell["type"] != "CARRY4_VPR":
+            continue
+        connections = cell["connections"]
+
+        if "CIN" in connections:
+            cin_connections = connections["CIN"]
+            assert len(cin_connections) == 1
+
+            # Go to the driver of CIN; it should be a CARRY_COUT_PLUG.
+            plug_cellname, port, bit_idx = bit_to_cells[cin_connections[0]][0]
+            plug_cell = cells[plug_cellname]
+            assert plug_cell["type"] == "CARRY_COUT_PLUG", plug_cellname
+            assert port == "COUT"
+
+            plug_connections = plug_cell["connections"]
+
+            cin_connections = plug_connections["CIN"]
+            assert len(cin_connections) == 1
+
+            # Go to the driver of CIN; it should be a CARRY_CO_DIRECT.
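+            # (The CARRY_CO_DIRECT sits on the upstream CARRY4's CO3 output,
+            # so the net bit on its CO input identifies that upstream CARRY4.
+            # The nonroot_carry4s map below keys on this bit; the chain walk
+            # later matches each cell's CO3 bits against those keys to stitch
+            # the chains together.)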
+            direct_cellname, port, bit_idx = bit_to_cells[cin_connections[0]][0]
+            direct_cell = cells[direct_cellname]
+            assert direct_cell["type"] == "CARRY_CO_DIRECT", direct_cellname
+            assert port == "OUT"
+
+            direct_connections = direct_cell["connections"]
+
+            co_connections = direct_connections["CO"]
+            assert len(co_connections) == 1
+
+            nonroot_carry4s[co_connections[0]] = cellname
+        else:
+            used_carry4s.add(cellname)
+            root_carry4s.append(cellname)
+
+    # Walk from each root CARRY4 to each child CARRY4 module.
+    chains = []
+    for cellname in root_carry4s:
+        chain = [cellname]
+
+        while True:
+            # Follow CO3 to the next CARRY4, if any.
+            cell = cells[cellname]
+            connections = cell["connections"]
+
+            co3_connections = connections.get("CO3", None)
+            if co3_connections is None:
+                # No next CARRY4, stop here.
+                break
+
+            found_next_link = False
+            for connection in co3_connections:
+                next_cellname = nonroot_carry4s.get(connection, None)
+                if next_cellname is not None:
+                    cellname = next_cellname
+                    used_carry4s.add(cellname)
+                    chain.append(cellname)
+                    found_next_link = True
+                    break
+
+            if not found_next_link:
+                break
+
+        chains.append(chain)
+
+    # Make sure all non-root CARRY4s were used.
+    for bit, cellname in nonroot_carry4s.items():
+        assert cellname in used_carry4s, (bit, cellname)
+
+    return chains
+
+
+def create_bit_to_cell_map(design, top_module):
+    """Create map from net bit identifier to cell information.
+
+    Arguments:
+        design (dict) - "design" field from Yosys JSON format
+        top_module (str) - Name of top module.
+
+    Returns:
+        bit_to_cells (dict) - Map of net bit identifier and cell information.
+
+    The map keys are the net bit identifiers used to mark which net a cell
+    port is connected to. The map values are a list of cell ports that are on
+    the net. The first element of the list is the driver port, and the
+    remaining elements are sink ports.
+
+    The list elements are 3-tuples with:
+        cellname (str) - The name of the cell this port belongs to.
+        port (str) - The name of the port this element is connected to.
+        bit_idx (int) - For multi-bit ports, a 0-based index into the port.
+
+    """
+    bit_to_cells = {}
+
+    cells = design["modules"][top_module]["cells"]
+
+    for cellname in cells:
+        cell = cells[cellname]
+        port_directions = cell["port_directions"]
+        for port, connections in cell["connections"].items():
+            is_output = port_directions[port] == "output"
+            for bit_idx, bit in enumerate(connections):
+
+                list_of_cells = bit_to_cells.get(bit, None)
+                if list_of_cells is None:
+                    list_of_cells = [None]
+                    bit_to_cells[bit] = list_of_cells
+
+                if is_output:
+                    # First element of list of cells is net driver.
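+                    # A net bit may only have one driver; tripping the assert
+                    # below means the input netlist has multiple outputs
+                    # driving the same net.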
+                    assert list_of_cells[0] is None, (bit, list_of_cells[0], cellname)
+                    list_of_cells[0] = (cellname, port, bit_idx)
+                else:
+                    list_of_cells.append((cellname, port, bit_idx))
+
+    return bit_to_cells
+
+
+def is_bit_used(bit_to_cells, bit):
+    """Is the net bit specified used by any sinks?"""
+    list_of_cells = bit_to_cells[bit]
+    return len(list_of_cells) > 1
+
+
+def is_bit_used_other_than_carry4_cin(design, top_module, bit, bit_to_cells):
+    """Is the net bit specified used by any sinks other than the carry chain?
+
+    Returns a 2-tuple: (True if a sink other than the next CARRY4's carry
+    input exists, name of the CARRY_CO_DIRECT cell on this bit).
+    """
+    cells = design["modules"][top_module]["cells"]
+    list_of_cells = bit_to_cells[bit]
+    assert len(list_of_cells) == 2, bit
+
+    direct_cellname, port, _ = list_of_cells[1]
+    direct_cell = cells[direct_cellname]
+    assert direct_cell["type"] == "CARRY_CO_DIRECT"
+    assert port == "CO"
+
+    # Follow to the OUT port of the CARRY_CO_DIRECT.
+    connections = direct_cell["connections"]["OUT"]
+    assert len(connections) == 1
+
+    for cellname, port, bit_idx in bit_to_cells[connections[0]][1:]:
+        cell = cells[cellname]
+        if cell["type"] == "CARRY_COUT_PLUG" and port == "CIN":
+            continue
+        else:
+            return True, direct_cellname
+
+    return False, direct_cellname
+
+
+def create_bit_to_net_map(design, top_module):
+    """Create map from net bit identifier to net information.
+
+    Arguments:
+        design (dict) - "design" field from Yosys JSON format
+        top_module (str) - Name of top module.
+
+    Returns:
+        bit_to_nets (dict) - Map of net bit identifier to net information.
+    """
+    bit_to_nets = {}
+
+    nets = design["modules"][top_module]["netnames"]
+
+    for net in nets:
+        for bit_idx, bit in enumerate(nets[net]["bits"]):
+            bit_to_nets[bit] = (net, bit_idx)
+
+    return bit_to_nets
+
+
+def fixup_cin(design, top_module, bit_to_cells, co_bit, direct_cellname):
+    """Reconnect the CARRY_COUT_PLUG.CIN driven by CARRY_CO_LUT.OUT so that
+    it is fed directly from the preceding CARRY4's CO bit.
+    """
+    cells = design["modules"][top_module]["cells"]
+
+    direct_cell = cells[direct_cellname]
+    assert direct_cell["type"] == "CARRY_CO_LUT"
+
+    # Follow to the OUT port of the CARRY_CO_LUT.
+    connections = direct_cell["connections"]["OUT"]
+    assert len(connections) == 1
+
+    for cellname, port, bit_idx in bit_to_cells[connections[0]][1:]:
+        cell = cells[cellname]
+        if cell["type"] == "CARRY_COUT_PLUG" and port == "CIN":
+            assert bit_idx == 0
+
+            cells[cellname]["connections"]["CIN"][0] = co_bit
+
+
+def fixup_congested_rows(design, top_module, bit_to_cells, bit_to_nets, chain):
+    """Walk the specified carry chain, and identify if any outputs are congested.
+
+    Arguments:
+        design (dict) - "design" field from Yosys JSON format
+        top_module (str) - Name of top module.
+        bit_to_cells (dict) - Map of net bit identifier and cell information.
+            Computed in "create_bit_to_cell_map".
+        bit_to_nets (dict) - Map of net bit identifier to net information.
+            Computed in "create_bit_to_net_map".
+        chain (list of str) - List of cells in the carry chain.
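+
+    Returns:
+        Nothing; "design" is modified in place.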
+
+    """
+    cells = design["modules"][top_module]["cells"]
+
+    O_ports = ["O0", "O1", "O2", "O3"]
+    CO_ports = ["CO0", "CO1", "CO2", "CO3"]
+
+    def check_if_rest_of_carry4_is_unused(cellname, cell_idx):
+        assert cell_idx < len(O_ports)
+
+        cell = cells[cellname]
+        connections = cell["connections"]
+
+        for o, co in zip(O_ports[cell_idx:], CO_ports[cell_idx:]):
+            o_conns = connections[o]
+            assert len(o_conns) == 1
+            o_bit = o_conns[0]
+            if is_bit_used(bit_to_cells, o_bit):
+                return False
+
+            co_conns = connections[co]
+            assert len(co_conns) == 1
+            co_bit = co_conns[0]
+            if is_bit_used(bit_to_cells, co_bit):
+                return False
+
+        return True
+
+    # The carry chain is congested if both O and CO are used at the same
+    # level. CO feeding the next element in the chain is fine.
+    for chain_idx, cellname in enumerate(chain):
+        cell = cells[cellname]
+        connections = cell["connections"]
+        for cell_idx, (o, co) in enumerate(zip(O_ports, CO_ports)):
+            o_conns = connections[o]
+            assert len(o_conns) == 1
+            o_bit = o_conns[0]
+
+            co_conns = connections[co]
+            assert len(co_conns) == 1
+            co_bit = co_conns[0]
+
+            is_o_used = is_bit_used(bit_to_cells, o_bit)
+            is_co_used, direct_cellname = is_bit_used_other_than_carry4_cin(design, top_module, co_bit, bit_to_cells)
+
+            if is_o_used and is_co_used:
+                # Output at this row is congested.
+                direct_cell = cells[direct_cellname]
+
+                if co == "CO3" and chain_idx == len(chain) - 1:
+                    # This congestion is on the top of the carry chain,
+                    # emit a dummy layer to the chain.
+                    direct_cell["type"] = "CARRY_CO_TOP_POP"
+                    assert int(direct_cell["parameters"]["TOP_OF_CHAIN"]) == 1
+                # If this is the last CARRY4 in the chain, see if the
+                # remaining rows of this CARRY4 are idle.
+                elif chain_idx == len(chain) - 1 and check_if_rest_of_carry4_is_unused(cellname, cell_idx + 1):
+
+                    # Because the rest of the CARRY4 is idle, it is safe to
+                    # use the next row up to output the top of the carry.
+                    connections["S{}".format(cell_idx + 1)] = ["1'b0"]
+
+                    next_o_conns = connections[O_ports[cell_idx + 1]]
+                    assert len(next_o_conns) == 1
+                    direct_cell["connections"]["CO"][0] = next_o_conns[0]
+
+                    netname, bit_idx = bit_to_nets[next_o_conns[0]]
+                    assert bit_idx == 0
+
+                    # Update annotation that this net is now in use.
+                    net = design["modules"][top_module]["netnames"][netname]
+                    assert net["attributes"].get("unused_bits", None) == "0 "
+                    del net["attributes"]["unused_bits"]
+                else:
+                    # The previous two strategies (use another layer of carry)
+                    # only work for the top of the chain. This appears to be
+                    # in the middle of the chain, so just spill it out to a
+                    # LUT, and fixup the direct carry chain (if any).
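+                    #
+                    # CARRY_CO_LUT makes clean_carry_map.v recompute CO from
+                    # O, DI and S with a LUT, at the cost of routing and LUT
+                    # delay on this carry output.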
+ direct_cell["type"] = "CARRY_CO_LUT" + + fixup_cin(design, top_module, bit_to_cells, co_bit, direct_cellname) + + +def main(design): + top_module = find_top_module(design) + bit_to_cells = create_bit_to_cell_map(design, top_module) + bit_to_nets = create_bit_to_net_map(design, top_module) + for chain in find_carry4_chains(design, top_module, bit_to_cells): + fixup_congested_rows(design, top_module, bit_to_cells, bit_to_nets, chain) + return design + + +if __name__ == "__main__": + json.dump(main(json.load(stdin)), stdout, indent=2) diff --git a/f4pga/wrappers/sh/__init__.py b/f4pga/wrappers/sh/__init__.py index e4f71aa..4694dca 100644 --- a/f4pga/wrappers/sh/__init__.py +++ b/f4pga/wrappers/sh/__init__.py @@ -37,6 +37,11 @@ isQuickLogic = FPGA_FAM != "xc7" SH_SUBDIR = "quicklogic" if isQuickLogic else FPGA_FAM +if not isQuickLogic: + from f4pga.utils.xc7.create_ioplace import main as xc7_create_ioplace + from f4pga.utils.xc7.create_place_constraints import main as xc7_create_place_constraints + + # Helper functions @@ -472,26 +477,22 @@ fi ) else: (eblif, net, part, device, arch_def) = sys_argv[1:6] - pcf_opts = f"'--pcf' '{sys_argv[6]}'" if len(sys_argv) > 6 else "" ioplace_file = f"{Path(eblif).stem}.ioplace" - p_run_bash_cmds( - f""" -set -e -python3 '{F4PGA_SHARE_DIR}/scripts/prjxray_create_ioplace.py' \ - --blif '{eblif}' \ - --net '{net}' {pcf_opts} \ - --map '{F4PGA_SHARE_DIR}/arch/{device}/{part}/pinmap.csv' \ - > '{ioplace_file}' -python3 '{F4PGA_SHARE_DIR}'/scripts/prjxray_create_place_constraints.py \ - --blif '{eblif}' \ - --net '{net}' \ - --arch '{arch_def}' \ - --part '{part}' \ - --vpr_grid_map '{F4PGA_SHARE_DIR}/arch/{device}/vpr_grid_map.csv' \ - --input '{ioplace_file}' \ - --db_root "${{DATABASE_DIR:-$(prjxray-config)}}" \ - > constraints.place -""" + xc7_create_ioplace( + blif=eblif, + map=f"{F4PGA_SHARE_DIR}/arch/{device}/{part}/pinmap.csv", + net=net, + output=ioplace_file, + pcf=Path(sys_argv[6]).open("r") if len(sys_argv) > 6 else None, + ) + xc7_create_place_constraints( + blif=eblif, + net=net, + arch=arch_def, + part=part, + vpr_grid_map=f"{F4PGA_SHARE_DIR}/arch/{device}/vpr_grid_map.csv", + input=ioplace_file, + output="constraints.place", ) diff --git a/f4pga/wrappers/tcl/xc7/synth.f4pga.tcl b/f4pga/wrappers/tcl/xc7/synth.f4pga.tcl index 7aa41c6..b379ff0 100644 --- a/f4pga/wrappers/tcl/xc7/synth.f4pga.tcl +++ b/f4pga/wrappers/tcl/xc7/synth.f4pga.tcl @@ -191,7 +191,7 @@ techmap -map $::env(TECHMAP_PATH)/carry_map.v clean_processes write_json $::env(OUT_JSON).carry_fixup.json -exec $::env(PYTHON3) $::env(UTILS_PATH)/fix_xc7_carry.py < $::env(OUT_JSON).carry_fixup.json > $::env(OUT_JSON).carry_fixup_out.json +exec $::env(PYTHON3) -m f4pga.utils.xc7.fix_xc7_carry < $::env(OUT_JSON).carry_fixup.json > $::env(OUT_JSON).carry_fixup_out.json design -push read_json $::env(OUT_JSON).carry_fixup_out.json
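
For reference, a minimal sketch (not part of the patch) of driving the relocated
carry-fixup pass from Python rather than through the `exec` line above; the file
names below are hypothetical:

    import json
    from f4pga.utils.xc7.fix_xc7_carry import main as fix_carry

    # Load the netlist written by write_json, apply the CARRY4 congestion
    # fixup, and write the result for the subsequent read_json.
    with open("top.carry_fixup.json") as f:
        design = json.load(f)

    with open("top.carry_fixup_out.json", "w") as f:
        json.dump(fix_carry(design), f, indent=2)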