%%% -*-BibTeX-*-
%%% ====================================================================
%%%  BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "1.39",
%%%     date            = "20 August 2024",
%%%     time            = "08:49:37 MDT",
%%%     filename        = "toct.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     URL             = "https://www.math.utah.edu/~beebe",
%%%     checksum        = "04479 10419 58566 521670",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                        beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "bibliography, BibTeX, ACM Transactions on
%%%                        Computation Theory",
%%%     license         = "public domain",
%%%     supported       = "no",
%%%     docstring       = "This is a COMPLETE BibTeX bibliography for
%%%                        the journal ACM Transactions on Computation
%%%                        Theory (CODEN ????, ISSN 1942-3454),
%%%                        covering all journal issues from 2009 --
%%%                        date.
%%%
%%%                        The journal has a World-Wide Web site at:
%%%
%%%                            http://www.acm.org/pubs/toct
%%%
%%%                        Tables-of-contents are available at:
%%%
%%%                            http://www.acm.org/pubs/contents/journals/toct/
%%%                            http://portal.acm.org/browse_dl.cfm?idx=J1190
%%%
%%%                        Qualified subscribers can retrieve the full
%%%                        text of recent articles in PDF form.
%%%
%%%                        At version 1.39, the COMPLETE journal
%%%                        coverage looked like this:
%%%
%%%                             2009 (   7)    2015 (  13)    2021 (  23)
%%%                             2010 (   5)    2016 (  23)    2022 (  13)
%%%                             2011 (   6)    2017 (  10)    2023 (   8)
%%%                             2012 (  15)    2018 (  24)    2024 (  12)
%%%                             2013 (  18)    2019 (  28)
%%%                             2014 (  21)    2020 (  29)
%%%
%%%                             Article:        255
%%%
%%%                             Total entries:  255
%%%
%%%                        The initial draft was extracted from the
%%%                        ACM Web site, with manual corrections and
%%%                        additions from bibliographies in the TeX
%%%                        User Group collection, the author's
%%%                        personal bibliography files, the Compendex
%%%                        database, and a very large computer science
%%%                        bibliography collection on ftp.ira.uka.de
%%%                        in /pub/bibliography to which many people
%%%                        have contributed.  Where multiple
%%%                        sources of a particular entry existed,
%%%                        field values have been manually merged to
%%%                        preserve maximal information.  Missing
%%%                        entries were identified by software
%%%                        developed for the TeX User Group and BibNet
%%%                        bibliography archive projects, and were
%%%                        then supplied from the original journal
%%%                        issues.  Questions arising from conflicting
%%%                        data were resolved by consulting the
%%%                        original journal issues.
%%%
%%%                        ACM copyrights explicitly permit abstracting
%%%                        with credit, so article abstracts, keywords,
%%%                        and subject classifications have been
%%%                        included in this bibliography wherever
%%%                        available.  Article reviews have been
%%%                        omitted, until their copyright status has
%%%                        been clarified.
%%%
%%%                        The bibsource keys in the bibliography
%%%                        entries below indicate the data sources,
%%%                        usually the Karlsruhe computer science
%%%                        bibliography archive for the first two
%%%                        volumes, the journal Web site, or the
%%%                        Compendex database.
%%%
%%%                        URL keys in the bibliography point to
%%%                        World Wide Web locations of additional
%%%                        information about the entry.
%%%
%%%                        Spelling has been verified with the UNIX
%%%                        spell and GNU ispell programs using the
%%%                        exception dictionary stored in the
%%%                        companion file with extension .sok.
%%%
%%%                        BibTeX citation tags are uniformly chosen
%%%                        as name:year:abbrev, where name is the
%%%                        family name of the first author or editor,
%%%                        year is a 4-digit number, and abbrev is a
%%%                        3-letter condensation of important title
%%%                        words. Citation tags were automatically
%%%                        generated by software developed for the
%%%                        BibNet Project.
%%%
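%%%                        For example, the entry below for the article
%%%                        ``Algebrization: a New Barrier in Complexity
%%%                        Theory'' by Scott Aaronson and Avi Wigderson
%%%                        (2009) carries the citation tag
%%%                        Aaronson:2009:ANB.
%%%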
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, using ``bibsort -byvolume.''
%%%
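%%%                        One possible invocation, assuming bibsort is
%%%                        used as a filter reading standard input and
%%%                        writing standard output (an illustrative
%%%                        sketch; consult the bibsort documentation
%%%                        for the exact interface), is:
%%%
%%%                            bibsort -byvolume < toct.bib > toct.bib.sorted
%%%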
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility.",
%%%  }
%%% ====================================================================
@Preamble{"\input bibnames.sty"}

%%% ====================================================================
%%% Acknowledgement abbreviations:
@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|https://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:
@String{j-TOCT                  = "ACM Transactions on Computation Theory"}

%%% ====================================================================
%%% Publisher abbreviations:
@String{pub-ACM                 = "ACM Press"}

@String{pub-ACM:adr             = "New York, NY 10036, USA"}

%%% ====================================================================
%%% Bibliography entries:
@Article{Fortnow:2009:EF,
  author =       "Lance Fortnow",
  title =        "{Editor}'s Foreword",
  journal =      j-TOCT,
  volume =       "1",
  number =       "1",
  pages =        "1:1--1:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1490270.1490271",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Fri Apr 24 19:03:42 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Aaronson:2009:ANB,
  author =       "Scott Aaronson and Avi Wigderson",
  title =        "Algebrization: a New Barrier in Complexity Theory",
  journal =      j-TOCT,
  volume =       "1",
  number =       "1",
  pages =        "2:1--2:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1490270.1490272",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Fri Apr 24 19:03:42 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Any proof of P $ \neq $ NP will have to overcome two
                 barriers: {\em relativization\/} and {\em natural
                 proofs}. Yet over the last decade, we have seen circuit
                 lower bounds (e.g., that PP does not have linear-size
                 circuits) that overcome both barriers simultaneously.
                 So the question arises of whether there is a third
                 barrier to progress on the central questions in
                 complexity theory.\par

                 In this article, we present such a barrier, which we
                 call {\em algebraic relativization\/} or {\em
                 algebrization}. The idea is that, when we relativize
                 some complexity class inclusion, we should give the
                 simulating machine access not only to an oracle $A$,
                 but also to a low-degree extension of $A$ over a finite
                 field or ring.\par

                 We systematically go through basic results and open
                 problems in complexity theory to delineate the power of
                 the new algebrization barrier. First, we show that all
                 known nonrelativizing results based on
                 arithmetization---both inclusions such as IP = PSPACE
                 and MIP = NEXP, and separations such as MA$_{\rm EXP}
                 \not \subset $ P/poly---do indeed algebrize. Second,
                 we show that
                 almost all of the major open problems---including P
                 versus NP, P versus RP, and NEXP versus P/poly---will
                 require {\em non-algebrizing techniques}. In some
                 cases, algebrization seems to explain exactly why
                 progress stopped where it did: for example, why we have
                 superlinear circuit lower bounds for PromiseMA but not
                 for NP.\par

                 Our second set of results follows from lower bounds in
                 a new model of {\em algebraic query complexity}, which
                 we introduce in this article and which is interesting
                 in its own right. Some of our lower bounds use direct
                 combinatorial and algebraic arguments, while others
                 stem from a surprising connection between our model and
                 communication complexity. Using this connection, we are
                 also able to give an MA-protocol for the Inner Product
                 function with $ O(\sqrt {n} \log n)$ communication
                 (essentially matching a lower bound of Klauck), as well
                 as a communication complexity conjecture whose truth
                 would imply NL $ \neq $ NP.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "2",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
  keywords =     "arithmetization; communication complexity; interactive
                 proofs; Low-degree polynomials; oracles; query
                 complexity",
}

@Article{Razborov:2009:SPB,
  author =       "Alexander Razborov",
  title =        "A Simple Proof of {Bazzi's Theorem}",
  journal =      j-TOCT,
  volume =       "1",
  number =       "1",
  pages =        "3:1--3:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1490270.1490273",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Fri Apr 24 19:03:42 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Linial and Nisan [1990] asked if any polylog-wise
                 independent distribution fools any function in AC$^0$.
                 In a recent remarkable development, Bazzi solved this
                 problem for the case of DNF formulas. The aim of this
                 note is to present a simplified version of his proof.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "3",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
  keywords =     "DNF; Pseudo-random generators",
}

@Article{Bourke:2009:DPR,
  author =       "Chris Bourke and Raghunath Tewari and N. V.
                 Vinodchandran",
  title =        "Directed Planar Reachability Is in Unambiguous
                 Log-Space",
  journal =      j-TOCT,
  volume =       "1",
  number =       "1",
  pages =        "4:1--4:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1490270.1490274",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Fri Apr 24 19:03:42 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We make progress in understanding the complexity of
                 the graph reachability problem in the context of
                 unambiguous logarithmic space computation; a restricted
                 form of nondeterminism. As our main result, we show a
                 new upper bound on the {\em directed planar
                 reachability problem\/} by showing that it can be
                 decided in the class unambiguous logarithmic space
                 (UL). We explore the possibility of showing the same
                 upper bound for the general graph reachability problem.
                 We give a simple reduction showing that the
                 reachability problem for directed graphs with thickness
                 two is complete for the class nondeterministic
                 logarithmic space (NL). Hence an extension of our
                 results to directed graphs with thickness two will
                 unconditionally collapse NL to UL.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "4",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
  keywords =     "Directed graph reachability; planar graphs;
                 unambiguous log-space",
}

@Article{David:2009:ISB,
  author =       "Matei David and Toniann Pitassi and Emanuele Viola",
  title =        "Improved Separations between Nondeterministic and
                 Randomized Multiparty Communication",
  journal =      j-TOCT,
  volume =       "1",
  number =       "2",
  pages =        "5:1--5:??",
  month =        sep,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1595391.1595392",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Tue Mar 16 10:08:03 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "5",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Guruswami:2009:HSS,
  author =       "Venkatesan Guruswami and Prasad Raghavendra",
  title =        "Hardness of Solving Sparse Overdetermined Linear
                 Systems: a 3-Query {PCP} over Integers",
  journal =      j-TOCT,
  volume =       "1",
  number =       "2",
  pages =        "6:1--6:??",
  month =        sep,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1595391.1595393",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Tue Mar 16 10:08:03 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "6",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Ben-Sasson:2009:SQP,
  author =       "Eli Ben-Sasson and Prahladh Harsha and Oded Lachish
                 and Arie Matsliah",
  title =        "Sound 3-Query {PCPPs} Are Long",
  journal =      j-TOCT,
  volume =       "1",
  number =       "2",
  pages =        "7:1--7:??",
  month =        sep,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1595391.1595394",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Tue Mar 16 10:08:03 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "7",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Kyncl:2010:LRD,
  author =       "Jan Kyn{\v{c}}l and Tom{\'a}{\v{s}} Vysko{\v{c}}il",
  title =        "Logspace Reduction of Directed Reachability for
                 Bounded Genus Graphs to the Planar Case",
  journal =      j-TOCT,
  volume =       "1",
  number =       "3",
  pages =        "8:1--8:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1714450.1714451",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Tue Mar 16 10:08:04 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "8",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Beame:2010:FCD,
  author =       "Paul Beame and Russell Impagliazzo and Toniann Pitassi
                 and Nathan Segerlind",
  title =        "Formula Caching in {DPLL}",
  journal =      j-TOCT,
  volume =       "1",
  number =       "3",
  pages =        "9:1--9:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1714450.1714452",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Tue Mar 16 10:08:04 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "9",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Datta:2010:PDP,
  author =       "Samir Datta and Raghav Kulkarni and Nutan Limaye and
                 Meena Mahajan",
  title =        "Planarity, Determinants, Permanents, and (Unique)
                 Matchings",
  journal =      j-TOCT,
  volume =       "1",
  number =       "3",
  pages =        "10:1--10:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1714450.1714453",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Tue Mar 16 10:08:04 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "10",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Yin:2010:CPP,
  author =       "Yitong Yin",
  title =        "Cell-Probe Proofs",
  journal =      j-TOCT,
  volume =       "2",
  number =       "1",
  pages =        "1:1--1:??",
  month =        nov,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1867719.1867720",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Tue Nov 23 11:20:48 MST 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Hoefer:2010:TAC,
  author =       "Martin Hoefer and Alexander Souza",
  title =        "Tradeoffs and Average-Case Equilibria in Selfish
                 Routing",
  journal =      j-TOCT,
  volume =       "2",
  number =       "1",
  pages =        "2:1--2:??",
  month =        nov,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1867719.1867721",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Tue Nov 23 11:20:48 MST 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "2",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Purdy:2011:LBC,
  author =       "Eric Purdy",
  title =        "Lower Bounds for Coin-Weighing Problems",
  journal =      j-TOCT,
  volume =       "2",
  number =       "2",
  pages =        "3:1--3:??",
  month =        mar,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1944857.1944858",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Mon Mar 28 09:50:20 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "3",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Arvind:2011:SGI,
  author =       "Vikraman Arvind and Jacobo Tor{\'a}n",
  title =        "Solvable Group Isomorphism Is (Almost) in {NP} $ \cap
                 $ {coNP}",
  journal =      j-TOCT,
  volume =       "2",
  number =       "2",
  pages =        "4:1--4:??",
  month =        mar,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1944857.1944859",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Mon Mar 28 09:50:20 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "4",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Fellows:2011:CDF,
  author =       "Michael R. Fellows and Jiong Guo and Hannes Moser and
                 Rolf Niedermeier",
  title =        "A Complexity Dichotomy for Finding Disjoint Solutions
                 of Vertex Deletion Problems",
  journal =      j-TOCT,
  volume =       "2",
  number =       "2",
  pages =        "5:1--5:??",
  month =        mar,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1944857.1944860",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Mon Mar 28 09:50:20 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "5",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Hitchcock:2011:KCR,
  author =       "John M. Hitchcock and A. Pavan and N. V.
                 Vinodchandran",
  title =        "{Kolmogorov} Complexity in Randomness Extraction",
  journal =      j-TOCT,
  volume =       "3",
  number =       "1",
  pages =        "1:1--1:??",
  month =        aug,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2003685.2003686",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Mon Sep 5 16:58:07 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Kulkarni:2011:PIP,
  author =       "Raghav Kulkarni",
  title =        "On the Power of Isolation in Planar Graphs",
  journal =      j-TOCT,
  volume =       "3",
  number =       "1",
  pages =        "2:1--2:??",
  month =        aug,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2003685.2003687",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Mon Sep 5 16:58:07 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "2",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Smyth:2011:AQC,
  author =       "Clifford Smyth",
  title =        "Approximate Query Complexity",
  journal =      j-TOCT,
  volume =       "3",
  number =       "1",
  pages =        "3:1--3:??",
  month =        aug,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2003685.2003688",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  bibdate =      "Mon Sep 5 16:58:07 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "3",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Cook:2012:PBP,
  author =       "Stephen Cook and Pierre McKenzie and Dustin Wehr and
                 Mark Braverman and Rahul Santhanam",
  title =        "Pebbles and Branching Programs for Tree Evaluation",
  journal =      j-TOCT,
  volume =       "3",
  number =       "2",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2077336.2077337",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Fri Mar 16 15:22:48 MDT 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We introduce the tree evaluation problem, show that it
                 is in LogDCFL (and hence in $P$), and study its
                 branching program complexity in the hope of eventually
                 proving a superlogarithmic space lower bound. The input
                 to the problem is a rooted, balanced $d$-ary tree of
                 height $h$, whose internal nodes are labeled with $d$-ary
                 functions on $ [k] = \{ 1, \ldots {}, k \} $, and whose
                 leaves are labeled with elements of $ [k]$. Each node
                 obtains a value in $ [k]$ equal to its $d$-ary function
                 applied to the values of its $d$ children. The output
                 is the value of the root. We show that the standard
                 black pebbling algorithm applied to the binary tree of
                 height $h$ yields a deterministic $k$-way branching
                 program with $ O(k^h) $ states solving this problem, and
                 we prove that this upper bound is tight for $ h = 2$
                 and $ h = 3$. We introduce a simple semantic
                 restriction called thrifty on $k$-way branching
                 programs solving tree evaluation problems and show that
                 the same state bound of $ \Theta (k^h) $ is tight for
                 all $ h \geq 2$ for deterministic thrifty programs. We
                 introduce fractional pebbling for trees and show that
                 this yields nondeterministic thrifty programs with
                 $ \Theta (k^{h / 2 + 1}) $ states solving the Boolean
                 problem `determine whether the root has value 1', and
                 prove that this bound is tight for $ h = 2, 3, 4$. We
                 also prove that this same bound is tight for
                 unrestricted nondeterministic $k$-way branching
                 programs solving the Boolean problem for $ h = 2, 3$.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "4",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Gal:2012:TQL,
  author =       "Anna Gal and Andrew Mills",
  title =        "Three-Query Locally Decodable Codes with Higher
                 Correctness Require Exponential Length",
  journal =      j-TOCT,
  volume =       "3",
  number =       "2",
  pages =        "5:1--5:??",
  month =        jan,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2077336.2077338",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Fri Mar 16 15:22:48 MDT 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Locally decodable codes are error-correcting codes
                 with the extra property that, in order to retrieve the
                 value of a single input position, it is sufficient to
                 read a small number of positions of the codeword. We
                 refer to the probability of getting the correct value
                 as the correctness of the decoding algorithm. A
                 breakthrough result by Yekhanin [2007] showed that
                 3-query linear locally decodable codes may have
                 subexponential length. The construction of Yekhanin,
                 and the three query constructions that followed,
                 achieve correctness only up to a certain limit which is
                 $ 1 - 3 \delta $ for nonbinary codes, where an adversary
                 is allowed to corrupt up to $ \delta $ fraction of the
                 codeword. The largest correctness for a subexponential
                 length 3-query binary code is achieved in a
                 construction by Woodruff [2008], and it is below
                 $ 1 - 3 \delta $. We show that achieving slightly larger
                 correctness (as a function of $ \delta $) requires
                 exponential codeword length for 3-query codes.
                 Previously, there were no larger than quadratic lower
                 bounds known for locally decodable codes with more than
                 2 queries, even in the case of 3-query linear codes.
                 Our lower bounds hold for linear codes over arbitrary
                 finite fields and for binary nonlinear codes.
                 Considering larger number of queries, we obtain lower
                 bounds for $q$-query codes for $ q > 3$, under certain
                 assumptions on the decoding algorithm that have been
                 commonly used in previous constructions. We also prove
                 bounds on the largest correctness achievable by these
                 decoding algorithms, regardless of the length of the
                 code. Our results explain the limitations on
                 correctness in previous constructions using such
                 decoding algorithms. In addition, our results imply
                 trade-offs on the parameters of error-correcting data
                 structures.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "5",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Beame:2012:VMR,
  author =       "Paul Beame and Trinh Huynh",
  title =        "The Value of Multiple {Read\slash Write} Streams for
                 Approximating Frequency Moments",
  journal =      j-TOCT,
  volume =       "3",
  number =       "2",
  pages =        "6:1--6:??",
  month =        jan,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2077336.2077339",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Fri Mar 16 15:22:48 MDT 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We consider the read/write streams model, an extension
                 of the standard data stream model in which an algorithm
                 can create and manipulate multiple read/write streams
                 in addition to its input data stream. Like the data
                 stream model, the most important parameter for this
                 model is the amount of internal memory used by such an
                 algorithm. The other key parameters are the number of
                 streams the algorithm uses and the number of passes it
                 makes on these streams. We consider how the addition of
                 multiple streams impacts the ability of algorithms to
                 approximate the frequency moments of the input stream.
                 We show that any randomized read/write stream algorithm
                 with a fixed number of streams and a sublogarithmic
                 number of passes that produces a constant factor
                 approximation of the $k$-th frequency moment $ F_k$ of
                 an input sequence of length at most $N$ from $ \{ 1,
                 \ldots {}, N \} $ requires space $ \Omega (N^{1 - 4 / k
                 - \delta })$ for any $ \delta > 0$. For comparison, it
                 is known that with a single read-only one-pass data
                 stream there is a randomized constant-factor
                 approximation for $ F_k$ using $ \tilde {O}(N^{1 - 2 /
                 k})$ space, and that by sorting, which can be done
                 deterministically in $ O(\log N)$ passes using $3$
                 read/write streams, $ F_k$ can be computed exactly.
                 Therefore, although the ability to manipulate multiple
                 read/write streams can add substantial power to the
                 data stream model, with a sublogarithmic number of
                 passes this does not significantly improve the ability
                 to approximate higher frequency moments efficiently. We
                 obtain our results by showing a new connection between
                 the read/write streams model and the multiparty
                 number-in-hand communication model.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "6",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Tani:2012:EQA,
  author =       "Seiichiro Tani and Hirotada Kobayashi and Keiji
                 Matsumoto",
  title =        "Exact Quantum Algorithms for the Leader Election
                 Problem",
  journal =      j-TOCT,
  volume =       "4",
  number =       "1",
  pages =        "1:1--1:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2141938.2141939",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Nov 6 18:23:48 MST 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "This article gives a separation between quantum and
                 classical models in pure (i.e., noncryptographic)
                 computing abilities with no restriction on the amount
                 of available computing resources, by considering the
                 exact solvability of the leader election problem in
                 anonymous networks, a celebrated unsolvable problem in
                 classical distributed computing. The goal of the leader
                 election problem is to elect a unique leader from among
                 distributed parties. In an anonymous network, all
                 parties with the same number of communication links are
                 identical. It is well-known that no classical algorithm
                 can exactly solve (i.e., in bounded time without error)
                 the leader election problem in anonymous networks, even
                 if the number of parties is given. This article devises
                 a quantum algorithm that, if the number of parties is
                 given, exactly solves the problem for any network
                 topology in polynomial rounds with polynomial
                 communication/time complexity with respect to the
                 number of parties, when the parties are connected with
                 quantum communication links and have the ability to
                 perform quantum computing. Our algorithm works even when
                 only an upper bound of the number of parties is given.
                 In such a case, no classical algorithm can solve the
                 problem even under the zero-error setting, the setting
                 in which error is not allowed but running time may be
                 unbounded.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Cheraghchi:2012:ALT,
  author =       "Mahdi Cheraghchi and Johan H{\aa}stad and Marcus
                 Isaksson and Ola Svensson",
  title =        "Approximating Linear Threshold Predicates",
  journal =      j-TOCT,
  volume =       "4",
  number =       "1",
  pages =        "2:1--2:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2141938.2141940",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Nov 6 18:23:48 MST 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We study constraint satisfaction problems on the
                 domain $ \{ -1, 1 \} $, where the given constraints are
                 homogeneous linear threshold predicates, that is,
                 predicates of the form $ \sgn (w_1 x_1 + \cdots + w_n
                 x_n) $ for some positive integer weights $ w_1, \ldots
                 {}, w_n $. Despite their simplicity, current techniques
                 fall short of providing a classification of these
                 predicates in terms of approximability. In fact, it is
                 not easy to guess whether there exists a homogeneous
                 linear threshold predicate that is approximation
                 resistant or not. The focus of this article is to
                 identify and study the approximation curve of a class
                 of threshold predicates that allow for nontrivial
                 approximation. Arguably the simplest such predicate is
                 the majority predicate $ \sgn (x_1 + \cdots + x_n) $,
                 for which we obtain an almost complete understanding of
                 the asymptotic approximation curve, assuming the Unique
                 Games Conjecture. Our techniques extend to a more
                 general class of ``majority-like'' predicates and we
                 obtain parallel results for them. In order to classify
                 these predicates, we introduce the notion of
                 Chow-robustness that might be of independent
                 interest.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "2",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{De:2012:ELB,
  author =       "Anindya De and Thomas Watson",
  title =        "Extractors and Lower Bounds for Locally Samplable
                 Sources",
  journal =      j-TOCT,
  volume =       "4",
  number =       "1",
  pages =        "3:1--3:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2141938.2141941",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Nov 6 18:23:48 MST 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We consider the problem of extracting randomness from
                 sources that are efficiently samplable, in the sense
                 that each output bit of the sampler only depends on
                 some small number $d$ of the random input bits. As our
                 main result, we construct a deterministic extractor
                 that, given any $d$-local source with min-entropy $k$
                 on $n$ bits, extracts $ \Omega (k^2 / n d)$ bits that
                 are $ 2^{-n^{\Omega (1)}}$-close to uniform, provided $ d
                 \leq o(\log n)$ and $ k \geq n^{2 / 3 + \gamma }$ (for
                 arbitrarily small constants $ \gamma > 0$). Using our
                 result, we also improve a result of Viola [2010] who
                 proved a $ 1 / 2 - O(1 / \log n)$ statistical distance
                 lower bound for $ o(\log n)$-local samplers trying to
                 sample input-output pairs of an explicit boolean
                 function, assuming the samplers use at most $ n + n^{1
                 - \delta }$ random bits for some constant $ \delta >
                 0$. Using a different function, we simultaneously
                 improve the lower bound to $ 1 / 2 - 2^{-n^{\Omega (1)}}$
                 and eliminate the restriction on the number of random
                 bits.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "3",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Schoenebeck:2012:CCN,
  author =       "Grant R. Schoenebeck and Salil Vadhan",
  title =        "The Computational Complexity of {Nash} Equilibria in
                 Concisely Represented Games",
  journal =      j-TOCT,
  volume =       "4",
  number =       "2",
  pages =        "4:1--4:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2189778.2189779",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Nov 6 18:23:49 MST 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Games may be represented in many different ways, and
                 different representations of games affect the
                 complexity of problems associated with games, such as
                 finding a Nash equilibrium. The traditional method of
                 representing a game is to explicitly list all the
                 payoffs, but this incurs an exponential blowup as the
                 number of agents grows. We study two models of
                 concisely represented games: circuit games, where the
                 payoffs are computed by a given boolean circuit, and
                 graph games, where each agent's payoff is a function of
                 only the strategies played by its neighbors in a given
                 graph. For these two models, we study the complexity of
                 four questions: determining if a given strategy is a
                 Nash equilibrium, finding a Nash equilibrium,
                 determining if there exists a pure Nash equilibrium,
                 and determining if there exists a Nash equilibrium in
                 which the payoffs to a player meet some given
                 guarantees. In many cases, we obtain tight results,
                 showing that the problems are complete for various
                 complexity classes.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "4",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Kawamura:2012:CTO,
  author =       "Akitoshi Kawamura and Stephen Cook",
  title =        "Complexity Theory for Operators in Analysis",
  journal =      j-TOCT,
  volume =       "4",
  number =       "2",
  pages =        "5:1--5:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2189778.2189780",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Nov 6 18:23:49 MST 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We propose an extension of the framework for
                 discussing the computational complexity of problems
                 involving uncountably many objects, such as real
                 numbers, sets and functions, that can be represented
                 only through approximation. The key idea is to use a
                 certain class of string functions as names representing
                 these objects. These are more expressive than infinite
                 sequences, which served as names in prior work that
                 formulated complexity in more restricted settings. An
                 advantage of using string functions is that we can
                 define their size in a way inspired by higher-type
                 complexity theory. This enables us to talk about
                 computation on string functions whose time or space is
                 bounded polynomially in the input size, giving rise to
                 more general analogues of the classes P, NP, and
                 PSPACE. We also define NP- and PSPACE-completeness
                 under suitable many-one reductions. Because our
                 framework separates machine computation and semantics,
                 it can be applied to problems on sets of interest in
                 analysis once we specify a suitable representation
                 (encoding). As prototype applications, we consider the
                 complexity of functions (operators) on real numbers,
                 real sets, and real functions. For example, the task of
                 numerical algorithms for solving a certain class of
                 differential equations is naturally viewed as an
                 operator taking real functions to real functions. As
                 there was no complexity theory for operators, previous
                 results only stated how complex the solution can be. We
                 now reformulate them and show that the operator itself
                 is polynomial-space complete.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "5",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Penna:2012:CRM,
  author =       "Paolo Penna and Carmine Ventre",
  title =        "Collusion-Resistant Mechanisms with Verification
                 Yielding Optimal Solutions",
  journal =      j-TOCT,
  volume =       "4",
  number =       "2",
  pages =        "6:1--6:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2189778.2189781",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Nov 6 18:23:49 MST 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "A truthful mechanism consists of an algorithm
                 augmented with a suitable payment function that
                 guarantees that the players cannot improve their
                 utilities by cheating. Mechanism design approaches are
                 particularly appealing for designing protocols that
                 cannot be manipulated by rational players. We present
                 new constructions of so-called mechanisms with
                 verification introduced by Nisan and Ronen [2001]. We
                 first show how to obtain mechanisms that, for
                 single-parameter domains, are resistant to coalitions
                 of colluding agents even if they can exchange
                 compensations. Based on this result we derive a class
                 of exact truthful mechanisms with verification for
                 arbitrary bounded domains. This class of problems
                 includes most of the problems studied in the
                 algorithmic mechanism design literature and for which
                 exact solutions cannot be obtained with truthful
                 mechanisms without verification. This result is an
                 improvement over all known previous constructions of
                 exact mechanisms with verification.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "6",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Beyersdorff:2012:PBD,
  author =       "Olaf Beyersdorff and Nicola Galesi and Massimo Lauria
                 and Alexander A. Razborov",
  title =        "Parameterized Bounded-Depth {Frege} Is not Optimal",
  journal =      j-TOCT,
  volume =       "4",
  number =       "3",
  pages =        "7:1--7:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2355580.2355582",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Nov 6 18:23:50 MST 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "A general framework for parameterized proof complexity
                 was introduced by Dantchev et al. [2007]. There, the
                 authors show important results on tree-like
                 Parameterized Resolution---a parameterized version of
                 classical Resolution---and their gap complexity theorem
                 implies lower bounds for that system. The main result
                 of this article significantly improves upon this by
                 showing optimal lower bounds for a parameterized
                 version of bounded-depth Frege. More precisely, we
                 prove that the pigeonhole principle requires proofs of
                 size $ n^{\Omega (k)} $ in parameterized bounded-depth
                 Frege, and, as a special case, in dag-like
                 Parameterized Resolution. This answers an open question
                 posed in Dantchev et al. [2007]. In the opposite
                 direction, we interpret a well-known technique for FPT
                 algorithms as a DPLL procedure for Parameterized
                 Resolution. Its generalization leads to a proof search
                 algorithm for Parameterized Resolution that in
                 particular shows that tree-like Parameterized
                 Resolution allows short refutations of all
                 parameterized contradictions given as bounded-width
                 CNFs.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "7",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Watson:2012:RWW,
  author =       "Thomas Watson",
  title =        "Relativized Worlds without Worst-Case to Average-Case
                 Reductions for {NP}",
  journal =      j-TOCT,
  volume =       "4",
  number =       "3",
  pages =        "8:1--8:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2355580.2355583",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Nov 6 18:23:50 MST 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We prove that, relative to an oracle, there is no
                 worst-case to average-case reduction for NP. We also
                 handle classes that are somewhat larger than NP, as
                 well as worst-case to errorless -average-case
                 reductions. In fact, we prove that relative to an
                 oracle, there is no worst-case to
                 errorless-average-case reduction from NP to
                 BPP$_{||}^{NP}$. We also handle reductions from NP to
                 the polynomial-time hierarchy and beyond, under strong
                 restrictions on the number of queries the reductions
                 can make.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "8",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Kayal:2012:SSR,
  author =       "Neeraj Kayal and Chandan Saha",
  title =        "On the Sum of Square Roots of Polynomials and Related
                 Problems",
  journal =      j-TOCT,
  volume =       "4",
  number =       "4",
  pages =        "9:1--9:??",
  month =        nov,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2382559.2382560",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sun May 5 09:31:28 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "The sum of square roots over integers problem is the
                 task of deciding the sign of a nonzero sum, {$ S =
                 \Sigma_{i = 1}^n \delta_i \cdot \sqrt a_i $}, where
                  {$ \delta_i \in \{ + 1, - 1 \} $} and $ a_i $'s are positive
                 integers that are upper bounded by {$N$} (say). A
                 fundamental open question in numerical analysis and
                 computational geometry is whether {$ | S | \geq 1 /
                  2^{(n \cdot \log N)^{O(1)}} $} when {$ S \neq 0 $}. We
                 study a formulation of this problem over polynomials.
                 Given an expression {$ S = \Sigma_{i = 1}^n c_i \cdot
                 \sqrt f_i (x) $}, where $ c_i $'s belong to a field of
                 characteristic $0$ and $ f_i $'s are univariate
                 polynomials with degree bounded by $d$ and $ f_i(0)
                 \neq 0 $ for all $i$, is it true that the minimum
                 exponent of $x$ that has a nonzero coefficient in the
                 power series {$S$} is upper bounded by {$ (n \cdot
                 d)^{O(1)} $}, unless {$ S = 0 $}? We answer this
                 question affirmatively. Further, we show that this
                 result over polynomials can be used to settle
                 (positively) the sum of square roots problem for a
                 special class of integers: Suppose each integer $ a_i $
                  is of the form {$ a_i = X^{d_i} + b_{i 1} X^{d_i - 1} +
                  \ldots {} + b_{i d_i} $}, $ d_i > 0 $, where {$X$} is a
                  positive real number and $ b_{ij} $'s are integers. Let
                  {$ B = \max (\max_{i, j} | b_{ij} |, 1) $} and $ d = \max_i
                  \{ d_i \} $. If {$ X > (B + 1)^{(n \cdot d)^{O(1)}} $},
                  then a nonzero {$ S = \Sigma_{i = 1}^n \delta_i \sqrt
                  a_i $} is lower bounded as {$ | S | \geq 1 / X^{(n \cdot
                  d)^{O(1)}} $}. The constant in {$ O (1) $}, as fixed by our
                 analysis, is roughly $2$. We then consider the
                 following more general problem. Given an arithmetic
                 circuit computing a multivariate polynomial {$ f (X) $}
                 and integer $d$, is the degree of {$ f (X) $} less than
                 or equal to $d$ ? We give a {coRP$^{PP}$}-algorithm for
                 this problem, improving previous results of Allender et
                 al. [2009] and Koiran and Perifel [2007].",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "9",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Pass:2012:PRT,
  author =       "Rafael Pass and Muthuramakrishnan Venkitasubramaniam",
  title =        "A Parallel Repetition Theorem for Constant-Round
                 {Arthur--Merlin} Proofs",
  journal =      j-TOCT,
  volume =       "4",
  number =       "4",
  pages =        "10:1--10:??",
  month =        nov,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2382559.2382561",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sun May 5 09:31:28 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We show a parallel-repetition theorem for
                 constant-round Arthur--Merlin Proofs, using an
                 efficient reduction. As a consequence, we show that
                 parallel repetition reduces the soundness-error at an
                 optimal rate (up to a negligible factor) in
                 constant-round public-coin argument systems, and
                 constant-round public-coin proofs of knowledge. The
                 first of these results resolves an open question posed
                 by Bellare, Impagliazzo, and Naor (FOCS'97).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "10",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Ron:2012:AIM,
  author =       "Dana Ron and Ronitt Rubinfeld and Muli Safra and Alex
                 Samorodnitsky and Omri Weinstein",
  title =        "Approximating the Influence of Monotone {Boolean}
                 Functions in {$ O(\sqrt n) $} Query Complexity",
  journal =      j-TOCT,
  volume =       "4",
  number =       "4",
  pages =        "11:1--11:??",
  month =        nov,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2382559.2382562",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sun May 5 09:31:28 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "The Total Influence ( Average Sensitivity ) of a
                 discrete function is one of its fundamental measures.
                 We study the problem of approximating the total
                 influence of a monotone Boolean function, which we
                 denote by {$ I[f] $}. We present a randomized algorithm
                 that approximates the influence of such functions to
                 within a multiplicative factor of $ (1 \pm \epsilon) $
                  by performing {$ O(\sqrt n / I[f] \cdot \poly (1 / \epsilon))
                 $} queries. We also prove a lower bound of {$ \Omega
                  (\sqrt n / (\log n \cdot I[f])) $} on the query complexity
                 of any constant factor approximation algorithm for this
                 problem (which holds for {$ I[f] = \Omega (1) $}),
                 hence showing that our algorithm is almost optimal in
                 terms of its dependence on $n$. For general functions,
                  we give a lower bound of {$ \Omega (n / I[f]) $}, which
                 matches the complexity of a simple sampling
                 algorithm.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "11",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Vlassis:2012:CCS,
  author =       "Nikos Vlassis and Michael L. Littman and David
                 Barber",
  title =        "On the Computational Complexity of Stochastic
                 Controller Optimization in {POMDPs}",
  journal =      j-TOCT,
  volume =       "4",
  number =       "4",
  pages =        "12:1--12:??",
  month =        nov,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2382559.2382563",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sun May 5 09:31:28 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We show that the problem of finding an optimal
                 stochastic blind controller in a Markov decision
                 process is an NP-hard problem. The corresponding
                  decision problem is NP-hard, in PSPACE, and
                  sqrt-sum-hard; hence placing it in NP would imply
                 breakthroughs in long-standing open problems in
                 computer science. Our result establishes that the more
                 general problem of stochastic controller optimization
                 in POMDPs is also NP-hard. Nonetheless, we outline a
                 special case that is convex and admits efficient global
                 solutions.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "12",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Austrin:2013:UP,
  author =       "Per Austrin and Johan H{\aa}stad",
  title =        "On the usefulness of predicates",
  journal =      j-TOCT,
  volume =       "5",
  number =       "1",
  pages =        "1:1--1:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:04 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Motivated by the pervasiveness of strong
                 inapproximability results for Max-CSPs, we introduce a
                 relaxed notion of an approximate solution of a Max-CSP.
                 In this relaxed version, loosely speaking, the
                 algorithm is allowed to replace the constraints of an
                 instance by some other (possibly real-valued)
                 constraints, and then only needs to satisfy as many of
                 the new constraints as possible. To be more precise, we
                 introduce the following notion of a predicate $P$ being
                 useful for a (real-valued) objective $Q$: given an
                  almost satisfiable Max-$P$ instance, there is an
                 algorithm that beats a random assignment on the
                 corresponding Max-$Q$ instance applied to the same sets
                 of literals. The standard notion of a nontrivial
                 approximation algorithm for a Max-CSP with predicate
                 $P$ is exactly the same as saying that $P$ is useful
                 for $P$ itself. We say that $P$ is useless if it is not
                 useful for any $Q$. This turns out to be equivalent to
                 the following pseudo-randomness property: given an
                  almost satisfiable instance of Max-$P$, it is hard to
                 find an assignment such that the induced distribution
                  on $k$-bit strings defined by the instance is not
                 essentially uniform. Under the unique games conjecture,
                 we give a complete and simple characterization of
                 useful Max-CSPs defined by a predicate: such a Max-CSP
                 is useless if and only if there is a pairwise
                 independent distribution supported on the satisfying
                 assignments of the predicate. It is natural to also
                 consider the case when no negations are allowed in the
                 CSP instance, and we derive a similar complete
                 characterization (under the UGC) there as well.
                 Finally, we also include some results and examples
                 shedding additional light on the approximability of
                 certain Max-CSPs.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Beyersdorff:2013:VPC,
  author =       "Olaf Beyersdorff and Samir Datta and Andreas Krebs and
                 Meena Mahajan and Gido Scharfenberger-Fabian and
                 Karteek Sreenivasaiah and Michael Thomas and Heribert
                 Vollmer",
  title =        "Verifying proofs in constant depth",
  journal =      j-TOCT,
  volume =       "5",
  number =       "1",
  pages =        "2:1--2:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:04 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "In this paper we initiate the study of proof systems
                 where verification of proofs proceeds by NC$^0$
                  circuits. We investigate the question of which languages
                 admit proof systems in this very restricted model.
                 Formulated alternatively, we ask which languages can be
                 enumerated by NC$^0$ functions. Our results show that
                 the answer to this problem is not determined by the
                 complexity of the language. On the one hand, we
                 construct NC$^0$ proof systems for a variety of
                  languages ranging from regular to NP-complete. On the
                 other hand, we show by combinatorial methods that even
                 easy regular languages such as Exact-OR do not admit
                 NC$^0$ proof systems. We also show that Majority does
                 not admit NC$^0$ proof systems. Finally, we present a
                 general construction of NC$^0$ proof systems for
                  regular languages with strongly connected NFAs.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "2",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Cygan:2013:MCP,
  author =       "Marek Cygan and Marcin Pilipczuk and Michal Pilipczuk
                 and Jakub Onufry Wojtaszczyk",
  title =        "On multiway cut parameterized above lower bounds",
  journal =      j-TOCT,
  volume =       "5",
  number =       "1",
  pages =        "3:1--3:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:04 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We introduce a concept of parameterizing a problem
                 above the optimum solution of its natural linear
                 programming relaxation and prove that the node multiway
                 cut problem is fixed-parameter tractable (FPT) in this
                 setting. As a consequence we prove that node multiway
                 cut is FPT, when parameterized above the maximum
                 separating cut, resolving an open problem of Razgon.
                 Our results imply $ O^*(4^k) $ algorithms for vertex
                 cover above maximum matching and almost 2-SAT as well
                 as an $ O^*(2^k) $ algorithm for node multiway cut with
                 a standard parameterization by the solution size,
                 improving previous bounds for these problems.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "3",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Englert:2013:EC,
  author =       "Matthias Englert and Heiko R{\"o}glin and Jacob
                 Sp{\"o}nemann and Berthold V{\"o}cking",
  title =        "Economical Caching",
  journal =      j-TOCT,
  volume =       "5",
  number =       "2",
  pages =        "4:1--4:??",
  month =        jul,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2493246.2493247",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:08 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We study the management of buffers and storages in
                 environments with unpredictably varying prices in a
                 competitive analysis. In the economical caching
                 problem, there is a storage with a certain capacity.
                 For each time step, an online algorithm is given a
                 price from the interval $ [1, \alpha] $, a consumption,
                 and possibly a buying limit. The online algorithm has
                 to decide the amount to purchase from some commodity,
                 knowing the parameter $ \alpha $ but without knowing
                 how the price evolves in the future. The algorithm can
                 purchase at most the buying limit. If it purchases more
                 than the current consumption, then the excess is stored
                 in the storage; otherwise, the gap between consumption
                 and purchase must be taken from the storage. The goal
                 is to minimize the total cost. Interesting motivating
                 applications are, for example, stream caching on mobile
                 devices with different classes of service, battery
                 management in micro hybrid cars, and the efficient
                 purchase of resources. First we consider the simple but
                 natural class of algorithms that can informally be
                 described as memoryless. We show that these algorithms
                 cannot achieve a competitive ratio below $ \sqrt \alpha
                 $. Then we present a more sophisticated deterministic
                  algorithm whose competitive ratio is expressed in terms
                  of the Lambert $W$ function. We prove that this
                 algorithm is optimal and that not even randomized
                 online algorithms can achieve a better competitive
                 ratio. On the other hand, we show how to achieve a
                 constant competitive ratio if the storage capacity of
                 the online algorithm exceeds the storage capacity of an
                 optimal offline algorithm by a factor of $ \log \alpha
                 $.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "4",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Bogdanov:2013:HFL,
  author =       "Andrej Bogdanov and Akinori Kawachi and Hidetoki
                 Tanaka",
  title =        "Hard Functions for Low-Degree Polynomials over Prime
                 Fields",
  journal =      j-TOCT,
  volume =       "5",
  number =       "2",
  pages =        "5:1--5:??",
  month =        jul,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2493246.2493248",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:08 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "In this article, we present a new hardness
                 amplification for low-degree polynomials over prime
                 fields, namely, we prove that if some function is
                 mildly hard to approximate by any low-degree
                 polynomials then the sum of independent copies of the
                 function is very hard to approximate by them. This
                 result generalizes the XOR lemma for low-degree
                 polynomials over the binary field given by Viola and
                 Wigderson [2008]. The main technical contribution is
                 the analysis of the Gowers norm over prime fields. For
                 the analysis, we discuss a generalized low-degree test,
                 which we call the Gowers test, for polynomials over
                 prime fields, which is a natural generalization of that
                 over the binary field given by Alon et al. [2003]. This
                 Gowers test provides a new technique to analyze the
                 Gowers norm over prime fields. Actually, the rejection
                 probability of the Gowers test can be analyzed in the
                 framework of Kaufman and Sudan [2008]. However, our
                 analysis is self-contained and quantitatively better.
                 By using our argument, we also prove the hardness of
                 modulo functions for low-degree polynomials over prime
                 fields.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "5",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Williams:2013:ATP,
  author =       "Ryan Williams",
  title =        "Alternation-Trading Proofs, Linear Programming, and
                 Lower Bounds",
  journal =      j-TOCT,
  volume =       "5",
  number =       "2",
  pages =        "6:1--6:??",
  month =        jul,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2493246.2493249",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:08 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "A fertile area of recent research has demonstrated
                 concrete polynomial-time lower bounds for natural hard
                 problems on restricted computational models. Among
                 these problems are Satisfiability, Vertex Cover,
                 Hamilton Path, MOD$_6$-SAT, Majority-of-Majority-SAT,
                 and Tautologies, to name a few. The proofs of these
                 lower bounds follow a proof-by-contradiction strategy
                 that we call resource trading or alternation trading.
                 An important open problem is to determine how powerful
                 such proofs can possibly be. We propose a methodology
                 for studying these proofs that makes them amenable to
                 both formal analysis and automated theorem proving. We
                 prove that the search for better lower bounds can often
                 be turned into a problem of solving a large series of
                 linear programming instances. Implementing a
                 small-scale theorem prover based on these results, we
                 extract new human-readable time lower bounds for
                 several problems and identify patterns that allow for
                 further generalization. The framework can also be used
                 to prove concrete limitations on the current
                 techniques.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "6",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Ron:2013:ANR,
  author =       "Dana Ron and Gilad Tsur",
  title =        "On Approximating the Number of Relevant Variables in a
                 Function",
  journal =      j-TOCT,
  volume =       "5",
  number =       "2",
  pages =        "7:1--7:??",
  month =        jul,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2493246.2493250",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:08 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "In this work we consider the problem of approximating
                 the number of relevant variables in a function given
                 query access to the function. Since obtaining a
                 multiplicative factor approximation is hard in general,
                 we consider several relaxations of the problem. In
                 particular, we consider a relaxation of the property
                 testing variant of the problem and we consider
                 relaxations in which we have a promise that the
                 function belongs to a certain family of functions
                 (e.g., linear functions). In the former relaxation the
                 task is to distinguish between the case that the number
                 of relevant variables is at most $k$, and the case in
                 which it is far from any function in which the number
                 of relevant variables is more than $ (1 + \gamma) k $
                 for a parameter $ \gamma $. We give both upper bounds
                 and almost matching lower bounds for the relaxations we
                 study.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "7",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Allender:2013:ISI,
  author =       "Eric Allender and Shafi Goldwasser",
  title =        "Introduction to the special issue on innovations in
                 theoretical computer science 2012",
  journal =      j-TOCT,
  volume =       "5",
  number =       "3",
  pages =        "8:1--8:??",
  month =        aug,
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2493252.2493253",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:12 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  note =         "Special issue on innovations in theoretical computer
                 science 2012.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "8",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Pagh:2013:CMM,
  author =       "Rasmus Pagh",
  title =        "Compressed matrix multiplication",
  journal =      j-TOCT,
  volume =       "5",
  number =       "3",
  pages =        "9:1--9:??",
  month =        aug,
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2493252.2493254",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:12 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  note =         "Special issue on innovations in theoretical computer
                 science 2012.",
  abstract =     "We present a simple algorithm that approximates the
                 product of $n$-by-$n$ real matrices $A$ and $B$. Let $
                 || A B ||_F $ denote the Frobenius norm of $ A B $, and
                 $b$ be a parameter determining the time\slash accuracy
                 trade-off. Given $2$-wise independent hash functions $
                 h_1, h_2 : [n] \to [b] $, and $ s_1, s_2 : [n] \to \{ -
                 1, + 1 \} $ the algorithm works by first
                 ``compressing'' the matrix product into the polynomial
                 $ p (x) = \Sigma_{k = 1}^n \left (\Sigma_{i = 1}^n
                  A_{ik} s_1 (i) x^{h_1 (i)} \right) \left (\Sigma_{j =
                  1}^n B_{kj} s_2 (j) x^{h_2 (j)} \right) $. Using the
                 fast Fourier transform to compute polynomial
                 multiplication, we can compute $ c_0, \ldots {}, c_{b -
                 1} $ such that $ \Sigma_i c_i x^i = (p (x) \bmod x^b) +
                 (p (x) \div x^b) $ in time $ {\~ O}(n^2 + n b) $. An
                 unbiased estimator of $ (A B)_{ij} $ with variance at
                 most $ || A B ||_F^2 / b $ can then be computed as: $
                 C_{ij} = s_1 (i) s_2 (j) c_{(h_1 (i) + h_2 (j)) \bmod
                 b} $. Our approach also leads to an algorithm for
                 computing AB exactly, with high probability, in time $
                 {\~ O}(N + n b) $ in the case where $A$ and $B$ have at
                 most $N$ nonzero entries, and $ A B $ has at most $b$
                 nonzero entries.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "9",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Viderman:2013:LTD,
  author =       "Michael Viderman",
  title =        "Linear-time decoding of regular expander codes",
  journal =      j-TOCT,
  volume =       "5",
  number =       "3",
  pages =        "10:1--10:??",
  month =        aug,
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2493252.2493255",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:12 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  note =         "Special issue on innovations in theoretical computer
                 science 2012.",
  abstract =     "Sipser and Spielman (IEEE IT, [1996]) showed that any
                 $ (c, d) $-regular expander code with expansion
                 parameter $ > 3 / 4 $ is decodable in linear time from
                 a constant fraction of errors. Feldman et al. (IEEE IT,
                  [2007]) proved that expansion parameter $ > 2 / 3 + 1 /
                  (3 c) $ is sufficient to correct a constant fraction
                 of errors in polynomial time using LP decoding. In this
                 work, we give a simple combinatorial algorithm that
                 achieves even better parameters. In particular, our
                 algorithm runs in linear time and works for any
                  expansion parameter $ > 2 / 3 - 1 / (6 c) $. We also
                 prove that our decoding algorithm can be executed in
                 logarithmic time on a linear number of parallel
                 processors.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "10",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Ozols:2013:QRS,
  author =       "Maris Ozols and Martin Roetteler and J{\'e}r{\'e}mie
                 Roland",
  title =        "Quantum rejection sampling",
  journal =      j-TOCT,
  volume =       "5",
  number =       "3",
  pages =        "11:1--11:??",
  month =        aug,
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2493252.2493256",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:12 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  note =         "Special issue on innovations in theoretical computer
                 science 2012.",
  abstract =     "Rejection sampling is a well-known method to sample
                 from a target distribution, given the ability to sample
                  from a given distribution. The method was first
                 formalized by von Neumann [1951] and has many
                 applications in classical computing. We define a
                 quantum analogue of rejection sampling: given a black
                 box producing a coherent superposition of (possibly
                 unknown) quantum states with some amplitudes, the
                 problem is to prepare a coherent superposition of the
                 same states, albeit with different target amplitudes.
                 The main result of this article is a tight
                 characterization of the query complexity of this
                 quantum state generation problem. We exhibit an
                 algorithm, which we call quantum rejection sampling,
                 and analyze its cost using semidefinite programming.
                 Our proof of a matching lower bound is based on the
                  automorphism principle, which allows one to symmetrize any
                 algorithm over the automorphism group of the problem.
                 Our main technical innovation is an extension of the
                 automorphism principle to continuous groups that arise
                 for quantum state generation problems where the oracle
                 encodes unknown quantum states, instead of just
                 classical data. Furthermore, we illustrate how quantum
                 rejection sampling may be used as a primitive in
                 designing quantum algorithms, by providing three
                 different applications. We first show that it was
                 implicitly used in the quantum algorithm for linear
                  systems of equations by Harrow et al. [2009]. Second, we
                 show that it can be used to speed up the main step in
                 the quantum Metropolis sampling algorithm by Temme et
                 al. [2011]. Finally, we derive a new quantum algorithm
                 for the hidden shift problem of an arbitrary Boolean
                 function and relate its query complexity to
                 ``water-filling'' of the Fourier spectrum.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "11",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Drucker:2013:HCP,
  author =       "Andrew Drucker",
  title =        "High-confidence predictions under adversarial
                 uncertainty",
  journal =      j-TOCT,
  volume =       "5",
  number =       "3",
  pages =        "12:1--12:??",
  month =        aug,
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2493252.2493257",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:12 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  note =         "Special issue on innovations in theoretical computer
                 science 2012.",
  abstract =     "We study the setting in which the bits of an unknown
                 infinite binary sequence $x$ are revealed sequentially
                 to an observer. We show that very limited assumptions
                 about $x$ allow one to make successful predictions
                 about unseen bits of $x$. First, we study the problem
                 of successfully predicting a single 0 from among the
                 bits of $x$. In our model, we have only one chance to
                 make a prediction, but may do so at a time of our
                 choosing. This model is applicable to a variety of
                 situations in which we want to perform an action of
                 fixed duration, and need to predict a ``safe''
                 time-interval to perform it. Letting $ N_t $ denote the
                 number of $1$'s among the first $t$ bits of $x$, we say
                  that $x$ is ``$ \epsilon $-weakly sparse'' if $ \liminf_t
                  (N_t / t) \leq \epsilon $. Our main result is a
                 randomized algorithm that, given any $ \epsilon
                 $-weakly sparse sequence $x$, predicts a $0$ of $x$
                 with success probability as close as desired to $ 1 -
                 \epsilon $. Thus, we can perform this task with
                 essentially the same success probability as under the
                 much stronger assumption that each bit of $x$ takes the
                 value $1$ independently with probability $ \epsilon $.
                 We apply this result to show how to successfully
                  predict a bit ($0$ or $1$) under a broad class of
                 possible assumptions on the sequence $x$. The
                 assumptions are stated in terms of the behavior of a
                 finite automaton $M$ reading the bits of $x$. We also
                 propose and solve a variant of the well-studied
                 ``ignorant forecasting'' problem. For every $ \epsilon
                 > 0 $, we give a randomized forecasting algorithm $
                 S_\epsilon $ that, given sequential access to a binary
                 sequence $x$, makes a prediction of the form: ``A $p$
                 fraction of the next $N$ bits will be $1$'s.'' (The
                 algorithm gets to choose $p$, $N$, and the time of the
                 prediction.) For any fixed sequence $x$, the forecast
                 fraction $p$ is accurate to within $ \pm {} \epsilon $
                 with probability $ 1 - \epsilon $.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "12",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Chattopadhyay:2013:GIA,
  author =       "Arkadev Chattopadhyay and Jacobo Tor{\'a}n and Fabian
                 Wagner",
  title =        "Graph Isomorphism is Not {AC$^0$}-Reducible to Group
                 Isomorphism",
  journal =      j-TOCT,
  volume =       "5",
  number =       "4",
  pages =        "13:1--13:??",
  month =        nov,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2540088",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:15 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We give a new upper bound for the Group and Quasigroup
                 Isomorphism problems when the input structures are
                 given explicitly by multiplication tables. We show that
                 these problems can be computed by polynomial size
                 nondeterministic circuits of unbounded fan-in with $
                 O(\log \log n) $ depth and $ O(\log^2 n) $
                  nondeterministic bits, where $n$ is the number of group
                 elements. This improves the existing upper bound for
                 the problems. In the previous bound the circuits have
                 bounded fan-in but depth $ O(\log^2 n) $ and also $
                 O(\log^2 n) $ nondeterministic bits. We then prove that
                 the kind of circuits from our upper bound cannot
                 compute the Parity function. Since Parity is
                 AC$^0$-reducible to Graph Isomorphism, this implies
                 that Graph Isomorphism is strictly harder than Group or
                 Quasigroup Isomorphism under the ordering defined by
                 AC$^0$ reductions. We extend this result to the
                 stronger ACC$^0 [p]$ reduction and its randomized
                 version.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "13",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{De:2013:EOH,
  author =       "Anindya De and Elchanan Mossel",
  title =        "Explicit Optimal Hardness via {Gaussian} Stability
                 Results",
  journal =      j-TOCT,
  volume =       "5",
  number =       "4",
  pages =        "14:1--14:??",
  month =        nov,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2505766",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:15 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "The results of Raghavendra [2008] show that assuming
                 Khot's Unique Games Conjecture [2002], for every
                 constraint satisfaction problem there exists a generic
                 semidefinite program that achieves the optimal
                 approximation factor. This result is existential as it
                 does not provide an explicit optimal rounding procedure
                  nor does it allow one to calculate exactly the Unique Games
                 hardness of the problem. Obtaining an explicit optimal
                 approximation scheme and the corresponding
                 approximation factor is a difficult challenge for each
                 specific approximation problem. Khot et al. [2004]
                 established a general approach for determining the
                 exact approximation factor and the corresponding
                 optimal rounding algorithm for any given constraint
                 satisfaction problem. However, this approach crucially
                 relies on results explicitly proving optimal partitions
                 in the Gaussian space. Until recently, Borell's result
                 [1985] was the only nontrivial Gaussian partition
                 result known. In this article we derive the first
                 explicit optimal approximation algorithm and the
                 corresponding approximation factor using a new result
                 on Gaussian partitions due to Isaksson and Mossel
                 [2012]. This Gaussian result allows us to determine the
                 exact Unique Games Hardness of MAX-$3$-EQUAL. In
                 particular, our results show that Zwick's algorithm for
                 this problem achieves the optimal approximation factor
                 and prove that the approximation achieved by the
                 algorithm is $ \approx 0.796 $ as conjectured by Zwick
                 [1998]. We further use the previously known optimal
                 Gaussian partitions results to obtain a new Unique
                 Games Hardness factor for MAX-$k$-CSP: Using the
                 well-known fact that jointly normal pairwise
                 independent random variables are fully independent, we
                 show that the UGC hardness of Max-$k$-CSP is $ \lceil
                  (k + 1) / 2 \rceil / 2^{k - 1} $, improving on results of
                 Austrin and Mossel [2009].",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "14",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Dalmau:2013:RSC,
  author =       "V{\'\i}ctor Dalmau and Andrei Krokhin",
  title =        "Robust Satisfiability for {CSPs}: Hardness and
                 Algorithmic Results",
  journal =      j-TOCT,
  volume =       "5",
  number =       "4",
  pages =        "15:1--15:??",
  month =        nov,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2540090",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:15 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "An algorithm for a constraint satisfaction problem is
                 called robust if it outputs an assignment satisfying at
                 least a $ (1 - f (\epsilon)) $-fraction of constraints
                 for each $ (1 - \epsilon) $-satisfiable instance (i.e.,
                  such that at most an $ \epsilon $-fraction of constraints
                 needs to be removed to make the instance satisfiable),
                 where $ f(\epsilon) \to 0 $ as $ \epsilon \to 0 $. We
                 establish an algebraic framework for analyzing
                 constraint satisfaction problems admitting an efficient
                 robust algorithm with functions $f$ of a given growth
                 rate. We use this framework to derive hardness results.
                 We also describe three classes of problems admitting an
                 efficient robust algorithm such that $f$ is $ O (1 /
                 \log (1 / \epsilon)) $, $ O(\epsilon^{1 / k}) $ for
                 some $ k > 1 $, and $ O(\epsilon) $, respectively.
                 Finally, we give a complete classification of robust
                 satisfiability with a given $f$ for the Boolean case.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "15",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Fellows:2013:DFP,
  author =       "Michael Fellows and Fedor V. Fomin and Daniel
                 Lokshtanov and Elena Losievskaja and Frances Rosamond
                 and Saket Saurabh",
  title =        "Distortion is Fixed Parameter Tractable",
  journal =      j-TOCT,
  volume =       "5",
  number =       "4",
  pages =        "16:1--16:??",
  month =        nov,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2489789",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:15 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We study low-distortion embedding of metric spaces
                 into the line, and more generally, into the shortest
                 path metric of trees, from the parameterized complexity
                 perspective. Let $ M = M (G) $ be the shortest path
                 metric of an edge-weighted graph $G$, with the vertex
                 set $ V (G) $ and the edge set $ E (G) $, on $n$
                 vertices. We give the first fixed parameter tractable
                 algorithm that for an unweighted graph metric $M$ and
                 integer $d$ either constructs an embedding of $M$ into
                 the line with distortion at most $d$, or concludes that
                  no such embedding exists. Our algorithm requires $
                  O(n d^4 (2 d + 1)^{2 d}) $ time, which is a significant
                 improvement over the best previous algorithm that runs
                 in time $ O(n^{4d + 2} d^{O(1)}) $. Because of its
                 apparent similarity to the notoriously hard Bandwidth
                 Minimization problem, we find it surprising that this
                 problem turns out to be fixed parameter tractable. We
                 extend our results on embedding unweighted graph metric
                 into the line in two ways. First, we give an algorithm
                 to construct small-distortion embeddings of weighted
                 graph metrics. The running time of our algorithm is $
                 O(n (d W)^4 (2 d + 1)^{2dW}) $, where $W$ is the
                 largest edge weight of the input graph. To complement
                 this result, we show that the exponential dependence on
                 the maximum edge weight is unavoidable. In particular,
                 we show that deciding whether a weighted graph metric $
                 M (G) $ with maximum weight $ W < | V (G)| $ can be
                 embedded into the line with distortion at most $d$ is
                 NP-complete for every fixed rational $ d \geq 2 $. This
                 rules out any possibility of an algorithm with running
                 time $ O((n W)^{h(d)}) $ where $h$ is a function of $d$
                 alone. Second, we consider more general host metrics
                 for which analogous results hold. In particular, we
                  prove that for any tree $T$ with maximum degree $ \Delta $,
                 embedding $M$ into a shortest path metric of $T$ is
                 fixed parameter tractable, parameterized by $ (\Delta,
                 d) $.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "16",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Razborov:2013:RA,
  author =       "Alexander Razborov and Emanuele Viola",
  title =        "Real Advantage",
  journal =      j-TOCT,
  volume =       "5",
  number =       "4",
  pages =        "17:1--17:??",
  month =        nov,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2540089",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:15 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We highlight the challenge of proving correlation
                 bounds between boolean functions and real-valued
                 polynomials, where any non-boolean output counts
                 against correlation. We prove that real-valued
                  polynomials of degree $ \frac{1}{2} \lg_2 \lg_2 n $ have
                 correlation with parity at most zero. Such a result is
                 false for modular and threshold polynomials. Its proof
                 is based on a variant of an anti-concentration result
                 by Costello et al. [2006].",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "17",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Harkins:2013:ELA,
  author =       "Ryan C. Harkins and John M. Hitchcock",
  title =        "Exact Learning Algorithms, Betting Games, and Circuit
                 Lower Bounds",
  journal =      j-TOCT,
  volume =       "5",
  number =       "4",
  pages =        "18:1--18:??",
  month =        nov,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2539126.2539130",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Dec 12 17:32:15 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "This article extends and improves the work of Fortnow
                 and Klivans [2009], who showed that if a circuit class
                 $C$ has an efficient learning algorithm in Angluin's
                 model of exact learning via equivalence and membership
                 queries [Angluin 1988], then we have the lower bound
                  EXP$^{NP} \not \subseteq C$. We use entirely different
                 techniques involving betting games [Buhrman et al.
                 2001] to remove the NP oracle and improve the lower
                 bound to EXP not $C$. This shows that it is even more
                 difficult to design a learning algorithm for $C$ than
                 the results of Fortnow and Klivans [2009] indicated. We
                 also investigate the connection between betting games
                 and natural proofs, and as a corollary the existence of
                 strong pseudorandom generators. Our results also yield
                 further evidence that the class of Boolean circuits has
                 no efficient exact learning algorithm. This is because
                 our separation is strong in that it yields a natural
                 proof [Razborov and Rudich 1997] against the class.
                 From this we conclude that an exact learning algorithm
                 for Boolean circuits would imply that strong
                 pseudorandom generators do not exist, which contradicts
                 widely believed conjectures from cryptography. As a
                 corollary we obtain that if strong pseudorandom
                 generators exist, then there is no exact learning
                 algorithm for Boolean circuits.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "18",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Ada:2014:HBP,
  author =       "Anil Ada and Arkadev Chattopadhyay and Stephen A. Cook
                 and Lila Fontes and Michal Kouck{\'y} and Toniann
                 Pitassi",
  title =        "The Hardness of Being Private",
  journal =      j-TOCT,
  volume =       "6",
  number =       "1",
  pages =        "1:1--1:??",
  month =        mar,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2567671",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Apr 1 06:02:31 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Kushilevitz [1989] initiated the study of
                 information-theoretic privacy within the context of
                 communication complexity. Unfortunately, it has been
                 shown that most interesting functions are not privately
                 computable [Kushilevitz 1989, Brandt and Sandholm
                 2008]. The unattainability of perfect privacy for many
                 functions motivated the study of approximate privacy.
                 Feigenbaum et al. [2010a, 2010b] define notions of
                 worst-case as well as average-case approximate privacy
                 and present several interesting upper bounds as well as
                 some open problems for further study. In this article,
                 we obtain asymptotically tight bounds on the trade-offs
                 between both the worst-case and average-case
                 approximate privacy of protocols and their
                 communication cost for Vickrey auctions. Further, we
                 relate the notion of average-case approximate privacy
                 to other measures based on information cost of
                 protocols. This enables us to prove exponential lower
                 bounds on the subjective approximate privacy of
                 protocols for computing the Intersection function,
                 independent of its communication cost. This proves a
                 conjecture of Feigenbaum et al. [2010a].",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Austrin:2014:NNH,
  author =       "Per Austrin and Ryan O'Donnell and Li-Yang Tan and
                 John Wright",
  title =        "New {NP}-Hardness Results for $3$-Coloring and
                 $2$-to-$1$ Label Cover",
  journal =      j-TOCT,
  volume =       "6",
  number =       "1",
  pages =        "2:1--2:??",
  month =        mar,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2537800",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Apr 1 06:02:31 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We show that given a 3-colorable graph, it is NP-hard
                  to find a 3-coloring with a $ (16 / 17 + \epsilon) $
                  fraction of the edges bichromatic. In a related result, we show
                 that given a satisfiable instance of the 2-to-1 Label
                 Cover problem, it is NP-hard to find a $ (23 / 24 +
                 \epsilon) $-satisfying assignment.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "2",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Glasser:2014:UDN,
  author =       "Christian Gla{\ss}er and John M. Hitchcock and A.
                 Pavan and Stephan Travers",
  title =        "Unions of Disjoint {NP}-Complete Sets",
  journal =      j-TOCT,
  volume =       "6",
  number =       "1",
  pages =        "3:1--3:??",
  month =        mar,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2591508",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Apr 1 06:02:31 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We study the following question: if A and B are
                 disjoint NP-complete sets, then is A \cup B
                 NP-complete? We provide necessary and sufficient
                 conditions under which the union of disjoint
                 NP-complete sets remain complete.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "3",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Sun:2014:ECN,
  author =       "Shu-Ming Sun and Ning Zhong",
  title =        "On Effective Convergence of Numerical Solutions for
                 Differential Equations",
  journal =      j-TOCT,
  volume =       "6",
  number =       "1",
  pages =        "4:1--4:??",
  month =        mar,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2578219",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Apr 1 06:02:31 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "This article studies the effective convergence of
                 numerical solutions of initial value problems (IVPs)
                 for ordinary differential equations (ODEs). A
                 convergent sequence $ \{ Y_m \} $ of numerical
                 solutions is said to be effectively convergent to the
                 exact solution if there is an algorithm that computes
                 an $ N \in \mathbb{N} $, given an arbitrary $ n \in \mathbb{N} $ as
                 input, such that the error between $ Y_m $ and the
                 exact solution is less than $ 2^{-n} $ for all $ m \geq
                 N $. It is proved that there are convergent numerical
                 solutions generated from Euler's method which are not
                 effectively convergent. It is also shown that a
                 theoretically-proved-computable solution using Picard's
                 iteration method might not be computable by classical
                 numerical methods, which suggests that sometimes there
                 is a gap between theoretical computability and
                 practical numerical computations concerning solutions
                 of ODEs. Moreover, it is noted that the main theorem
                 (Theorem 4.1) provides an example of an IVP with a
                 nonuniform Lipschitz function for which the numerical
                 solutions generated by Euler's method are still
                 convergent.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "4",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{ODonnell:2014:OLB,
  author =       "Ryan O'Donnell and Yi Wu and Yuan Zhou",
  title =        "Optimal Lower Bounds for Locality-Sensitive Hashing
                 (Except When $q$ is Tiny)",
  journal =      j-TOCT,
  volume =       "6",
  number =       "1",
  pages =        "5:1--5:??",
  month =        mar,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2578221",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Apr 1 06:02:31 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/hash.bib;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We study lower bounds for Locality-Sensitive Hashing
                 (LSH) in the strongest setting: point sets in $ \{ 0, 1
                 \}^d $ under the Hamming distance. Recall that $H$ is
                 said to be an $ (r, c r, p, q) $-sensitive hash family
                 if all pairs $ x, y \in \{ 0, 1 \}^d $ with $ {\rm
                 dist}(x, y) \leq r $ have probability at least $p$ of
                 collision under a randomly chosen $ h \in H $, whereas
                 all pairs $ x, y \in \{ 0, 1 \}^d $ with $ {\rm
                 dist}(x, y) \geq c r $ have probability at most $q$ of
                 collision. Typically, one considers $ d \to \infty $,
                 with $ c > 1 $ fixed and $q$ bounded away from $0$. For
                 its applications to approximate nearest-neighbor search
                 in high dimensions, the quality of an LSH family $H$ is
                 governed by how small its $ \rho $ parameter $ \rho =
                 \ln (1 / p) / \ln (1 / q) $ is as a function of the
                 parameter $c$. The seminal paper of Indyk and Motwani
                 [1998] showed that for each $ c \geq 1 $, the extremely
                 simple family $ H = \{ x \mapsto x_i : i \in [d] \} $
                 achieves $ \rho \leq 1 / c $. The only known lower
                 bound, due to Motwani et al. [2007], is that $ \rho $
                 must be at least $ (e^{1 / c} - 1) / (e^{1 / c} + 1)
                 \geq .46 / c $ (minus $ o_d(1) $ ). The contribution of
                 this article is twofold. (1) We show the ``optimal''
                 lower bound for $ \rho $: it must be at least $ 1 / c $
                 (minus $ o_d(1) $ ). Our proof is very simple,
                 following almost immediately from the observation that
                 the noise stability of a boolean function at time $t$
                 is a log-convex function of $t$. (2) We raise and
                 discuss the following issue: neither the application of
                 LSH to nearest-neighbor search nor the known LSH lower
                 bounds hold as stated if the $q$ parameter is tiny. Here,
                 ``tiny'' means $ q = 2^{- \Theta (d)} $, a parameter
                 range we believe is natural.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "5",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Cygan:2014:CCG,
  author =       "Marek Cygan and Stefan Kratsch and Marcin Pilipczuk
                 and Michal Pilipczuk and Magnus Wahlstr{\"o}m",
  title =        "Clique Cover and Graph Separation: New
                 Incompressibility Results",
  journal =      j-TOCT,
  volume =       "6",
  number =       "2",
  pages =        "6:1--6:??",
  month =        may,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2594439",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Jun 5 14:42:53 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "The field of kernelization studies polynomial-time
                 preprocessing routines for hard problems in the
                 framework of parameterized complexity. In this article,
                 we show that, unless the polynomial hierarchy collapses
                 to its third level, the following parameterized
                 problems do not admit a polynomial-time preprocessing
                 algorithm that reduces the size of an instance to
                 polynomial in the parameter: ---Edge Clique Cover,
                 parameterized by the number of cliques, ---Directed
                 Edge/Vertex Multiway Cut, parameterized by the size of
                 the cutset, even in the case of two terminals,
                 ---Edge/Vertex Multicut, parameterized by the size of
                 the cutset, and ---$k$-Way Cut, parameterized by the
                 size of the cutset.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "6",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Chen:2014:HIA,
  author =       "Yijia Chen and J{\"o}rg Flum and Moritz M{\"u}ller",
  title =        "Hard Instances of Algorithms and Proof Systems",
  journal =      j-TOCT,
  volume =       "6",
  number =       "2",
  pages =        "7:1--7:??",
  month =        may,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2601336",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Jun 5 14:42:53 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "If the class TAUT of tautologies of propositional
                 logic has no almost optimal algorithm, then every
                 algorithm A deciding TAUT has a hard sequence, that is,
                 a polynomial time computable sequence witnessing that A
                 is not almost optimal. We show that this result extends
                 to every $ \Pi_t^p $-complete problem with $ t \geq 1 $;
                 however, assuming the Measure Hypothesis, there is a
                 problem which has no almost optimal algorithm but is
                 decided by an algorithm without hard sequences. For
                 problems Q with an almost optimal algorithm, we analyze
                 whether every algorithm deciding Q, which is not almost
                 optimal, has a hard sequence.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "7",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Goldberg:2014:CAC,
  author =       "Leslie Ann Goldberg and Mark Jerrum",
  title =        "The Complexity of Approximately Counting Tree
                 Homomorphisms",
  journal =      j-TOCT,
  volume =       "6",
  number =       "2",
  pages =        "8:1--8:??",
  month =        may,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2600917",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Jun 5 14:42:53 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We study two computational problems, parameterised by
                 a fixed tree H. \#HOMSTO( H ) is the problem of
                 counting homomorphisms from an input graph G to H.
                 \#WHOMSTO( H ) is the problem of counting weighted
                 homomorphisms to H, given an input graph G and a weight
                 function for each vertex v of G. Even though H is a
                 tree, these problems turn out to be sufficiently rich
                 to capture all of the known approximation behaviour in
                 \#P. We give a complete trichotomy for \#WHOMSTO( H ).
                 If H is a star, then \#WHOMSTO( H ) is in FP. If H is
                 not a star but it does not contain a certain induced
                 subgraph J$_3$, then \#WHOMSTO( H ) is equivalent under
                 approximation-preserving (AP) reductions to \#BIS, the
                 problem of counting independent sets in a bipartite
                 graph. This problem is complete for the class
                 \#RH$\Pi_1$ under AP-reductions. Finally, if H contains an
                 induced J$_3$, then \#WHOMSTO( H ) is equivalent under
                 AP-reductions to \#SAT, the problem of counting
                 satisfying assignments to a CNF Boolean formula. Thus,
                 \#WHOMSTO( H ) is complete for \#P under AP-reductions.
                 The results are similar for \#HOMSTO( H ) except that a
                 rich structure emerges if H contains an induced J$_3$.
                 We show that there are trees H for which \#HOMSTO( H )
                 is \#SAT-equivalent (disproving a plausible
                 conjecture of Kelk). However, it is still not known
                 whether \#HOMSTO( H ) is \#SAT-hard for every tree H
                 which contains an induced J$_3$. It turns out that there
                 is an interesting connection between these
                 homomorphism-counting problems and the problem of
                 approximating the partition function of the
                 ferromagnetic Potts model. In particular, we show that
                 for a family of graphs J$_q$, parameterised by a positive
                 integer $q$, the problem \#HOMSTO( J$_q$ ) is
                 AP-interreducible with the problem of approximating the
                 partition function of the $q$-state Potts model. It was
                 not previously known that the Potts model had a
                 homomorphism-counting interpretation. We use this
                 connection to obtain some additional upper bounds for
                 the approximation complexity of \#HOMSTO( J$_q$ ).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "8",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Etessami:2014:NCC,
  author =       "Kousha Etessami and Alistair Stewart and Mihalis
                 Yannakakis",
  title =        "A Note on the Complexity of Comparing Succinctly
                 Represented Integers, with an Application to Maximum
                 Probability Parsing",
  journal =      j-TOCT,
  volume =       "6",
  number =       "2",
  pages =        "9:1--9:??",
  month =        may,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2601327",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Jun 5 14:42:53 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "The following two decision problems capture the
                 complexity of comparing integers or rationals that are
                 succinctly represented in product-of-exponentials
                 notation, or equivalently, via arithmetic circuits
                 using only multiplication and division gates, and
                 integer inputs. Input instance: Four lists of positive
                 integers: a 1,\ldots{}, an \in N+ n; b 1,\ldots{}, bn
                 \in N+ n; c 1,\ldots{}, cm \in N+ m; d 1, \ldots{}, dm
                 \in N+ m; where each of the integers is represented in
                 binary. Problem 1 (equality testing): Decide whether $
                 a_1 b_1 a_2 b_2 \cdots a_n b_n = c_1 d_1 c_2 d_2 \cdots
                 c_m d_m $. Problem 2 (inequality testing): Decide
                 whether $ a_1 b_1 a_2 b_2 \cdots a_n b_n \geq c_1 d_1
                 c_2 d_2 \cdots c_m d_m $. Problem 1 is easily decidable
                 in polynomial time using a simple iterative algorithm.
                 Problem 2 is much harder. We observe that the
                 complexity of Problem 2 is intimately connected to deep
                 conjectures and results in number theory. In
                 particular, if a refined form of the ABC conjecture
                 formulated by Baker in 1998 holds, or if the older
                 Lang-Waldschmidt conjecture (formulated in 1978) on
                 linear forms in logarithms holds, then Problem 2 is
                 decidable in P-time (in the standard Turing model of
                 computation). Moreover, it follows from the best
                 available quantitative bounds on linear forms in
                 logarithms, namely, by Baker and W{\"u}stholz [1993] or
                 Matveev [2000], that if m and n are fixed universal
                 constants then Problem 2 is decidable in P-time
                 (without relying on any conjectures). This latter fact
                 was observed earlier by Shub [1993]. We describe one
                 application: P-time maximum probability parsing for
                 arbitrary stochastic context-free grammars (where
                 $ \epsilon $-rules are allowed).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "9",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Allender:2014:ISI,
  author =       "Eric Allender and Shafi Goldwasser",
  title =        "Introduction to the Special Issue on Innovations in
                 Theoretical Computer Science 2012 --- {Part II}",
  journal =      j-TOCT,
  volume =       "6",
  number =       "3",
  pages =        "10:1--10:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2638595",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Oct 1 16:40:04 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "10",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
  remark =       "Special issue on innovations in theoretical computer
                 science 2012 --- Part II.",
}

@Article{Kanade:2014:LHS,
  author =       "Varun Kanade and Thomas Steinke",
  title =        "Learning Hurdles for Sleeping Experts",
  journal =      j-TOCT,
  volume =       "6",
  number =       "3",
  pages =        "11:1--11:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2505983",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Oct 1 16:40:04 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We study the online decision problem in which the set
                 of available actions varies over time, also called the
                 sleeping experts problem. We consider the setting in
                 which the performance comparison is made with respect
                 to the best ordering of actions in hindsight. In this
                 article, both the payoff function and the availability
                 of actions are adversarial. Kleinberg et al. [2010]
                 gave a computationally efficient no-regret algorithm in
                 the setting in which payoffs are stochastic. Kanade et
                 al. [2009] gave an efficient no-regret algorithm in the
                 setting in which action availability is stochastic.
                 However, the question of whether there exists a
                 computationally efficient no-regret algorithm in the
                 adversarial setting was posed as an open problem by
                 Kleinberg et al. [2010]. We show that such an algorithm
                 would imply an algorithm for PAC learning DNF, a
                 long-standing important open problem. We also consider
                 the setting in which the number of available actions is
                 restricted and study its relation to agnostic learning
                 of monotone disjunctions over examples with bounded
                 Hamming weight.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "11",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
  remark =       "Special issue on innovations in theoretical computer
                 science 2012 --- Part II.",
}

@Article{Valiant:2014:ERF,
  author =       "Paul Valiant",
  title =        "Evolvability of Real Functions",
  journal =      j-TOCT,
  volume =       "6",
  number =       "3",
  pages =        "12:1--12:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2633598",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Oct 1 16:40:04 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We formulate a notion of evolvability for functions
                 with domain and range that are real-valued vectors, a
                 compelling way of expressing many natural biological
                 processes. We show that linear and fixed-degree
                 polynomial functions are evolvable in the following
                 dually-robust sense: There is a single evolution
                 algorithm that, for all convex loss functions,
                 converges for all distributions. It is possible that
                 such dually-robust results can be achieved by simpler
                 and more-natural evolution algorithms. Towards this
                 end, we introduce a simple and natural algorithm that
                 we call wide-scale random noise and prove a
                 corresponding result for the L$_2$ metric. We
                 conjecture that the algorithm works for a more general
                 class of metrics.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "12",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
  remark =       "Special issue on innovations in theoretical computer
                 science 2012 --- Part II.",
}

@Article{Brakerski:2014:LFH,
  author =       "Zvika Brakerski and Craig Gentry and Vinod
                 Vaikuntanathan",
  title =        "{(Leveled)} Fully Homomorphic Encryption without
                 Bootstrapping",
  journal =      j-TOCT,
  volume =       "6",
  number =       "3",
  pages =        "13:1--13:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2633600",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Oct 1 16:40:04 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We present a novel approach to fully homomorphic
                 encryption (FHE) that dramatically improves performance
                 and bases security on weaker assumptions. A central
                 conceptual contribution in our work is a new way of
                 constructing leveled, fully homomorphic encryption
                 schemes (capable of evaluating arbitrary
                 polynomial-size circuits of a-priori bounded depth),
                 without Gentry's bootstrapping procedure. Specifically,
                 we offer a choice of FHE schemes based on the learning
                 with errors (LWE) or Ring LWE (RLWE) problems that have
                 $ 2^\lambda $ security against known attacks. We construct
                 the following. (1) A leveled FHE scheme that can
                 evaluate depth-$L$ arithmetic circuits (composed of
                 fan-in 2 gates) using $ O(\lambda \cdot L^3) $ per-gate
                 computation, quasilinear in the security parameter.
                 Security is based on RLWE for an approximation factor
                 exponential in $L$. This construction does not use the
                 bootstrapping procedure. (2) A leveled FHE scheme that
                 can evaluate depth-$L$ arithmetic circuits (composed of
                 fan-in 2 gates) using $ O(\lambda^2) $ per-gate
                 computation, which is independent of $L$. Security is
                 based on RLWE for quasipolynomial factors. This
                 construction uses bootstrapping as an optimization. We
                 obtain similar results for LWE, but with worse
                 performance. All previous (leveled) FHE schemes
                 required a per-gate computation of $ \Omega (\lambda^{3.5}) $,
                 and all of them relied on subexponential hardness
                 assumptions. We introduce a number of further
                 optimizations to our scheme based on the Ring LWE
                 assumption. As an example, for circuits of large width
                 (e.g., where a constant fraction of levels have width $
                 \Omega (\lambda)$), we can reduce the per-gate
                 computation of the bootstrapped version to $ O
                 (\lambda)$, independent of $L$, by batching the
                 bootstrapping operation. At the core of our
                 construction is a new approach for managing the noise
                 in lattice-based ciphertexts, significantly extending
                 the techniques of Brakerski and Vaikuntanathan
                 [2011b].",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "13",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
  remark =       "Special issue on innovations in theoretical computer
                 science 2012 --- Part II.",
}

@Article{Cook:2014:OWF,
  author =       "James Cook and Omid Etesami and Rachel Miller and Luca
                 Trevisan",
  title =        "On the One-Way Function Candidate Proposed by
                 {Goldreich}",
  journal =      j-TOCT,
  volume =       "6",
  number =       "3",
  pages =        "14:1--14:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2633602",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Oct 1 16:40:04 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Goldreich [2000] proposed a candidate one-way function
                 based on a bipartite graph of small right-degree $d$,
                 where the vertices on the left (resp. right) represent
                 input (resp. output) bits of the function. Each output
                 bit is computed by evaluating a fixed $d$-ary binary
                 predicate on the input bits adjacent to that output
                 bit. We study this function when the predicate is
                 random or depends linearly on many of its input bits.
                 We assume that the graph is a random balanced bipartite
                 graph with right-degree $d$. Inverting this function as a
                 one-way function by definition means finding an element
                 in the preimage of output of this function for a random
                 input. We bound the expected size of this preimage.
                 Next, using the preceding bound, we prove that two
                 restricted types of backtracking algorithms called
                 myopic and drunk backtracking algorithms with high
                 probability take exponential time to invert the
                 function, even if we allow the algorithms to use DPLL
                 elimination rules. (For drunk algorithms, a similar
                 result was proved by Itsykson [2010].) We also ran a
                 SAT solver on the satisfiability problem equivalent to
                 the problem of inverting the function, and
                 experimentally observed an exponential increase in
                 running time as a function of the input length.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "14",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
  remark =       "Special issue on innovations in theoretical computer
                 science 2012 --- Part II.",
}

@Article{Cook:2014:CCC,
  author =       "Stephen A. Cook and Yuval Filmus and Dai Tri Man
                 L{\^e}",
  title =        "The complexity of the comparator circuit value
                 problem",
  journal =      j-TOCT,
  volume =       "6",
  number =       "4",
  pages =        "15:1--15:??",
  month =        aug,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2635822",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Aug 18 17:06:20 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "In 1990, Subramanian [1990] defined the complexity
                 class CC as the set of problems log-space reducible to
                 the comparator circuit value problem (CCV). He and Mayr
                 showed that NL $ \subseteq $ CC $ \subseteq $ P, and
                 proved that in addition to CCV several other problems
                 are complete for CC, including the stable marriage
                 problem, and finding the lexicographically first
                 maximal matching in a bipartite graph. Although the
                 class has not received much attention since then, we
                 are interested in CC because we conjecture that it is
                 incomparable with the parallel class NC which also
                 satisfies NL $ \subseteq $ NC $ \subseteq $ P, and note
                 that this conjecture implies that none of the
                 CC-complete problems has an efficient polylog time
                 parallel algorithm. We provide evidence for our
                 conjecture by giving oracle settings in which
                 relativized CC and relativized NC are incomparable. We
                 give several alternative definitions of CC, including
                 (among others) the class of problems computed by
                 uniform polynomial-size families of comparator circuits
                 supplied with copies of the input and its negation, the
                 class of problems AC0-reducible to Ccv, and the class
                 of problems computed by uniform AC0 circuits with AXccv
                 gates. We also give a machine model for CC, which
                 corresponds to its characterization as log-space
                 uniform polynomial-size families of comparator
                 circuits. These various characterizations show that CC
                 is a robust class. Our techniques also show that the
                 corresponding function class FCC is closed under
                 composition. The main technical tool we employ is
                 universal comparator circuits. Other results include a
                 simpler proof of NL $ \subseteq $ CC, a more careful
                 analysis showing the lexicographically first maximal
                 matching problem and its variants are CC-complete under
                 AC0 many-one reductions, and an explanation of the
                 relation between the Gale--Shapley algorithm and
                 Subramanian's algorithm for stable marriage. This
                 article continues the previous work of Cook et al.
                 [2011], which focused on Cook-Nguyen style uniform
                 proof complexity, answering several open questions
                 raised in that article.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "15",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Fellows:2014:FCU,
  author =       "Michael R. Fellows and Bart M. P. Jansen",
  title =        "{FPT} is characterized by useful obstruction sets:
                 Connecting algorithms, kernels, and quasi-orders",
  journal =      j-TOCT,
  volume =       "6",
  number =       "4",
  pages =        "16:1--16:??",
  month =        aug,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2635820",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Aug 18 17:06:20 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Many graph problems were first shown to be
                 fixed-parameter tractable using the results of
                 Robertson and Seymour on graph minors. We show that the
                 combination of finite, computable obstruction sets and
                 efficient order tests is not just one way of obtaining
                 strongly uniform FPT algorithms, but that all of FPT
                 may be captured in this way. Our new characterization
                 of FPT has a strong connection to the theory of
                 kernelization, as we prove that problems with
                 polynomial kernels can be characterized by obstruction
                 sets whose elements have polynomial size. Consequently
                 we investigate the interplay between the sizes of
                 problem kernels and the sizes of the elements of such
                 obstruction sets, obtaining several examples of how
                 results in one area yield new insights in the other. We
                 show how exponential-size minor-minimal obstructions
                 for pathwidth $k$ form the crucial ingredient in a
                 novel or-cross-composition for $k$-Pathwidth,
                 complementing the trivial and-composition that is known
                 for this problem. In the other direction, we show that
                 or-cross-compositions into a parameterized problem can
                 be used to rule out the existence of efficiently
                 generated quasi-orders on its instances that
                 characterize the no-instances by polynomial-size
                 obstructions.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "16",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Gobel:2014:CCH,
  author =       "Andreas G{\"o}bel and Leslie Ann Goldberg and David
                 Richerby",
  title =        "The complexity of counting homomorphisms to cactus
                 graphs modulo 2",
  journal =      j-TOCT,
  volume =       "6",
  number =       "4",
  pages =        "17:1--17:??",
  month =        aug,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2635825",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Aug 18 17:06:20 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "A homomorphism from a graph $G$ to a graph $H$ is a
                 function from $ V(G)$ to $ V(H)$ that preserves edges.
                 Many combinatorial structures that arise in mathematics
                 and in computer science can be represented naturally as
                 graph homomorphisms and as weighted sums of graph
                 homomorphisms. In this article, we study the complexity
                 of counting homomorphisms modulo 2. The complexity of
                 modular counting was introduced by Papadimitriou and
                 Zachos and it has been pioneered by Valiant who
                 famously introduced a problem for which counting modulo
                 7 is easy but counting modulo 2 is intractable. Modular
                 counting provides a rich setting in which to study the
                 structure of homomorphism problems. In this case, the
                 structure of the graph $H$ has a big influence on the
                 complexity of the problem. Thus, our approach is
                 graph-theoretic. We give a complete solution for the
                 class of cactus graphs, which are connected graphs in
                 which every edge belongs to at most one cycle. Cactus
                 graphs arise in many applications such as the modelling
                 of wireless sensor networks and the comparison of
                 genomes. We show that, for some cactus graphs $H$,
                 counting homomorphisms to $H$ modulo 2 can be done in
                 polynomial time. For every other fixed cactus graph
                 $H$, the problem is complete in the complexity class $
                 \oplus P$, which is a wide complexity class to which
                 every problem in the polynomial hierarchy can be
                 reduced (using randomised reductions). Determining
                 which $H$ lead to tractable problems can be done in
                 polynomial time. Our result builds upon the work of
                 Faben and Jerrum, who gave a dichotomy for the case in
                 which $H$ is a tree.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "17",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Watson:2014:ALB,
  author =       "Thomas Watson",
  title =        "Advice Lower Bounds for the Dense Model Theorem",
  journal =      j-TOCT,
  volume =       "7",
  number =       "1",
  pages =        "1:1--1:??",
  month =        dec,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2676659",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Jan 13 17:53:00 MST 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We prove a lower bound on the amount of nonuniform
                 advice needed by black-box reductions for the Dense
                 Model Theorem of Green, Tao, and Ziegler, and of
                 Reingold, Trevisan, Tulsiani, and Vadhan. The latter
                 theorem roughly says that for every distribution $D$
                 that is $ \delta $-dense in a distribution that is $
                 \epsilon '$-indistinguishable from uniform, there
                 exists a ``dense model'' for $D$, that is, a
                 distribution that is $ \delta $-dense in the uniform
                 distribution and is $ \epsilon $-indistinguishable from
                 $D$. This $ \epsilon $-indistinguishability is with
                 respect to an arbitrary small class of functions $F$.
                 For the natural case where $ \epsilon ' \geq \Omega
                 (\epsilon \delta)$ and $ \epsilon \geq \delta^{O(1)}$,
                 our lower bound implies that $ \Omega (\sqrt {(1 /
                 \epsilon) \log (1 / \delta)} \cdot \log | F |)$ advice
                 bits are necessary for a certain type of reduction that
                 establishes a stronger form of the Dense Model Theorem
                 (and which encompasses all known proofs of the Dense
                 Model Theorem in the literature). There is only a
                 polynomial gap between our lower bound and the best
                 upper bound for this case (due to Zhang), which is $ O
                 ((1 / \epsilon^2) \log (1 / \delta) \cdot \log | F |)$.
                 Our lower bound can be viewed as an analogue of list
                 size lower bounds for list-decoding of error-correcting
                 codes, but for ``dense model decoding'' instead.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Awasthi:2014:LLF,
  author =       "Pranjal Awasthi and Madhav Jha and Marco Molinaro and
                 Sofya Raskhodnikova",
  title =        "Limitations of Local Filters of {Lipschitz} and
                 Monotone Functions",
  journal =      j-TOCT,
  volume =       "7",
  number =       "1",
  pages =        "2:1--2:??",
  month =        dec,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2692372.2692373",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Jan 13 17:53:00 MST 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We study local filters for two properties of functions
                 of the form $ f : \{ 0, 1 \}^d \to R $: the Lipschitz
                 property and monotonicity. A local filter with additive
                 error $a$ is a randomized algorithm that is given
                 black-box access to a function $f$ and a query point
                 $x$ in the domain of $f$. It outputs a value $ F(x) $
                 such that (i) the reconstructed function $ F(x) $
                 satisfies the property (in our case, is Lipschitz or
                 monotone) and (ii) if the input function $f$ satisfies
                 the property, then for every point $x$ in the domain
                 (with high constant probability) the reconstructed
                 value $ F(x)$ differs from $ f(x)$ by at most $a$.
                 Local filters were introduced by Saks and Seshadhri
                 [2010]. The relaxed definition we study is due to
                 Bhattacharyya et al. [2012], except that we further
                 relax it by allowing additive error. Local filters for
                 Lipschitz and monotone functions have applications to
                 areas such as data privacy. We show that every local
                 filter for Lipschitz or monotone functions runs in time
                 exponential in the dimension $d$, even when the filter is
                 allowed significant additive error. Prior lower bounds
                 (for local filters with no additive error, that is,
                 with $ a = 0$) applied only to a more restrictive class
                 of filters, for example, nonadaptive filters. To prove
                 our lower bounds, we construct families of hard
                 functions and show that lookups of a local filter on
                 these functions are captured by a combinatorial object
                 that we call a $c$-connector. Then we present a lower
                 bound on the maximum outdegree of a $c$-connector and
                 show that it implies the desired bounds on the running
                 time of local filters. Our lower bounds, in particular,
                 imply the same bound on the running time for a class of
                 privacy mechanisms.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "2",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Greco:2014:CNC,
  author =       "Gianluigi Greco and Enrico Malizia and Luigi Palopoli
                 and Francesco Scarcello",
  title =        "The Complexity of the Nucleolus in Compact Games",
  journal =      j-TOCT,
  volume =       "7",
  number =       "1",
  pages =        "3:1--3:??",
  month =        dec,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2692372.2692374",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Jan 13 17:53:00 MST 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "The nucleolus is a well-known solution concept for
                 coalitional games to fairly distribute the total
                 available worth among the players. The nucleolus is
                 known to be NP -hard to compute over compact
                 coalitional games, that is, over games whose functions
                 specifying the worth associated with each coalition are
                 encoded in terms of polynomially computable functions
                 over combinatorial structures. In particular, hardness
                 results have been exhibited over minimum spanning tree
                 games, threshold games, and flow games. However, due to
                 its intricate definition involving reasoning over
                 exponentially many coalitions, a nontrivial upper bound
                 on its complexity was missing in the literature and
                 looked for. This article faces this question and
                 precisely characterizes the complexity of the
                 nucleolus, by exhibiting an upper bound that holds on
                 any class of compact games, and by showing that this
                 bound is tight even on the (structurally simple) class
                 of graph games. The upper bound is established by
                 proposing a variant of the standard linear-programming
                 based algorithm for nucleolus computation and by
                 studying a framework for reasoning about succinctly
                 specified linear programs, which are contributions of
                 interest in their own right. The hardness result is based on
                 an elaborate combinatorial reduction, which is
                 conceptually relevant for it provides a ``measure'' of
                 the computational cost to be paid for guaranteeing
                 voluntary participation in the distribution process. In
                 fact, the pre-nucleolus is known to be efficiently
                 computable over graph games, with this solution concept
                 being defined as the nucleolus but without guaranteeing
                 that each player is granted at least the worth
                 she can get alone, that is, without collaborating with
                 the other players. Finally, this article identifies
                 relevant tractable classes of coalitional games, based
                 on the notion of type of a player. Indeed, in most
                 applications where many players are involved, it is
                 often the case that such players in fact belong to a
                 limited number of classes, which is known in advance
                 and may be exploited for computing the nucleolus in a
                 fast way.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "3",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Kratsch:2014:KLB,
  author =       "Stefan Kratsch and Marcin Pilipczuk and Ashutosh Rai
                 and Venkatesh Raman",
  title =        "Kernel Lower Bounds using Co-Nondeterminism: Finding
                 Induced Hereditary Subgraphs",
  journal =      j-TOCT,
  volume =       "7",
  number =       "1",
  pages =        "4:1--4:??",
  month =        dec,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2691321",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Jan 13 17:53:00 MST 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "This work further explores the applications of
                 co-nondeterminism for showing kernelization lower
                 bounds. The only known example prior to this work
                 excludes polynomial kernelizations for the so-called
                 Ramsey problem of finding an independent set or a
                 clique of at least $k$ vertices in a given graph
                 [Kratsch 2012]. We study the more general problem of
                 finding induced subgraphs on $k$ vertices fulfilling
                 some hereditary property $ \Pi $, called $ \Pi
                 $-Induced Subgraph. The problem is NP-hard for all
                 nontrivial choices of $ \Pi $ by a classic result of
                 Lewis and Yannakakis [1980]. The parameterized
                 complexity of this problem was classified by Khot and
                 Raman [2002] depending on the choice of $ \Pi $. The
                 interesting cases for kernelization are for $ \Pi $
                 containing all independent sets and all cliques, since
                 the problem is trivially polynomial time solvable or
                 W[1]-hard otherwise. Our results are twofold. Regarding
                 $ \Pi $-Induced Subgraph, we show that for a large
                 choice of natural graph properties $ \Pi $, including
                 chordal, perfect, cluster, and cograph, there is no
                 polynomial kernel with respect to $k$. This is
                 established by two theorems, each one capturing
                 different (but not necessarily exclusive) sets of
                 properties: one using a co-nondeterministic variant of
                 OR-cross-composition and one by a polynomial parameter
                 transformation from Ramsey. Additionally, we show how
                 to use improvement versions of NP-hard problems as
                 source problems for lower bounds, without requiring
                 their NP-hardness. For example, for $ \Pi $-Induced
                 Subgraph our compositions may assume existing solutions
                 of size $ k - 1$. This follows from the more general
                 fact that source problems for OR-(cross-)compositions
                 need only be NP-hard under co-nondeterministic
                 reductions. We believe this to be useful for further
                 lower-bound proofs, for example, since improvement
                 versions simplify the construction of a disjunction
                 (OR) of instances required in compositions. This adds a
                 second way of using co-nondeterminism for lower
                 bounds.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "4",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Filmus:2015:ELB,
  author =       "Yuval Filmus and Toniann Pitassi and Rahul Santhanam",
  title =        "Exponential Lower Bounds for {AC$0$-Frege} Imply
                 Superpolynomial {Frege} Lower Bounds",
  journal =      j-TOCT,
  volume =       "7",
  number =       "2",
  pages =        "5:1--5:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2656209",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue May 12 06:02:22 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We give a general transformation that turns
                 polynomial-size Frege proofs into subexponential-size
                 AC$^0$-Frege proofs. This indicates that proving truly
                 exponential lower bounds for AC$^0$-Frege is hard, as
                 it is a long-standing open problem to prove
                 superpolynomial lower bounds for Frege. Our
                 construction is optimal for proofs of formulas of
                 unbounded depth. As a consequence of our main result,
                 we are able to shed some light on the question of
                 automatizability for bounded-depth Frege systems.
                 First, we present a simpler proof of the results of
                 Bonet et al. showing that under cryptographic
                 assumptions, bounded-depth Frege proofs are not
                 automatizable. Second, we show that because our proof
                 is more general, under the right cryptographic
                 assumptions, it could resolve the automatizability
                 question for lower-depth Frege systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "5",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Blaser:2015:SCT,
  author =       "Markus Bl{\"a}ser and Bodo Manthey",
  title =        "Smoothed Complexity Theory",
  journal =      j-TOCT,
  volume =       "7",
  number =       "2",
  pages =        "6:1--6:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2656210",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue May 12 06:02:22 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Smoothed analysis is a new way of analyzing algorithms
                 introduced by Spielman and Teng. Classical methods like
                 worst-case or average-case analysis have accompanying
                 complexity classes, such as P and Avg-P, respectively.
                 Whereas worst-case or average-case analysis give us a
                 means to talk about the running time of a particular
                 algorithm, complexity classes allow us to talk about
                 the inherent difficulty of problems. Smoothed analysis
                 is a hybrid of worst-case and average-case analysis and
                 compensates for some of their drawbacks. Despite its
                 success for the analysis of single algorithms and
                 problems, there is no embedding of smoothed analysis
                 into computational complexity theory, which is
                 necessary to classify problems according to their
                 intrinsic difficulty. We propose a framework for
                 smoothed complexity theory, define the relevant
                 classes, and prove some first hardness results (of
                 bounded halting and tiling) and tractability results
                 (binary optimization problems, graph coloring,
                 satisfiability) within this framework.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "6",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Chen:2015:FCC,
  author =       "Hubie Chen and Moritz M{\"u}ller",
  title =        "The Fine Classification of Conjunctive Queries and
                 Parameterized Logarithmic Space",
  journal =      j-TOCT,
  volume =       "7",
  number =       "2",
  pages =        "7:1--7:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2751316",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue May 12 06:02:22 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We perform a fundamental investigation of the
                 complexity of conjunctive query evaluation from the
                 perspective of parameterized complexity. We classify
                 sets of Boolean conjunctive queries according to the
                 complexity of this problem. Previous work showed that a
                 set of conjunctive queries is fixed-parameter tractable
                 precisely when the set is equivalent to a set of
                 queries having bounded treewidth. We present a fine
                 classification of query sets up to parameterized
                 logarithmic space reduction. We show that, in the
                 bounded treewidth regime, there are three complexity
                 degrees and that the properties that determine the
                 degree of a query set are bounded pathwidth and bounded
                 tree depth. We also engage in a study of the two higher
                 degrees via logarithmic space machine characterizations
                 and complete problems. Our work yields a significantly
                 richer perspective on the complexity of conjunctive
                 queries and, at the same time, suggests new avenues of
                 research in parameterized complexity.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "7",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Komarath:2015:PEB,
  author =       "Balagopal Komarath and Jayalal Sarma",
  title =        "Pebbling, Entropy, and Branching Program Size Lower
                 Bounds",
  journal =      j-TOCT,
  volume =       "7",
  number =       "2",
  pages =        "8:1--8:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2751320",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue May 12 06:02:22 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We contribute to the program of proving lower bounds
                 on the size of branching programs solving the Tree
                 Evaluation Problem introduced by Cook et al. [2012].
                 Proving a superpolynomial lower bound for the size of
                 nondeterministic thrifty branching programs would be an
                 important step toward separating NL from P using the
                 tree evaluation problem. First, we show that Read-Once
                 Nondeterministic Thrifty BPs are equivalent to whole
                 black-white pebbling algorithms, thus showing a tight
                 lower bound (ignoring polynomial factors) for this
                 model. We then introduce a weaker restriction of
                 nondeterministic thrifty branching programs called
                 Bitwise Independence. The best known [Cook et al. 2012]
                 nondeterministic thrifty branching programs (of size $
                 O(k^{h / 2 + 1})$) for the tree evaluation problem are
                 Bitwise Independent. As our main result, we show that
                 any Bitwise Independent Nondeterministic Thrifty
                 Branching Program solving $ {\rm BT}_2 (h, k)$ must
                  have at least $ (k / 2)^{h / 2}$ states. Prior to this
                 work, lower bounds were known for nondeterministic
                 thrifty branching programs only for fixed heights $ h =
                 2, 3, 4$ [Cook et al. 2012]. We prove our results by
                 associating a fractional black-white pebbling strategy
                 with any bitwise independent nondeterministic thrifty
                 branching program solving the Tree Evaluation Problem.
                 Such a connection was not known previously, even for
                 fixed heights. Our main technique is the entropy method
                  introduced by Jukna and {\v{Z}}{\'a}k [2001] originally in
                 the context of proving lower bounds for read-once
                 branching programs. We also show that the previous
                 lower bounds known [Cook et al. 2012] for deterministic
                 branching programs for the Tree Evaluation Problem can
                 be obtained using this approach. Using this method, we
                 also show tight lower bounds for any $k$-way
                 deterministic branching program solving the Tree
                 Evaluation Problem when the instances are restricted to
                 have the same group operation in all internal nodes.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "8",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{ODonnell:2015:HMM,
  author =       "Ryan O'Donnell and Yi Wu and Yuan Zhou",
  title =        "Hardness of {Max-2Lin} and {Max-3Lin} over Integers,
                 Reals, and Large Cyclic Groups",
  journal =      j-TOCT,
  volume =       "7",
  number =       "2",
  pages =        "9:1--9:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2751322",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue May 12 06:02:22 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "In 1997, H{\aa}stad showed NP-hardness of $ (1 -
                 \epsilon, 1 / q + \delta)$-approximating Max-3Lin($
                 Z_q$); however, it was not until 2007 that Guruswami
                 and Raghavendra were able to show NP-hardness of $ (1 -
                 \epsilon, \delta)$-approximating Max-3Lin($Z$). In
                 2004, Khot--Kindler--Mossel--O'Donnell showed
                 UG-hardness of $ (1 - \epsilon, \delta)$-approximating
                 Max-2Lin($ Z_q$) for $ q = q (\epsilon, \delta)$ a
                 sufficiently large constant; however, achieving the
                 same hardness for Max-2Lin($Z$) was given as an open
                 problem in Raghavendra's 2009 thesis. In this work, we
                 show that fairly simple modifications to the proofs of
                 the Max-3Lin($ Z_q$) and Max-2Lin($ Z_q$) results yield
                 optimal hardness results over $Z$. In fact, we show a
                 kind of ``bicriteria'' hardness: Even when there is a $
                 (1 - \epsilon)$-good solution over $Z$, it is hard for
                 an algorithm to find a $ \delta $-good solution over
                 $Z$, $R$, or $ Z_m$ for any $ m \geq q (\epsilon,
                 \delta)$ of the algorithm's choosing.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "9",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Ambainis:2015:LBD,
  author =       "Andris Ambainis and William Gasarch and Aravind
                 Srinivasan and Andrey Utis",
  title =        "Lower Bounds on the Deterministic and Quantum
                 Communication Complexity of {Hamming}-Distance
                 Problems",
  journal =      j-TOCT,
  volume =       "7",
  number =       "3",
  pages =        "10:1--10:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2698587",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Fri Aug 7 10:02:02 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Alice and Bob want to know if two strings of length n
                 are almost equal. That is, do the strings differ on at
                 most a bits? Let $ 0 \leq a \leq n - 1 $. We show (1)
                 any deterministic protocol-as well as any error-free
                 quantum protocol ($ C* $ version)-for this problem
                 requires at least $ n - 2 $ bits of communication, and
                 (2) a lower bound of $ n / 2 - 1 $ for error-free $ Q*
                 $ quantum protocols. We also show the same results for
                 determining if two strings differ in exactly $a$ bits.
                 Our results are obtained by lower-bounding the ranks of
                 the appropriate matrices.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "10",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Jerrum:2015:SHF,
  author =       "Mark Jerrum and Kitty Meeks",
  title =        "Some Hard Families of Parameterized Counting
                 Problems",
  journal =      j-TOCT,
  volume =       "7",
  number =       "3",
  pages =        "11:1--11:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2786017",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Fri Aug 7 10:02:02 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We consider parameterized subgraph counting problems
                  of the following form: given a graph $G$, how many
                 $k$-tuples of its vertices induce a subgraph with a
                 given property? A number of such problems are known to
                 be \#W[1]-complete; here, we substantially generalize
                 some of these existing results by proving hardness for
                 two large families of such problems. We demonstrate
                 that it is \#W[1]-hard to count the number of
                 $k$-vertex subgraphs having any property where the
                 number of distinct edge densities of labeled subgraphs
                 that satisfy the property is $ o(k^2)$. In the special
                 case in which the property in question depends only on
                 the number of edges in the subgraph, we give a
                 strengthening of this result, which leads to our second
                 family of hard problems.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "11",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Case:2015:MD,
  author =       "Adam Case and Jack H. Lutz",
  title =        "Mutual Dimension",
  journal =      j-TOCT,
  volume =       "7",
  number =       "3",
  pages =        "12:1--12:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2786566",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Fri Aug 7 10:02:02 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We define the lower and upper mutual dimensions $ {\rm
                 mdim}(x : y) $ and $ {\rm Mdim}(x : y) $ between any
                 two points $x$ and $y$ in Euclidean space. Intuitively,
                 these are the lower and upper densities of the
                 algorithmic information shared by $x$ and $y$. We show
                 that these quantities satisfy the main desiderata for a
                 satisfactory measure of mutual algorithmic information.
                 Our main theorem, the data processing inequality for
                  mutual dimension, says that if $ f : R^m \to R^n$ is
                 computable and Lipschitz, then the inequalities $ {\rm
                 mdim}(f(x) : y) \leq {\rm mdim} (x : y)$ and $ {\rm
                 Mdim}(f(x) : y) \leq {\rm Mdim}(x : y)$ hold for all $
                 x \in R^m$ and $ y \in R^t$. We use this inequality and
                 related inequalities that we prove in like fashion to
                 establish conditions under which various classes of
                 computable functions on Euclidean space preserve or
                 otherwise transform mutual dimensions between points.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "12",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Fernau:2015:UPT,
  author =       "Henning Fernau and Alejandro L{\'o}pez-Ortiz and
                 Jazm{\'\i}n Romero",
  title =        "Using Parametric Transformations Toward Polynomial
                 Kernels for Packing Problems Allowing Overlaps",
  journal =      j-TOCT,
  volume =       "7",
  number =       "3",
  pages =        "13:1--13:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2786015",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Fri Aug 7 10:02:02 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We consider the problem of discovering overlapping
                 communities in networks that we model as
                 generalizations of the Set and Graph Packing problems
                 with overlap. As usual for Set Packing problems, we
                  seek a collection $ S' \subseteq S $ consisting of at
                 least $k$ sets subject to certain disjointness
                 restrictions. In the $r$-Set Packing with
                 $t$-Membership, each element of $U$ belongs to at most
                  $t$ sets of $ S'$, while in $r$-Set Packing with
                  $t$-Overlap, each pair of sets in $ S'$ overlaps in at
                 most $t$ elements. For both problems, each set of $S$
                 has at most $r$ elements. Similarly, both of our Graph
                 Packing problems seek a collection $K$ of at least $k$
                 subgraphs in a graph $G$, each isomorphic to a graph $
                 H \in H$. In $H$-Packing with $t$-Membership, each
                 vertex of $G$ belongs to at most $t$ subgraphs of $K$,
                 while in $H$-Packing with $t$-Overlap, each pair of
                  subgraphs in $K$ overlaps in at most $t$ vertices. For
                 both problems, each member of $H$ has at most $r$
                 vertices and $m$ edges, where $t$, $r$, and $m$ are
                 constants. Here, we show NP-completeness results for
                 all of our packing problems. Furthermore, we give a
                 dichotomy result for the $H$-Packing with
                 $t$-Membership problem analogous to the Kirkpatrick and
                 Hell dichotomy [Kirkpatrick and Hell 1978]. Using
                 polynomial parameter transformations, we reduce the
                 $r$-Set Packing with $t$-Membership to a problem kernel
                  with $ O((r + 1)^r k^r)$ elements and the $H$-Packing
                 with $t$-Membership and its edge version to problem
                 kernels with $ O((r + 1)^r k^r)$ and $ O((m + 1)^m
                 k^m)$ vertices, respectively. On the other hand, by
                 generalizing [Fellows et al. 2008; Moser 2009], we
                 achieve a kernel with $ O(r^r k^{r - t - 1})$ elements
                  for the $r$-Set Packing with $t$-Overlap and kernels
                 with $ O(r^r k^{r - t - 1})$ and $ O(m^m k^{m - t -
                 1})$ vertices for the $H$-Packing with $t$-Overlap and
                 its edge version, respectively. In all cases, $k$ is
                 the input parameter, while $t$, $r$, and $m$ are
                 constants.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "13",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Drange:2015:ESC,
  author =       "P{\aa}l Gr{\o}n{\aa}s Drange and Fedor V. Fomin and
                  Micha{\l} Pilipczuk and Yngve Villanger",
  title =        "Exploring the Subexponential Complexity of Completion
                 Problems",
  journal =      j-TOCT,
  volume =       "7",
  number =       "4",
  pages =        "14:1--14:??",
  month =        sep,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2799640",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Oct 1 16:40:05 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Let $F$ be a family of graphs. In the $F$-Completion
                 problem, we are given an $n$-vertex graph $G$ and an
                 integer $k$ as input, and asked whether at most $k$
                 edges can be added to $G$ so that the resulting graph
                 does not contain a graph from $F$ as an induced
                 subgraph. It was shown recently that two special cases
                 of $F$-Completion, namely, (i) the problem of
                 completing into a chordal graph known as Minimum
                 Fill-in (SIAM J. Comput. 2013), which corresponds to
                 the case of $ F = \{ C_4, C_5, C_6, \ldots \} $, and
                 (ii) the problem of completing into a split graph
                  (Algorithmica 2015), that is, the case of $ F = \{ C_4,
                  2 K_2, C_5 \} $, are solvable in parameterized subexponential
                 time $ 2^{O (\sqrt k \log k)} n^{O(1)}$. The
                 exploration of this phenomenon is the main motivation
                 for our research on $F$-Completion. In this article, we
                 prove that completions into several well-studied
                 classes of graphs without long induced cycles and paths
                 also admit parameterized subexponential time algorithms
                 by showing that: --- The problem Trivially Perfect
                  Completion, which is $F$-Completion for $ F = \{ C_4,
                  P_4 \} $, a cycle and a path on four vertices, is solvable
                 in parameterized subexponential time $ 2^{O (\sqrt k
                 \log k)} n^{O(1)}$. --- The problems known in the
                  literature as Pseudosplit Completion, the case in which
                  $ F = \{ 2 K_2, C_4 \} $, and Threshold Completion, in
                  which $ F = \{ 2 K_2, P_4, C_4 \} $, are also solvable
                  in time $ 2^{O
                 (\sqrt k \log k)} n^{O(1)}$. We complement our
                 algorithms for $F$-Completion with the following lower
                  bounds: --- For $ F = \{ 2 K_2 \} $, $ F = \{ C_4 \} $,
                  $ F = \{ P_4 \} $, and $ F = \{ 2 K_2, P_4 \} $,
                  $F$-Completion cannot be
                 solved in time $ 2^{o(k)} n^{O(1)}$ unless the
                 Exponential Time Hypothesis (ETH) fails. Our upper and
                 lower bounds provide a complete picture of the
                 subexponential parameterized complexity of
                  $F$-Completion problems for any $ F \subseteq \{ 2 K_2,
                  C_4, P_4 \} $.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "14",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Regev:2015:QXG,
  author =       "Oded Regev and Thomas Vidick",
  title =        "Quantum {XOR} Games",
  journal =      j-TOCT,
  volume =       "7",
  number =       "4",
  pages =        "15:1--15:??",
  month =        sep,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2799560",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Oct 1 16:40:05 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We introduce quantum XOR games, a model of two-player,
                 one-round games that extends the model of XOR games by
                 allowing the referee's questions to the players to be
                 quantum states. We give examples showing that quantum
                 XOR games exhibit a wide range of behaviors that are
                 known not to exist for standard XOR games, such as
                 cases in which the use of entanglement leads to an
                 arbitrarily large advantage over the use of no
                 entanglement. By invoking two deep extensions of
                 Grothendieck's inequality, we present an efficient
                 algorithm that gives a constant-factor approximation to
                 the best performance that players can obtain in a given
                 game, both in the case that they have no shared
                 entanglement and that they share unlimited
                 entanglement. As a byproduct of the algorithm, we prove
                 some additional interesting properties of quantum XOR
                 games, such as the fact that sharing a maximally
                 entangled state of arbitrary dimension gives only a
                 small advantage over having no entanglement at all.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "15",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Goldreich:2015:IOP,
  author =       "Oded Goldreich and Or Meir",
  title =        "Input-Oblivious Proof Systems and a Uniform Complexity
                 Perspective on {P\slash poly}",
  journal =      j-TOCT,
  volume =       "7",
  number =       "4",
  pages =        "16:1--16:??",
  month =        sep,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2799645",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Oct 1 16:40:05 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "An input-oblivious proof system is a proof system in
                 which the proof does not depend on the claim being
                 proved. Input-oblivious versions of NP and MA were
                 introduced in passing by Fortnow, Santhanam, and
                 Williams, who also showed that those classes are
                 related to questions on circuit complexity. In this
                 article, we wish to highlight the notion of
                 input-oblivious proof systems and initiate a more
                 systematic study of them. We begin by describing in
                 detail the results of Fortnow et al. and discussing
                 their connection to circuit complexity. We then extend
                 the study to input-oblivious versions of IP, and PCP,
                 and ZK and present few preliminary results regarding
                 those versions.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "16",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Teutsch:2015:ADM,
  author =       "Jason Teutsch and Marius Zimand",
  title =        "On Approximate Decidability of Minimal Programs",
  journal =      j-TOCT,
  volume =       "7",
  number =       "4",
  pages =        "17:1--17:??",
  month =        sep,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2799561",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Oct 1 16:40:05 MDT 2015",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "An index $e$ in a numbering of partial-recursive
                 functions is called minimal if every lesser index
                 computes a different function from $e$. Since the
                 1960s, it has been known that, in any reasonable
                 programming language, no effective procedure determines
                 whether or not a given index is minimal. We investigate
                 whether the task of determining minimal indices can be
                 solved in an approximate sense. Our first question,
                 regarding the set of minimal indices, is whether there
                 exists an algorithm that can correctly label 1 out of
                 $k$ indices as either minimal or nonminimal. Our second
                 question, regarding the function that computes minimal
                 indices, is whether one can compute a short list of
                 candidate indices that includes a minimal index for a
                 given program. We give negative answers to both
                 questions for the important case of numberings with
                 linearly bounded translators.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "17",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Kratsch:2016:PCK,
  author =       "Stefan Kratsch and D{\'a}niel Marx and Magnus
                 Wahlstr{\"o}m",
  title =        "Parameterized Complexity and Kernelizability of Max
                 Ones and Exact Ones Problems",
  journal =      j-TOCT,
  volume =       "8",
  number =       "1",
  pages =        "1:1--1:??",
  month =        feb,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2858787",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Feb 6 08:06:18 MST 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "For a finite set $ \Gamma $ of Boolean relations, Max
                 Ones SAT($ \Gamma $) and Exact Ones SAT($ \Gamma $) are
                 generalized satisfiability problems where every
                 constraint relation is from $ \Gamma $, and the task is
                 to find a satisfying assignment with at least/exactly
                 $k$ variables set to $1$, respectively. We study the
                 parameterized complexity of these problems, including
                 the question whether they admit polynomial kernels. For
                 Max Ones SAT($ \Gamma $), we give a classification into
                 five different complexity levels: polynomial-time
                 solvable, admits a polynomial kernel, fixed-parameter
                 tractable, solvable in polynomial time for fixed $k$,
                 and NP-hard already for $ k = 1$. For Exact Ones SAT($
                 \Gamma $), we refine the classification obtained
                 earlier by taking a closer look at the fixed-parameter
                 tractable cases and classifying the sets $ \Gamma $ for
                 which Exact Ones SAT($ \Gamma $) admits a polynomial
                 kernel.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Volkovich:2016:CAR,
  author =       "Ilya Volkovich",
  title =        "Characterizing Arithmetic Read-Once Formulae",
  journal =      j-TOCT,
  volume =       "8",
  number =       "1",
  pages =        "2:1--2:??",
  month =        feb,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2858783",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Feb 6 08:06:18 MST 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "An arithmetic Read-Once Formula (ROF for short) is a
                 formula (i.e., a tree of computation) in which the
                 operations are $ \{ +, \times \} $ and such that every
                 input variable labels at most one leaf. We give a
                 simple characterization of such formulae. Other than
                 being interesting in its own right, our
                 characterization gives rise to a property-testing
                 algorithm for functions computable by such formulae. To
                 the best of our knowledge, prior to our work, no
                 characterization and/or property-testing algorithm was
                 known for this kind of formulae.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "2",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Schmitz:2016:CHB,
  author =       "Sylvain Schmitz",
  title =        "Complexity Hierarchies beyond Elementary",
  journal =      j-TOCT,
  volume =       "8",
  number =       "1",
  pages =        "3:1--3:??",
  month =        feb,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2858784",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Feb 6 08:06:18 MST 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We introduce a hierarchy of fast-growing complexity
                 classes and show its suitability for completeness
                 statements of many nonelementary problems. This
                 hierarchy allows the classification of many decision
                 problems with a nonelementary complexity, which occur
                 naturally in areas such as logic, combinatorics, formal
                 languages, and verification, with complexities ranging
                 from simple towers of exponentials to Ackermannian and
                 beyond.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "3",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Ailon:2016:OLR,
  author =       "Nir Ailon",
  title =        "An {$ \Omega ((n \log n) / R) $} Lower Bound for
                 {Fourier} Transform Computation in the {$R$}-Well
                 Conditioned Model",
  journal =      j-TOCT,
  volume =       "8",
  number =       "1",
  pages =        "4:1--4:??",
  month =        feb,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2858785",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Feb 6 08:06:18 MST 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Obtaining a nontrivial (superlinear) lower bound for
                 computation of the Fourier transform in the linear
                 circuit model has been a long-standing open problem for
                 more than 40 years. An early result by Morgenstern from
                  1973 provides an $ \Omega (n \log n) $ lower bound for
                 the unnormalized Fourier transform when the constants
                 used in the computation are bounded. The proof uses a
                 potential function related to a determinant. That
                 result does not explain why the normalized Fourier
                 transform (of unit determinant) should be difficult to
                 compute in the same model. Hence, it is not scale
                 insensitive. More recently, Ailon [2013] showed that if
                 only unitary 2-by-2 gates are used, and additionally no
                 extra memory is allowed, then the normalized Fourier
                 transform requires $ \Omega (n \log n) $ steps. This
                 rather limited result is also sensitive to scaling, but
                 highlights the complexity inherent in the Fourier
                 transform arising from introducing entropy, unlike,
                 say, the identity matrix (which is as complex as the
                 Fourier transform using Morgenstern's arguments, under
                 proper scaling). This work improves on Ailon [2013] in
                 two ways: First, we eliminate the scaling restriction
                 and provide a lower bound for computing any scaling of
                 the Fourier transform. Second, we allow the
                 computational model to use extra memory. Our
                 restriction is that the composition of all gates up to
                  any point must be a well-conditioned linear
                 transformation. The lower bound is $ \Omega (R^{-1} n
                 \log n) $, where $R$ is the uniform condition number.
                 Well-conditioned is a natural requirement for
                 algorithms accurately computing linear transformations
                 on machine architectures of bounded word size. Hence,
                 this result can be seen as a tradeoff between speed and
                 accuracy. The main technical contribution is an
                 extension of matrix entropy used in Ailon [2013] for
                 unitary matrices to a potential function computable for
                 any invertible matrix, using ``quasi-entropy'' of
                 ``quasi-probabilities.''",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "4",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Fischer:2016:TRO,
  author =       "Eldar Fischer and Yonatan Goldhirsh and Oded Lachish",
  title =        "Testing Read-Once Formula Satisfaction",
  journal =      j-TOCT,
  volume =       "8",
  number =       "2",
  pages =        "5:1--5:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2897184",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat May 21 08:02:14 MDT 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We study the query complexity of testing for
                 properties defined by read-once formulas, as instances
                 of massively parametrized properties, and prove several
                 testability and nontestability results. First, we prove
                 the testability of any property accepted by a Boolean
                 read-once formula involving any bounded arity gates,
                  with a number of queries exponential in $ 1 / \epsilon $,
                 doubly exponential in the arity, and independent of all
                 other parameters. When the gates are limited to being
                 monotone, we prove that there is an estimation
                 algorithm that outputs an approximation of the distance
                 of the input from satisfying the property. For formulas
                 only involving And/Or gates, we provide a more
                 efficient test whose query complexity is only
                  quasipolynomial in $ 1 / \epsilon $. On the other hand, we
                 show that such testability results do not hold in
                 general for formulas over non-Boolean alphabets.
                 Specifically, we construct a property defined by a
                 read-once arity 2 (non-Boolean) formula over an
                 alphabet of size 4, such that any 1/4-test for it
                 requires a number of queries depending on the formula
                 size. We also present such a formula over an alphabet
                 of size 5 that also satisfies a strong monotonicity
                 condition.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "5",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Fellows:2016:TPM,
  author =       "Michael R. Fellows and Danny Hermelin and Frances
                 Rosamond and Hadas Shachnai",
  title =        "Tractable Parameterizations for the Minimum Linear
                 Arrangement Problem",
  journal =      j-TOCT,
  volume =       "8",
  number =       "2",
  pages =        "6:1--6:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2898352",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat May 21 08:02:14 MDT 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "The M inimum Linear Arrangement (MLA) problem involves
                 embedding a given graph on the integer line so that the
                 sum of the edge lengths of the embedded graph is
                 minimized. Most layout problems are either intractable
                 or not known to be tractable, parameterized by the
                 treewidth of the input graph. We investigate MLA with
                 respect to three parameters that provide more structure
                 than treewidth. In particular, we give a factor $ (1 +
                 \epsilon) $-approximation algorithm for MLA
                 parameterized by $ (\epsilon, k) $, where $k$ is the
                 vertex cover number of the input graph. By a similar
                 approach, we obtain two FPT algorithms that exactly
                 solve MLA parameterized by, respectively, the max leaf
                 and edge clique cover numbers of the input graph.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "6",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Goldreich:2016:SBT,
  author =       "Oded Goldreich and Dana Ron",
  title =        "On Sample-Based Testers",
  journal =      j-TOCT,
  volume =       "8",
  number =       "2",
  pages =        "7:1--7:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2898355",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat May 21 08:02:14 MDT 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "The standard definition of property testing endows the
                 tester with the ability to make arbitrary queries to
                 ``elements'' of the tested object. In contrast,
                 sample-based testers only obtain independently
                 distributed elements (a.k.a. labeled samples) of the
                 tested object. While sample-based testers were defined
                 by Goldreich, Goldwasser, and Ron (JACM 1998), with few
                 exceptions, most research in property testing has
                 focused on query-based testers. In this work, we
                 advance the study of sample-based property testers by
                 providing several general positive results as well as
                 by revealing relations between variants of this testing
                  model. In particular: --- We show that certain types of
                 query-based testers yield sample-based testers of
                 sublinear sample complexity. For example, this holds
                  for a natural class of proximity oblivious testers. --- We
                 study the relation between distribution-free
                 sample-based testers and one-sided error sample-based
                 testers w.r.t. the uniform distribution. While most of
                 this work ignores the time complexity of testing, one
                 part of it does focus on this aspect. The main result
                  in this part is a sublinear-time sample-based tester,
                 in the dense graphs model, for $k$-colorability, for
                 any $ k \geq 2$.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "7",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Kumar:2016:ACL,
  author =       "Mrinal Kumar and Gaurav Maheshwari and Jayalal Sarma",
  title =        "Arithmetic Circuit Lower Bounds via Maximum-Rank of
                 Partial Derivative Matrices",
  journal =      j-TOCT,
  volume =       "8",
  number =       "3",
  pages =        "8:1--8:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2898437",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed May 25 17:15:05 MDT 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We introduce the polynomial coefficient matrix and
                 identify the maximum rank of this matrix under variable
                 substitution as a complexity measure for multivariate
                 polynomials. We use our techniques to prove
                 super-polynomial lower bounds against several classes
                 of non-multilinear arithmetic circuits. In particular,
                 we obtain the following results: --- As our first main
                 result, we prove that any homogeneous depth-3 circuit
                  for computing the product of $d$ matrices of dimension
                  $ n \times n $ requires $ \Omega (n^{d - 1} / 2^d) $
                 size. This improves the lower bounds in Nisan and
                 Wigderson [1995] for $ d = \omega (1) $. --- As our
                 second main result, we show that there is an explicit
                 polynomial on $n$ variables and degree at most $ n / 2$
                 for which any depth-$3$ circuit of product dimension at
                 most $ n / 10$ (dimension of the space of affine forms
                 feeding into each product gate) requires size $
                 2^{\Omega (n)}$. This generalizes the lower bounds
                 against diagonal circuits proved in Saxena [2008].
                 Diagonal circuits are of product dimension $1$. --- We
                  prove an $ n^{\Omega (\log n)}$ lower bound on the
                 size of product-sparse formulas. By definition, any
                 multilinear formula is a product-sparse formula. Thus,
                 this result extends the known super-polynomial lower
                 bounds on the size of multilinear formulas [Raz 2006].
                 --- We prove a $ 2^{ \Omega (n)}$ lower bound on the
                 size of partitioned arithmetic branching programs. This
                 result extends the known exponential lower bound on the
                 size of ordered arithmetic branching programs [Jansen
                 2008].",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "8",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Fulla:2016:GCW,
  author =       "Peter Fulla and Stanislav Zivn{\'y}",
  title =        "A {Galois} Connection for Weighted (Relational) Clones
                 of Infinite Size",
  journal =      j-TOCT,
  volume =       "8",
  number =       "3",
  pages =        "9:1--9:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2898438",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed May 25 17:15:05 MDT 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "A Galois connection between clones and relational
                 clones on a fixed finite domain is one of the
                 cornerstones of the so-called algebraic approach to the
                 computational complexity of non-uniform Constraint
                 Satisfaction Problems (CSPs). Cohen et al. established
                 a Galois connection between finitely-generated weighted
                 clones and finitely-generated weighted relational
                 clones [SICOMP'13], and asked whether this connection
                 holds in general. We answer this question in the
                 affirmative for weighted (relational) clones with real
                 weights and show that the complexity of the
                 corresponding valued CSPs is preserved.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "9",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Haviv:2016:LDS,
  author =       "Ishay Haviv and Oded Regev",
  title =        "The List-Decoding Size of {Fourier}-Sparse {Boolean}
                 Functions",
  journal =      j-TOCT,
  volume =       "8",
  number =       "3",
  pages =        "10:1--10:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2898439",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed May 25 17:15:05 MDT 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "A function defined on the Boolean hypercube is
                 $k$-Fourier-sparse if it has at most $k$ nonzero
                 Fourier coefficients. For a function $ f : F_2^n \to R$
                 and parameters $k$ and $d$, we prove a strong upper
                 bound on the number of $k$-Fourier-sparse Boolean
                 functions that disagree with $f$ on at most $d$ inputs.
                 Our bound implies that the number of uniform and
                 independent random samples needed for learning the
                 class of $k$-Fourier-sparse Boolean functions on $n$
                 variables exactly is at most $ O (n \cdot k \log k)$.
                 As an application, we prove an upper bound on the query
                 complexity of testing Booleanity of Fourier-sparse
                 functions. Our bound is tight up to a logarithmic
                 factor and quadratically improves on a result due to
                 Gur and Tamuz [2013].",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "10",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Nguyen:2016:SPN,
  author =       "Dung Nguyen and Alan L. Selman",
  title =        "Structural Properties of Nonautoreducible Sets",
  journal =      j-TOCT,
  volume =       "8",
  number =       "3",
  pages =        "11:1--11:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2898440",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed May 25 17:15:05 MDT 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We investigate autoreducibility properties of complete
                 sets for NEXP under different polynomial-time
                 reductions. Specifically, we show under some
                 polynomial-time reductions that there are complete sets
                 for NEXP that are not autoreducible. We obtain the
                 following main results: -For any positive integers s
                 and k such that 2$^s$ --- 1 {$>$} k, there is a \leq
                 $_{s - T}^p$ -complete set for NEXP that is not \leq
                 $_{k - tt}^p$ -autoreducible. -For every constant c
                 {$>$} 1, there is a \leq $_{2 - T}^p$ -complete set for
                 NEXP that is not autoreducible under nonadaptive
                 reductions that make no more than three queries, such
                 that each of them has a length between n$^{1 / c}$ and
                 n$^c$, where n is input size. -For any positive integer
                 k, there is a \leq $_{k - tt}^p$ -complete set for NEXP
                 that is not autoreducible under \leq $_{k - tt}^p$
                 -reductions whose truth table is not a disjunction or a
                 negated disjunction. Finally, we show that settling the
                 question of whether every \leq $_{dtt}^p$ -complete set
                 for NEXP is \leq $_{NOR - tt}^p$ -autoreducible either
                 positively or negatively would lead to major results
                 about the exponential time complexity classes.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "11",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Gobel:2016:CHS,
  author =       "Andreas G{\"o}bel and Leslie Ann Goldberg and David
                 Richerby",
  title =        "Counting Homomorphisms to Square-Free Graphs, Modulo
                 2",
  journal =      j-TOCT,
  volume =       "8",
  number =       "3",
  pages =        "12:1--12:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2898441",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed May 25 17:15:05 MDT 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We study the problem $ \oplus $ Homs ToH of counting,
                 modulo 2, the homomorphisms from an input graph to a
                 fixed undirected graph H. A characteristic feature of
                 modular counting is that cancellations make wider
                 classes of instances tractable than is the case for
                 exact (nonmodular) counting; thus, subtle dichotomy
                 theorems can arise. We show the following dichotomy:
                  for any $H$ that contains no 4-cycles, $ \oplus $
                  HomsTo $H$ is either in polynomial time or is $ \oplus
                  $P-complete. This partially confirms a conjecture of
                 Faben and Jerrum that was previously only known to hold
                 for trees and for a restricted class of tree-width-2
                 graphs called cactus graphs. We confirm the conjecture
                 for a rich class of graphs, including graphs of
                 unbounded tree-width. In particular, we focus on
                 square-free graphs, which are graphs without 4-cycles.
                 These graphs arise frequently in combinatorics, for
                 example, in connection with the strong perfect graph
                 theorem and in certain graph algorithms. Previous
                 dichotomy theorems required the graph to be tree-like
                 so that tree-like decompositions could be exploited in
                 the proof. We prove the conjecture for a much richer
                 class of graphs by adopting a much more general
                 approach.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "12",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Caragiannis:2016:LDA,
  author =       "Ioannis Caragiannis and Christos Kaklamanis and Maria
                 Kyropoulou",
  title =        "Limitations of Deterministic Auction Design for
                 Correlated Bidders",
  journal =      j-TOCT,
  volume =       "8",
  number =       "4",
  pages =        "13:1--13:??",
  month =        jul,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2934309",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Dec 26 17:25:10 MST 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "The seminal work of Myerson (Mathematics of OR '81)
                 characterizes incentive-compatible single-item auctions
                 among bidders with independent valuations. In this
                 setting, relatively simple deterministic auction
                 mechanisms achieve revenue optimality. When bidders
                 have correlated valuations, designing the
                 revenue-optimal deterministic auction is a
                 computationally demanding problem; indeed,
                 Papadimitriou and Pierrakos (STOC '11) proved that it
                 is APX-hard, obtaining an explicit inapproximability
                  factor of $ 1999 / 2000 = 99.95 \% $. In the current
                  article, we strengthen this inapproximability factor to
                  $ 63 / 64 \approx 98.5 \% $. Our proof is based on a
                  gap-preserving reduction from the Max-NM-3SAT problem;
                  a variant of
                 the maximum satisfiability problem where each clause
                 has exactly three literals and no clause contains both
                 negated and unnegated literals. We furthermore show
                 that the gap between the revenue of deterministic and
                  randomized auctions can be as low as $ 13 / 14 \approx
                  92.9 \% $, improving an explicit gap of $ 947 / 948
                  \approx 99.9 \% $ by Dobzinski, Fu, and Kleinberg
                  (STOC '11).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "13",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Gurjar:2016:PGP,
  author =       "Rohit Gurjar and Arpita Korwar and Jochen Messner and
                 Simon Straub and Thomas Thierauf",
  title =        "Planarizing Gadgets for Perfect Matching Do Not
                 Exist",
  journal =      j-TOCT,
  volume =       "8",
  number =       "4",
  pages =        "14:1--14:??",
  month =        jul,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2934310",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Dec 26 17:25:10 MST 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "To reduce a graph problem to its planar version, a
                 standard technique is to replace crossings in a drawing
                 of the input graph by planarizing gadgets. We show
                 unconditionally that such a reduction is not possible
                 for the perfect matching problem and also extend this
                 to some other problems related to perfect matching. We
                 further show that there is no planarizing gadget for
                 the Hamiltonian cycle problem.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "14",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Ron:2016:PEH,
  author =       "Dana Ron and Gilad Tsur",
  title =        "The Power of an Example: Hidden Set Size Approximation
                 Using Group Queries and Conditional Sampling",
  journal =      j-TOCT,
  volume =       "8",
  number =       "4",
  pages =        "15:1--15:??",
  month =        jul,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2930657",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Dec 26 17:25:10 MST 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We study a basic problem of approximating the size of
                 an unknown set S in a known universe U. We consider two
                 versions of the problem. In both versions, the
                 algorithm can specify subsets T \subseteq U. In the
                 first version, which we refer to as the group query or
                 subset query version, the algorithm is told whether T
                 \cap S is nonempty. In the second version, which we
                 refer to as the subset sampling version, if T \cap S is
                 nonempty, then the algorithm receives a uniformly
                 selected element from T \cap S. We study the difference
                 between these two versions in both the case that the
                 algorithm is adaptive and the case in which it is
                 nonadaptive. Our main focus is on a natural family of
                 allowed subsets, which correspond to intervals, as well
                 as variants of this family.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "15",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Gal:2016:SEA,
  author =       "Anna G{\'a}l and Jing-Tang Jang and Nutan Limaye and
                 Meena Mahajan and Karteek Sreenivasaiah",
  title =        "Space-Efficient Approximations for Subset Sum",
  journal =      j-TOCT,
  volume =       "8",
  number =       "4",
  pages =        "16:1--16:??",
  month =        jul,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2894843",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Dec 26 17:25:10 MST 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "SubsetSum is a well-known NP-complete problem: given $
                 t \in Z^+ $ and a set $S$ of $m$ positive integers,
                 output YES if and only if there is a subset $ S'
                 \subseteq S$ such that the sum of all numbers in $ S'$
                 equals $t$. The problem and its search and optimization
                 versions are known to be solvable in pseudopolynomial
                 time in general. We develop a one-pass deterministic
                 streaming algorithm that uses space $ O(\frac {\log
                 t}{\epsilon })$ and decides if some subset of the input
                 stream adds up to a value in the range $ \{ (1 \pm
                 \epsilon) t \} $. Using this algorithm, we design
                 space-efficient fully polynomial-time approximation
                 schemes (FPTAS) solving the search and optimization
                  versions of SubsetSum. Our algorithms run in $ O(\frac
                  {1}{\epsilon } m^2)$ time and $ O(\frac {1}{\epsilon })$
                  space on unit-cost RAMs, where $ 1 + \epsilon $ is the
                  approximation factor. This implies a constant-space,
                  quadratic-time FPTAS on unit-cost RAMs when $ \epsilon $ is
                 a constant. Previous FPTAS used space linear in $m$. In
                 addition, we show that on certain inputs, when a
                 solution is located within a short prefix of the input
                 sequence, our algorithms may run in sublinear time. We
                 apply our techniques to the problem of finding balanced
                 separators, and we extend our results to some other
                 variants of the more general knapsack problem. When the
                 input numbers are encoded in unary, the decision
                 version has been known to be in $ \log $ space. We give
                 streaming space lower and upper bounds for unary
                 SubsetSum (USS). If the input length is $N$ when the
                  numbers are encoded in unary, we show that randomized
                  $s$-pass streaming algorithms for exact SubsetSum need
                 space $ \Omega (\frac {\sqrt {N}}{s})$ and give a
                 simple deterministic two-pass streaming algorithm using
                 $ O(\sqrt {N \log N})$ space. Finally, we formulate an
                 encoding under which USS is monotone and show that the
                 exact and approximate versions in this formulation have
                 monotone $ O(\log^2 t)$ depth Boolean circuits. We also
                 show that any circuit using $ \epsilon $-approximator
                 gates for SubsetSum under this encoding needs $ \Omega
                 (n / \log n)$ gates to compute the disjointness
                 function.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "16",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Lauria:2016:RLB,
  author =       "Massimo Lauria",
  title =        "A Rank Lower Bound for Cutting Planes Proofs of
                 {Ramsey's Theorem}",
  journal =      j-TOCT,
  volume =       "8",
  number =       "4",
  pages =        "17:1--17:??",
  month =        jul,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2903266",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Dec 26 17:25:10 MST 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Ramsey's Theorem is a cornerstone of combinatorics and
                 logic. In its simplest formulation it says that for
                 every $ k > 0 $ and $ s > 0 $, there is a minimum
                 number $ r(k, s) $ such that any simple graph with at
                 least $ r (k, s) $ vertices contains either a clique of
                 size $k$ or an independent set of size $s$. We study
                 the complexity of proving upper bounds for the number $
                 r (k, k)$. In particular, we focus on the propositional
                 proof system cutting planes; we show that any cutting
                 plane proof of the upper bound ``$ r (k, k) \leq 4^k$''
                 requires high rank. In order to do that we show a
                 protection lemma which could be of independent
                 interest.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "17",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Viola:2016:QMH,
  author =       "Emanuele Viola",
  title =        "Quadratic Maps Are Hard to Sample",
  journal =      j-TOCT,
  volume =       "8",
  number =       "4",
  pages =        "18:1--18:??",
  month =        jul,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2934308",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Dec 26 17:25:10 MST 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "This note proves the existence of a quadratic GF(2)
                 map $ p \colon \{ 0, 1 \}^n \to \{ 0, 1 \} $ such that
                 no constant-depth circuit of size $ \poly (n) $ can
                 sample the distribution $ (u, p(u)) $ for uniform
                 $u$.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "18",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Hrubes:2016:HMV,
  author =       "P. Hrubes",
  title =        "On Hardness of Multilinearization and
                 {VNP}-Completeness in Characteristic $2$",
  journal =      j-TOCT,
  volume =       "9",
  number =       "1",
  pages =        "1:1--1:??",
  month =        dec,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2940323",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Dec 26 17:25:10 MST 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "For a Boolean function $ f \colon \{ 0, 1 \}^n \to \{
                 0, 1 \} $, let $ \fcirc $ be the unique multilinear
                 polynomial such that $ f(x) = \fcirc (x) $ holds for
                 every $ x \in \{ 0, 1 \}^n $. We show that, assuming VP
                 /= VNP, there exists a polynomial-time computable $f$
                 such that $ \fcirc $ requires superpolynomial
                 arithmetic circuits. In fact, this $f$ can be taken as
                 a monotone 2-CNF, or a product of affine functions.
                 This holds over any field. To prove the results in
                 characteristic 2, we design new VNP-complete families
                 in this characteristic. This includes the polynomial
                 EC$_n$ counting edge covers in a graph and the
                 polynomial mclique$_n$ counting cliques in a graph with
                 deleted perfect matching. They both correspond to
                 polynomial-time decidable problems, a phenomenon
                 previously encountered only in characteristic /= 2.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Krebs:2016:SDP,
  author =       "Andreas Krebs and Nutan Limaye and Meena Mahajan and
                 Karteek Sreenivasaiah",
  title =        "Small Depth Proof Systems",
  journal =      j-TOCT,
  volume =       "9",
  number =       "1",
  pages =        "2:1--2:??",
  month =        dec,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2956229",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Dec 26 17:25:10 MST 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "A proof system for a language L is a function f such
                  that Range(f) is exactly L. In this article, we look
                 at proof systems from a circuit complexity point of
                 view and study proof systems that are computationally
                 very restricted. The restriction we study is proof
                 systems that can be computed by bounded fanin circuits
                  of constant depth (NC$^0$) or of $ O(\log \log n) $ depth
                  but with $ O(1) $ alternations (poly log AC$^0$). Each
                 output bit depends on very few input bits; thus such
                 proof systems correspond to a kind of local error
                 correction on a theorem-proof pair. We identify exactly
                 how much power we need for proof systems to capture all
                 regular languages. We show that all regular languages
                  have poly log AC$^0$ proof systems, and from a
                 previous result (Beyersdorff et al. [2011a], where
                 NC$^0$ proof systems were first introduced), this is
                  tight. Our technique also shows that Maj has a poly log
                  AC$^0$ proof system. We explore the question of whether
                  Taut has NC$^0$ proof systems. Addressing this
                 question about 2TAUT, and since 2TAUT is closely
                 related to reachability in graphs, we ask the same
                 question about Reachability. We show that if Directed
                 reachability has NC$^0$ proof systems, then so does
                 2TAUT. We then show that both Undirected Reachability
                 and Directed UnReachability have NC$^0$ proof systems,
                 but Directed Reachability is still open. In the context
                 of how much power is needed for proof systems for
                 languages in NP, we observe that proof systems for a
                 good fraction of languages in NP do not need the full
                 power of AC$^0$; they have SAC$^0$ or coSAC$^0$ proof
                 systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "2",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Arvind:2016:NVC,
  author =       "V. Arvind and P. S. Joglekar and S. Raja",
  title =        "Noncommutative {Valiant}'s Classes: Structure and
                 Complete Problems",
  journal =      j-TOCT,
  volume =       "9",
  number =       "1",
  pages =        "3:1--3:??",
  month =        dec,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2956230",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Dec 26 17:25:10 MST 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "In this article, we explore the noncommutative
                 analogues, VP$_{nc}$ and VNP$_{nc}$, of Valiant's
                 algebraic complexity classes and show some striking
                 connections to classical formal language theory. Our
                 main results are the following: --- We show that Dyck
                 polynomials (defined from the Dyck languages of formal
                 language theory) are complete for the class VP$_{nc}$
                 under $ \leq_{abp}$ reductions. To the best of our
                 knowledge, these are the first natural polynomial
                 families shown to be VP$_{nc}$ -complete. Likewise, it
                 turns out that PAL (palindrome polynomials defined from
                 palindromes) are complete for the class VSKEW$_{nc}$
                 (defined by polynomial-size skew circuits) under $
                 \leq_{abp}$ reductions. The proof of these results is
                 by suitably adapting the classical
                 Chomsky--Sch{\"u}tzenberger theorem showing that Dyck
                 languages are the hardest CFLs. --- Assuming that
                 VP$_{nc}$ /= VNP$_{nc}$, we exhibit a strictly infinite
                 hierarchy of $p$-families, with respect to the
                 projection reducibility, between the complexity classes
                 VP$_{nc}$ and VNP$_{nc}$ (analogous to Ladner's theorem
                 [Ladner 1975]). --- Additionally, inside VP$_{nc}$, we
                 show that there is a strict hierarchy of p-families
                 (based on the nesting depth of Dyck polynomials) with
                 respect to the $ \leq_{abp}$ reducibility (defined
                 explicitly in this article).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "3",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Fontes:2016:RDD,
  author =       "Lila Fontes and Rahul Jain and Iordanis Kerenidis and
                 Sophie Laplante and Mathieu Lauri{\`e}re and
                 J{\'e}r{\'e}mie Roland",
  title =        "Relative Discrepancy Does Not Separate Information and
                 Communication Complexity",
  journal =      j-TOCT,
  volume =       "9",
  number =       "1",
  pages =        "4:1--4:??",
  month =        dec,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2967605",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Dec 26 17:25:10 MST 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Does the information complexity of a function equal
                 its communication complexity? We examine whether any
                 currently known techniques might be used to show a
                 separation between the two notions. Ganor et al. [2014]
                 recently provided such a separation in the
                 distributional case for a specific input distribution.
                 We show that in the non-distributional setting, the
                 relative discrepancy bound is smaller than the
                 information complexity; hence, it cannot separate
                 information and communication complexity. In addition,
                 in the distributional case, we provide a linear program
                 formulation for relative discrepancy and relate it to
                 variants of the partition bound, resolving also an open
                 question regarding the relation of the partition bound
                 and information complexity. Last, we prove the
                 equivalence between the adaptive relative discrepancy
                 and the public-coin partition, implying that the
                 logarithm of the adaptive relative discrepancy bound is
                 quadratically tight with respect to communication.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "4",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Beame:2016:NAF,
  author =       "Paul Beame and Nathan Grosshans and Pierre McKenzie
                 and Luc Segoufin",
  title =        "Nondeterminism and An Abstract Formulation of
                 {Neciporuk}'s Lower Bound Method",
  journal =      j-TOCT,
  volume =       "9",
  number =       "1",
  pages =        "5:1--5:??",
  month =        dec,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3013516",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Dec 26 17:25:10 MST 2016",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "A formulation of Neciporuk's lower bound method
                 slightly more inclusive than the usual
                 complexity-measure-specific formulation is presented.
                 Using this general formulation, limitations to lower
                 bounds achievable by the method are obtained for
                 several computation models, such as branching programs
                 and Boolean formulas having access to a sublinear
                 number of nondeterministic bits. In particular, it is
                 shown that any lower bound achievable by the method of
                 Neciporuk for the size of nondeterministic and parity
                 branching programs is at most $ O(n^{3 / 2} / \log n)
                 $.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "5",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Artemenko:2017:PGO,
  author =       "Sergei Artemenko and Ronen Shaltiel",
  title =        "Pseudorandom Generators with Optimal Seed Length for
                 Non-{Boolean} Poly-Size Circuits",
  journal =      j-TOCT,
  volume =       "9",
  number =       "2",
  pages =        "6:1--6:??",
  month =        may,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3018057",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jul 24 17:35:50 MDT 2017",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 https://www.math.utah.edu/pub/tex/bib/prng.bib;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "A sampling procedure for a distribution $P$ over $ \{
                 0, 1 \}^l$ is a function $ C : \{ 0, 1 \}^n \to \{ 0, 1
                 \}^l$ such that the distribution $ C(U_n)$ (obtained by
                 applying $C$ on the uniform distribution $ U_n$) is the
                 ``desired distribution'' $P$. Let $ n > r \geq l =
                  n^{\Omega (1)}$. An $ \epsilon $-nb-PRG (defined by
                 Dubrov and Ishai [2006]) is a function $ G : \{ 0, 1
                 \}^r \to \{ 0, 1 \}^n$ such that for every $ C : \{ 0,
                 1 \}^n \to \{ 0, 1 \}^l$ in some class of ``interesting
                  sampling procedures,'' $ C'(U_r) = C(G(U_r))$ is $
                 \epsilon $-close to $ C(U_n)$ in statistical distance.
                 We construct poly-time computable nb-PRGs with $ r = O
                 (l)$ for poly-size circuits relying on the assumption
                 that there exists $ \beta > 0$ and a problem $L$ in $ E
                 = {\rm DTIME}(2^{O(n)})$ such that for every large
                  enough $n$, nondeterministic circuits of size $ 2^{ \beta
                 n}$ that have NP-gates cannot solve $L$ on inputs of
                 length $n$. This assumption is a scaled nonuniform
                 analog of (the widely believed) EXP /= $ \Sigma_2^P$,
                 and similar assumptions appear in various contexts in
                 derandomization. Previous nb-PRGs of Dubrov and Ishai
                 have $ r = \Omega (l^2)$ and are based on very strong
                 cryptographic assumptions or, alternatively, on
                 nonstandard assumptions regarding incompressibility of
                 functions on random inputs. When restricting to
                 poly-size circuits $ C : \{ 0, 1 \}^n \to \{ 0, 1 \}^l$
                 with Shannon entropy $ H(C(U_n)) \leq k$, for $ l > k =
                 n^{\Omega (1)}$, our nb-PRGs have $ r = O (k)$. The
                 nb-PRGs of Dubrov and Ishai use seed length $ r =
                 \Omega (k^2)$ and require that the probability
                 distribution of $ C(U_n)$ is efficiently computable.
                 Our nb-PRGs follow from a notion of ``conditional
                 PRGs,'' which may be of independent interest. These are
                 PRGs where $ G(U_r)$ remains pseudorandom even when
                 conditioned on a ``large'' event $ \{ A(G(U_r)) = 1 \}
                 $, for an arbitrary poly-size circuit $A$. A related
                 notion was considered by Shaltiel and Umans [2005] in a
                 different setting, and our proofs use ideas from that
                 paper, as well as ideas of Dubrov and Ishai. We also
                 give an unconditional construction of poly-time
                 computable nb-PRGs for $ \poly (n)$-size, depth $d$
                 circuits $ C : \{ 0, 1 \}^n \to \{ 0, 1 \}^l$ with $ r
                 = O(l \cdot \log^{d + O (1)} n)$. This improves upon
                 the previous work of Dubrov and Ishai that has $ r \geq
                 l^2$. This result follows by adapting a recent PRG
                 construction of Trevisan and Xue [2013] to the case of
                 nb-PRGs. We also show that this PRG can be implemented
                 by a uniform family of constant-depth circuits with
                 slightly increased seed length.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "6",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Bhattacharyya:2017:LBC,
  author =       "Arnab Bhattacharyya and Sivakanth Gopi",
  title =        "Lower Bounds for Constant Query Affine-Invariant
                 {LCCs} and {LTCs}",
  journal =      j-TOCT,
  volume =       "9",
  number =       "2",
  pages =        "7:1--7:??",
  month =        may,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3016802",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jul 24 17:35:50 MDT 2017",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Affine-invariant codes are codes whose coordinates
                 form a vector space over a finite field and which are
                 invariant under affine transformations of the
                 coordinate space. They form a natural, well-studied
                 class of codes; they include popular codes such as
                 Reed--Muller and Reed--Solomon. A particularly
                 appealing feature of affine-invariant codes is that
                 they seem well suited to admit local correctors and
                 testers. In this work, we give lower bounds on the
                 length of locally correctable and locally testable
                 affine-invariant codes with constant query complexity.
                  We show that if a code $ C \subset \Sigma^{K^n} $ is an
                 $r$-query affine invariant locally correctable code
                 (LCC), where $K$ is a finite field and $ \Sigma $ is a
                 finite alphabet, then the number of codewords in $C$ is
                 at most $ \exp (O_{K, r, | \Sigma |}(n^{r - 1}))$.
                  Also, we show that if $ C \subset \Sigma^{K^n}$ is an
                  $r$-query affine invariant locally testable code (LTC),
                  then the number of codewords in $C$ is at most $ \exp
                  (O_{K, r, | \Sigma |} (n^{r - 2}))$. The dependence on
                  $n$ in these bounds is tight for constant-query
                 LCCs/LTCs, since Guo, Kopparty, and Sudan (ITCS'13)
                 constructed affine-invariant codes via lifting that
                 have the same asymptotic tradeoffs. Note that our
                 result holds for non-linear codes, whereas previously,
                 Ben-Sasson and Sudan (RANDOM'11) assumed linearity to
                 derive similar results. Our analysis uses higher-order
                 Fourier analysis. In particular, we show that the
                 codewords corresponding to an affine-invariant LCC/LTC
                 must be far from each other with respect to Gowers norm
                 of an appropriate order. This then allows us to bound
                 the number of codewords, using known decomposition
                 theorems, which approximate any bounded function in
                 terms of a finite number of low-degree non-classical
                 polynomials, up to a small error in the Gowers norm.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "7",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Gurjar:2017:EPM,
  author =       "Rohit Gurjar and Arpita Korwar and Jochen Messner and
                 Thomas Thierauf",
  title =        "Exact Perfect Matching in Complete Graphs",
  journal =      j-TOCT,
  volume =       "9",
  number =       "2",
  pages =        "8:1--8:??",
  month =        may,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3041402",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jul 24 17:35:50 MDT 2017",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "A red-blue graph is a graph where every edge is
                 colored either red or blue. The exact perfect matching
                 problem asks for a perfect matching in a red-blue graph
                 that has exactly a given number of red edges. We show
                 that for complete and bipartite complete graphs, the
                 exact perfect matching problem is logspace equivalent
                 to the perfect matching problem. Hence, an efficient
                 parallel algorithm for perfect matching would carry
                 over to the exact perfect matching problem for this
                 class of graphs. We also report some progress in
                 extending the result to arbitrary graphs.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "8",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Galanis:2017:CTA,
  author =       "Andreas Galanis and Leslie Ann Goldberg and Mark
                 Jerrum",
  title =        "A Complexity Trichotomy for Approximately Counting
                 List {$H$}-Colorings",
  journal =      j-TOCT,
  volume =       "9",
  number =       "2",
  pages =        "9:1--9:??",
  month =        may,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3037381",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jul 24 17:35:50 MDT 2017",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We examine the computational complexity of
                 approximately counting the list H -colorings of a
                 graph. We discover a natural graph-theoretic trichotomy
                 based on the structure of the graph H. If H is an
                 irreflexive bipartite graph or a reflexive complete
                 graph, then counting list H -colorings is trivially in
                 polynomial time. Otherwise, if H is an irreflexive
                 bipartite permutation graph or a reflexive proper
                 interval graph, then approximately counting list H
                 -colorings is equivalent to \#BIS, the problem of
                 approximately counting independent sets in a bipartite
                 graph. This is a well-studied problem that is believed
                 to be of intermediate complexity-it is believed that it
                 does not have an FPRAS, but that it is not as difficult
                 as approximating the most difficult counting problems
                 in \#P. For every other graph H, approximately counting
                 list H -colorings is complete for \#P with respect to
                 approximation-preserving reductions (so there is no
                 FPRAS unless NP = RP). Two pleasing features of the
                 trichotomy are (1) it has a natural formulation in
                 terms of hereditary graph classes, and (2) the proof is
                 largely self-contained and does not require any
                 universal algebra (unlike similar dichotomies in the
                 weighted case). We are able to extend the hardness
                 results to the bounded-degree setting, showing that all
                 hardness results apply to input graphs with maximum
                 degree at most 6.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "9",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Dhayal:2017:MMP,
  author =       "Anant Dhayal and Jayalal Sarma and Saurabh Sawlani",
  title =        "Min\slash Max-Poly Weighting Schemes and the {NL}
                 versus {UL} Problem",
  journal =      j-TOCT,
  volume =       "9",
  number =       "2",
  pages =        "10:1--10:??",
  month =        may,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3070902",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jul 24 17:35:50 MDT 2017",
  bibsource =    "http://www.acm.org/pubs/contents/journals/toct/;
                 https://www.math.utah.edu/pub/tex/bib/hash.bib;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "For a graph $ G (V, E) (| V | = n) $ and a vertex $ s
                 \in V $, a weighting scheme $ (W : E \mapsto Z^+) $ is
                 called a min-unique (resp. max-unique) weighting scheme
                 if, for any vertex $v$ of the graph $G$, there is a
                 unique path of minimum (resp. maximum) weight from $s$
                 to $v$, where weight of a path is the sum of the
                 weights assigned to the edges. Instead, if the number
                 of paths of minimum (resp. maximum) weight is bounded
                 by $ n^c$ for some constant $c$, then the weighting
                 scheme is called a min-poly (resp. max-poly) weighting
                 scheme. In this article, we propose an unambiguous
                 nondeterministic log-space (UL) algorithm for the
                  problem of testing reachability in graphs augmented with a
                 min-poly weighting scheme. This improves the result in
                 Reinhardt and Allender [2000], in which a UL algorithm
                 was given for the case when the weighting scheme is
                 min-unique. Our main technique involves triple
                 inductive counting and generalizes the techniques of
                 Immerman [1988], Szelepcs{\'e}nyi [1988], and Reinhardt
                 and Allender [2000], combined with a hashing technique
                 due to Fredman et al. [1984] (also used in Garvin et
                 al. [2014]). We combine this with a complementary
                 unambiguous verification method to give the desired UL
                 algorithm. At the other end of the spectrum, we propose
                 a UL algorithm for testing reachability in layered DAGs
                 augmented with max-poly weighting schemes. To achieve
                 this, we first reduce reachability in layered DAGs to
                 the longest path problem for DAGs with a unique source,
                 such that the reduction also preserves the max-unique
                 and max-poly properties of the graph. Using our
                 techniques, we generalize the double inductive counting
                 method in Limaye et al. [2009], in which the UL
                 algorithm was given for the longest path problem on
                 DAGs with a unique sink and augmented with a max-unique
                 weighting scheme. An important consequence of our
                 results is that, to show NL = UL, it suffices to design
                 log-space computable min-poly (or max-poly) weighting
                 schemes for layered DAGs.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "10",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Chen:2017:AMC,
  author =       "Hubie Chen and Benoit Larose",
  title =        "Asking the Metaquestions in Constraint Tractability",
  journal =      j-TOCT,
  volume =       "9",
  number =       "3",
  pages =        "11:1--11:??",
  month =        oct,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3134757",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jan 22 09:30:15 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "The constraint satisfaction problem (CSP) involves
                 deciding, given a set of variables and a set of
                 constraints on the variables, whether or not there is
                 an assignment to the variables satisfying all of the
                 constraints. One formulation of the CSP is as the
                  problem of deciding, given a pair (G, H) of relational
                 structures, whether or not there is a homomorphism from
                 the first structure to the second structure. The CSP is
                 generally NP-hard; a common way to restrict this
                 problem is to fix the second structure H so that each
                 structure H gives rise to a problem CSP(H). The problem
                 family CSP(H) has been studied using an algebraic
                 approach, which links the algorithmic and complexity
                 properties of each problem CSP(H) to a set of
                 operations, the so-called polymorphisms of H. Certain
                 types of polymorphisms are known to imply the
                 polynomial-time tractability of CSP(H), and others are
                  conjectured to do so. This article systematically
                  studies, for various classes of polymorphisms, the
                 computational complexity of deciding whether or not a
                 given structure H admits a polymorphism from the class.
                 Among other results, we prove the NP-completeness of
                 deciding a condition conjectured to characterize the
                 tractable problems CSP(H), as well as the
                 NP-completeness of deciding if CSP(H) has bounded
                 width.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "11",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Elberfeld:2017:CGB,
  author =       "Michael Elberfeld and Pascal Schweitzer",
  title =        "Canonizing Graphs of Bounded Tree Width in Logspace",
  journal =      j-TOCT,
  volume =       "9",
  number =       "3",
  pages =        "12:1--12:??",
  month =        oct,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3132720",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jan 22 09:30:15 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Graph canonization is the problem of computing a
                 unique representative, a canon, from the isomorphism
                 class of a given graph. This implies that two graphs
                 are isomorphic exactly if their canons are equal. We
                 show that graphs of bounded tree width can be canonized
                 by logarithmic-space (logspace) algorithms. This
                 implies that the isomorphism problem for graphs of
                 bounded tree width can be decided in logspace. In the
                 light of isomorphism for trees being hard for the
                 complexity class logspace, this makes the ubiquitous
                 class of graphs of bounded tree width one of the few
                 classes of graphs for which the complexity of the
                 isomorphism problem has been exactly determined.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "12",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Schmid:2017:FCS,
  author =       "Markus L. Schmid",
  title =        "Finding Consensus Strings with Small Length Difference
                 between Input and Solution Strings",
  journal =      j-TOCT,
  volume =       "9",
  number =       "3",
  pages =        "13:1--13:??",
  month =        oct,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3110290",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jan 22 09:30:15 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "The Closest Substring Problem is to decide, for given
                 strings $ s_1 $, \ldots{}, $ s_k $ of length at most
                 $l$ and numbers $m$ and $d$, whether there is a
                  length-$m$ string $s$ and length-$m$ substrings
                  $ s'_i$ of $ s_i$, such that $s$ has a Hamming distance
                  of at most $d$ from each $ s'_i$. If we instead
                  require the sum of all the Hamming distances between
                  $s$ and each $ s'_i$ to be bounded by $d$, then it is
                 called the Consensus Patterns Problem. We contribute to
                 the parameterised complexity analysis of these
                 classical NP-hard string problems by investigating the
                 parameter ($ l - m$), i.e., the length difference
                 between input and solution strings. For most
                 combinations of ($ l - m$) and one of the classical
                 parameters ($m$, $l$, $k$, or $d$), we obtain
                 fixed-parameter tractability. However, even for
                 constant ($ l - m$) and constant alphabet size, both
                 problems remain NP-hard. While this follows from known
                 results with respect to the Closest Substring, we need
                 a new reduction in the case of the Consensus Patterns.
                 As a by-product of this reduction, we obtain an exact
                 exponential-time algorithm for both problems, which is
                 based on an alphabet reduction.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "13",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Adamaszek:2017:HAS,
  author =       "Anna Adamaszek and Tomasz Kociumaka and Marcin
                 Pilipczuk and Michal Pilipczuk",
  title =        "Hardness of Approximation for Strip Packing",
  journal =      j-TOCT,
  volume =       "9",
  number =       "3",
  pages =        "14:1--14:??",
  month =        oct,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3092026",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jan 22 09:30:15 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Strip packing is a classical packing problem, where
                 the goal is to pack a set of rectangular objects into a
                 strip of a given width, while minimizing the total
                 height of the packing. The problem has multiple
                 applications, for example, in scheduling and
                 stock-cutting, and has been studied extensively. When
                 the dimensions of the objects are allowed to be
                 exponential in the total input size, it is known that
                 the problem cannot be approximated within a factor
                 better than 3/2, unless P = NP. However, there was no
                 corresponding lower bound for polynomially bounded
                 input data. In fact, Nadiradze and Wiese [SODA 2016]
                 have recently proposed a $ (1.4 +
                 \epsilon)$-approximation algorithm for this variant,
                 thus showing that strip packing with polynomially
                 bounded data can be approximated better than when
                 exponentially large values are allowed in the input.
                 Their result has subsequently been improved to a $ (4 /
                 3 + \epsilon)$-approximation by two independent
                  research groups [FSTTCS 2016, WALCOM 2017]. This raises
                  the question of whether strip packing with polynomially
                 bounded input data admits a quasi-polynomial time
                 approximation scheme, as is the case for related
                 two-dimensional packing problems like maximum
                 independent set of rectangles or two-dimensional
                  knapsack. In this article, we answer this question in
                  the negative by proving that it is NP-hard to approximate
                 strip packing within a factor better than 12/11, even
                 when restricted to polynomially bounded input data. In
                 particular, this shows that the strip packing problem
                 admits no quasi-polynomial time approximation scheme,
                 unless NP $ \subseteq $ DTIME($ 2^{\polylog (n)}$).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "14",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Chen:2017:PCM,
  author =       "Hubie Chen",
  title =        "Proof Complexity Modulo the Polynomial Hierarchy:
                 Understanding Alternation as a Source of Hardness",
  journal =      j-TOCT,
  volume =       "9",
  number =       "3",
  pages =        "15:1--15:??",
  month =        oct,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3087534",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jan 22 09:30:15 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We present and study a framework in which one can
                 present alternation-based lower bounds on proof length
                 in proof systems for quantified Boolean formulas. A key
                 notion in this framework is that of proof system
                 ensemble, which is (essentially) a sequence of proof
                 systems where, for each, proof checking can be
                 performed in the polynomial hierarchy. We introduce a
                 proof system ensemble called relaxing QU-res that is
                 based on the established proof system QU-resolution.
                 Our main results include an exponential separation of
                 the treelike and general versions of relaxing QU-res
                 and an exponential lower bound for relaxing QU-res;
                 these are analogs of classical results in propositional
                 proof complexity.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "15",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Iwama:2018:PT,
  author =       "Kazuo Iwama and Yuichi Yoshida",
  title =        "Parameterized Testability",
  journal =      j-TOCT,
  volume =       "9",
  number =       "4",
  pages =        "16:1--16:??",
  month =        jan,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3155294",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jan 22 09:30:15 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "This article studies property testing for NP
                 optimization problems with parameter $k$ under the
                 general graph model with an augmentation of random edge
                 sampling capability. It is shown that a variety of such
                 problems, including $k$-Vertex Cover, $k$-Feedback
                 Vertex Set, $k$-Multicut, $k$-Path-Free, and
                 $k$-Dominating Set, are constant-query testable if $k$
                 is constant. It should be noted that the first four
                 problems are fixed parameter tractable (FPT) and it
                 turns out that algorithmic techniques for their FPT
                 algorithms (branch-and-bound search, color coding,
                 etc.) are also useful for our testers. $k$-Dominating
                 Set is $ W [2]$-hard, but we can still test the
                 property with a constant number of queries, since the
                 definition of $ \epsilon $-farness makes the problem
                 trivial for non-sparse graphs that are the source of
                 hardness for the original optimization problem. We also
                 consider $k$-Odd Cycle Transversal, which is another
                 well-known FPT problem, but we only give a
                 sublinear-query tester when $k$ is a constant.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "16",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Pallavoor:2018:PPT,
  author =       "Ramesh Krishnan S. Pallavoor and Sofya Raskhodnikova
                 and Nithin Varma",
  title =        "Parameterized Property Testing of Functions",
  journal =      j-TOCT,
  volume =       "9",
  number =       "4",
  pages =        "17:1--17:??",
  month =        jan,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3155296",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jan 22 09:30:15 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We investigate the parameters in terms of which the
                 complexity of sublinear-time algorithms should be
                 expressed. Our goal is to find input parameters that
                 are tailored to the combinatorics of the specific
                 problem being studied and design algorithms that run
                 faster when these parameters are small. This direction
                 enables us to surpass the (worst-case) lower bounds,
                 expressed in terms of the input size, for several
                 problems. Our aim is to develop a similar level of
                 understanding of the complexity of sublinear-time
                 algorithms to the one that was enabled by research in
                 parameterized complexity for classical algorithms.
                 Specifically, we focus on testing properties of
                 functions. By parameterizing the query complexity in
                  terms of the size $r$ of the image of the input function,
                 we obtain testers for monotonicity and convexity of
                 functions of the form $ f \colon [n] \to R $ with query
                 complexity $ O(\log r) $, with no dependence on $n$.
                 The result for monotonicity circumvents the $ \Omega
                 (\log n)$ lower bound by Fischer (Inf. Comput. 2004)
                 for this problem. We present several other
                 parameterized testers, providing compelling evidence
                 that expressing the query complexity of property
                 testers in terms of the input size is not always the
                 best choice.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "17",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Pilipczuk:2018:SEA,
  author =       "Michal Pilipczuk and Marcin Wrochna",
  title =        "On Space Efficiency of Algorithms Working on
                 Structural Decompositions of Graphs",
  journal =      j-TOCT,
  volume =       "9",
  number =       "4",
  pages =        "18:1--18:??",
  month =        jan,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3154856",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jan 22 09:30:15 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Dynamic programming on path and tree decompositions of
                 graphs is a technique that is ubiquitous in the field
                 of parameterized and exponential-time algorithms.
                 However, one of its drawbacks is that the space usage
                 is exponential in the decomposition's width. Following
                 the work of Allender et al. [5], we investigate whether
                 this space complexity explosion is unavoidable. Using
                 the idea of reparameterization of Cai and Juedes [18],
                 we prove that the question is closely related to a
                 conjecture that the Longest Common Subsequence problem
                 parameterized by the number of input strings does not
                 admit an algorithm that simultaneously uses XP time and
                 FPT space. Moreover, we extend the complexity landscape
                 sketched for pathwidth and treewidth by Allender et al.
                 by considering the parameter tree-depth. We prove that
                 computations on tree-depth decompositions correspond to
                 a model of non-deterministic machines that work in
                 polynomial time and logarithmic space, with access to
                 an auxiliary stack of maximum height equal to the
                 decomposition's depth. Together with the results of
                 Allender et al., this describes a hierarchy of
                 complexity classes for polynomial-time
                 non-deterministic machines with different restrictions
                 on the access to working space, which mirrors the
                 classic relations between treewidth, pathwidth, and
                 tree-depth.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "18",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Chakraborty:2018:SPT,
  author =       "Diptarka Chakraborty and Raghunath Tewari",
  title =        "An {$ O(n \epsilon) $} Space and Polynomial Time
                 Algorithm for Reachability in Directed Layered Planar
                 Graphs",
  journal =      j-TOCT,
  volume =       "9",
  number =       "4",
  pages =        "19:1--19:??",
  month =        jan,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3154857",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jan 22 09:30:15 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Given a graph $G$ and two vertices $s$ and $t$ in it,
                 graph reachability is the problem of checking whether
                 there exists a path from $s$ to $t$ in $G$. We show
                 that reachability in directed layered planar graphs can
                 be decided in polynomial time and $ O(n^\epsilon)$
                 space, for any $ \epsilon > 0$. The previous best-known
                 space bound for this problem with polynomial time was
                 approximately $ O(\sqrt n)$ space (Imai et al. 2013).
                 Deciding graph reachability in SC (Steve's class) is an
                 important open question in complexity theory, and in
                 this article, we make progress toward resolving this
                 question.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "19",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Chang:2018:MMS,
  author =       "Ching-Lueh Chang",
  title =        "Metric $1$-Median Selection: Query Complexity vs.
                 Approximation Ratio",
  journal =      j-TOCT,
  volume =       "9",
  number =       "4",
  pages =        "20:1--20:??",
  month =        jan,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3154858",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jan 22 09:30:15 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Consider the problem of finding a point in a metric
                 space $ (\{ 1, 2, \ldots, n \}, d) $ with the minimum
                 average distance to other points. We show that this
                 problem has no deterministic $ o(n^{1 + 1 / (h - 1)} /
                  h)$-query $ 2 h (1 - \epsilon)$-approximation
                 algorithms for any constant $ \epsilon > 0$ and any $ h
                  = h (n) \in Z^+ \backslash \{ 1 \} $ satisfying $ h = o
                  (n^{1 / (h - 1)})$. Combining our result with existing
                 ones, we determine the best approximation ratio
                 achievable by deterministic $ O(n^{1 + \epsilon
                 })$-query (respectively, $ O(n^{1 + \epsilon })$-time)
                 algorithms to be $ 2 \lceil 1 / \epsilon \rceil $, for
                 all constants $ \epsilon \in (0, 1)$.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "20",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Aydinlioglu:2018:ARU,
  author =       "Baris Aydinlioglu and Eric Bach",
  title =        "Affine Relativization: Unifying the Algebrization and
                 Relativization Barriers",
  journal =      j-TOCT,
  volume =       "10",
  number =       "1",
  pages =        "1:1--1:??",
  month =        jan,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3170704",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:49 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We strengthen existing evidence for the so-called
                  ``algebrization barrier.'' Algebrization (short for
                  algebraic relativization) was introduced by Aaronson and
                 Wigderson (AW) (STOC 2008) to characterize proofs
                 involving arithmetization, simulation, and other
                 ``current techniques.'' However, unlike relativization,
                 eligible statements under this notion do not seem to
                 have basic closure properties, making it conceivable to
                 take two proofs, both with algebrizing conclusions, and
                  combine them to get a proof without one. Further, the
                 notion is undefined for most types of statements and
                 does not seem to yield a general criterion by which we
                 can tell, given a proof, whether it algebrizes. In
                 fact, the very notion of an algebrizing proof is never
                 made explicit, and casual attempts to define it are
                 problematic. All these issues raise the question of
                 what evidence, if any, is obtained by knowing whether
                 some statement does or does not algebrize. We give a
                 reformulation of algebrization without these
                 shortcomings. First, we define what it means for any
                 statement/proof to hold relative to any language, with
                 no need to refer to devices like a Turing machine with
                 an oracle tape. Our approach dispels the widespread
                 misconception that the notion of oracle access is
                 inherently tied to a computational model. We also
                 connect relativizing statements to proofs, by showing
                 that every proof that some statement relativizes is
                 essentially a relativizing proof of that statement. We
                 then define a statement/proof as relativizing affinely
                  if it holds relative to every affine oracle; here an
                 affine oracle is the result of a particular error
                 correcting code applied to the characteristic string of
                 a language. We show that every statement that AW
                 declare as algebrizing does relativize affinely, in
                 fact, has a proof that relativizes affinely, and that
                 no such proof exists for any of the statements shown
                 not-algebrizing by AW in the classical computation
                 model. Our work complements, and goes beyond, the
                 subsequent work by Impagliazzo, Kabanets, and
                 Kolokolova (STOC 2009), which also proposes a
                 reformulation of algebrization, but falls short of
                 recovering some key results of AW, most notably
                 regarding the NEXP versus P/poly question. Using our
                 definitions, we obtain new streamlined proofs of
                  several classic results in complexity, including PSPACE
                  $ \subseteq $ IP and NEXP $ \subseteq $ MIP. This may be of
                 separate interest.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Watson:2018:CCS,
  author =       "Thomas Watson",
  title =        "Communication Complexity of Statistical Distance",
  journal =      j-TOCT,
  volume =       "10",
  number =       "1",
  pages =        "2:1--2:??",
  month =        jan,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3170708",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:49 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We prove nearly matching upper and lower bounds on the
                 randomized communication complexity of the following
                 problem: Alice and Bob are each given a probability
                  distribution over $n$ elements, and they wish to estimate
                  within $ \pm \epsilon $ the statistical (total variation)
                  distance between their distributions. For some range of
                  parameters, there is up to a $ \log n $ factor gap between
                  the upper and lower bounds, and we identify a barrier
                  to using information complexity techniques to improve
                  the lower bound in this case. We also prove a side
                  result that we discovered along the way: the randomized
                  communication complexity of $n$-bit Majority composed
                  with $n$-bit Greater-Than is $ \Theta (n \log n) $.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "2",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Anderson:2018:ITL,
  author =       "Matthew Anderson and Michael A. Forbes and Ramprasad
                 Saptharishi and Amir Shpilka and Ben Lee Volk",
  title =        "Identity Testing and Lower Bounds for Read-$k$
                 Oblivious Algebraic Branching Programs",
  journal =      j-TOCT,
  volume =       "10",
  number =       "1",
  pages =        "3:1--3:??",
  month =        jan,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3170709",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:49 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Read-$k$ oblivious algebraic branching programs are a
                 natural generalization of the well-studied model of
                 read-once oblivious algebraic branching program (ABP).
                 In this work, we give an exponential lower bound of $
                 \exp (n / k^{O(k)})$ on the width of any read-$k$
                 oblivious ABP computing some explicit multilinear
                 polynomial $f$ that is computed by a polynomial-size
                 depth-3 circuit. We also study the polynomial identity
                 testing (PIT) problem for this model and obtain a
                 white-box subexponential-time PIT algorithm. The
                  algorithm runs in time $ 2^{\tilde O(n^{1 - 1 / 2^{k -
                  1}})}$ and needs white box access only to know the order
                 in which the variables appear in the ABP.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "3",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Goos:2018:RCV,
  author =       "Mika G{\"o}{\"o}s and T. S. Jayram and Toniann Pitassi
                 and Thomas Watson",
  title =        "Randomized Communication versus Partition Number",
  journal =      j-TOCT,
  volume =       "10",
  number =       "1",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3170711",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:49 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We show that randomized communication complexity can
                 be superlogarithmic in the partition number of the
                 associated communication matrix, and we obtain
                 near-optimal randomized lower bounds for the Clique
                 versus Independent Set problem. These results
                 strengthen the deterministic lower bounds obtained in
                 prior work (G{\"o}{\"o}s, Pitassi, and Watson,
                 FOCS'15). One of our main technical contributions
                 states that information complexity when the cost is
                 measured with respect to only 1-inputs (or only
                 0-inputs) is essentially equivalent to information
                 complexity with respect to all inputs.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "4",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{ODonnell:2018:WCQ,
  author =       "Ryan O'Donnell and A. C. Cem Say",
  title =        "The Weakness of {CTC} Qubits and the Power of
                 Approximate Counting",
  journal =      j-TOCT,
  volume =       "10",
  number =       "2",
  pages =        "5:1--5:??",
  month =        may,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3196832",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:50 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We present results in structural complexity theory
                 concerned with the following interrelated topics:
                 computation with postselection/restarting, closed
                 timelike curves (CTCs), and approximate counting. The
                 first result is a new characterization of the lesser
                 known complexity class BPP$_{path}$ in terms of more
                 familiar concepts. Precisely, BPP$_{path}$ is the class
                 of problems that can be efficiently solved with a
                 nonadaptive oracle for the approximate counting
                 problem. Similarly, PP equals the class of problems
                 that can be solved efficiently with nonadaptive queries
                 for the related approximate difference problem. Another
                 result is concerned with the computational power
                 conferred by CTCs, or equivalently, the computational
                 complexity of finding stationary distributions for
                 quantum channels. Using the preceding characterization
                 of PP, we show that any poly( n )-time quantum
                 computation using a CTC of O (log n ) qubits may as
                 well just use a CTC of 1 classical bit. This result
                 essentially amounts to showing that one can find a
                 stationary distribution for a poly( n )-dimensional
                 quantum channel in PP.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "5",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Komusiewicz:2018:TRT,
  author =       "Christian Komusiewicz",
  title =        "Tight Running Time Lower Bounds for Vertex Deletion
                 Problems",
  journal =      j-TOCT,
  volume =       "10",
  number =       "2",
  pages =        "6:1--6:??",
  month =        may,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3186589",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:50 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "For a graph class $ \Pi $, the $ \Pi $-Vertex Deletion
                 problem has as input an undirected graph G = ( V, E )
                 and an integer k and asks whether there is a set of at
                 most k vertices that can be deleted from G such that
                 the resulting graph is a member of $ \Pi $. By a
                 classic result of Lewis and Yannakakis [17], $ \Pi
                 $-Vertex Deletion is NP-hard for all hereditary
                 properties $ \Pi $. We adapt the original NP-hardness
                 construction to show that under the exponential time
                 hypothesis (ETH), tight complexity results can be
                  obtained. We show that $ \Pi $-Vertex Deletion does not
                  admit a $ 2^{o(n)} $-time algorithm where $n$ is the
                  number of vertices in $G$. We also obtain a dichotomy for
                  running time bounds that include the number $m$ of edges
                  in the input graph. On the one hand, if $ \Pi $
                  contains all edgeless graphs, then there is no $ 2^{o(n
                  + m)} $-time algorithm for $ \Pi $-Vertex Deletion. On the
                  other hand, if there is a fixed edgeless graph that is
                  not contained in $ \Pi $ and containment in $ \Pi $ can
                  be determined in $ 2^{O(n)} $ time or $ 2^{o(m)} $ time,
                  then $ \Pi $-Vertex Deletion can be solved in $ 2^{O(\sqrt
                  {m})} + O(n) $ or $ 2^{o(m)} + O(n) $ time,
                  respectively. We also consider restrictions on the
                  domain of the input graph $G$. For example, we obtain
                  that $ \Pi $-Vertex Deletion cannot be solved in $ 2^{o
                  (\sqrt {n})} $ time if $G$ is planar and $ \Pi $ is
                 hereditary and contains and excludes infinitely many
                 planar graphs. Finally, we provide similar results for
                 the problem variant where the deleted vertex set has to
                 induce a connected graph.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "6",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Lutz:2018:AIP,
  author =       "Jack H. Lutz and Neil Lutz",
  title =        "Algorithmic Information, Plane {Kakeya} Sets, and
                 Conditional Dimension",
  journal =      j-TOCT,
  volume =       "10",
  number =       "2",
  pages =        "7:1--7:??",
  month =        may,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3201783",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:50 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We formulate the conditional Kolmogorov complexity of
                 x given y at precision r, where x and y are points in
                 Euclidean spaces and r is a natural number. We
                  demonstrate the utility of this notion in two ways: (1)
                 We prove a point-to-set principle that enables one to
                 use the (relativized, constructive) dimension of a
                 single point in a set E in a Euclidean space to
                 establish a lower bound on the (classical) Hausdorff
                 dimension of E. We then use this principle, together
                 with conditional Kolmogorov complexity in Euclidean
                 spaces, to give a new proof of the known,
                 two-dimensional case of the Kakeya conjecture. This
                 theorem of geometric measure theory, proved by Davies
                 in 1971, says that every plane set containing a unit
                 line segment in every direction has Hausdorff dimension
                  2. (2) We use conditional Kolmogorov complexity in
                 Euclidean spaces to develop the lower and upper
                 conditional dimensions dim( x | y ) and Dim( x | y ) of
                 x given y, where x and y are points in Euclidean
                 spaces. Intuitively, these are the lower and upper
                 asymptotic algorithmic information densities of x
                 conditioned on the information in y. We prove that
                 these conditional dimensions are robust and that they
                 have the correct information-theoretic relationships
                 with the well-studied dimensions dim( x ) and Dim( x )
                 and the mutual dimensions mdim( x: y ) and Mdim( x: y
                 ).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "7",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Gharibian:2018:GSC,
  author =       "Sevag Gharibian and Jamie Sikora",
  title =        "Ground State Connectivity of Local {Hamiltonians}",
  journal =      j-TOCT,
  volume =       "10",
  number =       "2",
  pages =        "8:1--8:??",
  month =        may,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3186587",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:50 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "The study of ground state energies of local
                 Hamiltonians has played a fundamental role in quantum
                 complexity theory. In this article, we take a new
                 direction by introducing the physically motivated
                 notion of ``ground state connectivity'' of local
                 Hamiltonians, which captures problems in areas ranging
                 from quantum stabilizer codes to quantum memories.
                 Roughly, ``ground state connectivity'' corresponds to
                  the natural question: Given two ground states $ | \psi
                  \rangle $ and $ | \phi \rangle $ of a local Hamiltonian $H$, is
                  there an ``energy barrier'' (with respect to $H$) along
                  any sequence of local operations mapping $ | \psi \rangle $
                  to $ | \phi \rangle $? We show that the complexity of this
                 question can range from QCMA-complete to
                 PSPACE-complete, as well as NEXP-complete for an
                 appropriately defined ``succinct'' version of the
                 problem. As a result, we obtain a natural QCMA-complete
                 problem, a goal which has generally proven difficult
                 since the conception of QCMA over a decade ago. Our
                 proofs rely on a new technical tool, the Traversal
                 Lemma, which analyzes the Hilbert space a local unitary
                 evolution must traverse under certain conditions. We
                 show that this lemma is essentially tight with respect
                 to the length of the unitary evolution in question.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "8",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Bliznets:2018:HAF,
  author =       "Ivan Bliznets and Marek Cygan and Pawel Komosa and
                 Michal Pilipczuk",
  title =        "Hardness of Approximation for {$H$}-free Edge
                 Modification Problems",
  journal =      j-TOCT,
  volume =       "10",
  number =       "2",
  pages =        "9:1--9:??",
  month =        may,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3196834",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:50 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "The H --- free Edge Deletion problem asks, for a given
                 graph G and integer k, whether it is possible to delete
                 at most k edges from G to make it H -free-that is, not
                 containing H as an induced subgraph. The H -free Edge
                 Completion problem is defined similarly, but we add
                 edges instead of deleting them. The study of these two
                 problem families has recently been the subject of
                 intensive studies from the point of view of
                 parameterized complexity and kernelization. In
                 particular, it was shown that the problems do not admit
                 polynomial kernels (under plausible complexity
                 assumptions) for almost all graphs H, with several
                 important exceptions occurring when the class of H
                 -free graphs exhibits some structural properties. In
                 this work, we complement the parameterized study of
                 edge modification problems to H -free graphs by
                 considering their approximability. We prove that
                 whenever H is 3-connected and has at least two
                  nonedges, then both H-free Edge Deletion and H
                 -free Edge Completion are very hard to approximate:
                 they do not admit poly(OPT)-approximation in polynomial
                 time, unless P=NP, or even in time subexponential in
                 OPT, unless the exponential time hypothesis fails. The
                 assumption of the existence of two nonedges appears to
                 be important: we show that whenever H is a complete
                 graph without one edge, then H -free Edge Deletion is
                 tightly connected to the Min Horn Deletion problem,
                 whose approximability is still open. Finally, in an
                 attempt to extend our hardness results beyond
                 3-connected graphs, we consider the cases of H being a
                 path or a cycle, and we achieve an almost complete
                 dichotomy there.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "9",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Minahan:2018:CDI,
  author =       "Daniel Minahan and Ilya Volkovich",
  title =        "Complete Derandomization of Identity Testing and
                 Reconstruction of Read-Once Formulas",
  journal =      j-TOCT,
  volume =       "10",
  number =       "3",
  pages =        "10:1--10:??",
  month =        jun,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3196836",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:50 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "In this article, we study the identity testing problem
                 of arithmetic read-once formulas (ROFs) and some
                 related models. An ROF is a formula (a circuit whose
                 underlying graph is a tree) in which the operations are
                 { +, $ \times $ } and such that every input variable
                 labels at most one leaf. We obtain the first
                 polynomial-time deterministic identity testing
                 algorithm that operates in the black-box setting for
                 ROFs, as well as some other related models. As an
                 application, we obtain the first polynomial-time
                 deterministic reconstruction algorithm for such
                 formulas. Our results are obtained by improving and
                 extending the analysis of the algorithm of Shpilka and
                 Yolkovich [51].",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "10",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Filmus:2018:IPS,
  author =       "Yuval Filmus and Guy Kindler and Elchanan Mossel and
                 Karl Wimmer",
  title =        "Invariance Principle on the Slice",
  journal =      j-TOCT,
  volume =       "10",
  number =       "3",
  pages =        "11:1--11:??",
  month =        jun,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3186590",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:50 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "The non-linear invariance principle of Mossel,
                 O'Donnell, and Oleszkiewicz establishes that if $
                 f(x_1, \ldots, x_n) $ is a multilinear low-degree
                  polynomial with low influences, then the distribution
                  of $ f(B_1, \ldots, B_n) $ is close (in various
                  senses) to the distribution of $ f(G_1, \ldots, G_n) $,
                  where $ B_i \in_R \{ - 1, 1 \} $ are independent
                  Bernoulli random variables and $ G_i \sim N(0, 1) $
                 are independent standard Gaussians. The invariance
                 principle has seen many applications in theoretical
                 computer science, including the Majority is Stablest
                 conjecture, which shows that the Goemans--Williamson
                 algorithm for MAX-CUT is optimal under the Unique Games
                 Conjecture. More generally, MOO's invariance principle
                 works for any two vectors of hypercontractive random
                 variables $ (X_1, \ldots, X_n) $, $ (Y_1, \ldots, Y_n)
                 $ such that (i) Matching moments: $ X_i $ and $ Y_i $
                 have matching first and second moments and (ii)
                 Independence: the variables $ X_1 $, \ldots, $ X_n $
                 are independent, as are $ Y_1 $, \ldots, $ Y_n $. The
                 independence condition is crucial to the proof of the
                 theorem, yet in some cases we would like to use
                 distributions $ X_1 $, \ldots, $ X_n $ in which the
                 individual coordinates are not independent. A common
                 example is the uniform distribution on the slice $
                  \binom{[n]}{k} $, which consists of all vectors $ (x_1,
                 \ldots, x_n) \in \{ 0, 1 \}^n $ with Hamming weight
                 $k$. The slice shows up in theoretical computer science
                 (hardness amplification, direct sum testing), extremal
                 combinatorics (Erd{\H{o}}s--Ko--Rado theorems), and
                 coding theory (in the guise of the Johnson association
                 scheme). Our main result is an invariance principle in
                 which $ (X_1, \ldots, X_n)$ is the uniform distribution
                  on a slice $ \binom{[n]}{p n}$ and $ (Y_1, \ldots, Y_n)$
                 consists either of $n$ independent $ {\em Ber}(p)$
                 random variables, or of $n$ independent $ N(p, p (1 -
                 p))$ random variables. As applications, we prove a
                 version of Majority is Stablest for functions on the
                 slice, a version of Bourgain's tail theorem, a version
                  of the Kindler--Safra structural theorem, and a
                 stability version of the $t$-intersecting
                 Erd{\H{o}}s--Ko--Rado theorem, combining techniques of
                 Wilson and Friedgut. Our proof relies on a combination
                 of ideas from analysis and probability, algebra, and
                 combinatorics. In particular, we make essential use of
                 recent work of the first author which describes an
                 explicit Fourier basis for the slice.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "11",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Thapper:2018:LSR,
  author =       "Johan Thapper and Stanislav Zivn{\'y}",
  title =        "The Limits of {SDP} Relaxations for General-Valued
                 {CSPs}",
  journal =      j-TOCT,
  volume =       "10",
  number =       "3",
  pages =        "12:1--12:??",
  month =        jun,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3201777",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:50 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "It has been shown that for a general-valued constraint
                 language \Gamma the following statements are
                 equivalent: (1) any instance of VCSP( \Gamma ) can be
                 solved to optimality using a constant level of the
                 Sherali--Adams LP hierarchy, (2) any instance of VCSP(
                 \Gamma ) can be solved to optimality using the third
                 level of the Sherali--Adams LP hierarchy, and (3) the
                  support of \Gamma satisfies the ``bounded width
                  condition'' (i.e., it contains weak near-unanimity
                 operations of all arities). We show that if the support
                 of \Gamma violates the bounded width condition then not
                 only is VCSP( \Gamma ) not solved by a constant level
                 of the Sherali--Adams LP hierarchy, but it also
                 requires linear levels of the Lasserre SDP hierarchy
                 (also known as the sum-of-squares SDP hierarchy). For
                 \Gamma corresponding to linear equations in an Abelian
                 group, this result follows from existing work on
                 inapproximability of Max-CSPs. By a breakthrough result
                 of Lee, Raghavendra, and Steurer [STOC'15], our result
                 implies that for any \Gamma whose support violates the
                 bounded width condition no SDP relaxation of polynomial
                 size solves VCSP( \Gamma ). We establish our result by
                 proving that various reductions preserve exact
                 solvability by the Lasserre SDP hierarchy (up to a
                 constant factor in the level of the hierarchy). Our
                 results hold for general-valued constraint languages
                 (i.e., sets of functions on a fixed finite domain that
                 take on rational or infinite values) and thus also hold
                  in notable special cases of $ \{ 0, \infty \} $-valued
                  languages (CSPs), $ \{ 0, 1 \} $-valued languages
                  (Min-CSPs/Max-CSPs), and $Q$-valued languages
                 (finite-valued CSPs).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "12",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Pilipczuk:2018:DMW,
  author =       "Marcin Pilipczuk and Magnus Wahlstr{\"o}m",
  title =        "Directed Multicut is {$ W[1] $}-hard, Even for Four
                 Terminal Pairs",
  journal =      j-TOCT,
  volume =       "10",
  number =       "3",
  pages =        "13:1--13:??",
  month =        jun,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3201775",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:50 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We prove that Multicut in directed graphs,
                  parameterized by the size of the cutset, is $ W[1] $-hard
                 and hence unlikely to be fixed-parameter tractable even
                 if restricted to instances with only four terminal
                 pairs. This negative result almost completely resolves
                 one of the central open problems in the area of
                 parameterized complexity of graph separation problems,
                 posted originally by Marx and Razgon [SIAM J. Comput.
                 43(2):355--388 (2014)], leaving only the case of three
                 terminal pairs open. The case of two terminal pairs was
                 shown to be FPT by Chitnis et al. [SIAM J. Comput.
                 42(4):1674--1696 (2013)]. Our gadget methodology also
                  allows us to prove $ W[1] $-hardness of the Steiner
                 Orientation problem parameterized by the number of
                 terminal pairs, resolving an open problem of Cygan,
                 Kortsarz, and Nutov [SIAM J. Discrete Math.
                 27(3):1503-1513 (2013)].",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "13",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Lee:2018:CPP,
  author =       "Chin Ho Lee and Emanuele Viola",
  title =        "The Coin Problem for Product Tests",
  journal =      j-TOCT,
  volume =       "10",
  number =       "3",
  pages =        "14:1--14:??",
  month =        jun,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3201787",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:50 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "Let X$_{m, \epsilon }$ be the distribution over m bits
                 X$_1$, \ldots{}, X$_m$ where the X$_i$ are independent
                 and each X$_i$ equals 1 with probability (1- \epsilon
                 )/2 and 0 with probability (1 --- \epsilon )/2. We
                 consider the smallest value \epsilon $^*$ of \epsilon
                 such that the distributions X$_{m, \epsilon }$ and
                 X$_{m, 0}$ can be distinguished with constant advantage
                 by a function f: {0,1}$^m$ -{$>$} S, which is the
                 product of k functions f$_1$, f$_2$, \ldots{}, f$_k$ on
                 disjoint inputs of n bits, where each f$_i$: {0,1}$^n$
                 -{$>$} S and m = nk. We prove that \epsilon $^*$ =
                 \Theta (1/\&sqrt; n log k ) if S = [-1,1], while
                 \epsilon $^*$ = \Theta (1/\&sqrt; nk ) if S is the set
                 of unit-norm complex numbers.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "14",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Ganardi:2018:CEF,
  author =       "Moses Ganardi and Danny Hucke and Daniel K{\"o}nig and
                 Markus Lohrey",
  title =        "Circuits and Expressions over Finite Semirings",
  journal =      j-TOCT,
  volume =       "10",
  number =       "4",
  pages =        "15:1--15:??",
  month =        oct,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3241375",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:50 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "The computational complexity of the circuit and
                 expression evaluation problem for finite semirings is
                 considered, where semirings are not assumed to have an
                 additive or a multiplicative identity. The following
                 dichotomy is shown: If a finite semiring is such that
                 (i) the multiplicative semigroup is solvable and (ii)
                 it does not contain a subsemiring with an additive
                  identity 0 and a multiplicative identity $ 1 \neq 0 $, then
                  the circuit evaluation problem is in DET $ \subseteq $
                  NC$^2$, and the expression evaluation problem for the
                  semiring is in TC$^0$. For all other finite semirings,
                  the circuit evaluation problem is P-complete and the
                  expression evaluation problem is NC$^1$-complete. As
                 an application, we determine the complexity of
                 intersection non-emptiness problems for given
                 context-free grammars (regular expressions) with a
                 fixed regular language.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "15",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Bhattacharyya:2018:PTJ,
  author =       "Rishiraj Bhattacharyya and Sourav Chakraborty",
  title =        "Property Testing of Joint Distributions using
                 Conditional Samples",
  journal =      j-TOCT,
  volume =       "10",
  number =       "4",
  pages =        "16:1--16:??",
  month =        oct,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3241377",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:50 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "In this article, we consider the problem of testing
                 properties of joint distributions under the Conditional
                 Sampling framework. In the standard sampling model,
                 sample complexity of testing properties of joint
                 distributions are exponential in the dimension,
                 resulting in inefficient algorithms for practical use.
                 While recent results achieve efficient algorithms for
                 product distributions with significantly smaller sample
                 complexity, no efficient algorithm is expected when the
                 marginals are not independent. In this article, we
                  initiate the study of conditional sampling in the
                 multidimensional setting. We propose a subcube
                 conditional sampling model where the tester can
                  condition on an (adaptively) chosen subcube of the
                 domain. Due to its simplicity, this model is
                 potentially implementable in many practical
                 applications, particularly when the distribution is a
                  joint distribution over $ \Sigma^n $ for some set
                  $ \Sigma $. We present algorithms for various fundamental
                 properties of distributions in the subcube-conditioning
                 model and prove that the sample complexity is
                 polynomial in the dimension n (and not exponential as
                 in the traditional model). We present an algorithm for
                 testing identity to a known distribution using {\~O}(
                 n$^2$ )-subcube-conditional samples, an algorithm for
                 testing identity between two unknown distributions
                 using {\~O}( n$^5$ )-subcube-conditional samples and an
                 algorithm for testing identity to a product
                 distribution using {\~O}( n$^5$ )-subcube-conditional
                 samples. The central concept of our technique involves
                 an elegant chain rule, which can be proved using basic
                 techniques of probability theory, yet it is powerful
                 enough to avoid the curse of dimensionality.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "16",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Guo:2018:USM,
  author =       "Heng Guo and Pinyan Lu",
  title =        "Uniqueness, Spatial Mixing, and Approximation for
                 Ferromagnetic $2$-Spin Systems",
  journal =      j-TOCT,
  volume =       "10",
  number =       "4",
  pages =        "17:1--17:??",
  month =        oct,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3265025",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:50 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We give fully polynomial-time approximation schemes
                 (FPTAS) for the partition function of ferromagnetic
                 2-spin systems in certain parameter regimes. The
                 threshold we obtain is almost tight up to an
                 integrality gap. Our technique is based on the
                 correlation decay framework. The main technical
                 contribution is a new potential function, with which we
                 establish a new kind of spatial mixing.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "17",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Agrawal:2018:SFV,
  author =       "Akanksha Agrawal and Daniel Lokshtanov and Amer E.
                 Mouawad and Saket Saurabh",
  title =        "Simultaneous Feedback Vertex Set: a Parameterized
                 Perspective",
  journal =      j-TOCT,
  volume =       "10",
  number =       "4",
  pages =        "18:1--18:??",
  month =        oct,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3265027",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:50 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "For a family F of graphs, a graph G, and a positive
                 integer k, the F-Deletion problem asks whether we can
                 delete at most k vertices from G to obtain a graph in
                 F. F-Deletion generalizes many classical graph problems
                 such as Vertex Cover, Feedback Vertex Set, and Odd
                 Cycle Transversal. For an integer $ \alpha \geq 1 $, an
                  $n$-vertex (multi) graph $ G = (V, \cup_{i = 1}^{
                 \alpha } E_i)$, where the edge set of $G$ is
                 partitioned into $ \alpha $ color classes, is called an
                 $ \alpha $-edge-colored (multi) graph. A natural
                 extension of the F-Deletion problem to edge-colored
                  graphs is the Simultaneous F-Deletion problem. In the
                 latter problem, we are given an $ \alpha $-edge-colored
                 graph G and the goal is to find a set S of at most k
                  vertices such that each graph G$_i$ - S, where G$_i$
                 = ( V, E$_i$ ) and $ 1 \leq i \leq \alpha $, is in F.
                 In this work, we study Simultaneous F-Deletion for F
                 being the family of forests. In other words, we focus
                 on the Simultaneous Feedback Vertex Set (SimFVS)
                 problem. Algorithmically, we show that, like its
                 classical counterpart, SimFVS parameterized by k is
                 fixed-parameter tractable (FPT) and admits a polynomial
                 kernel, for any fixed constant $ \alpha $. In
                 particular, we give an algorithm running in 2$^{O
                 (\alpha k)}$ n$^{O (1)}$ time and a kernel with O (
                 \alpha k$^{3(\alpha + 1)}$ ) vertices. The running time
                 of our algorithm implies that SimFVS is FPT even when $
                 \alpha \in o (\log n)$. We complement this positive
                 result by showing that if we allow $ \alpha $ to be in
                 $ O(\log n)$, where $n$ is the number of vertices in
                 the input graph, then SimFVS becomes $ W[1]$-hard. In
                 particular, when $ \alpha $ is roughly equal to $ c
                 \log n$, for a non-zero positive constant $c$, the
                 problem becomes $ W[1]$-hard. Our positive results
                 answer one of the open problems posed by Cai and Ye
                 (MFCS 2014).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "18",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Boczkowski:2018:SCP,
  author =       "Lucas Boczkowski and Iordanis Kerenidis and
                 Fr{\'e}d{\'e}ric Magniez",
  title =        "Streaming Communication Protocols",
  journal =      j-TOCT,
  volume =       "10",
  number =       "4",
  pages =        "19:1--19:??",
  month =        oct,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3276748",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Oct 17 17:24:50 MDT 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  abstract =     "We define the Streaming Communication model that
                 combines the main aspects of communication complexity
                 and streaming. Input arrives as a stream, spread
                 between several agents across a network. Each agent has
                 a bounded memory, which can be updated upon receiving a
                 new bit, or a message from another agent. We provide
                 tight tradeoffs between the necessary resources, i.e.,
                 communication between agents and memory, for some of
                 the canonical problems from communication complexity by
                 proving a strong general lower bound technique. Second,
                 we analyze the Approximate Matching problem and show
                 that the complexity of this problem (i.e., the
                 achievable approximation ratio) in the one-way variant
                 of our model is strictly different both from the
                 streaming complexity and the one-way communication
                 complexity thereof.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "19",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Ganardi:2019:UTB,
  author =       "Moses Ganardi and Markus Lohrey",
  title =        "A Universal Tree Balancing Theorem",
  journal =      j-TOCT,
  volume =       "11",
  number =       "1",
  pages =        "1:1--1:??",
  month =        jan,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3278158",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:09 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3278158",
  abstract =     "We present a general framework for balancing
                 expressions (terms) in the form of so-called tree
                 straight-line programs. The latter can be seen as
                 circuits over the free term algebra extended by
                 contexts (terms with a hole) and the operations, which
                 insert terms/contexts into contexts. In Ref. [16], it
                 was shown that one can compute for a given term of size
                 n in logspace a tree straight-line program of depth O
                 (log n ) and size O ( n / log n ). In the present
                 article, it is shown that the conversion can be done in
                 DLOGTIME-uniform TC$^0$. This allows reducing the term
                 evaluation problem over an arbitrary algebra A to the
                 term evaluation problem over a derived two-sorted
                 algebra F ( A ). Three applications are presented: (i)
                 an alternative proof for a recent result by Krebs et
                 al. [25] on the expression evaluation problem is given;
                 (ii) it is shown that expressions for an arbitrary
                 (possibly non-commutative) semiring can be transformed
                 in DLOGTIME-uniform TC$^0$ into equivalent circuits of
                 logarithmic depth and size O ( n / log n ); and, (iii)
                 a corresponding result for regular expressions is
                 shown.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Kayal:2019:RFR,
  author =       "Neeraj Kayal and Vineet Nair and Chandan Saha and
                 S{\'e}bastien Tavenas",
  title =        "Reconstruction of Full Rank Algebraic Branching
                 Programs",
  journal =      j-TOCT,
  volume =       "11",
  number =       "1",
  pages =        "2:1--2:??",
  month =        jan,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3282427",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:09 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3282427",
  abstract =     "An algebraic branching program (ABP) A can be modelled
                  as a product expression $ X_1 \cdot X_2 \cdots {} X_d
                 $, where $ X_1 $ and $ X_d $ are 1 $ \times $ w and w $
                 \times $ 1 matrices, respectively, and every other $
                 X_k $ is a w $ \times $ w matrix; the entries of these
                 matrices are linear forms in m variables over a field F
                 (which we assume to be either Q or a field of
                 characteristic poly( m )). The polynomial computed by A
                 is the entry of the 1 $ \times $ 1 matrix obtained from
                  the product $ \prod_{k = 1}^d X_k $. We say A is a full
                 rank ABP if the $ w^2 (d - 2) + 2 w $ linear forms
                 occurring in the matrices $ X_1, X_2, \ldots {}, X_d $
                 are F-linearly independent. Our main result is a
                 randomized reconstruction algorithm for full rank ABPs:
                 Given blackbox access to an m -variate polynomial f of
                 degree at most m, the algorithm outputs a full rank ABP
                 computing f if such an ABP exists, or outputs ``no full
                 rank ABP exists'' (with high probability). The running
                 time of the algorithm is polynomial in $m$ and $ \beta
                 $, where $ \beta $ is the bit length of the
                 coefficients of $f$. The algorithm works even if $ X_k$
                 is a $ w_{k - 1} \times w_k$ matrix (with $ w_0 = w_d =
                 1$), and $ w = (w_1, \ldots {}, w_{d - 1})$ is unknown.
                 The result is obtained by designing a randomized
                 polynomial time equivalence test for the family of
                 iterated matrix multiplication polynomial IMM$_{w, d}$,
                 the (1, 1)-th entry of a product of d rectangular
                 symbolic matrices whose dimensions are according to $ w
                 \in N^{d - 1}$. At its core, the algorithm exploits a
                 connection between the irreducible invariant subspaces
                 of the Lie algebra of the group of symmetries of a
                 polynomial $f$ that is equivalent to IMM$_{w, d}$ and
                 the ``layer spaces'' of a full rank ABP computing f.
                 This connection also helps determine the group of
                 symmetries of IMM$_{w, d}$ and show that IMM$_{w, d}$
                 is characterized by its group of symmetries.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "2",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Larose:2019:SHC,
  author =       "Beno{\^\i}t Larose and Barnaby Martin and Dani{\"e}l
                 Paulusma",
  title =        "Surjective H-Colouring over Reflexive Digraphs",
  journal =      j-TOCT,
  volume =       "11",
  number =       "1",
  pages =        "3:1--3:??",
  month =        jan,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3282431",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:09 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3282431",
  abstract =     "The Surjective H-Colouring problem is to test if a
                 given graph allows a vertex-surjective homomorphism to
                 a fixed graph H. The complexity of this problem has
                 been well studied for undirected (partially) reflexive
                 graphs. We introduce endo-triviality, the property of a
                 structure that all of its endomorphisms that do not
                 have range of size 1 are automorphisms, as a means to
                 obtain complexity-theoretic classifications of
                 Surjective H-Colouring in the case of reflexive
                 digraphs. Chen (2014) proved, in the setting of
                 constraint satisfaction problems, that Surjective
                 H-Colouring is NP-complete if H has the property that
                 all of its polymorphisms are essentially unary. We give
                 the first concrete application of his result by showing
                 that every endo-trivial reflexive digraph H has this
                 property. We then use the concept of endo-triviality to
                 prove, as our main result, a dichotomy for Surjective
                 H-Colouring when H is a reflexive tournament: if H is
                 transitive, then Surjective H-Colouring is in NL;
                 otherwise, it is NP-complete. By combining this result
                 with some known and new results, we obtain a complexity
                 classification for Surjective H-Colouring when H is a
                 partially reflexive digraph of size at most 3.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "3",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Fulla:2019:CBS,
  author =       "Peter Fulla and Hannes Uppman and Stanislav
                 Zivn{\'y}",
  title =        "The Complexity of {Boolean} Surjective General-Valued
                 {CSPs}",
  journal =      j-TOCT,
  volume =       "11",
  number =       "1",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3282429",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:09 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3282429",
  abstract =     "Valued constraint satisfaction problems (VCSPs) are
                  discrete optimisation problems with a $ (Q \cup \{
                  \infty \})$-valued objective function given as a sum of
                 fixed-arity functions. In Boolean surjective VCSPs,
                 variables take on labels from $ D = \{ 0, 1 \} $, and
                 an optimal assignment is required to use both labels
                 from $D$. Examples include the classical global Min-Cut
                 problem in graphs and the Minimum Distance problem
                 studied in coding theory. We establish a dichotomy
                 theorem and thus give a complete complexity
                 classification of Boolean surjective VCSPs with respect
                 to exact solvability. Our work generalises the
                 dichotomy for $ \{ 0, \infty \} $-valued constraint
                 languages (corresponding to surjective decision CSPs)
                 obtained by Creignou and H{\'e}brard. For the
                 maximisation problem of $ Q_{\geq 0}$-valued surjective
                 VCSPs, we also establish a dichotomy theorem with
                 respect to approximability. Unlike in the case of
                  Boolean surjective (decision) CSPs, a novel tractable
                  class of languages arises that is trivial in
                 the non-surjective setting. This newly discovered
                 tractable class has an interesting mathematical
                 structure related to downsets and upsets. Our main
                 contribution is identifying this class and proving that
                 it lies on the borderline of tractability. A crucial
                 part of our proof is a polynomial-time algorithm for
                 enumerating all near-optimal solutions to a generalised
                 Min-Cut problem, which might be of independent
                 interest.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "4",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Iwama:2019:ROB,
  author =       "Kazuo Iwama and Atsuki Nagao",
  title =        "Read-Once Branching Programs for Tree Evaluation
                 Problems",
  journal =      j-TOCT,
  volume =       "11",
  number =       "1",
  pages =        "5:1--5:??",
  month =        jan,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3282433",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:09 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3282433",
  abstract =     "Toward the ultimate goal of separating L and P, Cook,
                 McKenzie, Wehr, Braverman, and Santhanam introduced the
                 Tree Evaluation Problem ( TEP ). For fixed integers h
                 and k > 0, FT$_h$ ( k ) is given as a complete, rooted
                  binary tree of height h, in which each internal node is
                 associated with a function from [ k ]$^2$ to [ k ], and
                 each leaf node with a number in [ k ]. The value of an
                 internal node v is defined naturally; that is, if it
                 has a function f and the values of its two child nodes
                 are a and b, then the value of v is f ( a, b ). Our
                 task is to compute the value of the root node by
                 sequentially executing this function evaluation in a
                 bottom-up fashion. The problem is obviously in P, and,
                 if we could prove that any branching program solving
                  FT$_h$ ( k ) needs at least k$^{r (h)}$ states for any
                 unbounded function r, then this problem is not in L,
                 thus achieving our goal. The mentioned authors
                 introduced a restriction called thrifty against the
                  structure of BPs (i.e., against the algorithm for
                 solving the problem) and proved that any thrifty BP
                 needs \Omega ( k$^h$ ) states. This article proves a
                 similar lower bound for read-once branching programs,
                 which allows us to get rid of the restriction on the
                 order of nodes read by the BP that is the nature of the
                 thrifty restriction.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "5",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Blais:2019:DTL,
  author =       "Eric Blais and Cl{\'e}ment L. Canonne and Tom Gur",
  title =        "Distribution Testing Lower Bounds via Reductions from
                 Communication Complexity",
  journal =      j-TOCT,
  volume =       "11",
  number =       "2",
  pages =        "6:1--6:??",
  month =        apr,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3305270",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:10 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3305270",
  abstract =     "We present a new methodology for proving distribution
                 testing lower bounds, establishing a connection between
                 distribution testing and the simultaneous message
                 passing (SMP) communication model. Extending the
                 framework of Blais, Brody, and Matulef [15], we show a
                 simple way to reduce (private-coin) SMP problems to
                 distribution testing problems. This method allows us to
                 prove new distribution testing lower bounds, as well as
                 to provide simple proofs of known lower bounds. Our
                 main result is concerned with testing identity to a
                 specific distribution, p, given as a parameter. In a
                 recent and influential work, Valiant and Valiant [55]
                 showed that the sample complexity of the aforementioned
                  problem is closely related to the $ \ell_{2 / 3}$-quasinorm
                  of p. We obtain alternative bounds on the
                 complexity of this problem in terms of an arguably more
                 intuitive measure and using simpler proofs. More
                 specifically, we prove that the sample complexity is
                 essentially determined by a fundamental operator in the
                 theory of interpolation of Banach spaces, known as
                 Peetre's K-functional. We show that this quantity is
                 closely related to the size of the effective support of
                 p (loosely speaking, the number of supported elements
                 that constitute the vast majority of the mass of p ).
                 This result, in turn, stems from an unexpected
                 connection to functional analysis and refined
                 concentration of measure inequalities, which arise
                 naturally in our reduction.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "6",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Cai:2019:GAG,
  author =       "Jin-Yi Cai and Michael Kowalczyk and Tyson Williams",
  title =        "Gadgets and Anti-Gadgets Leading to a Complexity
                 Dichotomy",
  journal =      j-TOCT,
  volume =       "11",
  number =       "2",
  pages =        "7:1--7:??",
  month =        apr,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3305272",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:10 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3305272",
  abstract =     "We introduce an idea called anti-gadgets for
                 reductions in complexity theory. These anti-gadgets are
                 presented as graph fragments, but their effect is
                 equivalent to erasing the presence of other graph
                 fragments, as if we had managed to include a negative
                 copy of a certain graph gadget. We use this idea to
                 prove a complexity dichotomy theorem for the partition
                 function Z(G) of spin systems over 3-regular directed
                  graphs G, $$ Z (G) = \sum_{\sigma : V (G) \to \{ 0, 1
                  \}} \prod_{(u, v) \in E (G)} f(\sigma (u), \sigma (v))
                  $$, where each edge is given a (not necessarily
                  symmetric) complex-valued binary function $ f : \{ 0,
                  1 \}^2 \to C $. We show that $ Z(G) $ is either
                 computable in polynomial time or \#P-hard, depending
                 explicitly on $f$. When the input graph $G$ is planar,
                 there is an additional class of polynomial time
                 computable partition functions $ Z(G)$, while
                 everything else remains \#P-hard. Furthermore, this
                 additional class is precisely those that can be
                 transformed by a holographic reduction to matchgates,
                 followed by the Fisher--Kasteleyn--Temperley algorithm
                 via Pfaffians.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "7",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Dinesh:2019:CLB,
  author =       "Krishnamoorthy Dinesh and Sajin Koroth and Jayalal
                 Sarma",
  title =        "Characterization and Lower Bounds for Branching
                 Program Size using Projective Dimension",
  journal =      j-TOCT,
  volume =       "11",
  number =       "2",
  pages =        "8:1--8:??",
  month =        apr,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3305274",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:10 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3305274",
  abstract =     "We study projective dimension, a graph parameter,
                 denoted by pd( G ) for a bipartite graph G, introduced
                 by Pudl{\'a}k and R{\"o}dl (1992). For a Boolean
                 function f (on n bits), Pudl{\'a}k and R{\"o}dl
                 associated a bipartite graph G$_f$ and showed that size
                 of the optimal branching program computing f, denoted
                 by bpsize( f ), is at least pd( G$_f$ ) (also denoted
                 by pd( f )). Hence, proving lower bounds for pd( f )
                 implies lower bounds for bpsize( f ). Despite several
                 attempts (Pudl{\'a}k and R{\"o}dl (1992), R{\'o}nyai et
                 al. (2000)), proving super-linear lower bounds for
                 projective dimension of explicit families of graphs has
                  remained elusive. We observe that there exists a Boolean
                 function f for which the gap between the pd( f ) and
                 bpsize( f ) is 2$^{ \Omega (n)}$. Motivated by the
                 argument in Pudl{\'a}k and R{\"o}dl (1992), we define
                 two variants of projective dimension: projective
                 dimension with intersection dimension 1, denoted by
                 upd( f ), and bitwise decomposable projective
                 dimension, denoted by bitpdim( f ). We show the
                  following results: (a) We observe that there exists a
                 Boolean function f for which the gap between upd( f )
                 and bpsize( f ) is 2$^{ \Omega (n)}$. In contrast, we
                 also show that the bitwise decomposable projective
                 dimension characterizes size of the branching program
                 up to a polynomial factor. That is, there exists a
                  constant c > 0 such that for any function f, bitpdim( f )/6
                  \leq bpsize( f ) \leq (bitpdim( f ))$^c$. (b) We
                 introduce a new candidate family of functions f for
                 showing super-polynomial lower bounds for bitpdim( f ).
                 As our main result, for this family of functions, we
                 demonstrate gaps between pd( f ) and the above two new
                  measures for f: pd( f ) = O ( \sqrt {n} ), upd( f ) =
                  \Omega ( n ), and bitpdim( f ) = \Omega ( n$^{1.5}$ / log n
                  ). We adapt Nechiporuk's techniques for our linear
                 algebraic setting to prove the best-known bpsize lower
                 bounds for bitpdim. Motivated by this linear algebraic
                 setting of our main result, we derive exponential lower
                 bounds for two restricted variants of pd( f ) and upd(
                 f ) by observing that they are exactly equal to
                  well-studied graph parameters: bipartite clique cover
                 number and bipartite partition number, respectively.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "8",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Kerenidis:2019:MPP,
  author =       "Iordanis Kerenidis and Adi Ros{\'e}n and Florent
                 Urrutia",
  title =        "Multi-Party Protocols, Information Complexity and
                 Privacy",
  journal =      j-TOCT,
  volume =       "11",
  number =       "2",
  pages =        "9:1--9:??",
  month =        apr,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3313230",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:10 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3313230",
  abstract =     "We introduce a new information-theoretic measure,
                 which we call Public Information Complexity (PIC), as a
                 tool for the study of multi-party computation
                 protocols, and of quantities such as their
                 communication complexity, or the amount of randomness
                 they require in the context of information-theoretic
                 private computations. We are able to use this measure
                 directly in the natural asynchronous message-passing
                 peer-to-peer model and show a number of interesting
                 properties and applications of our new notion: The
                 Public Information Complexity is a lower bound on the
                 Communication Complexity and an upper bound on the
                 Information Complexity; the difference between the
                 Public Information Complexity and the Information
                 Complexity provides a lower bound on the amount of
                 randomness used in a protocol; any communication
                 protocol can be compressed to its Public Information
                  Cost; and we give an explicit calculation of the zero-error
                 Public Information Complexity of the k -party, n -bit
                 Parity function, where a player outputs the bitwise
                 parity of the inputs. The latter result also
                 establishes that the amount of randomness needed by a
                 private protocol that computes this function is \Omega
                 ( n ).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "9",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Ramya:2019:LBS,
  author =       "C. Ramya and B. V. Raghavendra Rao",
  title =        "Lower bounds for Sum and Sum of Products of Read-once
                 Formulas",
  journal =      j-TOCT,
  volume =       "11",
  number =       "2",
  pages =        "10:1--10:??",
  month =        apr,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3313232",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:10 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3313232",
  abstract =     "We study limitations of polynomials computed by
                 depth-2 circuits built over read-once formulas (ROFs).
                 In particular: (*) We prove a $ 2^{ \Omega (n)} $ lower
                 bound for the sum of ROFs computing the $ 2 n$-variate
                 polynomial in VP defined by Raz and Yehudayoff. (*) We
                 obtain a $ 2^{ \Omega (\sqrt {n})}$ lower bound on the
                 size of $ \Sigma \Pi^{[n^{1 / 15}]}$ arithmetic
                 circuits built over restricted ROFs of unbounded depth
                 computing the permanent of an $ n \times n$ matrix
                 (superscripts on gates denote bound on the fan-in). The
                 restriction is that the number of variables with +
                  gates as a parent in a proper subformula of the ROF
                 has to be bounded by $ \sqrt {n}$. This proves an
                 exponential lower bound for a subclass of possibly
                 non-multilinear formulas of unbounded depth computing
                 the permanent polynomial. (*) We also show an
                 exponential lower bound for the above model against a
                 polynomial in VP. (*) Finally, we observe that the
                 techniques developed yield an exponential lower bound
                 on the size of $ \Sigma \Pi^{[N^{1 / 30}]}$ arithmetic
                  circuits built over syntactically multilinear $ \Sigma
                 \Pi \Sigma^{[N^{1 / 4}]}$ arithmetic circuits computing
                 a product of variable disjoint linear forms on $N$
                 variables, where the superscripts on gates denote bound
                 on the fan-in. Our proof techniques are built on the
                 measure developed by Kumar et al. [14] and are based on
                 a non-trivial analysis of ROFs under random partitions.
                 Further, our results exhibit strengths and provide more
                 insight into the lower bound techniques introduced by
                 Raz [19].",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "10",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Kolay:2019:CCG,
  author =       "Sudeshna Kolay and Fahad Panolan and Saket Saurabh",
  title =        "Communication Complexity and Graph Families",
  journal =      j-TOCT,
  volume =       "11",
  number =       "2",
  pages =        "11:1--11:??",
  month =        apr,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3313234",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:10 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3313234",
  abstract =     "Given a graph G and a pair ( F$_1$, F$_2$ ) of graph
                  families, the function GDISJ$_{G, F_1, F_2}$ takes as
                  input two induced subgraphs G$_1$ and G$_2$ of G, such
                  that G$_1$ \in F$_1$ and G$_2$ \in F$_2$, and returns 1
                  if V ( G$_1$ ) \cap V ( G$_2$ ) = \emptyset and 0
                 otherwise. We study the communication complexity of
                 this problem in the two-party model. In particular, we
                 look at pairs of hereditary graph families. We show
                 that the communication complexity of this function,
                 when the two graph families are hereditary, is
                 sublinear if and only if there are finitely many graphs
                 in the intersection of these two families. Then, using
                 concepts from parameterized complexity, we obtain
                 nuanced upper bounds on the communication complexity of
                  GDISJ$_{G, F_1, F_2}$. A concept related to
                 communication protocols is that of a ( F$_1$, F$_2$
                  )-separating family of a graph G. A collection $ {\cal F}$
                  of subsets of V ( G ) is called a ( F$_1$, F$_2$
                  )-separating family for G if, for any two vertex-disjoint
                  induced subgraphs G$_1$ \in F$_1$ and G$_2$ \in F$_2$,
                  there is a set $ F \in {\cal F}$ with V ( G$_1$ ) \subseteq F
                  and V ( G$_2$ ) \cap F = \emptyset . Given a graph G on n
                 vertices, for any pair ( F$_1$, F$_2$ ) of hereditary
                 graph families with sublinear communication complexity
                  for GDISJ$_{G, F_1, F_2}$, we give an enumeration
                 algorithm that finds a subexponential sized ( F$_1$,
                 F$_2$ )-separating family. In fact, we give an
                 enumeration algorithm that finds a 2$^{o (k)}$ n$^{O
                 (1)}$ sized ( F$_1$, F$_2$ )-separating family, where k
                  denotes the size of a minimum-sized set S of vertices
                  such that V ( G ) \setminus S has a bipartition ( V$_1$, V$_2$ )
                 with G [ V$_1$ ] \in F$_1$ and G [ V$_2$ ] \in F$_2$.
                 We exhibit a wide range of applications for these
                 separating families, to obtain combinatorial bounds,
                 enumeration algorithms, as well as exact and FPT
                 algorithms for several problems.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "11",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Schoenebeck:2019:BWC,
  author =       "Grant Schoenebeck and Biaoshuai Tao",
  title =        "Beyond Worst-case (In)approximability of Nonsubmodular
                 Influence Maximization",
  journal =      j-TOCT,
  volume =       "11",
  number =       "3",
  pages =        "12:1--12:??",
  month =        jun,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3313904",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:10 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3313904",
  abstract =     "We consider the problem of maximizing the spread of
                 influence in a social network by choosing a fixed
                 number of initial seeds, formally referred to as the
                 influence maximization problem. It admits a
                 (1-1/e)-factor approximation algorithm if the influence
                 function is submodular. Otherwise, in the worst case,
                 the problem is NP-hard to approximate to within a
                 factor of N$^{1 - \epsilon }$. This article studies
                 whether this worst-case hardness result can be
                 circumvented by making assumptions about either the
                 underlying network topology or the cascade model. All
                 our assumptions are motivated by many real-life social
                 network cascades. First, we present strong
                 inapproximability results for a very restricted class
                 of networks called the (stochastic) hierarchical
                 blockmodel, a special case of the well-studied
                 (stochastic) blockmodel in which relationships between
                 blocks admit a tree structure. We also provide a
                 dynamic-programming-based polynomial time algorithm,
                 which optimally computes a directed variant of the
                 influence maximization problem on hierarchical
                 blockmodel networks. Our algorithm indicates that the
                 inapproximability result is due to the bidirectionality
                 of influence between agent-blocks. Second, we present
                 strong inapproximability results for a class of
                 influence functions that are ``almost'' submodular,
                 called 2-quasi-submodular. Our inapproximability
                 results hold even for any 2-quasi-submodular f fixed in
                 advance. This result also indicates that the
                 ``threshold'' between submodularity and
                  nonsubmodularity is sharp with respect to the
                  approximability of influence maximization.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "12",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Bonamy:2019:TLB,
  author =       "Marthe Bonamy and {\l}Ukasz Kowalik and Micha{\l}
                 Pilipczuk and Arkadiusz Soca{\l}a and Marcin Wrochna",
  title =        "Tight Lower Bounds for the Complexity of
                 Multicoloring",
  journal =      j-TOCT,
  volume =       "11",
  number =       "3",
  pages =        "13:1--13:??",
  month =        jun,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3313906",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:10 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3313906",
  abstract =     "In the multicoloring problem, also known as ( a: b )-
                 coloring or b-fold coloring, we are given a graph G and
                 a set of a colors, and the task is to assign a subset
                 of b colors to each vertex of G so that adjacent
                 vertices receive disjoint color subsets. This natural
                 generalization of the classic coloring problem (the b
                 =1 case) is equivalent to finding a homomorphism to the
                 Kneser graph KG$_{a, b}$ and gives relaxations
                 approaching the fractional chromatic number. We study
                 the complexity of determining whether a graph has an (
                 a: b )-coloring. Our main result is that this problem
                  does not admit an algorithm with runtime $ f(b) \cdot
                  2^{o(\log b) \cdot n} $ for any computable f(b) unless the
                  Exponential Time Hypothesis (ETH) fails. A $ (b + 1)^n
                  \cdot $ poly( n )-time algorithm due to Nederlof [33] shows
                 that this is tight. A direct corollary of our result is
                 that the graph homomorphism problem does not admit a
                 2$^{O (n + h)}$ algorithm unless the ETH fails even if
                 the target graph is required to be a Kneser graph. This
                 refines the understanding given by the recent lower
                 bound of Cygan et al. [9]. The crucial ingredient in
                 our hardness reduction is the usage of detecting
                 matrices of Lindstr{\"o}m [28], which is a
                 combinatorial tool that, to the best of our knowledge,
                 has not yet been used for proving complexity lower
                  bounds. As a side result, we prove that the runtimes of
                 the algorithms of Abasi et al. [1] and of Gabizon et
                 al. [14] for the r -monomial detection problem are
                 optimal under the ETH.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "13",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Black:2019:MPU,
  author =       "Timothy Black",
  title =        "Monotone Properties of $k$-Uniform Hypergraphs Are
                 Weakly Evasive",
  journal =      j-TOCT,
  volume =       "11",
  number =       "3",
  pages =        "14:1--14:??",
  month =        jun,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3313908",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:10 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3313908",
  abstract =     "A Boolean function in n variables is weakly evasive if
                 its decision-tree complexity is \Omega ( n ). By k
                 -graphs, we mean k -uniform hypergraphs. A k-graph
                  property on v vertices is a Boolean function on $ n =
                  \binom {v}{k} $ variables corresponding to the k -subsets of a
                 v -set that is invariant under the v! permutations of
                 the v -set (isomorphisms of k -graphs). In 1976, Rivest
                 and Vuillemin proved that all nonconstant monotone
                 graph properties ( k = 2) are weakly evasive,
                 confirming a conjecture of Aanderaa and Rosenberg in
                 1973. Then, in 2013, Kulkarni, Qiao, and Sun (KQS)
                 proved the analogous result for 3-graphs. We extend
                 these results to k -graphs for every fixed k. From
                 this, we show that monotone Boolean functions invariant
                 under the action of a large primitive group are weakly
                 evasive. Although KQS employ the powerful topological
                 approach of Kahn et al. in 1984 combined with heavy
                 number theory, our argument is elementary and
                 self-contained (modulo some basic group theory).
                 Inspired by the outline of the KQS approach, we
                 formalize the general framework of ``orbit augmentation
                 sequences'' of sets with group actions. We show that a
                 parameter of such sequences, called the spacing, is a
                 lower bound on the decision-tree complexity for any
                 nontrivial monotone property that is \Gamma -invariant
                 for all groups \Gamma involved in the orbit
                 augmentation sequence, assuming all those groups are p
                 -groups. We develop operations on such sequences such
                 as composition and direct product that will provide
                 helpful machinery for our applications. We apply this
                 general technique to k -graphs via certain liftings of
                 k -graphs with wreath product action of p -groups.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "14",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Aviv:2019:ELG,
  author =       "Nir Aviv and Amnon Ta-Shma",
  title =        "On the Entropy Loss and Gap of Condensers",
  journal =      j-TOCT,
  volume =       "11",
  number =       "3",
  pages =        "15:1--15:??",
  month =        jun,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3317691",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:10 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/hash.bib;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3317691",
  abstract =     "Many algorithms are proven to work under the
                 assumption that they have access to a source of random,
                 uniformly distributed bits. However, in practice,
                 sources of randomness are often imperfect, giving n
                 random bits that have only k < n min-entropy. The value
                  $ n - k $ is called the entropy gap of the source.
                 Randomness condensers are hash functions that hash any
                 such source to a shorter source with reduced entropy
                 gap g. The goal is to lose as little entropy as
                 possible in this process. Condensers also have an error
                 parameter \epsilon and use a small seed of uniformly
                 distributed bits whose length we desire to minimize as
                 well. In this work, we study the exact dependencies
                 between the different parameters of seeded randomness
                 condensers. We obtain a non-explicit upper bound,
                 showing the existence of condensers with entropy loss
                  log (1 + (log 1 / \epsilon) / g ) + O (1) and seed length
                  log (( n - k ) / \epsilon g ) + O (1). In particular,
                 this implies the existence of condensers with O (log 1
                 / \epsilon ) entropy gap and constant entropy loss.
                 This extends (with slightly improved parameters) the
                 non-explicit upper bound for condensers presented in
                 the work of Dodis et al. (2014), which gives condensers
                 with entropy loss at least log log 1 / \epsilon . We
                 also give a non-explicit upper bound for lossless
                 condensers, which have entropy gap g \geq log 1 /
                  \epsilon / \epsilon + O (1) and seed length log (( n -
                  k ) / \epsilon$^2$ g ) + O (1). Furthermore, we address
                 an open question raised in (Dodis et al. 2014), where
                 Dodis et al. showed an explicit construction of
                 condensers with constant gap and O (log log 1/ \epsilon
                 ) loss, using seed length O ( n log 1 / \epsilon ). In
                 the same article they improve the seed length to O ( k
                 log k ) and ask whether it can be further improved. In
                 this work, we reduce the seed length of their
                  construction to O (log ( n / \epsilon ) log ( k /
                  \epsilon )) by a simple concatenation. In the
                 analysis, we use and prove a tight equivalence between
                 condensers and extractors with multiplicative error. We
                 note that a similar, but non-tight, equivalence was
                 already proven by Dodis et al. (Dodis et al. 2014)
                 using a weaker variant of extractors called
                 unpredictability extractors. We also remark that this
                 equivalence underlies the work of Ben-Aroya et al.
                 (Ben-Aroya et al. 2016) and later work on explicit
                 two-source extractors, and we believe it is interesting
                 in its own right.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "15",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Aydinlioglu:2019:CAR,
  author =       "Baris Aydinlioglu and Eric Bach",
  title =        "Corrigendum to Affine Relativization: Unifying the
                 Algebrization and Relativization Barriers",
  journal =      j-TOCT,
  volume =       "11",
  number =       "3",
  pages =        "16:1--16:??",
  month =        jun,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3317693",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:10 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3317693",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "16",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Goldreich:2019:SLT,
  author =       "Oded Goldreich and Tom Gur and Ilan Komargodski",
  title =        "Strong Locally Testable Codes with Relaxed Local
                 Decoders",
  journal =      j-TOCT,
  volume =       "11",
  number =       "3",
  pages =        "17:1--17:??",
  month =        jun,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3319907",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:10 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3319907",
  abstract =     "Locally testable codes (LTCs) are error-correcting
                 codes that admit very efficient codeword tests. An LTC
                 is said to be strong if it has a proximity-oblivious
                 tester, that is, a tester that makes only a constant
                 number of queries and rejects non-codewords with a
                 probability that depends solely on their distance from
                 the code. Locally decodable codes (LDCs) are
                 complementary to LTCs. While the latter allow for
                 highly efficient rejection of strings that are far from
                 being codewords, LDCs allow for highly efficient
                 recovery of individual bits of the information that is
                 encoded in strings that are close to being codewords.
                 Constructions of strong-LTCs with nearly-linear length
                 are known, but the existence of a constant-query LDC
                 with polynomial length is a major open problem. In an
                 attempt to bypass this barrier, Ben-Sasson et al.
                 (SICOMP 2006) introduced a natural relaxation of local
                 decodability, called relaxed-LDCs. This notion requires
                 local recovery of nearly all individual
                 information-bits, yet allows for recovery-failure (but
                 not error) on the rest. Ben-Sasson et al. constructed a
                 constant-query relaxed-LDC with nearly-linear length
                 (i.e., length k$^{1 + \alpha }$ for an arbitrarily
                 small constant \alpha > 0, where k is the dimension of
                 the code). This work focuses on obtaining strong
                 testability and relaxed decodability simultaneously. We
                 construct a family of binary linear codes of
                 nearly-linear length that are both strong-LTCs (with
                 one-sided error) and constant-query relaxed-LDCs. This
                 improves upon the previously known constructions, which
                 either obtain only weak LTCs or require polynomial
                 length. Our construction heavily relies on tensor codes
                 and PCPs. In particular, we provide strong canonical
                 PCPs of proximity for membership in any linear code
                 with constant rate and relative distance. Loosely
                 speaking, these are PCPs of proximity wherein the
                 verifier is proximity oblivious (similarly to
                 strong-LTCs) and every valid statement has a unique
                 canonical proof. Furthermore, the verifier is required
                 to reject non-canonical proofs (even for valid
                 statements). As an application, we improve the best
                 known separation result between the complexity of
                 decision and verification in the setting of property
                 testing.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "17",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Agrawal:2019:SCU,
  author =       "Akanksha Agrawal and Daniel Lokshtanov and Saket
                 Saurabh and Meirav Zehavi",
  title =        "Split Contraction: The Untold Story",
  journal =      j-TOCT,
  volume =       "11",
  number =       "3",
  pages =        "18:1--18:??",
  month =        jun,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3319909",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:10 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3319909",
  abstract =     "The edit operation that contracts edges, which is a
                 fundamental operation in the theory of graph minors,
                 has recently gained substantial scientific attention
                 from the viewpoint of Parameterized Complexity. In this
                 article, we examine an important family of graphs,
                 namely, the family of split graphs, which in the
                 context of edge contractions is proven to be
                 significantly less obedient than one might expect.
                  Formally, given a graph G and an integer k, SPLIT
                 CONTRACTION asks whether there exists X \subseteq E ( G
                 ) such that G / X is a split graph and | X | \leq k.
                 Here, G / X is the graph obtained from G by contracting
                 edges in X. Guo and Cai [Theoretical Computer Science,
                 2015] claimed that SPLIT CONTRACTION is fixed-parameter
                 tractable. However, our findings are different. We show
                 that SPLIT CONTRACTION, despite its deceptive
                 simplicity, is W[1]-hard. Our main result establishes
                 the following conditional lower bound: Under the
                 Exponential Time Hypothesis, SPLIT CONTRACTION cannot
                  be solved in time $ 2^{o (l^2)} \cdot n^{O (1)} $,
                  where $l$ is the vertex cover number of the input graph.
                 We also verify that this lower bound is essentially
                 tight. To the best of our knowledge, this is the first
                  tight lower bound of the form $ 2^{o (l^2)} \cdot
                  n^{O (1)} $ for problems parameterized by the vertex
                 cover number of the input graph. In particular, our
                 approach to obtain this lower bound borrows the notion
                 of harmonious coloring from Graph Theory, and might be
                 of independent interest.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "18",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Viola:2019:CEP,
  author =       "Emanuele Viola",
  title =        "Constant-Error Pseudorandomness Proofs from Hardness
                 Require Majority",
  journal =      j-TOCT,
  volume =       "11",
  number =       "4",
  pages =        "19:1--19:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3322815",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:11 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/prng.bib;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3322815",
  abstract =     "Research in the 1980s and 1990s showed how to
                 construct a pseudorandom generator from a function that
                 is hard to compute on more than 99\% of the inputs. A
                 more recent line of works showed, however, that if the
                 generator has small error, then the proof of
                 correctness cannot be implemented in subclasses of
                 TC$^0$, and hence the construction cannot be applied to
                 the known hardness results. This article considers a
                 typical class of pseudorandom generator constructions,
                 and proves an analogous result for the case of large
                 error.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "19",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Haviv:2019:MFS,
  author =       "Ishay Haviv",
  title =        "On Minrank and Forbidden Subgraphs",
  journal =      j-TOCT,
  volume =       "11",
  number =       "4",
  pages =        "20:1--20:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3322817",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:11 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3322817",
  abstract =     "The minrank over a field $F$ of a graph $G$ on the
                 vertex set $ \{ 1, 2, \ldots, n \} $ is the minimum
                 possible rank of a matrix $ M \in F^{n \times n}$ such
                 that $ M_{i, i} \neq 0$ for every $i$, and $ M_{i, j} =
                 0$ for every distinct non-adjacent vertices $i$ and $j$
                 in $G$. For an integer $n$, a graph $H$, and a field
                 $F$, let $ g(n, H, F)$ denote the maximum possible
                 minrank over $F$ of an $n$-vertex graph whose
                 complement contains no copy of $H$. In this article, we
                 study this quantity for various graphs $H$ and fields
                 $F$. For finite fields, we prove by a probabilistic
                 argument a general lower bound on $ g(n, H, F)$, which
                  yields a nearly tight bound of $ \Omega (\sqrt {n} / \log
                  n)$ for the triangle $ H = K_3$. For the real field,
                 we prove by an explicit construction that for every
                 non-bipartite graph $H$, $ g(n, H, R) \geq n^\delta $
                 for some $ \delta = \delta (H) > 0$. As a by-product of
                 this construction, we disprove a conjecture of
                 Codenotti et al. [11]. The results are motivated by
                 questions in information theory, circuit complexity,
                 and geometry.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "20",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Boppana:2019:BIV,
  author =       "Ravi Boppana and Johan H{\aa}stad and Chin Ho Lee and
                 Emanuele Viola",
  title =        "Bounded Independence versus Symmetric Tests",
  journal =      j-TOCT,
  volume =       "11",
  number =       "4",
  pages =        "21:1--21:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3337783",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:11 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3337783",
  abstract =     "For a test T \subseteq {0, 1}$^n$, define k$^*$ ( T )
                 to be the maximum k such that there exists a k -wise
                 uniform distribution over {0, 1}$^n$ whose support is a
                 subset of T. For H$_t$ = { x \in 0, 1$^n$: | \Sigma
                 $_i$ x$_i$ --- n /2| $<$ = t }, we prove k$^*$ ( H$_t$
                 ) = \Theta ( t$^2$ / n + 1). For S$_{m, c}$ = { x \in
                 0, 1$^n$: \Sigma $_i$ x$_i$ \equiv c (mod m )}, we
                 prove that k$^*$ ( S$_{m, c}$ ) = \Theta ( n / m$^2$ ).
                 For some k = O ( n / m ) we also show that any k -wise
                 uniform distribution puts probability mass at most 1/ m
                 + 1/100 over S$_{m, c}$. Finally, for any fixed odd m
                 we show that there is an integer k = (1 --- \Omega (1))
                 n such that any k -wise uniform distribution lands in T
                 with probability exponentially close to | S$_{m, c}$
                 |/2$^n$; and this result is false for any even m.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "21",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Oliveira:2019:RMB,
  author =       "Mateus {De Oliveira Oliveira} and Pavel Pudl{\'a}k",
  title =        "Representations of Monotone {Boolean} Functions by
                 Linear Programs",
  journal =      j-TOCT,
  volume =       "11",
  number =       "4",
  pages =        "22:1--22:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3337787",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:11 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3337787",
  abstract =     "We introduce the notion of monotone linear programming
                 circuits (MLP circuits), a model of computation for
                 partial Boolean functions. Using this model, we prove
                  the following results. (1) MLP circuits are
                 superpolynomially stronger than monotone Boolean
                 circuits. (2) MLP circuits are exponentially stronger
                 than monotone span programs over the reals. (3) MLP
                 circuits can be used to provide monotone feasibility
                 interpolation theorems for Lov{\'a}sz-Schrijver proof
                 systems and for mixed Lov{\'a}sz-Schrijver proof
                 systems. (4) The Lov{\'a}sz-Schrijver proof system
                 cannot be polynomially simulated by the cutting planes
                 proof system. Finally, we establish connections between
                 the problem of proving lower bounds for the size of MLP
                 circuits and the field of extension complexity of
                 polytopes.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "22",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Goldberg:2019:APC,
  author =       "Leslie Ann Goldberg and Mark Jerrum",
  title =        "Approximating Pairwise Correlations in the {Ising}
                 Model",
  journal =      j-TOCT,
  volume =       "11",
  number =       "4",
  pages =        "23:1--23:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3337785",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:11 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3337785",
  abstract =     "In the Ising model, we consider the problem of
                 estimating the covariance of the spins at two specified
                 vertices. In the ferromagnetic case, it is easy to
                 obtain an additive approximation to this covariance by
                 repeatedly sampling from the relevant Gibbs
                 distribution. However, we desire a multiplicative
                 approximation, and it is not clear how to achieve this
                 by sampling, given that the covariance can be
                 exponentially small. Our main contribution is a fully
                 polynomial time randomised approximation scheme (FPRAS)
                 for the covariance in the ferromagnetic case. We also
                 show that the restriction to the ferromagnetic case is
                  essential: there is no FPRAS for multiplicatively
                 estimating the covariance of an antiferromagnetic Ising
                 model unless RP = \#P. In fact, we show that even
                 determining the sign of the covariance is \#P-hard in
                 the antiferromagnetic case.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "23",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Blais:2019:TJT,
  author =       "Eric Blais and Cl{\'e}ment L. Canonne and Talya Eden
                 and Amit Levi and Dana Ron",
  title =        "Tolerant Junta Testing and the Connection to
                 Submodular Optimization and Function Isomorphism",
  journal =      j-TOCT,
  volume =       "11",
  number =       "4",
  pages =        "24:1--24:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3337789",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:11 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3337789",
  abstract =     "A function $ f : \{ - 1, 1 \}^n \to \{ - 1, 1 \} $ is
                 a $k$-junta if it depends on at most $k$ of its
                 variables. We consider the problem of tolerant testing
                  of $k$-juntas, where the testing algorithm must accept
                 any function that is $ \epsilon $-close to some
                 $k$-junta and reject any function that is $ \epsilon
                 '$-far from every $ k'$-junta for some $ \epsilon ' =
                 O(\epsilon)$ and $ k' = O(k)$. Our first result is an
                 algorithm that solves this problem with query
                 complexity polynomial in $k$ and $ 1 / \epsilon $. This
                 result is obtained via a new polynomial-time
                 approximation algorithm for submodular function
                 minimization (SFM) under large cardinality constraints,
                 which holds even when only given an approximate oracle
                 access to the function. Our second result considers the
                 case where $ k' = k$. We show how to obtain a smooth
                 tradeoff between the amount of tolerance and the query
                 complexity in this setting. Specifically, we design an
                 algorithm that, given $ \rho \in (0, 1)$, accepts any
                 function that is $ \epsilon \rho / 16$-close to some
                 $k$-junta and rejects any function that is $ \epsilon
                 $-far from every $k$-junta. The query complexity of the
                 algorithm is $ O(k \log k / \epsilon \rho (1 -
                 \rho)^k)$. Finally, we show how to apply the second
                 result to the problem of tolerant isomorphism testing
                 between two unknown Boolean functions $f$ and $g$. We
                 give an algorithm for this problem whose query
                 complexity only depends on the (unknown) smallest $k$
                 such that either $f$ or $g$ is close to being a
                 $k$-junta.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "24",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Scheder:2019:PMB,
  author =       "Dominik Scheder",
  title =        "{PPSZ} for $ k \geq 5 $: More Is Better",
  journal =      j-TOCT,
  volume =       "11",
  number =       "4",
  pages =        "25:1--25:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3349613",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:11 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3349613",
  abstract =     "We show that for k \geq 5, the PPSZ algorithm for k
                 -SAT runs exponentially faster if there is an
                 exponential number of satisfying assignments. More
                 precisely, we show that for every k \geq 5, there is a
                 strictly increasing function f: [0,1] -{$>$} R with f
                 (0) = 0 that has the following property. If F is a k
                 -CNF formula over n variables and |sat(F)| = 2$^{
                 \delta n}$ solutions, then PPSZ finds a satisfying
                 assignment with probability at least 2$^{- c k n - - -
                 o (n) + f (\delta) n}$. Here, 2$^{- c k n - o (n)}$ is
                 the success probability proved by Paturi et al. [11]
                 for k -CNF formulas with a unique satisfying
                 assignment. Our proof rests on a combinatorial lemma:
                 given a set S \subseteq { 0,1}$^n$, we can partition {
                 0,1}$^n$ into subcubes such that each subcube B
                 contains exactly one element of S. Such a partition B
                 induces a distribution on itself, via Pr [ B ] = |B| /
                 2$^n$ for each B \in B. We are interested in partitions
                 that induce a distribution of high entropy. We show
                 that, in a certain sense, the worst case (min$_{S : |S|
                 = s}$ max$_B$ H ( B )) is achieved if S is a Hamming
                 ball. This lemma implies that every set S of
                 exponential size allows a partition of linear entropy.
                 This in turn leads to an exponential improvement of the
                 success probability of PPSZ.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "25",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Beyersdorff:2019:NRB,
  author =       "Olaf Beyersdorff and Leroy Chew and Mikol{\'a}s
                 Janota",
  title =        "New Resolution-Based {QBF} Calculi and Their Proof
                 Complexity",
  journal =      j-TOCT,
  volume =       "11",
  number =       "4",
  pages =        "26:1--26:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3352155",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:11 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3352155",
  abstract =     "Modern QBF solvers typically use two different
                 paradigms, conflict-driven clause learning (CDCL)
                 solving or expansion solving. Proof systems for
                 quantified Boolean formulas (QBFs) provide a
                 theoretical underpinning for the performance of these
                 solvers, with Q-Resolution and its extensions relating
                 to CDCL solving and \forall Exp+Res relating to
                 expansion solving. This article defines two novel
                 calculi, which are resolution-based and enable
                 unification of some of the principal existing
                 resolution-based QBF calculi, namely Q-resolution,
                 long-distance Q-resolution and the expansion-based
                 calculus \forall Exp+Res. However, the proof complexity
                 of the QBF resolution proof systems is currently not
                 well understood. In this article, we completely
                 determine the relative power of the main QBF resolution
                 systems, settling in particular the relationship
                 between the two different types of resolution-based QBF
                 calculi: proof systems for CDCL-based solvers
                 (Q-resolution, universal, and long-distance
                 Q-resolution) and proof systems for expansion-based
                 solvers ( \forall Exp+Res and its generalizations
                 IR-calc and IRM-calc defined here). The most
                 challenging part of this comparison is to exhibit hard
                 formulas that underlie the exponential separations of
                 the aforementioned proof systems. To this end, we
                 exhibit a new and elegant proof technique for showing
                 lower bounds in QBF proof systems based on strategy
                 extraction. This technique provides a direct transfer
                 of circuit lower bounds to lengths-of-proofs lower
                 bounds. We use our method to show the hardness of a
                 natural class of parity formulas for Q-resolution and
                 universal Q-resolution. Variants of the formulas are
                 hard for even stronger systems such as long-distance
                 Q-resolution and extensions. With a completely
                 different and novel counting argument, we show the
                 hardness of the prominent formulas of Kleine B{\"u}ning
                 et al. [51] for the strong expansion-based calculus
                 IR-calc.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "26",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Allender:2019:NIN,
  author =       "Eric Allender and Shuichi Hirahara",
  title =        "New Insights on the (Non-){Hardness} of Circuit
                 Minimization and Related Problems",
  journal =      j-TOCT,
  volume =       "11",
  number =       "4",
  pages =        "27:1--27:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3349616",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:11 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3349616",
  abstract =     "The Minimum Circuit Size Problem (MCSP) and a related
                 problem (MKTP) that deal with time-bounded Kolmogorov
                 complexity are prominent candidates for NP-intermediate
                 status. We show that, under very modest cryptographic
                 assumptions (such as the existence of one-way
                 functions), the problem of approximating the minimum
                 circuit size (or time-bounded Kolmogorov complexity)
                 within a factor of n$^{1 - o (1)}$ is indeed
                 NP-intermediate. To the best of our knowledge, these
                 problems are the first natural NP-intermediate problems
                 under the existence of an arbitrary one-way function.
                 Our technique is quite general; we use it also to show
                 that approximating the size of the largest clique in a
                 graph within a factor of n$^{1 - o (1)}$ is also
                 NP-intermediate unless NP \subseteq P/poly. We also
                 prove that MKTP is hard for the complexity class DET
                 under non-uniform NC$^0$ reductions. This is
                 surprising, since prior work on MCSP and MKTP had
                 highlighted weaknesses of ``local'' reductions such as
                 $ \leq^{NC^0}_m $. We exploit this local reduction to
                 obtain several new consequences: (1) MKTP is not in
                 AC$^0$[p]. (2) Circuit size lower bounds are
                 equivalent to hardness of a relativized version
                 MKTP$^A$ of MKTP under a class of uniform AC$^0$
                 reductions, for a significant class of sets $A$. (3)
                 Hardness of MCSP$^A$ implies hardness of MKTP$^A$ for
                 a significant class of sets $A$. This is the first
                 result directly relating the complexity of MCSP$^A$
                 and MKTP$^A$, for any $A$.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "27",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Jansen:2019:OSS,
  author =       "Bart M. P. Jansen and Astrid Pieterse",
  title =        "Optimal Sparsification for Some Binary {CSPs} Using
                 Low-Degree Polynomials",
  journal =      j-TOCT,
  volume =       "11",
  number =       "4",
  pages =        "28:1--28:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3349618",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Oct 22 10:25:11 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3349618",
  abstract =     "This article analyzes to what extent it is possible to
                 efficiently reduce the number of clauses in NP-hard
                 satisfiability problems without changing the answer.
                 Upper and lower bounds are established using the
                 concept of kernelization. Existing results show that if
                 NP $ \not \subseteq $ coNP/poly, no efficient
                 preprocessing algorithm can reduce $n$-variable
                 instances of cnf-sat with $d$ literals per clause to
                 equivalent instances with $ O(n^{d - \epsilon}) $ bits
                 for any $ \epsilon > 0 $. For the Not-All-Equal sat
                 problem, a compression to size $ \tilde O(n^{d - 1}) $
                 exists. We put these results in a common framework by
                 analyzing the compressibility of CSPs with a binary
                 domain. We characterize constraint types based on the
                 minimum degree of multivariate polynomials whose roots
                 correspond to the satisfying assignments, obtaining
                 (nearly) matching upper and lower bounds in several
                 settings. Our lower bounds show that not just the
                 number of constraints, but also the encoding size of
                 individual constraints plays an important role. For
                 example, for Exact Satisfiability with unbounded clause
                 length it is possible to efficiently reduce the number
                 of constraints to $ n + 1 $, yet no polynomial-time
                 algorithm can reduce to an equivalent instance with
                 $ O(n^{2 - \epsilon}) $ bits for any $ \epsilon > 0 $,
                 unless NP $ \subseteq $ coNP/poly.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "28",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{ODonnell:2020:ENE,
  author =       "Ryan O'Donnell",
  title =        "Editorial from the New {Editor-in-Chief}",
  journal =      j-TOCT,
  volume =       "12",
  number =       "1",
  pages =        "1e:1--1e:1",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3381517",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Feb 26 07:25:37 MST 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3381517",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1e",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Ghosh:2020:FAE,
  author =       "Arijit Ghosh and Sudeshna Kolay and Gopinath Mishra",
  title =        "{FPT} Algorithms for Embedding into Low-Complexity
                 Graphic Metrics",
  journal =      j-TOCT,
  volume =       "12",
  number =       "1",
  pages =        "1:1--1:41",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3369933",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Feb 26 07:25:37 MST 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3369933",
  abstract =     "Given metric spaces $ (X, D_X) $ and $ (Y, D_Y) $, an
                 embedding $ F : X \to Y $ is an injective mapping from
                 $X$ to $Y$. The expansion $ e_F $ and contraction $
                 c_F $ of an embedding $ F : X \to Y $ are defined as
                 $ e_F = \max_{x_1, x_2 (\neq x_1) \in X} D_Y(F(x_1),
                 F(x_2)) / D_X(x_1, x_2) $ and $ c_F = \max_{x_1, x_2
                 (\neq x_1) \in X} D_X(x_1, x_2) / D_Y(F(x_1), F(x_2))
                 $, respectively, and the distortion $ d_F $ is defined
                 as $ d_F = e_F \cdot c_F $. Observe that $ d_F \geq 1
                 $. An embedding $ F : X \to Y $ is noncontracting if
                 $ c_F \leq 1 $. When $ d_F = 1 $, $F$ is an
                 isometry.\par

                 The Metric Embedding problem takes as input two metric
                 spaces $ (X, D_X)$ and $ (Y, D_Y)$, and a positive
                 integer $d$. The objective is to determine whether
                 there is an embedding $ F : X \to Y$ such that $ d_F
                 \leq d$. Such an embedding is called a distortion $d$
                 embedding. The bijective Metric Embedding problem is a
                 special case of the Metric Embedding problem where $ X
                 = Y $. In parameterized complexity, the Metric
                 Embedding problem, in full generality, is known to be
                 $W$-hard and, therefore, not expected to have an FPT
                 algorithm. In this article, we consider the Gen-Graph
                 Metric Embedding problem, where the two metric spaces
                 are graph metrics. We explore the extent of
                 tractability of the problem in the parameterized
                 complexity setting. We determine whether an unweighted
                 graph metric $ (G, D_G)$ can be embedded, or
                 bijectively embedded, into another unweighted graph
                 metric $ (H, D_H)$, where the graph $H$ has low
                 structural complexity. For example, $H$ is a cycle, or
                 $H$ has bounded treewidth or bounded connected
                 treewidth. The parameters for the algorithms are chosen
                 from the upper bound $d$ on distortion, bound $ \Delta
                 $ on the maximum degree of $H$, treewidth $ \alpha $ of
                 $H$, and connected treewidth $ \alpha_c$ of
                 $H$.\par

                 Our general approach to these problems can be
                 summarized as trying to understand the behavior of the
                 shortest paths in $G$ under a low-distortion embedding
                 into $H$, and the structural relation the mapping of
                 these paths has to shortest paths in $H$.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Kayal:2020:SBR,
  author =       "Neeraj Kayal and Vineet Nair and Chandan Saha",
  title =        "Separation Between Read-once Oblivious Algebraic
                 Branching Programs {(ROABPs)} and Multilinear
                 Depth-three Circuits",
  journal =      j-TOCT,
  volume =       "12",
  number =       "1",
  pages =        "2:1--2:27",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3369928",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Feb 26 07:25:37 MST 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3369928",
  abstract =     "We show an exponential separation between two
                 well-studied models of algebraic computation, namely,
                 read-once oblivious algebraic branching programs
                 (ROABPs) and multilinear depth-three circuits. In
                 particular, we show the following: (1) There exists an
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "2",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Hemaspaandra:2020:SVD,
  author =       "Edith Hemaspaandra and Lane A. Hemaspaandra and Curtis
                 Menton",
  title =        "Search versus Decision for Election Manipulation
                 Problems",
  journal =      j-TOCT,
  volume =       "12",
  number =       "1",
  pages =        "3:1--3:42",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3369937",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Feb 26 07:25:37 MST 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3369937",
  abstract =     "Most theoretical definitions about the complexity of
                 manipulating elections focus on the decision problem of
                 recognizing which instances can be successfully
                 manipulated rather than the search problem of finding
                 the successful manipulative actions. Since \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "3",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Hermo:2020:ELB,
  author =       "Montserrat Hermo and Ana Ozaki",
  title =        "Exact Learning: On the Boundary between {Horn} and
                 {CNF}",
  journal =      j-TOCT,
  volume =       "12",
  number =       "1",
  pages =        "4:1--4:25",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3369930",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Feb 26 07:25:37 MST 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3369930",
  abstract =     "A major problem in computational learning theory is
                 whether the class of formulas in conjunctive normal
                 form (CNF) is efficiently learnable. Although it is
                 known that this class cannot be polynomially learned
                 using either membership or equivalence \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "4",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Kumar:2020:PBD,
  author =       "Mrinal Kumar",
  title =        "On the Power of Border of Depth-3 Arithmetic
                 Circuits",
  journal =      j-TOCT,
  volume =       "12",
  number =       "1",
  pages =        "5:1--5:8",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3371506",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Feb 26 07:25:37 MST 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3371506",
  abstract =     "We show that over the field of complex numbers, every
                 homogeneous polynomial of degree d can be approximated
                 (in the border complexity sense) by a depth-3
                 arithmetic circuit of top fan-in at most 2. This is
                 quite surprising, since there exist \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "5",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Fernau:2020:PMV,
  author =       "Henning Fernau and Florin Manea and Robert Mercas and
                 Markus L. Schmid",
  title =        "Pattern Matching with Variables: Efficient Algorithms
                 and Complexity Results",
  journal =      j-TOCT,
  volume =       "12",
  number =       "1",
  pages =        "6:1--6:37",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3369935",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Feb 26 07:25:37 MST 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3369935",
  abstract =     "A pattern $ \alpha $ (i.e., a string of variables and
                 terminals) matches a word $w$, if $w$ can be obtained
                 by uniformly replacing the variables of $ \alpha $ by
                 terminal words. The respective matching problem, i.e.,
                 deciding whether or not a given pattern matches a given
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "6",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Goldenberg:2020:TGD,
  author =       "Elazar Goldenberg and Karthik C. S.",
  title =        "Toward a General Direct Product Testing Theorem",
  journal =      j-TOCT,
  volume =       "12",
  number =       "1",
  pages =        "7:1--7:18",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3369939",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Wed Feb 26 07:25:37 MST 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3369939",
  abstract =     "The direct product encoding of a string $ a \in \{ 0,
                 1 \}^n $ on an underlying domain $ V \subseteq {[n]
                 \choose k} $ is a function DP$_V(a)$ that gets as input
                 a set $ S \in V $ and outputs $a$ restricted to $S$. In
                 the direct product testing problem, we are given a
                 function $ F : V \to \{ 0, 1 \}^k$, and our goal is to
                 test whether $F$ is close to a direct product
                 encoding, that is, whether there exists some $ a \in
                 \{ 0, 1 \}^n$ such that on most sets $S$, we have $
                 F(S) = DP_V(a)(S) $. A natural test is as follows:
                 select a pair $ (S, S') $ according to some underlying
                 distribution over $ V \times V$, query $F$ on this
                 pair, and check for consistency on their intersection.
                 Note that the preceding distribution may be viewed as a
                 weighted graph over the vertex set $V$ and is referred
                 to as a test graph.\par

                 The testability of direct products was studied over
                 various domains and test graphs: Dinur and Steurer (CCC
                 14) analyzed it when $V$ equals the $k$-th slice of the
                 Boolean hypercube and the test graph is a member of the
                 Johnson graph family. Dinur and Kaufman (FOCS 17)
                 analyzed it for the case where $V$ is the set of faces
                 of a Ramanujan complex, where in this case $ |V| =
                 O_k(n) $.
                 In this article, we study the testability of direct
                 products in a general setting, addressing the question:
                 what properties of the domain and the test graph allow
                 one to prove a direct product testing
                 theorem?\par

                 Towards this goal, we introduce the notion of
                 coordinate expansion of a test graph. Roughly speaking,
                 a test graph is a coordinate expander if it has global
                 and local expansion, and has certain nice intersection
                 properties on sampling. We show that whenever the test
                 graph has coordinate expansion, it admits a direct
                 product testing theorem. Additionally, for every $k$
                 and $n$, we provide a direct product domain $ V
                 \subseteq {[n] \choose k} $ of size $n$, called the
                 sliding window
                 domain, for which we prove direct product
                 testability.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "7",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Gurjar:2020:PBO,
  author =       "Rohit Gurjar and Ben Lee Volk",
  title =        "Pseudorandom Bits for Oblivious Branching Programs",
  journal =      j-TOCT,
  volume =       "12",
  number =       "2",
  pages =        "8:1--8:12",
  month =        may,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3378663",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue May 19 10:06:28 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/prng.bib;
                 https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3378663",
  abstract =     "We construct a pseudorandom generator that fools
                 known-order read-$k$ oblivious branching programs and,
                 more generally, any linear length oblivious branching
                 program. For polynomial width branching programs, the
                 seed lengths in our constructions are $ \tilde
                 O(n^{1 - 1/2^{k - 1}}) $ (for the read-$k$ case) and
                 $ O(n / \log \log n) $ (for the linear-length case).
                 Previously, the best construction for these models
                 required seed length $ (1 - \Omega(1)) n $.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "8",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Galesi:2020:CRG,
  author =       "Nicola Galesi and Navid Talebanfard and Jacobo
                 Tor{\'a}n",
  title =        "Cops--Robber Games and the Resolution of {Tseitin}
                 Formulas",
  journal =      j-TOCT,
  volume =       "12",
  number =       "2",
  pages =        "9:1--9:22",
  month =        may,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3378667",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue May 19 10:06:28 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3378667",
  abstract =     "We characterize several complexity measures for the
                 resolution of Tseitin formulas in terms of a two-person
                 cop-robber game. Our game is a slight variation of the
                 one Seymour and Thomas used in order to characterize
                 the tree-width parameter. For any \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "9",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Beyersdorff:2020:RHQ,
  author =       "Olaf Beyersdorff and Luke Hinde and J{\'a}n Pich",
  title =        "Reasons for Hardness in {QBF} Proof Systems",
  journal =      j-TOCT,
  volume =       "12",
  number =       "2",
  pages =        "10:1--10:27",
  month =        may,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3378665",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue May 19 10:06:28 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3378665",
  abstract =     "We aim to understand inherent reasons for lower bounds
                 for QBF proof systems and revisit and compare two
                 previous approaches in this direction. The first of
                 these relates size lower bounds for strong QBF Frege
                 systems to circuit lower bounds via \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "10",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Bulatov:2020:ACC,
  author =       "Andrei A. Bulatov and Stanislav Zivn{\'y}",
  title =        "Approximate Counting {CSP} Seen from the Other Side",
  journal =      j-TOCT,
  volume =       "12",
  number =       "2",
  pages =        "11:1--11:19",
  month =        may,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3389390",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue May 19 10:06:28 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3389390",
  abstract =     "In this article, we study the complexity of counting
                 Constraint Satisfaction Problems (CSPs) of the form
                 \#CSP( C, -), in which the goal is, given a relational
                 structure A from a class C of structures and an
                 arbitrary structure B, to find the number of \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "11",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Rosenbaum:2020:BGE,
  author =       "David J. Rosenbaum",
  title =        "Beating the Generator-Enumeration Bound for
                 Solvable-Group Isomorphism",
  journal =      j-TOCT,
  volume =       "12",
  number =       "2",
  pages =        "12:1--12:18",
  month =        may,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3389396",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue May 19 10:06:28 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3389396",
  abstract =     "We consider the isomorphism problem for groups
                 specified by their multiplication tables. Until
                 recently, the best published bound for the worst-case
                 was achieved by the $ n^{\log_p n + O(1)} $
                 generator-enumeration algorithm, where $n$ is the
                 order of
                 the group and \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "12",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Lagerkvist:2020:SSC,
  author =       "Victor Lagerkvist and Magnus Wahlstr{\"o}m",
  title =        "Sparsification of {SAT} and {CSP} Problems via
                 Tractable Extensions",
  journal =      j-TOCT,
  volume =       "12",
  number =       "2",
  pages =        "13:1--13:29",
  month =        may,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3389411",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue May 19 10:06:28 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3389411",
  abstract =     "Unlike polynomial kernelization in general, for which
                 many non-trivial results and methods exist, only a few
                 non-trivial algorithms are known for polynomial-time
                 sparsification. Furthermore, excepting problems on
                 restricted inputs (such as graph problems \ldots{}).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "13",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Watson:2020:QSM,
  author =       "Thomas Watson",
  title =        "Quadratic Simulations of {Merlin--Arthur} Games",
  journal =      j-TOCT,
  volume =       "12",
  number =       "2",
  pages =        "14:1--14:11",
  month =        may,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3389399",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue May 19 10:06:28 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3389399",
  abstract =     "The known proofs of MA $ \subseteq $ PP incur a
                 quadratic overhead in the running time. We prove that
                 this quadratic overhead is necessary for black-box
                 simulations; in particular, we obtain an oracle
                 relative to which MA-TIME($t$) $ \not \subseteq $
                 P-TIME($ o(t^2)$). We also show that 2-sided-error
                 Merlin--Arthur games can be simulated by 1-sided-error
                 Arthur--Merlin games with quadratic overhead. We also
                 present a simple, query complexity based proof
                 (provided by Mika G{\"o}{\"o}s) that there is an oracle
                 relative to which MA $ \not \subseteq $ NP$^{BPP}$ (which
                 was previously known to hold by a proof using
                 generics).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "14",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Focke:2020:CAC,
  author =       "Jacob Focke and Leslie Ann Goldberg and Stanislav
                 Zivn{\'y}",
  title =        "The Complexity of Approximately Counting Retractions",
  journal =      j-TOCT,
  volume =       "12",
  number =       "3",
  pages =        "15:1--15:43",
  month =        jul,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3397472",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Fri Jul 24 13:21:36 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3397472",
  abstract =     "Let G be a graph that contains an induced subgraph H.
                 A retraction from G to H is a homomorphism from G to H
                 that is the identity function on H. Retractions are
                 very well studied: Given H, the complexity of deciding
                 whether there is a retraction from an \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "15",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Chiesa:2020:TLA,
  author =       "Alessandro Chiesa and Peter Manohar and Igor Shinkar",
  title =        "Testing Linearity against Non-signaling Strategies",
  journal =      j-TOCT,
  volume =       "12",
  number =       "3",
  pages =        "16:1--16:51",
  month =        jul,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3397474",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Fri Jul 24 13:21:36 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3397474",
  abstract =     "Non-signaling strategies are collections of
                 distributions with certain non-local correlations. They
                 have been studied in physics as a strict generalization
                 of quantum strategies to understand the power and
                 limitations of nature's apparent non-locality.
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "16",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Jukna:2020:CFD,
  author =       "Stasys Jukna",
  title =        "Coin Flipping in Dynamic Programming Is Almost
                 Useless",
  journal =      j-TOCT,
  volume =       "12",
  number =       "3",
  pages =        "17:1--17:26",
  month =        jul,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3397476",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Fri Jul 24 13:21:36 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3397476",
  abstract =     "We consider probabilistic circuits working over the
                 real numbers and using arbitrary semialgebraic
                 functions of bounded description complexity as gates.
                 In particular, such circuits can use all arithmetic
                 operations (+, -, $ \times $, $ \div $), optimization
                 operations (min and max), conditional branching
                 (if-then-else), and many more. We show that
                 probabilistic circuits using any of these operations as
                 gates can be simulated by deterministic circuits with
                 only about a quadratic blowup in size. A slightly
                 larger blowup in circuit size is also shown when
                 derandomizing approximating circuits. The algorithmic
                 consequence, motivating the title, is that randomness
                 cannot substantially speed up dynamic programming
                 algorithms",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "17",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Rahman:2020:CUC,
  author =       "Md Lutfar Rahman and Thomas Watson",
  title =        "Complexity of Unordered {CNF} Games",
  journal =      j-TOCT,
  volume =       "12",
  number =       "3",
  pages =        "18:1--18:18",
  month =        jul,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3397478",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Fri Jul 24 13:21:36 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3397478",
  abstract =     "The classic TQBF problem is to determine who has a
                 winning strategy in a game played on a given
                 conjunctive normal form formula (CNF), where the two
                 players alternate turns picking truth values for the
                 variables in a given order, and the winner is
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "18",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Knop:2020:TCL,
  author =       "Dusan Knop and Micha{\l} Pilipczuk and Marcin
                 Wrochna",
  title =        "Tight Complexity Lower Bounds for Integer Linear
                 Programming with Few Constraints",
  journal =      j-TOCT,
  volume =       "12",
  number =       "3",
  pages =        "19:1--19:19",
  month =        jul,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3397484",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Fri Jul 24 13:21:36 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3397484",
  abstract =     "We consider the standard ILP Feasibility problem:
                 given an integer linear program of the form $ A x = b
                 $, $ x \geq 0 $, where $A$ is an integer matrix with
                 $k$ rows and $l$ columns, $x$ is a vector of $l$
                 variables, and $b$ is a vector of $k$ integers, we ask
                 whether there exists $ x \in N^l $ that satisfies $
                 A x = b$. Each row of $A$ specifies one linear
                 constraint on $x$; our goal is to study the complexity
                 of ILP Feasibility when both $k$, the number of
                 constraints, and $ ||A||_\infty $, the largest absolute
                 value of an entry in $A$, are small.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "19",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Goos:2020:LBS,
  author =       "Mika G{\"o}{\"o}s and Thomas Watson",
  title =        "A Lower Bound for Sampling Disjoint Sets",
  journal =      j-TOCT,
  volume =       "12",
  number =       "3",
  pages =        "20:1--20:13",
  month =        jul,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3404858",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Fri Jul 24 13:21:36 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3404858",
  abstract =     "Suppose Alice and Bob each start with private
                 randomness and no other input, and they wish to engage
                 in a protocol in which Alice ends up with a set $ x
                 \subseteq [n] $ and Bob ends up with a set $ y
                 \subseteq [n] $, such that $ (x, y) $ is uniformly
                 distributed over all pairs of \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "20",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Cheraghchi:2020:CLB,
  author =       "Mahdi Cheraghchi and Valentine Kabanets and Zhenjian
                 Lu and Dimitrios Myrisiotis",
  title =        "Circuit Lower Bounds for {MCSP} from Local
                 Pseudorandom Generators",
  journal =      j-TOCT,
  volume =       "12",
  number =       "3",
  pages =        "21:1--21:27",
  month =        jul,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3404860",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Fri Jul 24 13:21:36 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3404860",
  abstract =     "The Minimum Circuit Size Problem (MCSP) asks if a
                 given truth table of a Boolean function f can be
                 computed by a Boolean circuit of size at most $ \theta
                 $, for a given parameter $ \theta $. We improve several
                 circuit lower bounds for MCSP, using pseudorandom
                 generators \ldots{}.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "21",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Ganian:2020:EMR,
  author =       "Robert Ganian and Ronald de Haan and Iyad Kanj and
                 Stefan Szeider",
  title =        "On Existential {MSO} and Its Relation to {ETH}",
  journal =      j-TOCT,
  volume =       "12",
  number =       "4",
  pages =        "22:1--22:32",
  month =        dec,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3417759",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 20 18:27:42 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3417759",
  abstract =     "Impagliazzo et al. proposed a framework, based on the
                 logic fragment defining the complexity class SNP, to
                 identify problems that are equivalent to k-CNF-Sat
                 modulo subexponential-time reducibility
                 (serf-reducibility). The subexponential-time \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "22",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Srinivasan:2020:SES,
  author =       "Srikanth Srinivasan",
  title =        "Strongly Exponential Separation between Monotone {VP}
                 and Monotone {VNP}",
  journal =      j-TOCT,
  volume =       "12",
  number =       "4",
  pages =        "23:1--23:12",
  month =        dec,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3417758",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 20 18:27:42 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3417758",
  abstract =     "We show that there is a sequence of explicit
                 multilinear polynomials $ P_n(x_1, \ldots, x_n) \in
                 R[x_1, \ldots, x_n] $ with non-negative coefficients
                 that lies in monotone VNP such that any monotone
                 algebraic circuit for P$_n$ must have size $ \exp
                 (\Omega (n))$. This builds on (and \ldots{})",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "23",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Applebaum:2020:PAS,
  author =       "Benny Applebaum and Barak Arkis",
  title =        "On the Power of Amortization in Secret Sharing:
                 $d$-Uniform Secret Sharing and {CDS} with Constant
                 Information Rate",
  journal =      j-TOCT,
  volume =       "12",
  number =       "4",
  pages =        "24:1--24:21",
  month =        dec,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3417756",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 20 18:27:42 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3417756",
  abstract =     "Consider the following secret-sharing problem: A file
                 $s$ should be distributed between $n$ servers such that
                 $ (d - 1)$-subsets cannot recover the file, $ (d +
                 1)$-subsets can recover the file, and $d$-subsets
                 should be able to recover $s$ if and only if they
                 appear in \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "24",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Srinivasan:2020:DVR,
  author =       "Srikanth Srinivasan and Utkarsh Tripathi and S.
                 Venkitesh",
  title =        "Decoding Variants of {Reed--Muller} Codes over Finite
                 Grids",
  journal =      j-TOCT,
  volume =       "12",
  number =       "4",
  pages =        "25:1--25:11",
  month =        dec,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3417754",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 20 18:27:42 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3417754",
  abstract =     "In a recent article, Kim and Kopparty (2017) gave a
                 deterministic algorithm for the unique decoding problem
                 for polynomials of bounded total degree over a general
                 grid $ S_1 \times \cdots \times S_m $. We show that
                 their algorithm can be adapted to solve the unique
                 decoding \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "25",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Podolskii:2020:IPS,
  author =       "Vladimir V. Podolskii and Alexander A. Sherstov",
  title =        "Inner Product and Set Disjointness: Beyond
                 Logarithmically Many Parties",
  journal =      j-TOCT,
  volume =       "12",
  number =       "4",
  pages =        "26:1--26:28",
  month =        dec,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3428671",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 20 18:27:42 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3428671",
  abstract =     "A major goal in complexity theory is to understand the
                 communication complexity of number-on-the-forehead
                 problems $ f : (\{ 0, 1 \}^n)^k \to \{ 0, 1 \} $ with $
                 k > \log n $ parties. We study the problems of inner
                 product and set disjointness and determine their
                 randomized \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "26",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Watson:2020:ZNL,
  author =       "Thomas Watson",
  title =        "A {ZPP NP[1]} Lifting Theorem",
  journal =      j-TOCT,
  volume =       "12",
  number =       "4",
  pages =        "27:1--27:20",
  month =        dec,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3428673",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 20 18:27:42 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3428673",
  abstract =     "The complexity class ZPP$^{NP[1]}$ (corresponding to
                 zero-error randomized algorithms with access to one NP
                 oracle query) is known to have a number of curious
                 properties. We further explore this class in the
                 settings of time complexity, query complexity, and
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "27",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Goldreich:2020:STM,
  author =       "Oded Goldreich and Dana Ron",
  title =        "The Subgraph Testing Model",
  journal =      j-TOCT,
  volume =       "12",
  number =       "4",
  pages =        "28:1--28:32",
  month =        dec,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3428675",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 20 18:27:42 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3428675",
  abstract =     "Following Newman (2010), we initiate a study of
                 testing properties of graphs that are presented as
                 subgraphs of a fixed (or an explicitly given) graph.
                 The tester is given free access to a base graph $ G =
                 ([n], E) $ and oracle access to a function $ f : E \to
                 \ldots {} $",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "28",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Hitchcock:2021:PTR,
  author =       "John M. Hitchcock and Adewale Sekoni and Hadi Shafei",
  title =        "Polynomial-Time Random Oracles and Separating
                 Complexity Classes",
  journal =      j-TOCT,
  volume =       "13",
  number =       "1",
  pages =        "1:11--1:16",
  month =        mar,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3434389",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 20 18:27:43 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3434389",
  abstract =     "Bennett and Gill [1981] showed that P$^A$ /= NP$^A$ /=
                 coNP$^A$ for a random oracle A, with probability 1. We
                 investigate whether this result extends to individual
                 polynomial-time random oracles. We consider two notions
                 of random oracles: $p$-random oracles in the \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Jonsson:2021:FGT,
  author =       "Peter Jonsson and Victor Lagerkvist and Biman Roy",
  title =        "Fine-Grained Time Complexity of Constraint
                 Satisfaction Problems",
  journal =      j-TOCT,
  volume =       "13",
  number =       "1",
  pages =        "2:1--2:32",
  month =        mar,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3434387",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 20 18:27:43 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3434387",
  abstract =     "We study the constraint satisfaction problem (CSP)
                 parameterized by a constraint language $\Gamma$
                 (CSP($\Gamma$)) and how the choice of $\Gamma$ affects its
                 worst-case time complexity. Under the exponential-time
                 hypothesis (ETH), we rule out the existence of
                 subexponential \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "2",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Kretschmer:2021:LBT,
  author =       "William Kretschmer",
  title =        "Lower Bounding the {AND--OR} Tree via Symmetrization",
  journal =      j-TOCT,
  volume =       "13",
  number =       "1",
  pages =        "3:1--3:11",
  month =        mar,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3434385",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 20 18:27:43 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3434385",
  abstract =     "We prove a simple, nearly tight lower bound on the
                 approximate degree of the two-level AND-OR tree using
                 symmetrization arguments. Specifically, we show that
                 $ \widetilde {\rm deg}({\rm AND}_m \circ {\rm OR}_n) =
                 \widetilde \Omega (\sqrt {m n})$.
                 We prove this lower bound via reduction to the OR
                 function through \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "3",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Becker:2021:AGC,
  author =       "Florent Becker and Tom Besson and J{\'e}r{\^o}me
                 Durand-Lose and Aur{\'e}lien Emmanuel and Mohammad-Hadi
                 Foroughmand-Araabi and Sama Goliaei and Shahrzad
                 Heydarshahi",
  title =        "Abstract Geometrical Computation 10: an Intrinsically
                 Universal Family of Signal Machines",
  journal =      j-TOCT,
  volume =       "13",
  number =       "1",
  pages =        "4:1--4:31",
  month =        mar,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3442359",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 20 18:27:43 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3442359",
  abstract =     "Signal machines form an abstract and idealized model
                 of collision computing. Based on dimensionless signals
                 moving on the real line, they model particle/signal
                 dynamics in Cellular Automata. Each particle, or
                 signal, moves at constant speed in \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "4",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Viola:2021:AU,
  author =       "Emanuele Viola",
  title =        "{AC0} Unpredictability",
  journal =      j-TOCT,
  volume =       "13",
  number =       "1",
  pages =        "5:1--5:8",
  month =        mar,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3442362",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 20 18:27:43 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3442362",
  abstract =     "We prove that for every distribution $D$ on $n$ bits
                 with Shannon entropy $ \geq n - a$, at most $ O(2^d a
                 \log^{d + 1} g) / \gamma^5$ of the bits $ D_i$ can be
                 predicted with advantage $\gamma$ by an AC$^0$ circuit of
                 size $g$ and depth $d$ that is a function of all of the
                 bits of $D$ except $ D_i$. This \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "5",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Itsykson:2021:CPC,
  author =       "Dmitry Itsykson and Alexander Okhotin and Vsevolod
                 Oparin",
  title =        "Computational and Proof Complexity of Partial String
                 Avoidability",
  journal =      j-TOCT,
  volume =       "13",
  number =       "1",
  pages =        "6:1--6:25",
  month =        mar,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3442365",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 20 18:27:43 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3442365",
  abstract =     "The partial string avoidability problem is stated as
                 follows: given a finite set of strings with possible
                 ``holes'' (wildcard symbols), determine whether there
                 exists a two-sided infinite string containing no
                 substrings from this set, assuming that a hole
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "6",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Ambainis:2021:ACA,
  author =       "Andris Ambainis and Martins Kokainis and Krisjanis
                 Prusis and Jevgenijs Vihrovs and Aleksejs Zajakins",
  title =        "All Classical Adversary Methods Are Equivalent for
                 Total Functions",
  journal =      j-TOCT,
  volume =       "13",
  number =       "1",
  pages =        "7:1--7:20",
  month =        mar,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3442357",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 20 18:27:43 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3442357",
  abstract =     "We show that all known classical adversary lower
                 bounds on randomized query complexity are equivalent
                 for total functions and are equal to the fractional
                 block sensitivity fbs($f$). That includes the
                 Kolmogorov complexity bound of Laplante and Magniez and
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "7",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Dell:2021:FGR,
  author =       "Holger Dell and John Lapinskas",
  title =        "Fine-Grained Reductions from Approximate Counting to
                 Decision",
  journal =      j-TOCT,
  volume =       "13",
  number =       "2",
  pages =        "8:1--8:24",
  month =        jun,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3442352",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Jun 5 07:29:21 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3442352",
  abstract =     "In this article, we introduce a general framework for
                 fine-grained reductions of approximate counting
                 problems to their decision versions. (Thus, we use an
                 oracle that decides whether any witness exists to
                 multiplicatively approximate the number of \ldots{}).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "8",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Gupta:2021:PMR,
  author =       "Sushmita Gupta and Pranabendu Misra and Saket Saurabh
                 and Meirav Zehavi",
  title =        "Popular Matching in Roommates Setting Is {NP}-hard",
  journal =      j-TOCT,
  volume =       "13",
  number =       "2",
  pages =        "9:1--9:20",
  month =        jun,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3442354",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Jun 5 07:29:21 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3442354",
  abstract =     "An input to the POPULAR MATCHING problem, in the
                 roommates setting (as opposed to the marriage setting),
                 consists of a graph G (not necessarily bipartite) where
                 each vertex ranks its neighbors in strict order, known
                 as its preference. In the POPULAR M. \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "9",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Fomin:2021:CHN,
  author =       "Fedor V. Fomin and Daniel Lokshtanov and Ivan Mihajlin
                 and Saket Saurabh and Meirav Zehavi",
  title =        "Computation of {Hadwiger} Number and Related
                 Contraction Problems: Tight Lower Bounds",
  journal =      j-TOCT,
  volume =       "13",
  number =       "2",
  pages =        "10:1--10:25",
  month =        jun,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3448639",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Jun 5 07:29:21 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3448639",
  abstract =     "We prove that the Hadwiger number of an n -vertex
                 graph G (the maximum size of a clique minor in G )
                 cannot be computed in time n$^{o (n)}$, unless the
                 Exponential Time Hypothesis (ETH) fails. This resolves
                 a well-known open question in the area of exact
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "10",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Cai:2021:TLD,
  author =       "Jin-yi Cai and Artem Govorov",
  title =        "On a Theorem of {Lov{\'a}sz} that {$ (\cdot, H) $}
                 Determines the Isomorphism Type of {$H$}",
  journal =      j-TOCT,
  volume =       "13",
  number =       "2",
  pages =        "11:1--11:25",
  month =        jun,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3448641",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Jun 5 07:29:21 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3448641",
  abstract =     "Graph homomorphism has been an important research
                 topic since its introduction [20]. Stated in the
                 language of binary relational structures in that paper
                 [20], Lov{\'a}sz proved a fundamental theorem that, for
                 a graph H given by its 0-1 valued adjacency \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "11",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Li:2021:SCU,
  author =       "Qian Li and Xiaoming Sun",
  title =        "On the Sensitivity Complexity of $k$-Uniform
                 Hypergraph Properties",
  journal =      j-TOCT,
  volume =       "13",
  number =       "2",
  pages =        "12:1--12:13",
  month =        jun,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3448643",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Jun 5 07:29:21 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3448643",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "12",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Bezakova:2021:CAM,
  author =       "Ivona Bez{\'a}kov{\'a} and Andreas Galanis and Leslie
                 Ann Goldberg and Daniel Stefankovic",
  title =        "The Complexity of Approximating the Matching
                 Polynomial in the Complex Plane",
  journal =      j-TOCT,
  volume =       "13",
  number =       "2",
  pages =        "13:1--13:37",
  month =        jun,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3448645",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Jun 5 07:29:21 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3448645",
  abstract =     "We study the problem of approximating the value of the
                 matching polynomial on graphs with edge parameter
                 $\gamma$, where $\gamma$ takes arbitrary values in the
                 complex plane. When $\gamma$ is a positive real, Jerrum
                 and Sinclair showed that the problem admits an FPRAS on
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "13",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Lutz:2021:FIP,
  author =       "Neil Lutz",
  title =        "Fractal Intersections and Products via Algorithmic
                 Dimension",
  journal =      j-TOCT,
  volume =       "13",
  number =       "3",
  pages =        "14:1--14:15",
  month =        sep,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3460948",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Sep 2 08:31:31 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3460948",
  abstract =     "Algorithmic fractal dimensions quantify the
                 algorithmic information density of individual points
                 and may be defined in terms of Kolmogorov complexity.
                 This work uses these dimensions to bound the classical
                 Hausdorff and packing dimensions of \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "14",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Chillara:2021:CMP,
  author =       "Suryajith Chillara",
  title =        "On Computing Multilinear Polynomials Using
                 Multi-$r$-ic Depth Four Circuits",
  journal =      j-TOCT,
  volume =       "13",
  number =       "3",
  pages =        "16:1--16:21",
  month =        sep,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3460952",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Sep 2 08:31:31 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3460952",
  abstract =     "In this article, we are interested in understanding
                 the complexity of computing multilinear polynomials
                 using depth four circuits in which the polynomial
                 computed at every node has a bound on the individual
                 degree of $ r \geq 1$ with respect to all its \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "16",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Monde:2021:CSL,
  author =       "Akihiro Monde and Yukiko Yamauchi and Shuji Kijima and
                 Yamashita Masafumi",
  title =        "Can a Skywalker Localize the Midpoint of a Rope?",
  journal =      j-TOCT,
  volume =       "13",
  number =       "3",
  pages =        "17:1--17:23",
  month =        sep,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3460954",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Sep 2 08:31:31 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3460954",
  abstract =     "This article poses a question about a simple
                 localization problem. The question is if an oblivious
                 walker on a line segment can localize the midpoint of
                 the line segment in a finite number of steps observing
                 the direction (i.e., Left or Right) and the \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "17",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Chaugule:2021:VHP,
  author =       "Prasad Chaugule and Nutan Limaye and Aditya Varre",
  title =        "Variants of Homomorphism Polynomials Complete for
                 Algebraic Complexity Classes",
  journal =      j-TOCT,
  volume =       "13",
  number =       "4",
  pages =        "21:1--21:26",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3470858",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Sep 2 08:31:31 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3470858",
  abstract =     "We present polynomial families complete for the
                 well-studied algebraic complexity classes VF, VBP, VP,
                 and VNP. The polynomial families are based on the
                 homomorphism polynomials studied in the recent works of
                 Durand et al. (2014) and Mahajan et al. (2018).
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "21",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Arunachalam:2021:IBF,
  author =       "Srinivasan Arunachalam and Sourav Chakraborty and
                 Michal Kouck{\'y} and Nitin Saurabh and Ronald {De
                 Wolf}",
  title =        "Improved Bounds on {Fourier} Entropy and Min-entropy",
  journal =      j-TOCT,
  volume =       "13",
  number =       "4",
  pages =        "22:1--22:40",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3470860",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Sep 2 08:31:31 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3470860",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "22",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Kabanets:2021:ALB,
  author =       "Valentine Kabanets and Sajin Koroth and Zhenjian Lu
                 and Dimitrios Myrisiotis and Igor C. Oliveira",
  title =        "Algorithms and Lower Bounds for {De Morgan} Formulas
                 of Low-Communication Leaf Gates",
  journal =      j-TOCT,
  volume =       "13",
  number =       "4",
  pages =        "23:1--23:37",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3470861",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Sep 2 08:31:31 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3470861",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "23",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Bun:2021:SRC,
  author =       "Mark Bun and Nikhil S. Mande and Justin Thaler",
  title =        "Sign-rank Can Increase under Intersection",
  journal =      j-TOCT,
  volume =       "13",
  number =       "4",
  pages =        "24:1--24:17",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3470863",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Sep 2 08:31:31 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3470863",
  abstract =     "The communication class UPP$^{cc}$ is a communication
                 analog of the Turing Machine complexity class PP. It is
                 characterized by a matrix-analytic complexity measure
                 called sign-rank (also called dimension complexity),
                 and is essentially the most powerful \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "24",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Galanis:2021:FAG,
  author =       "Andreas Galanis and Leslie Ann Goldberg and James
                 Stewart",
  title =        "Fast Algorithms for General Spin Systems on Bipartite
                 Expanders",
  journal =      j-TOCT,
  volume =       "13",
  number =       "4",
  pages =        "25:1--25:18",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3470865",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Sep 2 08:31:31 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3470865",
  abstract =     "A spin system is a framework in which the vertices of
                 a graph are assigned spins from a finite set. The
                 interactions between neighbouring spins give rise to
                 weights, so a spin assignment can also be viewed as a
                 weighted graph homomorphism. The problem of \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "25",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Brandts:2021:CPS,
  author =       "Alex Brandts and Marcin Wrochna and Stanislav
                 Zivn{\'y}",
  title =        "The Complexity of Promise {SAT} on Non-{Boolean}
                 Domains",
  journal =      j-TOCT,
  volume =       "13",
  number =       "4",
  pages =        "26:1--26:20",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3470867",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Sep 2 08:31:31 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3470867",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "26",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Gunda:2021:PAC,
  author =       "Spoorthy Gunda and Pallavi Jain and Daniel Lokshtanov
                 and Saket Saurabh and Prafullkumar Tale",
  title =        "On the Parameterized Approximability of Contraction to
                 Classes of Chordal Graphs",
  journal =      j-TOCT,
  volume =       "13",
  number =       "4",
  pages =        "27:1--27:40",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3470869",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Thu Sep 2 08:31:31 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3470869",
  abstract =     "A graph operation that contracts edges is one of the
                 fundamental operations in the theory of graph minors.
                 Parameterized Complexity of editing to a family of
                 graphs by contracting k edges has recently gained
                 substantial scientific attention, and several
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "27",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Levi:2022:ERS,
  author =       "Amit Levi and Ramesh Krishnan S. Pallavoor and Sofya
                 Raskhodnikova and Nithin Varma",
  title =        "Erasure-Resilient Sublinear-Time Graph Algorithms",
  journal =      j-TOCT,
  volume =       "14",
  number =       "1",
  pages =        "1:1--1:??",
  month =        mar,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3488250",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Oct 29 07:49:57 MDT 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3488250",
  abstract =     "We investigate sublinear-time algorithms that take
                 partially erased graphs represented by adjacency lists
                 as input. Our algorithms make degree and neighbor
                 queries to the input graph and work with a specified
                 fraction of adversarial erasures in adjacency
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Lagerkvist:2022:CFG,
  author =       "Victor Lagerkvist and Magnus Wahlstr{\"o}m",
  title =        "The (Coarse) Fine-Grained Structure of {NP-Hard SAT}
                 and {CSP} Problems",
  journal =      j-TOCT,
  volume =       "14",
  number =       "1",
  pages =        "2:1--2:??",
  month =        mar,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3492336",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Oct 29 07:49:57 MDT 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3492336",
  abstract =     "We study the fine-grained complexity of NP-complete
                 satisfiability (SAT) problems and constraint
                 satisfaction problems (CSPs) in the context of the
                 strong exponential-time hypothesis (SETH), showing
                 non-trivial lower and upper bounds on the running time.
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "2",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Huang:2022:ADW,
  author =       "Xuangui Huang and Emanuele Viola",
  title =        "Approximate Degree, Weight, and Indistinguishability",
  journal =      j-TOCT,
  volume =       "14",
  number =       "1",
  pages =        "3:1--3:26",
  month =        mar,
  year =         "2022",
  DOI =          "https://doi.org/10.1145/3492338",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Oct 29 07:53:44 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://doi.org/10.1145/3492338",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Computation Theory (TOCT)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Ron:2022:ODF,
  author =       "Dana Ron and Asaf Rosin",
  title =        "Optimal Distribution-Free Sample-Based Testing of
                 Subsequence-Freeness with One-Sided Error",
  journal =      j-TOCT,
  volume =       "14",
  number =       "1",
  pages =        "4:1--4:??",
  month =        mar,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3512750",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Oct 29 07:49:57 MDT 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3512750",
  abstract =     "In this work, we study the problem of testing
                 subsequence-freeness. For a given subsequence (word) $ w
                 = w_1 \ldots w_k$, a sequence (text) $ T = t_1 \ldots
                 t_n$ is said to contain $w$ if there exist indices
                 $ 1 \leq i_1 < \cdots < i_k \leq n$ such that
                 $ t_{i_j} = w_j$ for every $ 1 \leq j \leq k$.
                 Otherwise, \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "4",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Magniez:2022:QDC,
  author =       "Fr{\'e}d{\'e}ric Magniez and Ashwin Nayak",
  title =        "Quantum Distributed Complexity of Set Disjointness on
                 a Line",
  journal =      j-TOCT,
  volume =       "14",
  number =       "1",
  pages =        "5:1--5:22",
  month =        mar,
  year =         "2022",
  DOI =          "https://doi.org/10.1145/3512751",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Oct 29 07:53:44 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://doi.org/10.1145/3512751",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Computation Theory (TOCT)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1190",
}

@Article{Volk:2022:PDB,
  author =       "Ben Lee Volk and Mrinal Kumar",
  title =        "A Polynomial Degree Bound on Equations for Non-rigid
                 Matrices and Small Linear Circuits",
  journal =      j-TOCT,
  volume =       "14",
  number =       "2",
  pages =        "6:1--6:??",
  month =        jun,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3543685",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Oct 29 07:49:57 MDT 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3543685",
  abstract =     "We show that there is an equation of degree at most
                 poly($n$) for the (Zariski closure of the) set of the
                 non-rigid matrices: That is, we show that for every
                 large enough field $F$, there is a non-zero
                 $n^2$-variate polynomial $ P \in F[x_{1,1}, \ldots,
                 x_{n,n}]$ of \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "6",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Singer:2022:PHI,
  author =       "Noah Singer and Madhu Sudan",
  title =        "Point-hyperplane Incidence Geometry and the Log-rank
                 Conjecture",
  journal =      j-TOCT,
  volume =       "14",
  number =       "2",
  pages =        "7:1--7:??",
  month =        jun,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3543684",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Oct 29 07:49:57 MDT 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3543684",
  abstract =     "We study the log-rank conjecture from the perspective
                 of point-hyperplane incidence geometry. We formulate
                 the following conjecture: Given a point set in R$^d$
                 that is covered by constant-sized sets of parallel
                 hyperplanes, there exists an affine subspace \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "7",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Datta:2022:PGI,
  author =       "Samir Datta and Nutan Limaye and Prajakta Nimbhorkar
                 and Thomas Thierauf and Fabian Wagner",
  title =        "Planar Graph Isomorphism Is in Log-Space",
  journal =      j-TOCT,
  volume =       "14",
  number =       "2",
  pages =        "8:1--8:??",
  month =        jun,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3543686",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Oct 29 07:58:18 MDT 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3543686",
  abstract =     "Graph Isomorphism is the prime example of a
                 computational problem with a wide difference between
                 the best-known lower and upper bounds on its
                 complexity. The gap between the known upper and lower
                 bounds continues to be very significant for many
                 subclasses \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "8",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Arvind:2022:PCF,
  author =       "Vikraman Arvind and Frank Fuhlbrueck and Johannes
                 Koebler and Sebastian Kuhnert and Gaurav Rattan",
  title =        "The Parameterized Complexity of Fixing Number and
                 Vertex Individualization in Graphs",
  journal =      j-TOCT,
  volume =       "14",
  number =       "2",
  pages =        "9:1--9:??",
  month =        jun,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3558077",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Oct 29 07:58:18 MDT 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3558077",
  abstract =     "In this paper we study the algorithmic complexity of
                 the following problems: Given a vertex-colored graph
                 $ X = (V, E, c)$, compute a minimum cardinality set of
                 vertices $ S \subseteq V$ such that no nontrivial
                 automorphism of $X$ fixes all vertices in $S$. A closely
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "9",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Galanis:2022:ICH,
  author =       "Andreas Galanis and Heng Guo and Jiaheng Wang",
  title =        "Inapproximability of Counting Hypergraph Colourings",
  journal =      j-TOCT,
  volume =       "14",
  number =       "3--4",
  pages =        "10:1--10:??",
  month =        dec,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3558554",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 11 08:59:53 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3558554",
  abstract =     "Recent developments in approximate counting have made
                 startling progress in developing fast algorithmic
                 methods for approximating the number of \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "10",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Bartholdi:2022:GAH,
  author =       "Laurent Bartholdi and Michael Figelius and Markus
                 Lohrey and Armin Wei{\ss}",
  title =        "Groups with {ALOGTIME}-hard Word Problems and
                 {PSPACE}-complete Compressed Word Problems",
  journal =      j-TOCT,
  volume =       "14",
  number =       "3--4",
  pages =        "11:1--11:??",
  month =        dec,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3569708",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 11 08:59:53 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3569708",
  abstract =     "We give lower bounds on the complexity of the word
                 problem for a large class of non-solvable infinite
                 groups that we call strongly efficiently non-solvable
                 groups. \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "11",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Nakajima:2022:LOC,
  author =       "Tamio-Vesa Nakajima and Stanislav {\v{Z}}ivn{\'y}",
  title =        "Linearly Ordered Colourings of Hypergraphs",
  journal =      j-TOCT,
  volume =       "14",
  number =       "3--4",
  pages =        "12:1--12:??",
  month =        dec,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3570909",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 11 08:59:53 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3570909",
  abstract =     "A linearly ordered (LO) k -colouring of an r -uniform
                 hypergraph assigns an integer from {1, \ldots{}, k} to
                 every vertex so that, in every edge, the (multi)set of
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "12",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Fomin:2022:AMT,
  author =       "Fedor V. Fomin and Petr A. Golovach and Giannos
                 Stamoulis and Dimitrios M. Thilikos",
  title =        "An Algorithmic Meta-Theorem for Graph Modification to
                 Planarity and {FOL}",
  journal =      j-TOCT,
  volume =       "14",
  number =       "3--4",
  pages =        "13:1--13:??",
  month =        dec,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3571278",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Mar 11 08:59:53 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3571278",
  abstract =     "In general, a graph modification problem is defined by
                 a graph modification operation $\boxtimes$ and a target
                 graph property $P$. Typically, the modification operation
                 $\boxtimes$ may be \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "13",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Chatterjee:2023:CFH,
  author =       "Prerona Chatterjee and Ramprasad Saptharishi",
  title =        "Constructing Faithful Homomorphisms over Fields of
                 Finite Characteristic",
  journal =      j-TOCT,
  volume =       "15",
  number =       "1--2",
  pages =        "1:1--1:??",
  month =        jun,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3580351",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jul 3 08:31:22 MDT 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3580351",
  abstract =     "We study the question of algebraic rank or
                 transcendence degree preserving homomorphisms over
                 finite fields. This concept was first introduced by
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Folwarczny:2023:PMF,
  author =       "Luk{\'a}s Folwarczn{\'y}",
  title =        "On Protocols for Monotone Feasible Interpolation",
  journal =      j-TOCT,
  volume =       "15",
  number =       "1--2",
  pages =        "2:1--2:??",
  month =        jun,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3583754",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jul 3 08:31:22 MDT 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3583754",
  abstract =     "Dag-like communication protocols, a generalization of
                 the classical tree-like communication protocols, are
                 useful objects in the realm of proof complexity
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "2",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Hamoudi:2023:QTS,
  author =       "Yassine Hamoudi and Fr{\'e}d{\'e}ric Magniez",
  title =        "Quantum Time-Space Tradeoff for Finding Multiple
                 Collision Pairs",
  journal =      j-TOCT,
  volume =       "15",
  number =       "1--2",
  pages =        "3:1--3:??",
  month =        jun,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3589986",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Mon Jul 3 08:31:22 MDT 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3589986",
  abstract =     "We study the problem of finding K collision pairs in a
                 random function f : [N] -{$>$} [N] by using a quantum
                 computer. We prove that the number of queries to the
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "3",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Feldmann:2023:CLF,
  author =       "Andreas Emil Feldmann and D{\'a}niel Marx",
  title =        "The Complexity Landscape of Fixed-Parameter Directed
                 {Steiner} Network Problems",
  journal =      j-TOCT,
  volume =       "15",
  number =       "3--4",
  pages =        "4:1--4:??",
  month =        dec,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3580376",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Dec 23 05:36:40 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3580376",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "4",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Cote:2023:CBP,
  author =       "Hugo C{\^o}t{\'e} and Pierre McKenzie",
  title =        "Catalytic Branching Programs from Groups and General
                 Protocols",
  journal =      j-TOCT,
  volume =       "15",
  number =       "3--4",
  pages =        "5:1--5:??",
  month =        dec,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3583085",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Dec 23 05:36:40 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3583085",
  abstract =     "CCCatalytic branching programs (catalytic bps) compute
                 the same n -bit Boolean function f at multiple entry
                 points that need to be remembered at the exit
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "5",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Zehavi:2023:FCM,
  author =       "Meirav Zehavi",
  title =        "Forgetfulness Can Make You Faster: an {$ O*(8.097^k)
                 $}-time Algorithm for Weighted $3$-set $k$-packing",
  journal =      j-TOCT,
  volume =       "15",
  number =       "3--4",
  pages =        "6:1--6:??",
  month =        dec,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3599722",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Dec 23 05:36:40 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3599722",
  abstract =     "In this article, we study the classic Weighted 3-Set k
                 -Packing problem: given a universe U, a family S S
                 {\mathcal {S}} of subsets of size 3 of U, a weight
                 function \(w : \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "6",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Bandyapadhyay:2023:PCF,
  author =       "Sayan Bandyapadhyay and Fedor V. Fomin and Petr A.
                 Golovach and Kirill Simonov",
  title =        "Parameterized Complexity of Feature Selection for
                 Categorical Data Clustering",
  journal =      j-TOCT,
  volume =       "15",
  number =       "3--4",
  pages =        "7:1--7:??",
  month =        dec,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3604797",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Dec 23 05:36:40 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3604797",
  abstract =     "We develop new algorithmic methods with provable
                 guarantees for feature selection in regard to
                 categorical data clustering. While feature selection is
                 one of the \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "7",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Chen:2023:SLB,
  author =       "Hubie Chen and Bart M. P. Jansen and Karolina Okrasa
                 and Astrid Pieterse and Pawe{\l} Rz{\k{a}}{\.z}ewski",
  title =        "Sparsification Lower Bounds for List {$H$}-Coloring",
  journal =      j-TOCT,
  volume =       "15",
  number =       "3--4",
  pages =        "8:1--8:??",
  month =        dec,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3612938",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Sat Dec 23 05:36:40 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3612938",
  abstract =     "We investigate the List H -Coloring problem, the
                 generalization of graph coloring that asks whether an
                 input graph G admits a homomorphism to the undirected
                 graph H",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "8",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Montanaro:2024:QCC,
  author =       "Ashley Montanaro and Changpeng Shao",
  title =        "Quantum Communication Complexity of Linear
                 Regression",
  journal =      j-TOCT,
  volume =       "16",
  number =       "1",
  pages =        "1:1--1:??",
  month =        mar,
  year =         "2024",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3625225",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Aug 20 08:45:09 MDT 2024",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3625225",
  abstract =     "Quantum computers may achieve speedups over their
                 classical counterparts for solving linear algebra
                 problems. However, in some cases, such as for low-rank
                 matrices, dequantized algorithms demonstrate that there
                 cannot be an exponential quantum speedup. In \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "1",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Grochow:2024:GIS,
  author =       "Joshua A. Grochow and Youming Qiao",
  title =        "On $p$-Group Isomorphism: Search-to-Decision,
                 Counting-to-Decision, and Nilpotency Class Reductions
                 via Tensors",
  journal =      j-TOCT,
  volume =       "16",
  number =       "1",
  pages =        "2:1--2:??",
  month =        mar,
  year =         "2024",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3625308",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Aug 20 08:45:09 MDT 2024",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3625308",
  abstract =     "In this article, we study some classical
                 complexity-theoretic questions regarding Group
                 Isomorphism (GpI). We focus on p -groups (groups of
                 prime power order) with odd p, which are believed to be
                 a bottleneck case for GpI, and work in the model of
                 matrix \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "2",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Kurpisz:2024:TSS,
  author =       "Adam Kurpisz and Samuli Lepp{\"a}nen and Monaldo
                 Mastrolilli",
  title =        "Tight Sum-of-squares Lower Bounds for Binary
                 Polynomial Optimization Problems",
  journal =      j-TOCT,
  volume =       "16",
  number =       "1",
  pages =        "3:1--3:??",
  month =        mar,
  year =         "2024",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3626106",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Aug 20 08:45:09 MDT 2024",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3626106",
  abstract =     "For binary polynomial optimization problems of degree
                 $2d$ with $n$ variables, Sakaue, Takeda, Kim, and Ito
                 [33] proved that the $ \lceil \frac {n + 2d - 1}{2}
                 \rceil $-th semidefinite (SDP) relaxation in the SoS/Lasserre
                 hierarchy of SDP relaxations provides the \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "3",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Jansen:2024:OPT,
  author =       "Bart M. P. Jansen and Micha{\l} W{\l}odarczyk",
  title =        "Optimal Polynomial-Time Compression for {Boolean} Max
                 {CSP}",
  journal =      j-TOCT,
  volume =       "16",
  number =       "1",
  pages =        "4:1--4:??",
  month =        mar,
  year =         "2024",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3624704",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Aug 20 08:45:09 MDT 2024",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3624704",
  abstract =     "In the Boolean maximum constraint satisfaction
                 problem, Max CSP($\Gamma$), one is given a collection of
                 weighted applications of constraints from a finite
                 constraint language $\Gamma$, over a common set of
                 variables, and the goal is to assign Boolean values to
                 the \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "4",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Cai:2024:BDN,
  author =       "Jin-Yi Cai and Daniel P. Szabo",
  title =        "Bounded Degree Nonnegative Counting {CSP}",
  journal =      j-TOCT,
  volume =       "16",
  number =       "2",
  pages =        "5:1--5:??",
  month =        jun,
  year =         "2024",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3632184",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Aug 20 08:45:09 MDT 2024",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3632184",
  abstract =     "Constraint satisfaction problems (CSP) encompass an
                 enormous variety of computational problems. In
                 particular, all partition functions from statistical
                 physics, such as spin systems, are special cases of
                 counting CSP (\#CSP). We prove a complete complexity
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "5",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Beyersdorff:2024:HQM,
  author =       "Olaf Beyersdorff and Joshua Blinkhorn and Meena
                 Mahajan and Tom{\'a}s Peitl and Gaurav Sood",
  title =        "Hard {QBFs} for Merge Resolution",
  journal =      j-TOCT,
  volume =       "16",
  number =       "2",
  pages =        "6:1--6:??",
  month =        jun,
  year =         "2024",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3638263",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Aug 20 08:45:09 MDT 2024",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3638263",
  abstract =     "We prove the first genuine quantified Boolean formula
                 (QBF) proof size lower bounds for the proof system
                 Merge Resolution (MRes), a refutational proof system
                 for prenex QBFs with a CNF matrix. Unlike most QBF
                 resolution systems in the literature, proofs \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "6",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Doron:2024:SSS,
  author =       "Dean Doron and Jack Murtagh and Salil Vadhan and David
                 Zuckerman",
  title =        "Small-Space Spectral Sparsification via
                 Bounded-Independence Sampling",
  journal =      j-TOCT,
  volume =       "16",
  number =       "2",
  pages =        "7:1--7:??",
  month =        jun,
  year =         "2024",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3637034",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Aug 20 08:45:09 MDT 2024",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3637034",
  abstract =     "We give a deterministic, nearly logarithmic-space
                 algorithm for mild spectral sparsification of
                 undirected graphs. Given a weighted, undirected graph G
                 on n vertices described by a binary string of length N,
                 an integer k {$<$}= log n, and an error parameter
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "7",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Majewski:2024:MWI,
  author =       "Konrad Majewski and Tom{\'a}s Masar{\'\i}k and Jana
                 Masar{\'\i}kov{\'a} and Karolina Okrasa and Marcin
                 Pilipczuk and Pawe{\l} Rzazewski and Marek
                 Soko{\l}owski",
  title =        "Max Weight Independent Set in Graphs with No Long
                 Claws: an Analog of the {Gy{\'a}rf{\'a}s}' Path
                 Argument",
  journal =      j-TOCT,
  volume =       "16",
  number =       "2",
  pages =        "8:1--8:??",
  month =        jun,
  year =         "2024",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3636422",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Aug 20 08:45:09 MDT 2024",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3636422",
  abstract =     "We revisit recent developments for the Maximum Weight
                 Independent Set problem in graphs excluding a
                 subdivided claw S$_{t, t, t}$ as an induced subgraph
                 and provide a subexponential-time algorithm with
                 improved running time $ 2^{\mathcal {O}(\sqrt {nt} \log
                 n)}$ \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "8",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Mande:2024:PDT,
  author =       "Nikhil Shekhar Mande and Swagato Sanyal",
  title =        "On parity decision trees for {Fourier}-sparse
                 {Boolean} functions",
  journal =      j-TOCT,
  volume =       "16",
  number =       "2",
  pages =        "9:1--9:??",
  month =        jun,
  year =         "2024",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3647629",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Aug 20 08:45:09 MDT 2024",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3647629",
  abstract =     "We study parity decision trees for Boolean functions.
                 The motivation of our study is the log-rank conjecture
                 for XOR functions and its connection to Fourier
                 analysis and parity decision tree complexity. Our
                 contributions are as follows: Let \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "9",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Bodirsky:2024:CDS,
  author =       "Manuel Bodirsky and Bertalan Bodor",
  title =        "A Complexity Dichotomy in Spatial Reasoning via
                 {Ramsey} Theory",
  journal =      j-TOCT,
  volume =       "16",
  number =       "2",
  pages =        "10:1--10:??",
  month =        jun,
  year =         "2024",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3649445",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Aug 20 08:45:09 MDT 2024",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3649445",
  abstract =     "Constraint satisfaction problems (CSPs) for
                 first-order reducts of finitely bounded homogeneous
                 structures form a large class of computational problems
                 that might exhibit a complexity dichotomy, P versus
                 NP-complete. A powerful method to obtain polynomial-.
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "10",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Rosenkrantz:2024:SDS,
  author =       "Daniel J. Rosenkrantz and Madhav V. Marathe and S. S.
                 Ravi and Richard E. Stearns",
  title =        "Synchronous Dynamical Systems on Directed Acyclic
                 Graphs: Complexity and Algorithms",
  journal =      j-TOCT,
  volume =       "16",
  number =       "2",
  pages =        "11:1--11:??",
  month =        jun,
  year =         "2024",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3653723",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Aug 20 08:45:09 MDT 2024",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3653723",
  abstract =     "Discrete dynamical systems serve as useful formal
                 models to study diffusion phenomena in social networks.
                 Several recent articles have studied the algorithmic
                 and complexity aspects of some decision problems on
                 synchronous Boolean networks, which are \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "11",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}

@Article{Bhattacharya:2024:FCS,
  author =       "Anup Bhattacharya and Arijit Bishnu and Arijit Ghosh
                 and Gopinath Mishra",
  title =        "Faster Counting and Sampling Algorithms using Colorful
                 Decision Oracle",
  journal =      j-TOCT,
  volume =       "16",
  number =       "2",
  pages =        "12:1--12:??",
  month =        jun,
  year =         "2024",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3657605",
  ISSN =         "1942-3454 (print), 1942-3462 (electronic)",
  ISSN-L =       "1942-3454",
  bibdate =      "Tue Aug 20 08:45:09 MDT 2024",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/toct.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3657605",
  abstract =     "In this work, we consider d -Hyperedge Estimation and
                 d -Hyperedge Sample problems that deal with estimation
                 and uniform sampling of hyperedges in a hypergraph H( U
                 (H), F(H) in the query complexity framework, where U
                 (H) denotes the set of vertices and F(H) \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Comput. Theory",
  articleno =    "12",
  fjournal =     "ACM Transactions on Computation Theory",
  journal-URL =  "https://dl.acm.org/loi/toct",
}