Merge pull request #7607 from bonzini/speedup

Various speedups from profiling QEMU's meson.build
Authored by Jussi Pakkanen 4 years ago, committed by GitHub
commit e9a71ebf60
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
Files changed:

  1. mesonbuild/arglist.py (13 lines changed)
  2. mesonbuild/backend/ninjabackend.py (37 lines changed)
  3. mesonbuild/build.py (3 lines changed)
  4. mesonbuild/compilers/mixins/clike.py (12 lines changed)
  5. mesonbuild/mesonlib.py (5 lines changed)

mesonbuild/arglist.py

@@ -119,7 +119,7 @@ class CompilerArgs(collections.abc.MutableSequence):
     # This correctly deduplicates the entries after _can_dedup definition
     # Note: This function is designed to work without delete operations, as deletions are worsening the performance a lot.
     def flush_pre_post(self) -> None:
-        pre_flush = collections.deque() # type: T.Deque[str]
+        new = list() # type: T.List[str]
         pre_flush_set = set() # type: T.Set[str]
         post_flush = collections.deque() # type: T.Deque[str]
         post_flush_set = set() # type: T.Set[str]
@@ -128,7 +128,7 @@ class CompilerArgs(collections.abc.MutableSequence):
         for a in self.pre:
             dedup = self._can_dedup(a)
             if a not in pre_flush_set:
-                pre_flush.append(a)
+                new.append(a)
                 if dedup is Dedup.OVERRIDEN:
                     pre_flush_set.add(a)
         for a in reversed(self.post):
@@ -140,12 +140,15 @@ class CompilerArgs(collections.abc.MutableSequence):
         #pre and post will overwrite every element that is in the container
         #only copy over args that are in _container but not in the post flush or pre flush set
-        for a in self._container:
-            if a not in post_flush_set and a not in pre_flush_set:
-                pre_flush.append(a)
+        if pre_flush_set or post_flush_set:
+            for a in self._container:
+                if a not in post_flush_set and a not in pre_flush_set:
+                    new.append(a)
+        else:
+            new.extend(self._container)
+        new.extend(post_flush)
 
-        self._container = list(pre_flush) + list(post_flush)
+        self._container = new
         self.pre.clear()
         self.post.clear()
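
The pattern behind this hunk, as a minimal standalone sketch (the names and the overridden-set handling here are illustrative, not Meson's CompilerArgs API): build the merged argument list with appends only, and skip the per-element membership test entirely when there is nothing to deduplicate.

    import typing as T

    def merge_args(pre: T.List[str], container: T.List[str], post: T.List[str],
                   overridden: T.Set[str]) -> T.List[str]:
        new = []  # type: T.List[str]
        new.extend(pre)
        if overridden:
            # Only scan the container when a later pre/post entry can override something.
            new.extend(a for a in container if a not in overridden)
        else:
            # Fast path: nothing to deduplicate, copy wholesale.
            new.extend(container)
        new.extend(post)
        return new

    print(merge_args(['-O2'], ['-Ifoo', '-DBAR'], ['-lm'], {'-DBAR'}))  # ['-O2', '-Ifoo', '-lm']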

mesonbuild/backend/ninjabackend.py

@@ -114,13 +114,17 @@ rsp_threshold = get_rsp_threshold()
 # from, etc.), so it must not be shell quoted.
 raw_names = {'DEPFILE_UNQUOTED', 'DESC', 'pool', 'description', 'targetdep'}
 
+NINJA_QUOTE_BUILD_PAT = re.compile(r"[$ :\n]")
+NINJA_QUOTE_VAR_PAT = re.compile(r"[$ \n]")
+
 def ninja_quote(text, is_build_line=False):
     if is_build_line:
-        qcs = ('$', ' ', ':')
+        quote_re = NINJA_QUOTE_BUILD_PAT
     else:
-        qcs = ('$', ' ')
-    for char in qcs:
-        text = text.replace(char, '$' + char)
+        quote_re = NINJA_QUOTE_VAR_PAT
+    # Fast path for when no quoting is necessary
+    if not quote_re.search(text):
+        return text
     if '\n' in text:
         errmsg = '''Ninja does not support newlines in rules. The content was:
@@ -128,7 +132,7 @@ def ninja_quote(text, is_build_line=False):
 Please report this error with a test case to the Meson bug tracker.'''.format(text)
         raise MesonException(errmsg)
-    return text
+    return quote_re.sub(r'$\g<0>', text)
 
 @unique
 class Quoting(Enum):
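
The quoting change in a standalone sketch, assuming the same character classes as above (toy function, not the Meson one): precompile the pattern once, bail out early when search() finds nothing to escape, and otherwise do a single sub() that prefixes every special character with '$'.

    import re

    QUOTE_PAT = re.compile(r"[$ :\n]")

    def quote(text):
        if not QUOTE_PAT.search(text):
            # Fast path: most strings contain nothing that needs escaping.
            return text
        # '\g<0>' is the whole match, so ' ' becomes '$ ' and ':' becomes '$:'.
        return QUOTE_PAT.sub(r'$\g<0>', text)

    assert quote('plain') == 'plain'
    assert quote('a b:c') == 'a$ b$:c'
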
@@ -261,18 +265,20 @@ class NinjaRule:
         # expand variables in command
         command = ' '.join([self._quoter(x) for x in self.command + self.args])
-        expanded_command = ''
-        for m in re.finditer(r'(\${\w*})|(\$\w*)|([^$]*)', command):
-            chunk = m.group()
-            if chunk.startswith('$'):
-                chunk = chunk[1:]
-                chunk = re.sub(r'{(.*)}', r'\1', chunk)
-                chunk = ninja_vars.get(chunk, []) # undefined ninja variables are empty
-                chunk = ' '.join(chunk)
-            expanded_command += chunk
+        estimate = len(command)
+        for m in re.finditer(r'(\${\w*}|\$\w*)?[^$]*', command):
+            if m.start(1) != -1:
+                estimate -= m.end(1) - m.start(1) + 1
+                chunk = m.group(1)
+                if chunk[1] == '{':
+                    chunk = chunk[2:-1]
+                else:
+                    chunk = chunk[1:]
+                chunk = ninja_vars.get(chunk, []) # undefined ninja variables are empty
+                estimate += len(' '.join(chunk))
 
         # determine command length
-        return len(expanded_command)
+        return estimate
 
 class NinjaBuildElement:
     def __init__(self, all_outputs, outfilenames, rulename, infilenames, implicit_outs=None):
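
What the new length estimate does, sketched as a standalone toy (plain string variables instead of Ninja's variable lists, and without the response-file context): start from the literal command length, subtract each $var / ${var} token, and add the length of its substitution, so no expanded string is ever materialized.

    import re

    def estimate_expanded_len(command, variables):
        estimate = len(command)
        for m in re.finditer(r'(\${\w*}|\$\w*)?[^$]*', command):
            if m.start(1) != -1:                   # a variable reference matched
                estimate -= m.end(1) - m.start(1)  # drop the token itself
                token = m.group(1)
                name = token[2:-1] if token.startswith('${') else token[1:]
                estimate += len(variables.get(name, ''))  # undefined variables count as empty
        return estimate

    cmd = 'cc $ARGS -o ${out}'
    assert estimate_expanded_len(cmd, {'ARGS': '-O2 -g', 'out': 'prog'}) == len('cc -O2 -g -o prog')
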
@@ -380,10 +386,9 @@ class NinjaBuildElement:
             newelems = []
             for i in elems:
                 if not should_quote or i == '&&': # Hackety hack hack
-                    quoter = ninja_quote
+                    newelems.append(ninja_quote(i))
                 else:
-                    quoter = lambda x: ninja_quote(qf(x))
-                newelems.append(quoter(i))
+                    newelems.append(ninja_quote(qf(i)))
             line += ' '.join(newelems)
             line += '\n'
             outfile.write(line)
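
The last hunk is a micro-optimization: calling the right function directly in each branch avoids creating a fresh lambda object (and an extra level of indirection) for every element written. A rough illustration of the same refactor, with shlex.quote standing in for the backend's quote function (an assumption for the sketch, not Meson code):

    import shlex

    def join_quoted(elems, should_quote):
        out = []
        for e in elems:
            if not should_quote or e == '&&':
                out.append(e)                # pass shell control tokens through untouched
            else:
                out.append(shlex.quote(e))   # quote ordinary arguments
        return ' '.join(out)

    print(join_quoted(['echo', 'a b', '&&', 'true'], True))  # echo 'a b' && true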

mesonbuild/build.py

@@ -774,6 +774,7 @@ class BuildTarget(Target):
     def extract_objects(self, srclist):
         obj_src = []
+        sources_set = set(self.sources)
         for src in srclist:
             if isinstance(src, str):
                 src = File(False, self.subdir, src)
@@ -782,7 +783,7 @@ class BuildTarget(Target):
             else:
                 raise MesonException('Object extraction arguments must be strings or Files.')
             # FIXME: It could be a generated source
-            if src not in self.sources:
+            if src not in sources_set:
                 raise MesonException('Tried to extract unknown source {}.'.format(src))
             obj_src.append(src)
         return ExtractedObjects(self, obj_src)
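
The point of sources_set: membership tests against a list are O(n), so checking every requested source against every target source is quadratic, while a set built once gives near-constant lookups (this also relies on File being cheaply hashable, which the mesonlib.py change below improves). A quick way to see the difference, using plain strings rather than File objects:

    import timeit

    sources = ['src_{}.c'.format(i) for i in range(5000)]
    sources_set = set(sources)

    print(timeit.timeit(lambda: 'src_4999.c' in sources, number=1000))      # linear scan per lookup
    print(timeit.timeit(lambda: 'src_4999.c' in sources_set, number=1000))  # hash lookup per lookup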

mesonbuild/compilers/mixins/clike.py

@@ -40,7 +40,9 @@ from .visualstudio import VisualStudioLikeCompiler
 if T.TYPE_CHECKING:
     from ...environment import Environment
 
-SOREGEX = re.compile(r'.*\.so(\.[0-9]+)?(\.[0-9]+)?(\.[0-9]+)?$')
+GROUP_FLAGS = re.compile(r'''\.so (?:\.[0-9]+)? (?:\.[0-9]+)? (?:\.[0-9]+)?$ |
+                             ^(?:-Wl,)?-l |
+                             \.a$''', re.X)
 
 class CLikeCompilerArgs(arglist.CompilerArgs):
     prepend_prefixes = ('-I', '-L')
@@ -69,8 +71,7 @@ class CLikeCompilerArgs(arglist.CompilerArgs):
         group_start = -1
         group_end = -1
         for i, each in enumerate(new):
-            if not each.startswith(('-Wl,-l', '-l')) and not each.endswith('.a') and \
-               not SOREGEX.match(each):
+            if not GROUP_FLAGS.search(each):
                 continue
             group_end = i
             if group_start < 0:
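
The consolidated pattern means each argument is scanned once instead of running three separate startswith/endswith/match tests. A self-contained check of the same verbose regex (copied from the hunk above, but exercised on toy arguments rather than inside Meson):

    import re

    GROUP_FLAGS = re.compile(r'''\.so (?:\.[0-9]+)? (?:\.[0-9]+)? (?:\.[0-9]+)?$ |
                                 ^(?:-Wl,)?-l |
                                 \.a$''', re.X)

    # re.X ignores the embedded whitespace, so this matches -l flags,
    # -Wl,-l flags, static archives, and versioned shared libraries.
    for arg in ('-lm', '-Wl,-lfoo', 'libfoo.a', 'libbar.so.1.2.3'):
        assert GROUP_FLAGS.search(arg)
    assert not GROUP_FLAGS.search('-O2')
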
@@ -85,6 +86,9 @@ class CLikeCompilerArgs(arglist.CompilerArgs):
         default_dirs = self.compiler.get_default_include_dirs()
         bad_idx_list = [] # type: T.List[int]
         for i, each in enumerate(new):
+            if not each.startswith('-isystem'):
+                continue
+
             # Remove the -isystem and the path if the path is a default path
             if (each == '-isystem' and
                     i < (len(new) - 1) and
@@ -92,7 +96,7 @@ class CLikeCompilerArgs(arglist.CompilerArgs):
                 bad_idx_list += [i, i + 1]
             elif each.startswith('-isystem=') and each[9:] in default_dirs:
                 bad_idx_list += [i]
-            elif each.startswith('-isystem') and each[8:] in default_dirs:
+            elif each[8:] in default_dirs:
                 bad_idx_list += [i]
         for i in reversed(bad_idx_list):
             new.pop(i)
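
The hoisted startswith('-isystem') test skips the detailed checks for the vast majority of arguments and makes the final elif's prefix test redundant. The same loop shape on a toy argument list (not Meson's include handling; default_dirs is made up here):

    default_dirs = {'/usr/include', '/usr/local/include'}
    args = ['-O2', '-isystem', '/usr/include', '-isystem/opt/include', '-Wall']

    bad_idx = []
    for i, each in enumerate(args):
        if not each.startswith('-isystem'):
            continue                                  # fast skip for most arguments
        if each == '-isystem' and i + 1 < len(args) and args[i + 1] in default_dirs:
            bad_idx += [i, i + 1]                     # drop the flag and its separate path
        elif each.startswith('-isystem=') and each[9:] in default_dirs:
            bad_idx.append(i)
        elif each[8:] in default_dirs:                # '-isystem<path>' form
            bad_idx.append(i)

    for i in reversed(bad_idx):
        args.pop(i)
    print(args)  # ['-O2', '-isystem/opt/include', '-Wall']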

mesonbuild/mesonlib.py

@@ -242,6 +242,7 @@ class File:
         self.is_built = is_built
         self.subdir = subdir
         self.fname = fname
+        self.hash = hash((is_built, subdir, fname))
 
     def __str__(self) -> str:
         return self.relative_name()
@@ -291,10 +292,12 @@ class File:
     def __eq__(self, other) -> bool:
         if not isinstance(other, File):
             return NotImplemented
+        if self.hash != other.hash:
+            return False
         return (self.fname, self.subdir, self.is_built) == (other.fname, other.subdir, other.is_built)
 
     def __hash__(self) -> int:
-        return hash((self.fname, self.subdir, self.is_built))
+        return self.hash
 
     @lru_cache(maxsize=None)
     def relative_name(self) -> str:
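
The File change in miniature: hash the identity tuple once at construction, reuse it from __hash__, and use it as a cheap early-out in __eq__, since two objects with different hashes can never compare equal. A toy class showing the same shape (not mesonlib.File):

    class Key:
        def __init__(self, is_built, subdir, fname):
            self.is_built = is_built
            self.subdir = subdir
            self.fname = fname
            self.hash = hash((is_built, subdir, fname))

        def __hash__(self):
            return self.hash

        def __eq__(self, other):
            if not isinstance(other, Key):
                return NotImplemented
            if self.hash != other.hash:   # different hash: cannot be equal
                return False
            return (self.fname, self.subdir, self.is_built) == \
                   (other.fname, other.subdir, other.is_built)

    a = Key(False, 'src', 'main.c')
    assert a == Key(False, 'src', 'main.c')
    assert a != Key(True, 'src', 'main.c')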
