3 * Implementation of GiNaC's indexed expressions. */
6 * GiNaC Copyright (C) 1999-2002 Johannes Gutenberg University Mainz, Germany
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
32 #include "relational.h"
// Hook 'indexed' into GiNaC's registered-class (RTTI/archiving) machinery,
// declaring exprseq as its parent class.
41 GINAC_IMPLEMENT_REGISTERED_CLASS(indexed, exprseq)
44 // default ctor, dtor, copy ctor, assignment operator and helpers
// Default ctor: an indexed object with no base expression and a trivial
// (no-op) symmetry tree.
47 indexed::indexed() : symtree(sy_none())
49 tinfo_key = TINFO_indexed;
// Copy helper used by the copy ctor/assignment machinery: copy the base
// exprseq contents first, then the symmetry tree.
52 void indexed::copy(const indexed & other)
54 inherited::copy(other);
55 symtree = other.symtree;
// Destructor supplied by GiNaC's DEFAULT_DESTROY convenience macro.
58 DEFAULT_DESTROY(indexed)
// Ctors taking a base expression plus 0-4 explicit indices; the symmetry
// defaults to "none". Each one only needs to set the type-info key, the
// inherited exprseq ctor stores base and indices in 'seq' (seq[0] = base).
64 indexed::indexed(const ex & b) : inherited(b), symtree(sy_none())
66 tinfo_key = TINFO_indexed;
70 indexed::indexed(const ex & b, const ex & i1) : inherited(b, i1), symtree(sy_none())
72 tinfo_key = TINFO_indexed;
76 indexed::indexed(const ex & b, const ex & i1, const ex & i2) : inherited(b, i1, i2), symtree(sy_none())
78 tinfo_key = TINFO_indexed;
82 indexed::indexed(const ex & b, const ex & i1, const ex & i2, const ex & i3) : inherited(b, i1, i2, i3), symtree(sy_none())
84 tinfo_key = TINFO_indexed;
88 indexed::indexed(const ex & b, const ex & i1, const ex & i2, const ex & i3, const ex & i4) : inherited(b, i1, i2, i3, i4), symtree(sy_none())
90 tinfo_key = TINFO_indexed;
// Same as above but with an explicit symmetry specification (only makes
// sense for two or more indices).
94 indexed::indexed(const ex & b, const symmetry & symm, const ex & i1, const ex & i2) : inherited(b, i1, i2), symtree(symm)
96 tinfo_key = TINFO_indexed;
100 indexed::indexed(const ex & b, const symmetry & symm, const ex & i1, const ex & i2, const ex & i3) : inherited(b, i1, i2, i3), symtree(symm)
102 tinfo_key = TINFO_indexed;
106 indexed::indexed(const ex & b, const symmetry & symm, const ex & i1, const ex & i2, const ex & i3, const ex & i4) : inherited(b, i1, i2, i3, i4), symtree(symm)
108 tinfo_key = TINFO_indexed;
// Ctors taking the indices as a vector: base goes into seq[0], then the
// whole index vector is appended.
112 indexed::indexed(const ex & b, const exvector & v) : inherited(b), symtree(sy_none())
114 seq.insert(seq.end(), v.begin(), v.end());
115 tinfo_key = TINFO_indexed;
119 indexed::indexed(const ex & b, const symmetry & symm, const exvector & v) : inherited(b), symtree(symm)
121 seq.insert(seq.end(), v.begin(), v.end());
122 tinfo_key = TINFO_indexed;
// Internal ctors: the exprseq/exvector already contains base + indices.
// The (vp) variant takes ownership of the vector, see exprseq.
126 indexed::indexed(const symmetry & symm, const exprseq & es) : inherited(es), symtree(symm)
128 tinfo_key = TINFO_indexed;
131 indexed::indexed(const symmetry & symm, const exvector & v, bool discardable) : inherited(v, discardable), symtree(symm)
133 tinfo_key = TINFO_indexed;
136 indexed::indexed(const symmetry & symm, exvector * vp) : inherited(vp), symtree(symm)
138 tinfo_key = TINFO_indexed;
// Unarchiving ctor: restore the symmetry tree from the archive, with a
// fallback for the pre-0.9.0 format that stored symmetry as an unsigned.
145 indexed::indexed(const archive_node &n, const lst &sym_lst) : inherited(n, sym_lst)
147 if (!n.find_ex("symmetry", symtree, sym_lst)) {
148 // GiNaC versions <= 0.9.0 had an unsigned "symmetry" property
150 n.find_unsigned("symmetry", symm);
// const_cast is needed because symtree is held as an ex; validate() checks
// the tree against the number of indices (seq.size() - 1).
162 const_cast<symmetry &>(ex_to<symmetry>(symtree)).validate(seq.size() - 1);
// Serialize: archive the base exprseq, then the symmetry tree.
166 void indexed::archive(archive_node &n) const
168 inherited::archive(n);
169 n.add_ex("symmetry", symtree);
// Unarchiving entry point supplied by the DEFAULT_UNARCHIVE macro.
172 DEFAULT_UNARCHIVE(indexed)
175 // functions overriding virtual functions from base classes
// Print the indexed object. Two modes are visible here: a debug tree dump
// (print_tree) and normal/LaTeX output with precedence-based parentheses.
178 void indexed::print(const print_context & c, unsigned level) const
180 GINAC_ASSERT(seq.size() > 0);
182 if (is_a<print_tree>(c)) {
// Tree dump: class name, hash/flags in hex, index count and symmetry,
// then the base expression and the indices, each one indent level deeper.
184 c.s << std::string(level, ' ') << class_name()
185 << std::hex << ", hash=0x" << hashvalue << ", flags=0x" << flags << std::dec
186 << ", " << seq.size()-1 << " indices"
187 << ", symmetry=" << symtree << std::endl;
188 unsigned delta_indent = static_cast<const print_tree &>(c).delta_indent;
189 seq[0].print(c, level + delta_indent);
190 printindices(c, level + delta_indent);
// Normal output: parenthesize the base if our precedence requires it;
// LaTeX uses "{(...)}" so the grouping survives in math mode.
194 bool is_tex = is_a<print_latex>(c);
195 const ex & base = seq[0];
197 if (precedence() <= level)
198 c.s << (is_tex ? "{(" : "(");
201 base.print(c, precedence());
204 printindices(c, level);
205 if (precedence() <= level)
206 c.s << (is_tex ? ")}" : ")");
// Property queries: every indexed object reports info_flags::indexed;
// has_indices is true only if there is at least one index besides the base.
210 bool indexed::info(unsigned inf) const
212 if (inf == info_flags::indexed) return true;
213 if (inf == info_flags::has_indices) return seq.size() > 1;
214 return inherited::info(inf);
// Predicate: true if the VALUE of index 'e' does NOT have property 'inf'.
// (std::binary_function/bind2nd are pre-C++11 idioms, consistent with the
// age of this code base.)
217 struct idx_is_not : public std::binary_function<ex, unsigned, bool> {
218 bool operator() (const ex & e, unsigned inf) const {
219 return !(ex_to<idx>(e).get_value().info(inf));
// True if every index value fulfills property 'inf' (i.e. no index fails
// the idx_is_not test). Note the scan starts at seq.begin() + 1: seq[0]
// is the base expression, not an index.
223 bool indexed::all_index_values_are(unsigned inf) const
225 // No indices? Then no property can be fulfilled
230 return find_if(seq.begin() + 1, seq.end(), bind2nd(idx_is_not(), inf)) == seq.end();
// Ordering between two indexed objects: defer to the exprseq comparison
// (base expression and indices); the symmetry tree does not participate.
233 int indexed::compare_same_type(const basic & other) const
235 GINAC_ASSERT(is_a<indexed>(other));
236 return inherited::compare_same_type(other);
// Automatic evaluation of an indexed object:
//  1. evaluate children (re-enters eval on the result),
//  2. pull a numeric factor out of a product base,
//  3. sort the indices into canonical order per the symmetry tree,
//  4. give the base object's class a chance for further evaluation.
239 ex indexed::eval(int level) const
241 // First evaluate children, then we will end up here again
243 return indexed(ex_to<symmetry>(symtree), evalchildren(level));
245 const ex &base = seq[0];
247 // If the base object is 0, the whole object is 0
251 // If the base object is a product, pull out the numeric factor
// GiNaC's mul keeps its numeric overall coefficient as the last operand,
// which is why op(nops()-1) is inspected here.
252 if (is_ex_exactly_of_type(base, mul) && is_ex_exactly_of_type(base.op(base.nops() - 1), numeric)) {
254 ex f = ex_to<numeric>(base.op(base.nops() - 1));
256 return f * thisexprseq(v);
259 // Canonicalize indices according to the symmetry properties
// seq.size() > 2 means at least two indices — only then can sorting matter.
260 if (seq.size() > 2) {
262 GINAC_ASSERT(is_exactly_a<symmetry>(symtree));
// canonicalize() returns a sign factor, or INT_MAX when nothing changed.
263 int sig = canonicalize(v.begin() + 1, ex_to<symmetry>(symtree));
264 if (sig != INT_MAX) {
265 // Something has changed while sorting indices, more evaluations later
// sig may be -1 (odd permutation of antisymmetric indices) or 0.
268 return ex(sig) * thisexprseq(v);
272 // Let the class of the base object perform additional evaluations
273 return ex_to<basic>(base).eval_indexed(*this);
// Virtual "rebuild myself" helpers: construct a new indexed object of the
// same (dynamic) type from a vector of base + indices, keeping the current
// symmetry tree. The pointer variant hands over ownership of the vector.
276 ex indexed::thisexprseq(const exvector & v) const
278 return indexed(ex_to<symmetry>(symtree), v);
281 ex indexed::thisexprseq(exvector * vp) const
283 return indexed(ex_to<symmetry>(symtree), vp);
// Expansion of an indexed object. With expand_indexed set, distribute the
// indices over a sum in the base: (a+b).i -> a.i + b.i.
286 ex indexed::expand(unsigned options) const
288 GINAC_ASSERT(seq.size() > 0);
290 if ((options & expand_options::expand_indexed) && is_ex_exactly_of_type(seq[0], add)) {
292 // expand_indexed expands (a+b).i -> a.i + b.i
293 const ex & base = seq[0];
// Build one indexed object per term of the sum and re-expand each (the
// term itself might be expandable further).
295 for (unsigned i=0; i<base.nops(); i++) {
298 sum += thisexprseq(s).expand();
303 return inherited::expand(options);
307 // virtual functions which can be overridden by derived classes
313 // non-virtual functions in this class
// Print the index list (everything after seq[0]). For LaTeX output,
// consecutive indices of the same variance are grouped into one sub- or
// superscript; other output modes print them one after another.
316 void indexed::printindices(const print_context & c, unsigned level) const
318 if (seq.size() > 1) {
320 exvector::const_iterator it=seq.begin() + 1, itend = seq.end();
322 if (is_a<print_latex>(c)) {
324 // TeX output: group by variance
326 bool covariant = true;
328 while (it != itend) {
// Non-varidx indices are treated as covariant (subscript) by default.
329 bool cur_covariant = (is_ex_of_type(*it, varidx) ? ex_to<varidx>(*it).is_covariant() : true);
330 if (first || cur_covariant != covariant) { // Variance changed
331 // The empty {} prevents indices from ending up on top of each other
334 covariant = cur_covariant;
// Non-LaTeX fallback: print each index in turn.
350 while (it != itend) {
358 /** Check whether all indices are of class idx and validate the symmetry
359 * tree. This function is used internally to make sure that all constructed
360 * indexed objects really carry indices and not some other classes. */
361 void indexed::validate(void) const
363 GINAC_ASSERT(seq.size() > 0);
364 exvector::const_iterator it = seq.begin() + 1, itend = seq.end();
365 while (it != itend) {
366 if (!is_ex_of_type(*it, idx))
367 throw(std::invalid_argument("indices of indexed object must be of type idx"));
// A zero symtree means "no symmetry specified" and is always acceptable;
// otherwise it must be a symmetry tree matching the number of indices.
371 if (!symtree.is_zero()) {
372 if (!is_ex_exactly_of_type(symtree, symmetry))
373 throw(std::invalid_argument("symmetry of indexed object must be of type symmetry"))


;
374 const_cast<symmetry &>(ex_to<symmetry>(symtree)).validate(seq.size() - 1);
378 /** Implementation of ex::diff() for an indexed object always returns 0.
381 ex indexed::derivative(const symbol & s) const
390 /** Check whether two sorted index vectors are consistent (i.e. equal). */
391 static bool indices_consistent(const exvector & v1, const exvector & v2)
393 // Number of indices must be the same
394 if (v1.size() != v2.size())
// Same size: require element-wise equality (inputs are assumed sorted).
397 return equal(v1.begin(), v1.end(), v2.begin(), ex_is_equal());
// Return all indices (the elements of seq after the base expression).
400 exvector indexed::get_indices(void) const
402 GINAC_ASSERT(seq.size() >= 1);
403 return exvector(seq.begin() + 1, seq.end());
// Return the dummy (contracted) indices appearing within this one object.
406 exvector indexed::get_dummy_indices(void) const
408 exvector free_indices, dummy_indices;
409 find_free_and_dummy(seq.begin() + 1, seq.end(), free_indices, dummy_indices);
410 return dummy_indices;
// Return the dummy indices formed BETWEEN this object and 'other': indices
// that are free in each object separately but pair up across the product.
413 exvector indexed::get_dummy_indices(const indexed & other) const
415 exvector indices = get_free_indices();
416 exvector other_indices = other.get_free_indices();
417 indices.insert(indices.end(), other_indices.begin(), other_indices.end());
418 exvector dummy_indices;
419 find_dummy_indices(indices, dummy_indices);
420 return dummy_indices;
// True if any of our indices forms a dummy pair with the given index 'i'.
423 bool indexed::has_dummy_index_for(const ex & i) const
425 exvector::const_iterator it = seq.begin() + 1, itend = seq.end();
426 while (it != itend) {
427 if (is_dummy_pair(*it, i))
// Return the free (uncontracted) indices of this object.
434 exvector indexed::get_free_indices(void) const
436 exvector free_indices, dummy_indices;
437 find_free_and_dummy(seq.begin() + 1, seq.end(), free_indices, dummy_indices);
// Free indices of a sum: every term must carry the same set of free
// indices, otherwise the expression is malformed.
441 exvector add::get_free_indices(void) const
443 exvector free_indices;
444 for (unsigned i=0; i<nops(); i++) {
446 free_indices = op(i).get_free_indices();
448 exvector free_indices_of_term = op(i).get_free_indices();
449 if (!indices_consistent(free_indices, free_indices_of_term))
450 throw (std::runtime_error("add::get_free_indices: inconsistent indices in sum"));
// Free indices of a (commutative) product: concatenate the factors' free
// indices, then drop those that pair up as dummies across factors.
456 exvector mul::get_free_indices(void) const
458 // Concatenate free indices of all factors
460 for (unsigned i=0; i<nops(); i++) {
461 exvector free_indices_of_factor = op(i).get_free_indices();
462 un.insert(un.end(), free_indices_of_factor.begin(), free_indices_of_factor.end());
465 // And remove the dummy indices
466 exvector free_indices, dummy_indices;
467 find_free_and_dummy(un, free_indices, dummy_indices);
// Same logic for noncommutative products.
471 exvector ncmul::get_free_indices(void) const
473 // Concatenate free indices of all factors
475 for (unsigned i=0; i<nops(); i++) {
476 exvector free_indices_of_factor = op(i).get_free_indices();
477 un.insert(un.end(), free_indices_of_factor.begin(), free_indices_of_factor.end());
480 // And remove the dummy indices
481 exvector free_indices, dummy_indices;
482 find_free_and_dummy(un, free_indices, dummy_indices);
// A power's free indices are those of its basis (exponents with indices
// are not handled here).
486 exvector power::get_free_indices(void) const
488 // Return free indices of basis
489 return basis.get_free_indices();
492 /** Rename dummy indices in an expression.
494 * @param e Expression to work on
495 * @param local_dummy_indices The set of dummy indices that appear in the
497 * @param global_dummy_indices The set of dummy indices that have appeared
498 * before and which we would like to use in "e", too. This gets updated
500 static ex rename_dummy_indices(const ex & e, exvector & global_dummy_indices, exvector & local_dummy_indices)
502 unsigned global_size = global_dummy_indices.size(),
503 local_size = local_dummy_indices.size();
505 // Any local dummy indices at all?
509 if (global_size < local_size) {
511 // More local indices than we encountered before, add the new ones
// Grow the global pool with local indices not already in it, stopping
// once the pool is big enough to cover all local indices.
513 int old_global_size = global_size;
514 int remaining = local_size - global_size;
515 exvector::const_iterator it = local_dummy_indices.begin(), itend = local_dummy_indices.end();
516 while (it != itend && remaining > 0) {
517 if (find_if(global_dummy_indices.begin(), global_dummy_indices.end(), bind2nd(ex_is_equal(), *it)) == global_dummy_indices.end()) {
518 global_dummy_indices.push_back(*it);
525 // If this is the first set of local indices, do nothing
526 if (old_global_size == 0)
529 GINAC_ASSERT(local_size <= global_size);
531 // Construct lists of index symbols
// op(0) of an idx is its value (the symbol); both lists are sorted with
// shaker_sort so set_difference below sees ordered ranges.
532 exlist local_syms, global_syms;
533 for (unsigned i=0; i<local_size; i++)
534 local_syms.push_back(local_dummy_indices[i].op(0));
535 shaker_sort(local_syms.begin(), local_syms.end(), ex_is_less(), ex_swap());
536 for (unsigned i=0; i<global_size; i++)
537 global_syms.push_back(global_dummy_indices[i].op(0));
538 shaker_sort(global_syms.begin(), global_syms.end(), ex_is_less(), ex_swap());
540 // Remove common indices
541 exlist local_uniq, global_uniq;
542 set_difference(local_syms.begin(), local_syms.end(), global_syms.begin(), global_syms.end(), std::back_insert_iterator<exlist>(local_uniq), ex_is_less());
543 set_difference(global_syms.begin(), global_syms.end(), local_syms.begin(), local_syms.end(), std::back_insert_iterator<exlist>(global_uniq), ex_is_less());
545 // Replace remaining non-common local index symbols by global ones
546 if (local_uniq.empty())
// Trim the replacement list to the same length as the pattern list, then
// substitute local symbols by their global counterparts in one subs call.
549 while (global_uniq.size() > local_uniq.size())
550 global_uniq.pop_back();
551 return e.subs(lst(local_uniq), lst(global_uniq));
555 /** Given a set of indices, extract those of class varidx. */
556 static void find_variant_indices(const exvector & v, exvector & variant_indices)
558 exvector::const_iterator it1, itend;
559 for (it1 = v.begin(), itend = v.end(); it1 != itend; ++it1) {
560 if (is_exactly_a<varidx>(*it1))
561 variant_indices.push_back(*it1);
565 /** Raise/lower dummy indices in a single indexed objects to canonicalize their
568 * @param e Object to work on
569 * @param variant_dummy_indices The set of indices that might need repositioning (will be changed by this function)
570 * @param moved_indices The set of indices that have been repositioned (will be changed by this function)
571 * @return true if 'e' was changed */
572 bool reposition_dummy_indices(ex & e, exvector & variant_dummy_indices, exvector & moved_indices)
574 bool something_changed = false;
576 // If a dummy index is encountered for the first time in the
577 // product, pull it up, otherwise, pull it down
578 exvector::const_iterator it2, it2start, it2end;
// it2 walks the indices of 'e' (skipping seq[0], the base expression).
579 for (it2start = ex_to<indexed>(e).seq.begin(), it2end = ex_to<indexed>(e).seq.end(), it2 = it2start + 1; it2 != it2end; ++it2) {
580 if (!is_exactly_a<varidx>(*it2))
// First encounter: if the index is covariant, toggle it (and its partner)
// to the canonical position.
583 exvector::iterator vit, vitend;
584 for (vit = variant_dummy_indices.begin(), vitend = variant_dummy_indices.end(); vit != vitend; ++vit) {
585 if (it2->op(0).is_equal(vit->op(0))) {
586 if (ex_to<varidx>(*it2).is_covariant()) {
// NOTE: the '==' expressions build relational objects (substitution
// equations) for subs(); this is GiNaC idiom, not a comparison.
588 *it2 == ex_to<varidx>(*it2).toggle_variance(),
589 ex_to<varidx>(*it2).toggle_variance() == *it2
591 something_changed = true;
// subs() produced a new expression, so all iterators into e's seq must be
// re-derived from the new object before continuing.
592 it2 = ex_to<indexed>(e).seq.begin() + (it2 - it2start);
593 it2start = ex_to<indexed>(e).seq.begin();
594 it2end = ex_to<indexed>(e).seq.end();
// Move the index from the "pending" set into the "already moved" set.
596 moved_indices.push_back(*vit);
597 variant_dummy_indices.erase(vit);
// Subsequent encounter: an index already moved must appear lowered here,
// so toggle it down if it is still contravariant.
602 for (vit = moved_indices.begin(), vitend = moved_indices.end(); vit != vitend; ++vit) {
603 if (it2->op(0).is_equal(vit->op(0))) {
604 if (ex_to<varidx>(*it2).is_contravariant()) {
605 e = e.subs(*it2 == ex_to<varidx>(*it2).toggle_variance());
606 something_changed = true;
607 it2 = ex_to<indexed>(e).seq.begin() + (it2 - it2start);
608 it2start = ex_to<indexed>(e).seq.begin();
609 it2end = ex_to<indexed>(e).seq.end();
618 return something_changed;
621 /* Ordering that only compares the base expressions of indexed objects. */
622 struct ex_base_is_less : public std::binary_function<ex, ex, bool> {
623 bool operator() (const ex &lh, const ex &rh) const
// For indexed objects compare op(0) (the base); other expressions compare
// as themselves. Used to sort products independently of index positions.
625 return (is_a<indexed>(lh) ? lh.op(0) : lh).compare(is_a<indexed>(rh) ? rh.op(0) : rh) < 0;
629 /** Simplify product of indexed expressions (commutative, noncommutative and
630 * simple squares), return list of free indices. */
631 ex simplify_indexed_product(const ex & e, exvector & free_indices, exvector & dummy_indices, const scalar_products & sp)
633 // Remember whether the product was commutative or noncommutative
634 // (because we chop it into factors and need to reassemble later)
635 bool non_commutative = is_ex_exactly_of_type(e, ncmul);
637 // Collect factors in an exvector, store squares twice
639 v.reserve(e.nops() * 2);
641 if (is_ex_exactly_of_type(e, power)) {
642 // We only get called for simple squares, split a^2 -> a*a
643 GINAC_ASSERT(e.op(1).is_equal(_ex2));
644 v.push_back(e.op(0));
645 v.push_back(e.op(0));
647 for (unsigned i=0; i<e.nops(); i++) {
649 if (is_ex_exactly_of_type(f, power) && f.op(1).is_equal(_ex2)) {
650 v.push_back(f.op(0));
651 v.push_back(f.op(0));
652 } else if (is_ex_exactly_of_type(f, ncmul)) {
653 // Noncommutative factor found, split it as well
654 non_commutative = true; // everything becomes noncommutative, ncmul will sort out the commutative factors later
655 for (unsigned j=0; j<f.nops(); j++)
656 v.push_back(f.op(j));
662 // Perform contractions
// Pairwise scan over all factors, trying to contract each indexed factor
// with every later indexed factor that shares a dummy index with it.
663 bool something_changed = false;
664 GINAC_ASSERT(v.size() > 1);
665 exvector::iterator it1, itend = v.end(), next_to_last = itend - 1;
666 for (it1 = v.begin(); it1 != next_to_last; it1++) {
669 if (!is_ex_of_type(*it1, indexed))
672 bool first_noncommutative = (it1->return_type() != return_types::commutative);
674 // Indexed factor found, get free indices and look for contraction
676 exvector free1, dummy1;
677 find_free_and_dummy(ex_to<indexed>(*it1).seq.begin() + 1, ex_to<indexed>(*it1).seq.end(), free1, dummy1);
679 exvector::iterator it2;
680 for (it2 = it1 + 1; it2 != itend; it2++) {
682 if (!is_ex_of_type(*it2, indexed))
685 bool second_noncommutative = (it2->return_type() != return_types::commutative);
687 // Find free indices of second factor and merge them with free
688 // indices of first factor
690 find_free_and_dummy(ex_to<indexed>(*it2).seq.begin() + 1, ex_to<indexed>(*it2).seq.end(), un, dummy1);
691 un.insert(un.end(), free1.begin(), free1.end());
693 // Check whether the two factors share dummy indices
694 exvector free, dummy;
695 find_free_and_dummy(un, free, dummy);
696 unsigned num_dummies = dummy.size();
697 if (num_dummies == 0)
700 // At least one dummy index, is it a defined scalar product?
701 bool contracted = false;
// User-supplied scalar products take precedence over the classes' own
// contraction rules.
703 if (sp.is_defined(*it1, *it2)) {
704 *it1 = sp.evaluate(*it1, *it2);
706 goto contraction_done;
710 // Try to contract the first one with the second one
711 contracted = ex_to<basic>(it1->op(0)).contract_with(it1, it2, v);
714 // That didn't work; maybe the second object knows how to
715 // contract itself with the first one
716 contracted = ex_to<basic>(it2->op(0)).contract_with(it2, it1, v);
// If a contraction produced a sum/product (or anything noncommutative),
// the factor list is no longer flat: rebuild and restart from the top.
720 if (first_noncommutative || second_noncommutative
721 || is_ex_exactly_of_type(*it1, add) || is_ex_exactly_of_type(*it2, add)
722 || is_ex_exactly_of_type(*it1, mul) || is_ex_exactly_of_type(*it2, mul)
723 || is_ex_exactly_of_type(*it1, ncmul) || is_ex_exactly_of_type(*it2, ncmul)) {
725 // One of the factors became a sum or product:
726 // re-expand expression and run again
727 // Non-commutative products are always re-expanded to give
728 // simplify_ncmul() the chance to re-order and canonicalize
730 ex r = (non_commutative ? ex(ncmul(v, true)) : ex(mul(v)));
731 return simplify_indexed(r, free_indices, dummy_indices, sp);
734 // Both objects may have new indices now or they might
735 // even not be indexed objects any more, so we have to
737 something_changed = true;
743 // Find free indices (concatenate them all and call find_free_and_dummy())
744 // and all dummy indices that appear
745 exvector un, individual_dummy_indices;
746 for (it1 = v.begin(), itend = v.end(); it1 != itend; ++it1) {
747 exvector free_indices_of_factor;
748 if (is_ex_of_type(*it1, indexed)) {
749 exvector dummy_indices_of_factor;
750 find_free_and_dummy(ex_to<indexed>(*it1).seq.begin() + 1, ex_to<indexed>(*it1).seq.end(), free_indices_of_factor, dummy_indices_of_factor);
751 individual_dummy_indices.insert(individual_dummy_indices.end(), dummy_indices_of_factor.begin(), dummy_indices_of_factor.end());
753 free_indices_of_factor = it1->get_free_indices();
754 un.insert(un.end(), free_indices_of_factor.begin(), free_indices_of_factor.end());
756 exvector local_dummy_indices;
757 find_free_and_dummy(un, free_indices, local_dummy_indices);
758 local_dummy_indices.insert(local_dummy_indices.end(), individual_dummy_indices.begin(), individual_dummy_indices.end());
760 // Filter out the dummy indices with variance
761 exvector variant_dummy_indices;
762 find_variant_indices(local_dummy_indices, variant_dummy_indices);
764 // Any indices with variance present at all?
765 if (!variant_dummy_indices.empty()) {
767 // Yes, bring the product into a canonical order that only depends on
768 // the base expressions of indexed objects
769 if (!non_commutative)
770 std::sort(v.begin(), v.end(), ex_base_is_less());
772 exvector moved_indices;
774 // Iterate over all indexed objects in the product
775 for (it1 = v.begin(), itend = v.end(); it1 != itend; ++it1) {
776 if (!is_ex_of_type(*it1, indexed))
779 if (reposition_dummy_indices(*it1, variant_dummy_indices, moved_indices))
780 something_changed = true;
785 if (something_changed)
786 r = non_commutative ? ex(ncmul(v, true)) : ex(mul(v));
790 // The result should be symmetric with respect to exchange of dummy
791 // indices, so if the symmetrization vanishes, the whole expression is
792 // zero. This detects things like eps.i.j.k * p.j * p.k = 0.
793 if (local_dummy_indices.size() >= 2) {
// NOTE(review): int loop index vs. size_t size() — harmless here but a
// signed/unsigned comparison warning on modern compilers.
795 for (int i=0; i<local_dummy_indices.size(); i++)
796 dummy_syms.append(local_dummy_indices[i].op(0));
797 if (r.symmetrize(dummy_syms).is_zero()) {
798 free_indices.clear();
803 // Dummy index renaming
804 r = rename_dummy_indices(r, dummy_indices, local_dummy_indices);
806 // Product of indexed object with a scalar?
// Canonical form: hand the numeric factor to the base object's class so
// e.g. matrix classes can absorb it.
807 if (is_ex_exactly_of_type(r, mul) && r.nops() == 2
808 && is_ex_exactly_of_type(r.op(1), numeric) && is_ex_of_type(r.op(0), indexed))
809 return ex_to<basic>(r.op(0).op(0)).scalar_mul_indexed(r.op(0), ex_to<numeric>(r.op(1)));
814 /** This structure stores the original and symmetrized versions of terms
815 * obtained during the simplification of sums. */
// Ctor: split both the original term and its symmetrized version into a
// normalized term times a numeric coefficient (GiNaC's mul keeps the
// numeric coefficient as the last operand).
821 symminfo(const ex & symmterm_, const ex & orig_)
823 if (is_exactly_a<mul>(orig_) && is_exactly_a<numeric>(orig_.op(orig_.nops()-1))) {
824 ex tmp = orig_.op(orig_.nops()-1);
829 if (is_exactly_a<mul>(symmterm_) && is_exactly_a<numeric>(symmterm_.op(symmterm_.nops()-1))) {
830 coeff = symmterm_.op(symmterm_.nops()-1);
831 symmterm = symmterm_ / coeff;
834 symmterm = symmterm_;
// Explicit copy ctor and assignment (pre-Rule-of-Zero style).
838 symminfo(const symminfo & other)
840 symmterm = other.symmterm;
845 const symminfo & operator=(const symminfo & other)
847 if (this != &other) {
848 symmterm = other.symmterm;
// Strict weak ordering for sorting symminfo objects: primarily by the
// symmetrized term, then by the original term, then by the coefficient.
860 class symminfo_is_less {
862 bool operator() (const symminfo & si1, const symminfo & si2)
864 int comp = si1.symmterm.compare(si2.symmterm);
865 if (comp < 0) return true;
866 if (comp > 0) return false;
867 comp = si1.orig.compare(si2.orig);
868 if (comp < 0) return true;
869 if (comp > 0) return false;
870 comp = si1.coeff.compare(si2.coeff);
871 if (comp < 0) return true;
876 /** Simplify indexed expression, return list of free indices. */
877 ex simplify_indexed(const ex & e, exvector & free_indices, exvector & dummy_indices, const scalar_products & sp)
879 // Expand the expression
880 ex e_expanded = e.expand();
882 // Simplification of single indexed object: just find the free indices
883 // and perform dummy index renaming/repositioning
884 if (is_ex_of_type(e_expanded, indexed)) {
886 // Find the dummy indices
887 const indexed &i = ex_to<indexed>(e_expanded);
888 exvector local_dummy_indices;
889 find_free_and_dummy(i.seq.begin() + 1, i.seq.end(), free_indices, local_dummy_indices);
891 // Filter out the dummy indices with variance
892 exvector variant_dummy_indices;
893 find_variant_indices(local_dummy_indices, variant_dummy_indices);
895 // Any indices with variance present at all?
896 if (!variant_dummy_indices.empty()) {
898 // Yes, reposition them
899 exvector moved_indices;
900 reposition_dummy_indices(e_expanded, variant_dummy_indices, moved_indices);
903 // Rename the dummy indices
904 return rename_dummy_indices(e_expanded, dummy_indices, local_dummy_indices);
907 // Simplification of sum = sum of simplifications, check consistency of
908 // free indices in each term
909 if (is_ex_exactly_of_type(e_expanded, add)) {
912 free_indices.clear();
914 for (unsigned i=0; i<e_expanded.nops(); i++) {
915 exvector free_indices_of_term;
// Recurse into each term; zero terms are skipped entirely.
916 ex term = simplify_indexed(e_expanded.op(i), free_indices_of_term, dummy_indices, sp);
917 if (!term.is_zero()) {
919 free_indices = free_indices_of_term;
923 if (!indices_consistent(free_indices, free_indices_of_term))
924 throw (std::runtime_error("simplify_indexed: inconsistent indices in sum"));
// Give the base object's class a chance to combine two indexed terms
// (e.g. matrix addition) instead of building a plain sum.
925 if (is_ex_of_type(sum, indexed) && is_ex_of_type(term, indexed))
926 sum = ex_to<basic>(sum.op(0)).add_indexed(sum, term);
933 // If the sum turns out to be zero, we are finished
935 free_indices.clear();
939 // Symmetrizing over the dummy indices may cancel terms
940 int num_terms_orig = (is_exactly_a<add>(sum) ? sum.nops() : 1);
941 if (num_terms_orig > 1 && dummy_indices.size() >= 2) {
943 // Construct list of all dummy index symbols
945 for (int i=0; i<dummy_indices.size(); i++)
946 dummy_syms.append(dummy_indices[i].op(0));
948 // Symmetrize each term separately and store the resulting
949 // terms in a list of symminfo structures
950 std::vector<symminfo> v;
951 for (int i=0; i<sum.nops(); i++) {
952 ex sum_symm = sum.op(i).symmetrize(dummy_syms);
953 if (is_exactly_a<add>(sum_symm))
954 for (int j=0; j<sum_symm.nops(); j++)
955 v.push_back(symminfo(sum_symm.op(j), sum.op(i)));
957 v.push_back(symminfo(sum_symm, sum.op(i)));
960 // Now add up all the unsymmetrized versions of the terms that
961 // did not cancel out in the symmetrization
// Sort so equal symmetrized terms are adjacent, then for each run of
// equal symmterms re-emit the original terms scaled by the coefficients.
963 std::sort(v.begin(), v.end(), symminfo_is_less());
964 for (std::vector<symminfo>::iterator i=v.begin(); i!=v.end(); ) {
965 std::vector<symminfo>::iterator j = i;
966 for (j++; j!=v.end() && i->symmterm == j->symmterm; j++) ;
967 for (std::vector<symminfo>::iterator k=i; k!=j; k++)
968 result.push_back((k->coeff)*(i->orig));
971 ex sum_symm = (new add(result))->setflag(status_flags::dynallocated);
972 if (sum_symm.is_zero())
973 free_indices.clear();
980 // Simplification of products
// mul, ncmul, and simple squares of a single indexed object all go
// through simplify_indexed_product().
981 if (is_ex_exactly_of_type(e_expanded, mul)
982 || is_ex_exactly_of_type(e_expanded, ncmul)
983 || (is_ex_exactly_of_type(e_expanded, power) && is_ex_of_type(e_expanded.op(0), indexed) && e_expanded.op(1).is_equal(_ex2)))
984 return simplify_indexed_product(e_expanded, free_indices, dummy_indices, sp);
986 // Cannot do anything
987 free_indices.clear();
991 /** Simplify/canonicalize expression containing indexed objects. This
992 * performs contraction of dummy indices where possible and checks whether
993 * the free indices in sums are consistent.
995 * @return simplified expression */
996 ex ex::simplify_indexed(void) const
998 exvector free_indices, dummy_indices;
1000 return GiNaC::simplify_indexed(*this, free_indices, dummy_indices, sp);
1003 /** Simplify/canonicalize expression containing indexed objects. This
1004 * performs contraction of dummy indices where possible, checks whether
1005 * the free indices in sums are consistent, and automatically replaces
1006 * scalar products by known values if desired.
1008 * @param sp Scalar products to be replaced automatically
1009 * @return simplified expression */
1010 ex ex::simplify_indexed(const scalar_products & sp) const
1012 exvector free_indices, dummy_indices;
1013 return GiNaC::simplify_indexed(*this, free_indices, dummy_indices, sp);
1016 /** Symmetrize expression over its free indices. */
1017 ex ex::symmetrize(void) const
1019 return GiNaC::symmetrize(*this, get_free_indices());
1022 /** Antisymmetrize expression over its free indices. */
1023 ex ex::antisymmetrize(void) const
1025 return GiNaC::antisymmetrize(*this, get_free_indices());
1028 /** Symmetrize expression by cyclic permutation over its free indices. */
1029 ex ex::symmetrize_cyclic(void) const
1031 return GiNaC::symmetrize_cyclic(*this, get_free_indices());
// Register the scalar product v1.v2 = sp in the map (key is canonicalized
// by make_key, so the order of v1/v2 does not matter).
1038 void scalar_products::add(const ex & v1, const ex & v2, const ex & sp)
1040 spm[make_key(v1, v2)] = sp;
// Convenience: register the pairwise products of all vectors in a list.
1043 void scalar_products::add_vectors(const lst & l)
1045 // Add all possible pairs of products
1046 unsigned num = l.nops();
1047 for (unsigned i=0; i<num; i++) {
1049 for (unsigned j=0; j<num; j++) {
// Remove all stored scalar products.
1056 void scalar_products::clear(void)
1061 /** Check whether scalar product pair is defined. */
1062 bool scalar_products::is_defined(const ex & v1, const ex & v2) const
1064 return spm.find(make_key(v1, v2)) != spm.end();
1067 /** Return value of defined scalar product pair. */
// Precondition: is_defined(v1, v2) must hold; otherwise find() returns
// end() and dereferencing it is undefined behavior.
1068 ex scalar_products::evaluate(const ex & v1, const ex & v2) const
1070 return spm.find(make_key(v1, v2))->second;
// Dump the stored product table to stderr (debugging aid only).
1073 void scalar_products::debugprint(void) const
1075 std::cerr << "map size=" << spm.size() << std::endl;
1076 spmap::const_iterator i = spm.begin(), end = spm.end();
1078 const spmapkey & k = i->first;
1079 std::cerr << "item key=(" << k.first << "," << k.second;
1080 std::cerr << "), value=" << i->second << std::endl;
1085 /** Make key from object pair. */
1086 spmapkey scalar_products::make_key(const ex & v1, const ex & v2)
1088 // If indexed, extract base objects
1089 ex s1 = is_ex_of_type(v1, indexed) ? v1.op(0) : v1;
1090 ex s2 = is_ex_of_type(v2, indexed) ? v2.op(0) : v2;
1092 // Enforce canonical order in pair
// Sorting the pair makes (a,b) and (b,a) hit the same map entry.
1093 if (s1.compare(s2) > 0)
1094 return spmapkey(s2, s1);
1096 return spmapkey(s1, s2);
1099 } // namespace GiNaC